// SPDX-License-Identifier: GPL-2.0
/*
* dwc3-keystone.c - Keystone Specific Glue layer
*
* Copyright (C) 2010-2013 Texas Instruments Incorporated - https://www.ti.com
*
* Author: WingMan Kwok <[email protected]>
*/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/phy/phy.h>
#include <linux/pm_runtime.h>
/* USBSS register offsets */
#define USBSS_REVISION 0x0000
#define USBSS_SYSCONFIG 0x0010
#define USBSS_IRQ_EOI 0x0018
#define USBSS_IRQSTATUS_RAW_0 0x0020
#define USBSS_IRQSTATUS_0 0x0024
#define USBSS_IRQENABLE_SET_0 0x0028
#define USBSS_IRQENABLE_CLR_0 0x002c
/* IRQ register bits */
#define USBSS_IRQ_EOI_LINE(n) BIT(n)
#define USBSS_IRQ_EVENT_ST BIT(0)
#define USBSS_IRQ_COREIRQ_EN BIT(0)
#define USBSS_IRQ_COREIRQ_CLR BIT(0)
struct dwc3_keystone {
struct device *dev;
void __iomem *usbss;
struct phy *usb3_phy;
};
static inline u32 kdwc3_readl(void __iomem *base, u32 offset)
{
return readl(base + offset);
}
static inline void kdwc3_writel(void __iomem *base, u32 offset, u32 value)
{
writel(value, base + offset);
}
static void kdwc3_enable_irqs(struct dwc3_keystone *kdwc)
{
u32 val;
val = kdwc3_readl(kdwc->usbss, USBSS_IRQENABLE_SET_0);
val |= USBSS_IRQ_COREIRQ_EN;
kdwc3_writel(kdwc->usbss, USBSS_IRQENABLE_SET_0, val);
}
static void kdwc3_disable_irqs(struct dwc3_keystone *kdwc)
{
u32 val;
val = kdwc3_readl(kdwc->usbss, USBSS_IRQENABLE_SET_0);
val &= ~USBSS_IRQ_COREIRQ_EN;
kdwc3_writel(kdwc->usbss, USBSS_IRQENABLE_SET_0, val);
}
static irqreturn_t dwc3_keystone_interrupt(int irq, void *_kdwc)
{
struct dwc3_keystone *kdwc = _kdwc;
kdwc3_writel(kdwc->usbss, USBSS_IRQENABLE_CLR_0, USBSS_IRQ_COREIRQ_CLR);
kdwc3_writel(kdwc->usbss, USBSS_IRQSTATUS_0, USBSS_IRQ_EVENT_ST);
kdwc3_writel(kdwc->usbss, USBSS_IRQENABLE_SET_0, USBSS_IRQ_COREIRQ_EN);
kdwc3_writel(kdwc->usbss, USBSS_IRQ_EOI, USBSS_IRQ_EOI_LINE(0));
return IRQ_HANDLED;
}
static int kdwc3_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct device_node *node = pdev->dev.of_node;
struct dwc3_keystone *kdwc;
int error, irq;
kdwc = devm_kzalloc(dev, sizeof(*kdwc), GFP_KERNEL);
if (!kdwc)
return -ENOMEM;
platform_set_drvdata(pdev, kdwc);
kdwc->dev = dev;
kdwc->usbss = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(kdwc->usbss))
return PTR_ERR(kdwc->usbss);
/* PSC dependency on AM65 needs SERDES0 to be powered before USB0 */
kdwc->usb3_phy = devm_phy_optional_get(dev, "usb3-phy");
if (IS_ERR(kdwc->usb3_phy))
return dev_err_probe(dev, PTR_ERR(kdwc->usb3_phy), "couldn't get usb3 phy\n");
phy_pm_runtime_get_sync(kdwc->usb3_phy);
error = phy_reset(kdwc->usb3_phy);
if (error < 0) {
dev_err(dev, "usb3 phy reset failed: %d\n", error);
return error;
}
error = phy_init(kdwc->usb3_phy);
if (error < 0) {
dev_err(dev, "usb3 phy init failed: %d\n", error);
return error;
}
error = phy_power_on(kdwc->usb3_phy);
if (error < 0) {
dev_err(dev, "usb3 phy power on failed: %d\n", error);
phy_exit(kdwc->usb3_phy);
return error;
}
pm_runtime_enable(kdwc->dev);
error = pm_runtime_get_sync(kdwc->dev);
if (error < 0) {
dev_err(kdwc->dev, "pm_runtime_get_sync failed, error %d\n",
error);
goto err_irq;
}
/* IRQ processing not required currently for AM65 */
if (of_device_is_compatible(node, "ti,am654-dwc3"))
goto skip_irq;
irq = platform_get_irq(pdev, 0);
if (irq < 0) {
error = irq;
goto err_irq;
}
error = devm_request_irq(dev, irq, dwc3_keystone_interrupt, IRQF_SHARED,
dev_name(dev), kdwc);
if (error) {
dev_err(dev, "failed to request IRQ #%d --> %d\n",
irq, error);
goto err_irq;
}
kdwc3_enable_irqs(kdwc);
skip_irq:
error = of_platform_populate(node, NULL, NULL, dev);
if (error) {
dev_err(&pdev->dev, "failed to create dwc3 core\n");
goto err_core;
}
return 0;
err_core:
kdwc3_disable_irqs(kdwc);
err_irq:
pm_runtime_put_sync(kdwc->dev);
pm_runtime_disable(kdwc->dev);
phy_power_off(kdwc->usb3_phy);
phy_exit(kdwc->usb3_phy);
phy_pm_runtime_put_sync(kdwc->usb3_phy);
return error;
}
static int kdwc3_remove_core(struct device *dev, void *c)
{
struct platform_device *pdev = to_platform_device(dev);
platform_device_unregister(pdev);
return 0;
}
static void kdwc3_remove(struct platform_device *pdev)
{
struct dwc3_keystone *kdwc = platform_get_drvdata(pdev);
struct device_node *node = pdev->dev.of_node;
if (!of_device_is_compatible(node, "ti,am654-dwc3"))
kdwc3_disable_irqs(kdwc);
device_for_each_child(&pdev->dev, NULL, kdwc3_remove_core);
pm_runtime_put_sync(kdwc->dev);
pm_runtime_disable(kdwc->dev);
phy_power_off(kdwc->usb3_phy);
phy_exit(kdwc->usb3_phy);
phy_pm_runtime_put_sync(kdwc->usb3_phy);
}
static const struct of_device_id kdwc3_of_match[] = {
{ .compatible = "ti,keystone-dwc3", },
{ .compatible = "ti,am654-dwc3" },
{},
};
MODULE_DEVICE_TABLE(of, kdwc3_of_match);
static struct platform_driver kdwc3_driver = {
.probe = kdwc3_probe,
.remove_new = kdwc3_remove,
.driver = {
.name = "keystone-dwc3",
.of_match_table = kdwc3_of_match,
},
};
module_platform_driver(kdwc3_driver);
MODULE_ALIAS("platform:keystone-dwc3");
MODULE_AUTHOR("WingMan Kwok <[email protected]>");
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("DesignWare USB3 KEYSTONE Glue Layer");
| linux-master | drivers/usb/dwc3/dwc3-keystone.c |
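The Keystone glue above boils down to three things: map the USBSS register window, mask/unmask the single core interrupt with read-modify-write accesses, and let of_platform_populate() create the DWC3 core child. Below is a minimal stand-alone sketch (user-space illustration only, not kernel code; BIT() is redefined locally, and fake_readl()/fake_writel() plus the array-backed register window are invented stand-ins for the ioremapped USBSS registers) of the enable/disable pattern used by kdwc3_enable_irqs() and kdwc3_disable_irqs().

#include <stdint.h>
#include <stdio.h>

#define BIT(n)			(1u << (n))
#define USBSS_IRQENABLE_SET_0	0x0028
#define USBSS_IRQ_COREIRQ_EN	BIT(0)

/* fake USBSS register window standing in for the ioremapped MMIO region */
static uint32_t regs[0x40];

static uint32_t fake_readl(uint32_t offset)
{
	return regs[offset / 4];
}

static void fake_writel(uint32_t offset, uint32_t value)
{
	regs[offset / 4] = value;
}

int main(void)
{
	uint32_t val;

	/* enable: set only the core IRQ enable bit, preserve the rest */
	val = fake_readl(USBSS_IRQENABLE_SET_0);
	val |= USBSS_IRQ_COREIRQ_EN;
	fake_writel(USBSS_IRQENABLE_SET_0, val);
	printf("after enable:  0x%08x\n",
	       (unsigned int)fake_readl(USBSS_IRQENABLE_SET_0));

	/* disable: clear only the core IRQ enable bit */
	val = fake_readl(USBSS_IRQENABLE_SET_0);
	val &= ~USBSS_IRQ_COREIRQ_EN;
	fake_writel(USBSS_IRQENABLE_SET_0, val);
	printf("after disable: 0x%08x\n",
	       (unsigned int)fake_readl(USBSS_IRQENABLE_SET_0));

	return 0;
}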
// SPDX-License-Identifier: GPL-2.0
/*
* host.c - DesignWare USB3 DRD Controller Host Glue
*
* Copyright (C) 2011 Texas Instruments Incorporated - https://www.ti.com
*
* Authors: Felipe Balbi <[email protected]>,
* Sebastian Andrzej Siewior <[email protected]>
*/
#include <linux/irq.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include "core.h"
static void dwc3_host_fill_xhci_irq_res(struct dwc3 *dwc,
int irq, char *name)
{
struct platform_device *pdev = to_platform_device(dwc->dev);
struct device_node *np = dev_of_node(&pdev->dev);
dwc->xhci_resources[1].start = irq;
dwc->xhci_resources[1].end = irq;
dwc->xhci_resources[1].flags = IORESOURCE_IRQ | irq_get_trigger_type(irq);
if (!name && np)
dwc->xhci_resources[1].name = of_node_full_name(pdev->dev.of_node);
else
dwc->xhci_resources[1].name = name;
}
static int dwc3_host_get_irq(struct dwc3 *dwc)
{
struct platform_device *dwc3_pdev = to_platform_device(dwc->dev);
int irq;
irq = platform_get_irq_byname_optional(dwc3_pdev, "host");
if (irq > 0) {
dwc3_host_fill_xhci_irq_res(dwc, irq, "host");
goto out;
}
if (irq == -EPROBE_DEFER)
goto out;
irq = platform_get_irq_byname_optional(dwc3_pdev, "dwc_usb3");
if (irq > 0) {
dwc3_host_fill_xhci_irq_res(dwc, irq, "dwc_usb3");
goto out;
}
if (irq == -EPROBE_DEFER)
goto out;
irq = platform_get_irq(dwc3_pdev, 0);
if (irq > 0)
dwc3_host_fill_xhci_irq_res(dwc, irq, NULL);
out:
return irq;
}
int dwc3_host_init(struct dwc3 *dwc)
{
struct property_entry props[4];
struct platform_device *xhci;
int ret, irq;
int prop_idx = 0;
irq = dwc3_host_get_irq(dwc);
if (irq < 0)
return irq;
xhci = platform_device_alloc("xhci-hcd", PLATFORM_DEVID_AUTO);
if (!xhci) {
dev_err(dwc->dev, "couldn't allocate xHCI device\n");
return -ENOMEM;
}
xhci->dev.parent = dwc->dev;
dwc->xhci = xhci;
ret = platform_device_add_resources(xhci, dwc->xhci_resources,
DWC3_XHCI_RESOURCES_NUM);
if (ret) {
dev_err(dwc->dev, "couldn't add resources to xHCI device\n");
goto err;
}
memset(props, 0, sizeof(struct property_entry) * ARRAY_SIZE(props));
if (dwc->usb3_lpm_capable)
props[prop_idx++] = PROPERTY_ENTRY_BOOL("usb3-lpm-capable");
if (dwc->usb2_lpm_disable)
props[prop_idx++] = PROPERTY_ENTRY_BOOL("usb2-lpm-disable");
/*
* WORKAROUND: dwc3 revisions <= 3.00a have a limitation
* where the Port Disable command doesn't work.
*
* The suggested workaround is to avoid Port Disable
* completely.
*
* The following flag tells xHCI to do just that.
*/
if (DWC3_VER_IS_WITHIN(DWC3, ANY, 300A))
props[prop_idx++] = PROPERTY_ENTRY_BOOL("quirk-broken-port-ped");
if (prop_idx) {
ret = device_create_managed_software_node(&xhci->dev, props, NULL);
if (ret) {
dev_err(dwc->dev, "failed to add properties to xHCI\n");
goto err;
}
}
ret = platform_device_add(xhci);
if (ret) {
dev_err(dwc->dev, "failed to register xHCI device\n");
goto err;
}
return 0;
err:
platform_device_put(xhci);
return ret;
}
void dwc3_host_exit(struct dwc3 *dwc)
{
platform_device_unregister(dwc->xhci);
dwc->xhci = NULL;
}
| linux-master | drivers/usb/dwc3/host.c |
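dwc3_host_init() above follows the standard pattern for spawning an in-kernel child device: allocate a platform device, hand it the parent's resources, attach boolean tuning properties through a managed software node, and finally register it. The condensed sketch below (illustration only, not an additional kernel file; example_spawn_child(), "my-child" and the "example-quirk" property are invented names) shows the same sequence with error handling trimmed to the essentials.

#include <linux/platform_device.h>
#include <linux/property.h>

static int example_spawn_child(struct device *parent,
			       struct resource *res, unsigned int nres)
{
	struct property_entry props[] = {
		PROPERTY_ENTRY_BOOL("example-quirk"),
		{ }
	};
	struct platform_device *child;
	int ret;

	child = platform_device_alloc("my-child", PLATFORM_DEVID_AUTO);
	if (!child)
		return -ENOMEM;

	child->dev.parent = parent;

	/* give the child the same MMIO/IRQ resources as the parent */
	ret = platform_device_add_resources(child, res, nres);
	if (ret)
		goto err_put;

	/* attach the properties via a device-managed software node */
	ret = device_create_managed_software_node(&child->dev, props, NULL);
	if (ret)
		goto err_put;

	ret = platform_device_add(child);
	if (ret)
		goto err_put;

	return 0;

err_put:
	platform_device_put(child);
	return ret;
}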
// SPDX-License-Identifier: GPL-2.0
/*
* dwc3-omap.c - OMAP Specific Glue layer
*
* Copyright (C) 2010-2011 Texas Instruments Incorporated - https://www.ti.com
*
* Authors: Felipe Balbi <[email protected]>,
* Sebastian Andrzej Siewior <[email protected]>
*/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/dma-mapping.h>
#include <linux/ioport.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/extcon.h>
#include <linux/regulator/consumer.h>
#include <linux/usb/otg.h>
/*
* All these registers belong to OMAP's Wrapper around the
* DesignWare USB3 Core.
*/
#define USBOTGSS_REVISION 0x0000
#define USBOTGSS_SYSCONFIG 0x0010
#define USBOTGSS_IRQ_EOI 0x0020
#define USBOTGSS_EOI_OFFSET 0x0008
#define USBOTGSS_IRQSTATUS_RAW_0 0x0024
#define USBOTGSS_IRQSTATUS_0 0x0028
#define USBOTGSS_IRQENABLE_SET_0 0x002c
#define USBOTGSS_IRQENABLE_CLR_0 0x0030
#define USBOTGSS_IRQ0_OFFSET 0x0004
#define USBOTGSS_IRQSTATUS_RAW_1 0x0030
#define USBOTGSS_IRQSTATUS_1 0x0034
#define USBOTGSS_IRQENABLE_SET_1 0x0038
#define USBOTGSS_IRQENABLE_CLR_1 0x003c
#define USBOTGSS_IRQSTATUS_RAW_2 0x0040
#define USBOTGSS_IRQSTATUS_2 0x0044
#define USBOTGSS_IRQENABLE_SET_2 0x0048
#define USBOTGSS_IRQENABLE_CLR_2 0x004c
#define USBOTGSS_IRQSTATUS_RAW_3 0x0050
#define USBOTGSS_IRQSTATUS_3 0x0054
#define USBOTGSS_IRQENABLE_SET_3 0x0058
#define USBOTGSS_IRQENABLE_CLR_3 0x005c
#define USBOTGSS_IRQSTATUS_EOI_MISC 0x0030
#define USBOTGSS_IRQSTATUS_RAW_MISC 0x0034
#define USBOTGSS_IRQSTATUS_MISC 0x0038
#define USBOTGSS_IRQENABLE_SET_MISC 0x003c
#define USBOTGSS_IRQENABLE_CLR_MISC 0x0040
#define USBOTGSS_IRQMISC_OFFSET 0x03fc
#define USBOTGSS_UTMI_OTG_STATUS 0x0080
#define USBOTGSS_UTMI_OTG_CTRL 0x0084
#define USBOTGSS_UTMI_OTG_OFFSET 0x0480
#define USBOTGSS_TXFIFO_DEPTH 0x0508
#define USBOTGSS_RXFIFO_DEPTH 0x050c
#define USBOTGSS_MMRAM_OFFSET 0x0100
#define USBOTGSS_FLADJ 0x0104
#define USBOTGSS_DEBUG_CFG 0x0108
#define USBOTGSS_DEBUG_DATA 0x010c
#define USBOTGSS_DEV_EBC_EN 0x0110
#define USBOTGSS_DEBUG_OFFSET 0x0600
/* SYSCONFIG REGISTER */
#define USBOTGSS_SYSCONFIG_DMADISABLE BIT(16)
/* IRQ_EOI REGISTER */
#define USBOTGSS_IRQ_EOI_LINE_NUMBER BIT(0)
/* IRQS0 BITS */
#define USBOTGSS_IRQO_COREIRQ_ST BIT(0)
/* IRQMISC BITS */
#define USBOTGSS_IRQMISC_DMADISABLECLR BIT(17)
#define USBOTGSS_IRQMISC_OEVT BIT(16)
#define USBOTGSS_IRQMISC_DRVVBUS_RISE BIT(13)
#define USBOTGSS_IRQMISC_CHRGVBUS_RISE BIT(12)
#define USBOTGSS_IRQMISC_DISCHRGVBUS_RISE BIT(11)
#define USBOTGSS_IRQMISC_IDPULLUP_RISE BIT(8)
#define USBOTGSS_IRQMISC_DRVVBUS_FALL BIT(5)
#define USBOTGSS_IRQMISC_CHRGVBUS_FALL BIT(4)
#define USBOTGSS_IRQMISC_DISCHRGVBUS_FALL BIT(3)
#define USBOTGSS_IRQMISC_IDPULLUP_FALL BIT(0)
/* UTMI_OTG_STATUS REGISTER */
#define USBOTGSS_UTMI_OTG_STATUS_DRVVBUS BIT(5)
#define USBOTGSS_UTMI_OTG_STATUS_CHRGVBUS BIT(4)
#define USBOTGSS_UTMI_OTG_STATUS_DISCHRGVBUS BIT(3)
#define USBOTGSS_UTMI_OTG_STATUS_IDPULLUP BIT(0)
/* UTMI_OTG_CTRL REGISTER */
#define USBOTGSS_UTMI_OTG_CTRL_SW_MODE BIT(31)
#define USBOTGSS_UTMI_OTG_CTRL_POWERPRESENT BIT(9)
#define USBOTGSS_UTMI_OTG_CTRL_TXBITSTUFFENABLE BIT(8)
#define USBOTGSS_UTMI_OTG_CTRL_IDDIG BIT(4)
#define USBOTGSS_UTMI_OTG_CTRL_SESSEND BIT(3)
#define USBOTGSS_UTMI_OTG_CTRL_SESSVALID BIT(2)
#define USBOTGSS_UTMI_OTG_CTRL_VBUSVALID BIT(1)
enum dwc3_omap_utmi_mode {
DWC3_OMAP_UTMI_MODE_UNKNOWN = 0,
DWC3_OMAP_UTMI_MODE_HW,
DWC3_OMAP_UTMI_MODE_SW,
};
struct dwc3_omap {
struct device *dev;
int irq;
void __iomem *base;
u32 utmi_otg_ctrl;
u32 utmi_otg_offset;
u32 irqmisc_offset;
u32 irq_eoi_offset;
u32 debug_offset;
u32 irq0_offset;
struct extcon_dev *edev;
struct notifier_block vbus_nb;
struct notifier_block id_nb;
struct regulator *vbus_reg;
};
enum omap_dwc3_vbus_id_status {
OMAP_DWC3_ID_FLOAT,
OMAP_DWC3_ID_GROUND,
OMAP_DWC3_VBUS_OFF,
OMAP_DWC3_VBUS_VALID,
};
static inline u32 dwc3_omap_readl(void __iomem *base, u32 offset)
{
return readl(base + offset);
}
static inline void dwc3_omap_writel(void __iomem *base, u32 offset, u32 value)
{
writel(value, base + offset);
}
static u32 dwc3_omap_read_utmi_ctrl(struct dwc3_omap *omap)
{
return dwc3_omap_readl(omap->base, USBOTGSS_UTMI_OTG_CTRL +
omap->utmi_otg_offset);
}
static void dwc3_omap_write_utmi_ctrl(struct dwc3_omap *omap, u32 value)
{
dwc3_omap_writel(omap->base, USBOTGSS_UTMI_OTG_CTRL +
omap->utmi_otg_offset, value);
}
static u32 dwc3_omap_read_irq0_status(struct dwc3_omap *omap)
{
return dwc3_omap_readl(omap->base, USBOTGSS_IRQSTATUS_RAW_0 -
omap->irq0_offset);
}
static void dwc3_omap_write_irq0_status(struct dwc3_omap *omap, u32 value)
{
dwc3_omap_writel(omap->base, USBOTGSS_IRQSTATUS_0 -
omap->irq0_offset, value);
}
static u32 dwc3_omap_read_irqmisc_status(struct dwc3_omap *omap)
{
return dwc3_omap_readl(omap->base, USBOTGSS_IRQSTATUS_RAW_MISC +
omap->irqmisc_offset);
}
static void dwc3_omap_write_irqmisc_status(struct dwc3_omap *omap, u32 value)
{
dwc3_omap_writel(omap->base, USBOTGSS_IRQSTATUS_MISC +
omap->irqmisc_offset, value);
}
static void dwc3_omap_write_irqmisc_set(struct dwc3_omap *omap, u32 value)
{
dwc3_omap_writel(omap->base, USBOTGSS_IRQENABLE_SET_MISC +
omap->irqmisc_offset, value);
}
static void dwc3_omap_write_irq0_set(struct dwc3_omap *omap, u32 value)
{
dwc3_omap_writel(omap->base, USBOTGSS_IRQENABLE_SET_0 -
omap->irq0_offset, value);
}
static void dwc3_omap_write_irqmisc_clr(struct dwc3_omap *omap, u32 value)
{
dwc3_omap_writel(omap->base, USBOTGSS_IRQENABLE_CLR_MISC +
omap->irqmisc_offset, value);
}
static void dwc3_omap_write_irq0_clr(struct dwc3_omap *omap, u32 value)
{
dwc3_omap_writel(omap->base, USBOTGSS_IRQENABLE_CLR_0 -
omap->irq0_offset, value);
}
static void dwc3_omap_set_mailbox(struct dwc3_omap *omap,
enum omap_dwc3_vbus_id_status status)
{
int ret;
u32 val;
switch (status) {
case OMAP_DWC3_ID_GROUND:
if (omap->vbus_reg) {
ret = regulator_enable(omap->vbus_reg);
if (ret) {
dev_err(omap->dev, "regulator enable failed\n");
return;
}
}
val = dwc3_omap_read_utmi_ctrl(omap);
val &= ~USBOTGSS_UTMI_OTG_CTRL_IDDIG;
dwc3_omap_write_utmi_ctrl(omap, val);
break;
case OMAP_DWC3_VBUS_VALID:
val = dwc3_omap_read_utmi_ctrl(omap);
val &= ~USBOTGSS_UTMI_OTG_CTRL_SESSEND;
val |= USBOTGSS_UTMI_OTG_CTRL_VBUSVALID
| USBOTGSS_UTMI_OTG_CTRL_SESSVALID;
dwc3_omap_write_utmi_ctrl(omap, val);
break;
case OMAP_DWC3_ID_FLOAT:
if (omap->vbus_reg && regulator_is_enabled(omap->vbus_reg))
regulator_disable(omap->vbus_reg);
val = dwc3_omap_read_utmi_ctrl(omap);
val |= USBOTGSS_UTMI_OTG_CTRL_IDDIG;
dwc3_omap_write_utmi_ctrl(omap, val);
break;
case OMAP_DWC3_VBUS_OFF:
val = dwc3_omap_read_utmi_ctrl(omap);
val &= ~(USBOTGSS_UTMI_OTG_CTRL_SESSVALID
| USBOTGSS_UTMI_OTG_CTRL_VBUSVALID);
val |= USBOTGSS_UTMI_OTG_CTRL_SESSEND;
dwc3_omap_write_utmi_ctrl(omap, val);
break;
default:
dev_WARN(omap->dev, "invalid state\n");
}
}
static void dwc3_omap_enable_irqs(struct dwc3_omap *omap);
static void dwc3_omap_disable_irqs(struct dwc3_omap *omap);
static irqreturn_t dwc3_omap_interrupt(int irq, void *_omap)
{
struct dwc3_omap *omap = _omap;
if (dwc3_omap_read_irqmisc_status(omap) ||
dwc3_omap_read_irq0_status(omap)) {
/* mask irqs */
dwc3_omap_disable_irqs(omap);
return IRQ_WAKE_THREAD;
}
return IRQ_NONE;
}
static irqreturn_t dwc3_omap_interrupt_thread(int irq, void *_omap)
{
struct dwc3_omap *omap = _omap;
u32 reg;
/* clear irq status flags */
reg = dwc3_omap_read_irqmisc_status(omap);
dwc3_omap_write_irqmisc_status(omap, reg);
reg = dwc3_omap_read_irq0_status(omap);
dwc3_omap_write_irq0_status(omap, reg);
/* unmask irqs */
dwc3_omap_enable_irqs(omap);
return IRQ_HANDLED;
}
static void dwc3_omap_enable_irqs(struct dwc3_omap *omap)
{
u32 reg;
/* enable all IRQs */
reg = USBOTGSS_IRQO_COREIRQ_ST;
dwc3_omap_write_irq0_set(omap, reg);
reg = (USBOTGSS_IRQMISC_OEVT |
USBOTGSS_IRQMISC_DRVVBUS_RISE |
USBOTGSS_IRQMISC_CHRGVBUS_RISE |
USBOTGSS_IRQMISC_DISCHRGVBUS_RISE |
USBOTGSS_IRQMISC_IDPULLUP_RISE |
USBOTGSS_IRQMISC_DRVVBUS_FALL |
USBOTGSS_IRQMISC_CHRGVBUS_FALL |
USBOTGSS_IRQMISC_DISCHRGVBUS_FALL |
USBOTGSS_IRQMISC_IDPULLUP_FALL);
dwc3_omap_write_irqmisc_set(omap, reg);
}
static void dwc3_omap_disable_irqs(struct dwc3_omap *omap)
{
u32 reg;
/* disable all IRQs */
reg = USBOTGSS_IRQO_COREIRQ_ST;
dwc3_omap_write_irq0_clr(omap, reg);
reg = (USBOTGSS_IRQMISC_OEVT |
USBOTGSS_IRQMISC_DRVVBUS_RISE |
USBOTGSS_IRQMISC_CHRGVBUS_RISE |
USBOTGSS_IRQMISC_DISCHRGVBUS_RISE |
USBOTGSS_IRQMISC_IDPULLUP_RISE |
USBOTGSS_IRQMISC_DRVVBUS_FALL |
USBOTGSS_IRQMISC_CHRGVBUS_FALL |
USBOTGSS_IRQMISC_DISCHRGVBUS_FALL |
USBOTGSS_IRQMISC_IDPULLUP_FALL);
dwc3_omap_write_irqmisc_clr(omap, reg);
}
static int dwc3_omap_id_notifier(struct notifier_block *nb,
unsigned long event, void *ptr)
{
struct dwc3_omap *omap = container_of(nb, struct dwc3_omap, id_nb);
if (event)
dwc3_omap_set_mailbox(omap, OMAP_DWC3_ID_GROUND);
else
dwc3_omap_set_mailbox(omap, OMAP_DWC3_ID_FLOAT);
return NOTIFY_DONE;
}
static int dwc3_omap_vbus_notifier(struct notifier_block *nb,
unsigned long event, void *ptr)
{
struct dwc3_omap *omap = container_of(nb, struct dwc3_omap, vbus_nb);
if (event)
dwc3_omap_set_mailbox(omap, OMAP_DWC3_VBUS_VALID);
else
dwc3_omap_set_mailbox(omap, OMAP_DWC3_VBUS_OFF);
return NOTIFY_DONE;
}
static void dwc3_omap_map_offset(struct dwc3_omap *omap)
{
struct device_node *node = omap->dev->of_node;
/*
* Differentiate between OMAP5 and AM437x.
*
* For OMAP5 (ES2.0) and AM437x the wrapper revision is the same,
* even though the wrapper register offsets differ.
*
* Use the DT compatible string to differentiate AM437x.
*/
if (of_device_is_compatible(node, "ti,am437x-dwc3")) {
omap->irq_eoi_offset = USBOTGSS_EOI_OFFSET;
omap->irq0_offset = USBOTGSS_IRQ0_OFFSET;
omap->irqmisc_offset = USBOTGSS_IRQMISC_OFFSET;
omap->utmi_otg_offset = USBOTGSS_UTMI_OTG_OFFSET;
omap->debug_offset = USBOTGSS_DEBUG_OFFSET;
}
}
static void dwc3_omap_set_utmi_mode(struct dwc3_omap *omap)
{
u32 reg;
struct device_node *node = omap->dev->of_node;
u32 utmi_mode = 0;
reg = dwc3_omap_read_utmi_ctrl(omap);
of_property_read_u32(node, "utmi-mode", &utmi_mode);
switch (utmi_mode) {
case DWC3_OMAP_UTMI_MODE_SW:
reg |= USBOTGSS_UTMI_OTG_CTRL_SW_MODE;
break;
case DWC3_OMAP_UTMI_MODE_HW:
reg &= ~USBOTGSS_UTMI_OTG_CTRL_SW_MODE;
break;
default:
dev_WARN(omap->dev, "UNKNOWN utmi mode %d\n", utmi_mode);
}
dwc3_omap_write_utmi_ctrl(omap, reg);
}
static int dwc3_omap_extcon_register(struct dwc3_omap *omap)
{
int ret;
struct device_node *node = omap->dev->of_node;
struct extcon_dev *edev;
if (of_property_read_bool(node, "extcon")) {
edev = extcon_get_edev_by_phandle(omap->dev, 0);
if (IS_ERR(edev)) {
dev_vdbg(omap->dev, "couldn't get extcon device\n");
return -EPROBE_DEFER;
}
omap->vbus_nb.notifier_call = dwc3_omap_vbus_notifier;
ret = devm_extcon_register_notifier(omap->dev, edev,
EXTCON_USB, &omap->vbus_nb);
if (ret < 0)
dev_vdbg(omap->dev, "failed to register notifier for USB\n");
omap->id_nb.notifier_call = dwc3_omap_id_notifier;
ret = devm_extcon_register_notifier(omap->dev, edev,
EXTCON_USB_HOST, &omap->id_nb);
if (ret < 0)
dev_vdbg(omap->dev, "failed to register notifier for USB-HOST\n");
if (extcon_get_state(edev, EXTCON_USB) == true)
dwc3_omap_set_mailbox(omap, OMAP_DWC3_VBUS_VALID);
else
dwc3_omap_set_mailbox(omap, OMAP_DWC3_VBUS_OFF);
if (extcon_get_state(edev, EXTCON_USB_HOST) == true)
dwc3_omap_set_mailbox(omap, OMAP_DWC3_ID_GROUND);
else
dwc3_omap_set_mailbox(omap, OMAP_DWC3_ID_FLOAT);
omap->edev = edev;
}
return 0;
}
static int dwc3_omap_probe(struct platform_device *pdev)
{
struct device_node *node = pdev->dev.of_node;
struct dwc3_omap *omap;
struct device *dev = &pdev->dev;
struct regulator *vbus_reg = NULL;
int ret;
int irq;
void __iomem *base;
if (!node) {
dev_err(dev, "device node not found\n");
return -EINVAL;
}
omap = devm_kzalloc(dev, sizeof(*omap), GFP_KERNEL);
if (!omap)
return -ENOMEM;
platform_set_drvdata(pdev, omap);
irq = platform_get_irq(pdev, 0);
if (irq < 0)
return irq;
base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(base))
return PTR_ERR(base);
if (of_property_read_bool(node, "vbus-supply")) {
vbus_reg = devm_regulator_get(dev, "vbus");
if (IS_ERR(vbus_reg)) {
dev_err(dev, "vbus init failed\n");
return PTR_ERR(vbus_reg);
}
}
omap->dev = dev;
omap->irq = irq;
omap->base = base;
omap->vbus_reg = vbus_reg;
pm_runtime_enable(dev);
ret = pm_runtime_get_sync(dev);
if (ret < 0) {
dev_err(dev, "get_sync failed with err %d\n", ret);
goto err1;
}
dwc3_omap_map_offset(omap);
dwc3_omap_set_utmi_mode(omap);
ret = dwc3_omap_extcon_register(omap);
if (ret < 0)
goto err1;
ret = of_platform_populate(node, NULL, NULL, dev);
if (ret) {
dev_err(&pdev->dev, "failed to create dwc3 core\n");
goto err1;
}
ret = devm_request_threaded_irq(dev, omap->irq, dwc3_omap_interrupt,
dwc3_omap_interrupt_thread, IRQF_SHARED,
"dwc3-omap", omap);
if (ret) {
dev_err(dev, "failed to request IRQ #%d --> %d\n",
omap->irq, ret);
goto err1;
}
dwc3_omap_enable_irqs(omap);
return 0;
err1:
pm_runtime_put_sync(dev);
pm_runtime_disable(dev);
return ret;
}
static void dwc3_omap_remove(struct platform_device *pdev)
{
struct dwc3_omap *omap = platform_get_drvdata(pdev);
dwc3_omap_disable_irqs(omap);
disable_irq(omap->irq);
of_platform_depopulate(omap->dev);
pm_runtime_put_sync(&pdev->dev);
pm_runtime_disable(&pdev->dev);
}
static const struct of_device_id of_dwc3_match[] = {
{
.compatible = "ti,dwc3"
},
{
.compatible = "ti,am437x-dwc3"
},
{ },
};
MODULE_DEVICE_TABLE(of, of_dwc3_match);
#ifdef CONFIG_PM_SLEEP
static int dwc3_omap_suspend(struct device *dev)
{
struct dwc3_omap *omap = dev_get_drvdata(dev);
omap->utmi_otg_ctrl = dwc3_omap_read_utmi_ctrl(omap);
dwc3_omap_disable_irqs(omap);
return 0;
}
static int dwc3_omap_resume(struct device *dev)
{
struct dwc3_omap *omap = dev_get_drvdata(dev);
dwc3_omap_write_utmi_ctrl(omap, omap->utmi_otg_ctrl);
dwc3_omap_enable_irqs(omap);
pm_runtime_disable(dev);
pm_runtime_set_active(dev);
pm_runtime_enable(dev);
return 0;
}
static void dwc3_omap_complete(struct device *dev)
{
struct dwc3_omap *omap = dev_get_drvdata(dev);
if (extcon_get_state(omap->edev, EXTCON_USB))
dwc3_omap_set_mailbox(omap, OMAP_DWC3_VBUS_VALID);
else
dwc3_omap_set_mailbox(omap, OMAP_DWC3_VBUS_OFF);
if (extcon_get_state(omap->edev, EXTCON_USB_HOST))
dwc3_omap_set_mailbox(omap, OMAP_DWC3_ID_GROUND);
else
dwc3_omap_set_mailbox(omap, OMAP_DWC3_ID_FLOAT);
}
static const struct dev_pm_ops dwc3_omap_dev_pm_ops = {
SET_SYSTEM_SLEEP_PM_OPS(dwc3_omap_suspend, dwc3_omap_resume)
.complete = dwc3_omap_complete,
};
#define DEV_PM_OPS (&dwc3_omap_dev_pm_ops)
#else
#define DEV_PM_OPS NULL
#endif /* CONFIG_PM_SLEEP */
static struct platform_driver dwc3_omap_driver = {
.probe = dwc3_omap_probe,
.remove_new = dwc3_omap_remove,
.driver = {
.name = "omap-dwc3",
.of_match_table = of_dwc3_match,
.pm = DEV_PM_OPS,
},
};
module_platform_driver(dwc3_omap_driver);
MODULE_ALIAS("platform:omap-dwc3");
MODULE_AUTHOR("Felipe Balbi <[email protected]>");
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("DesignWare USB3 OMAP Glue Layer");
| linux-master | drivers/usb/dwc3/dwc3-omap.c |
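The dwc3-omap interrupt handling above uses the classic primary/threaded split: the hard handler only checks whether the wrapper raised anything and masks the sources, while the threaded handler acknowledges the status bits and unmasks again. The sketch below (illustration only; the example_* names and the EX_IRQSTATUS/EX_IRQENABLE register layout are invented for the example) reproduces that shape around devm_request_threaded_irq().

#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/platform_device.h>

struct example_glue {
	void __iomem *base;
};

/* assumed register layout for the sketch */
#define EX_IRQSTATUS	0x0
#define EX_IRQENABLE	0x4

static irqreturn_t example_hardirq(int irq, void *data)
{
	struct example_glue *glue = data;

	if (!readl(glue->base + EX_IRQSTATUS))
		return IRQ_NONE;

	/* mask in the hard handler, defer the real work to the thread */
	writel(0, glue->base + EX_IRQENABLE);
	return IRQ_WAKE_THREAD;
}

static irqreturn_t example_thread(int irq, void *data)
{
	struct example_glue *glue = data;
	u32 stat = readl(glue->base + EX_IRQSTATUS);

	writel(stat, glue->base + EX_IRQSTATUS);	/* ack pending bits */
	writel(1, glue->base + EX_IRQENABLE);		/* unmask again */
	return IRQ_HANDLED;
}

static int example_request(struct platform_device *pdev,
			   struct example_glue *glue, int irq)
{
	return devm_request_threaded_irq(&pdev->dev, irq, example_hardirq,
					 example_thread, IRQF_SHARED,
					 "example-glue", glue);
}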
// SPDX-License-Identifier: GPL-2.0
/*
* drd.c - DesignWare USB3 DRD Controller Dual-role support
*
* Copyright (C) 2017 Texas Instruments Incorporated - https://www.ti.com
*
* Authors: Roger Quadros <[email protected]>
*/
#include <linux/extcon.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/property.h>
#include "debug.h"
#include "core.h"
#include "gadget.h"
static void dwc3_otg_disable_events(struct dwc3 *dwc, u32 disable_mask)
{
u32 reg = dwc3_readl(dwc->regs, DWC3_OEVTEN);
reg &= ~(disable_mask);
dwc3_writel(dwc->regs, DWC3_OEVTEN, reg);
}
static void dwc3_otg_enable_events(struct dwc3 *dwc, u32 enable_mask)
{
u32 reg = dwc3_readl(dwc->regs, DWC3_OEVTEN);
reg |= (enable_mask);
dwc3_writel(dwc->regs, DWC3_OEVTEN, reg);
}
static void dwc3_otg_clear_events(struct dwc3 *dwc)
{
u32 reg = dwc3_readl(dwc->regs, DWC3_OEVT);
dwc3_writel(dwc->regs, DWC3_OEVTEN, reg);
}
#define DWC3_OTG_ALL_EVENTS (DWC3_OEVTEN_XHCIRUNSTPSETEN | \
DWC3_OEVTEN_DEVRUNSTPSETEN | DWC3_OEVTEN_HIBENTRYEN | \
DWC3_OEVTEN_CONIDSTSCHNGEN | DWC3_OEVTEN_HRRCONFNOTIFEN | \
DWC3_OEVTEN_HRRINITNOTIFEN | DWC3_OEVTEN_ADEVIDLEEN | \
DWC3_OEVTEN_ADEVBHOSTENDEN | DWC3_OEVTEN_ADEVHOSTEN | \
DWC3_OEVTEN_ADEVHNPCHNGEN | DWC3_OEVTEN_ADEVSRPDETEN | \
DWC3_OEVTEN_ADEVSESSENDDETEN | DWC3_OEVTEN_BDEVBHOSTENDEN | \
DWC3_OEVTEN_BDEVHNPCHNGEN | DWC3_OEVTEN_BDEVSESSVLDDETEN | \
DWC3_OEVTEN_BDEVVBUSCHNGEN)
static irqreturn_t dwc3_otg_thread_irq(int irq, void *_dwc)
{
struct dwc3 *dwc = _dwc;
spin_lock(&dwc->lock);
if (dwc->otg_restart_host) {
dwc3_otg_host_init(dwc);
dwc->otg_restart_host = false;
}
spin_unlock(&dwc->lock);
dwc3_set_mode(dwc, DWC3_GCTL_PRTCAP_OTG);
return IRQ_HANDLED;
}
static irqreturn_t dwc3_otg_irq(int irq, void *_dwc)
{
u32 reg;
struct dwc3 *dwc = _dwc;
irqreturn_t ret = IRQ_NONE;
reg = dwc3_readl(dwc->regs, DWC3_OEVT);
if (reg) {
/* ignore non OTG events, we can't disable them in OEVTEN */
if (!(reg & DWC3_OTG_ALL_EVENTS)) {
dwc3_writel(dwc->regs, DWC3_OEVT, reg);
return IRQ_NONE;
}
if (dwc->current_otg_role == DWC3_OTG_ROLE_HOST &&
!(reg & DWC3_OEVT_DEVICEMODE))
dwc->otg_restart_host = true;
dwc3_writel(dwc->regs, DWC3_OEVT, reg);
ret = IRQ_WAKE_THREAD;
}
return ret;
}
static void dwc3_otgregs_init(struct dwc3 *dwc)
{
u32 reg;
/*
* Prevent host/device reset from resetting OTG core.
* If we don't do this then xhci_reset (USBCMD.HCRST) will reset
* the signal outputs sent to the PHY, the OTG FSM logic of the
* core and also the resets to the VBUS filters inside the core.
*/
reg = dwc3_readl(dwc->regs, DWC3_OCFG);
reg |= DWC3_OCFG_SFTRSTMASK;
dwc3_writel(dwc->regs, DWC3_OCFG, reg);
/* Disable hibernation for simplicity */
reg = dwc3_readl(dwc->regs, DWC3_GCTL);
reg &= ~DWC3_GCTL_GBLHIBERNATIONEN;
dwc3_writel(dwc->regs, DWC3_GCTL, reg);
/*
* Initialize OTG registers as per
* Figure 11-4 OTG Driver Overall Programming Flow
*/
/* OCFG.SRPCap = 0, OCFG.HNPCap = 0 */
reg = dwc3_readl(dwc->regs, DWC3_OCFG);
reg &= ~(DWC3_OCFG_SRPCAP | DWC3_OCFG_HNPCAP);
dwc3_writel(dwc->regs, DWC3_OCFG, reg);
/* OEVT = FFFF */
dwc3_otg_clear_events(dwc);
/* OEVTEN = 0 */
dwc3_otg_disable_events(dwc, DWC3_OTG_ALL_EVENTS);
/* OEVTEN.ConIDStsChngEn = 1. Instead we enable all events */
dwc3_otg_enable_events(dwc, DWC3_OTG_ALL_EVENTS);
/*
* OCTL.PeriMode = 1, OCTL.DevSetHNPEn = 0, OCTL.HstSetHNPEn = 0,
* OCTL.HNPReq = 0
*/
reg = dwc3_readl(dwc->regs, DWC3_OCTL);
reg |= DWC3_OCTL_PERIMODE;
reg &= ~(DWC3_OCTL_DEVSETHNPEN | DWC3_OCTL_HSTSETHNPEN |
DWC3_OCTL_HNPREQ);
dwc3_writel(dwc->regs, DWC3_OCTL, reg);
}
static int dwc3_otg_get_irq(struct dwc3 *dwc)
{
struct platform_device *dwc3_pdev = to_platform_device(dwc->dev);
int irq;
irq = platform_get_irq_byname_optional(dwc3_pdev, "otg");
if (irq > 0)
goto out;
if (irq == -EPROBE_DEFER)
goto out;
irq = platform_get_irq_byname_optional(dwc3_pdev, "dwc_usb3");
if (irq > 0)
goto out;
if (irq == -EPROBE_DEFER)
goto out;
irq = platform_get_irq(dwc3_pdev, 0);
if (irq > 0)
goto out;
if (!irq)
irq = -EINVAL;
out:
return irq;
}
void dwc3_otg_init(struct dwc3 *dwc)
{
u32 reg;
/*
* As per Figure 11-4 OTG Driver Overall Programming Flow,
* block "Initialize GCTL for OTG operation".
*/
/* GCTL.PrtCapDir=2'b11 */
dwc3_set_prtcap(dwc, DWC3_GCTL_PRTCAP_OTG);
/* GUSB2PHYCFG0.SusPHY=0 */
reg = dwc3_readl(dwc->regs, DWC3_GUSB2PHYCFG(0));
reg &= ~DWC3_GUSB2PHYCFG_SUSPHY;
dwc3_writel(dwc->regs, DWC3_GUSB2PHYCFG(0), reg);
/* Initialize OTG registers */
dwc3_otgregs_init(dwc);
}
void dwc3_otg_exit(struct dwc3 *dwc)
{
/* disable all OTG IRQs */
dwc3_otg_disable_events(dwc, DWC3_OTG_ALL_EVENTS);
/* clear all events */
dwc3_otg_clear_events(dwc);
}
/* should be called before Host controller driver is started */
void dwc3_otg_host_init(struct dwc3 *dwc)
{
u32 reg;
/* As per Figure 11-10 A-Device Flow Diagram */
/* OCFG.HNPCap = 0, OCFG.SRPCap = 0. Already 0 */
/*
* OCTL.PeriMode=0, OCTL.TermSelDLPulse = 0,
* OCTL.DevSetHNPEn = 0, OCTL.HstSetHNPEn = 0
*/
reg = dwc3_readl(dwc->regs, DWC3_OCTL);
reg &= ~(DWC3_OCTL_PERIMODE | DWC3_OCTL_TERMSELIDPULSE |
DWC3_OCTL_DEVSETHNPEN | DWC3_OCTL_HSTSETHNPEN);
dwc3_writel(dwc->regs, DWC3_OCTL, reg);
/*
* OCFG.DisPrtPwrCutoff = 0/1
*/
reg = dwc3_readl(dwc->regs, DWC3_OCFG);
reg &= ~DWC3_OCFG_DISPWRCUTTOFF;
dwc3_writel(dwc->regs, DWC3_OCFG, reg);
/*
* OCFG.SRPCap = 1, OCFG.HNPCap = GHWPARAMS6.HNP_CAP
* We don't want SRP/HNP for simple dual-role so leave
* these disabled.
*/
/*
* OEVTEN.OTGADevHostEvntEn = 1
* OEVTEN.OTGADevSessEndDetEvntEn = 1
* We don't want HNP/role-swap so leave these disabled.
*/
/* GUSB2PHYCFG.ULPIAutoRes = 1/0, GUSB2PHYCFG.SusPHY = 1 */
if (!dwc->dis_u2_susphy_quirk) {
reg = dwc3_readl(dwc->regs, DWC3_GUSB2PHYCFG(0));
reg |= DWC3_GUSB2PHYCFG_SUSPHY;
dwc3_writel(dwc->regs, DWC3_GUSB2PHYCFG(0), reg);
}
/* Set Port Power to enable VBUS: OCTL.PrtPwrCtl = 1 */
reg = dwc3_readl(dwc->regs, DWC3_OCTL);
reg |= DWC3_OCTL_PRTPWRCTL;
dwc3_writel(dwc->regs, DWC3_OCTL, reg);
}
/* should be called after Host controller driver is stopped */
static void dwc3_otg_host_exit(struct dwc3 *dwc)
{
u32 reg;
/*
* Exit from A-device flow as per
* Figure 11-4 OTG Driver Overall Programming Flow
*/
/*
* OEVTEN.OTGADevBHostEndEvntEn=0, OEVTEN.OTGADevHNPChngEvntEn=0
* OEVTEN.OTGADevSessEndDetEvntEn=0,
* OEVTEN.OTGADevHostEvntEn = 0
* But we don't disable any OTG events
*/
/* OCTL.HstSetHNPEn = 0, OCTL.PrtPwrCtl=0 */
reg = dwc3_readl(dwc->regs, DWC3_OCTL);
reg &= ~(DWC3_OCTL_HSTSETHNPEN | DWC3_OCTL_PRTPWRCTL);
dwc3_writel(dwc->regs, DWC3_OCTL, reg);
}
/* should be called before the gadget controller driver is started */
static void dwc3_otg_device_init(struct dwc3 *dwc)
{
u32 reg;
/* As per Figure 11-20 B-Device Flow Diagram */
/*
* OCFG.HNPCap = GHWPARAMS6.HNP_CAP, OCFG.SRPCap = 1
* but we keep them 0 for simple dual-role operation.
*/
reg = dwc3_readl(dwc->regs, DWC3_OCFG);
/* OCFG.OTGSftRstMsk = 0/1 */
reg |= DWC3_OCFG_SFTRSTMASK;
dwc3_writel(dwc->regs, DWC3_OCFG, reg);
/*
* OCTL.PeriMode = 1
* OCTL.TermSelDLPulse = 0/1, OCTL.HNPReq = 0
* OCTL.DevSetHNPEn = 0, OCTL.HstSetHNPEn = 0
*/
reg = dwc3_readl(dwc->regs, DWC3_OCTL);
reg |= DWC3_OCTL_PERIMODE;
reg &= ~(DWC3_OCTL_TERMSELIDPULSE | DWC3_OCTL_HNPREQ |
DWC3_OCTL_DEVSETHNPEN | DWC3_OCTL_HSTSETHNPEN);
dwc3_writel(dwc->regs, DWC3_OCTL, reg);
/* OEVTEN.OTGBDevSesVldDetEvntEn = 1 */
dwc3_otg_enable_events(dwc, DWC3_OEVTEN_BDEVSESSVLDDETEN);
/* GUSB2PHYCFG.ULPIAutoRes = 0, GUSB2PHYCFG0.SusPHY = 1 */
if (!dwc->dis_u2_susphy_quirk) {
reg = dwc3_readl(dwc->regs, DWC3_GUSB2PHYCFG(0));
reg |= DWC3_GUSB2PHYCFG_SUSPHY;
dwc3_writel(dwc->regs, DWC3_GUSB2PHYCFG(0), reg);
}
/* GCTL.GblHibernationEn = 0. Already 0. */
}
/* should be called after the gadget controller driver is stopped */
static void dwc3_otg_device_exit(struct dwc3 *dwc)
{
u32 reg;
/*
* Exit from B-device flow as per
* Figure 11-4 OTG Driver Overall Programming Flow
*/
/*
* OEVTEN.OTGBDevHNPChngEvntEn = 0
* OEVTEN.OTGBDevVBusChngEvntEn = 0
* OEVTEN.OTGBDevBHostEndEvntEn = 0
*/
dwc3_otg_disable_events(dwc, DWC3_OEVTEN_BDEVHNPCHNGEN |
DWC3_OEVTEN_BDEVVBUSCHNGEN |
DWC3_OEVTEN_BDEVBHOSTENDEN);
/* OCTL.DevSetHNPEn = 0, OCTL.HNPReq = 0, OCTL.PeriMode=1 */
reg = dwc3_readl(dwc->regs, DWC3_OCTL);
reg &= ~(DWC3_OCTL_DEVSETHNPEN | DWC3_OCTL_HNPREQ);
reg |= DWC3_OCTL_PERIMODE;
dwc3_writel(dwc->regs, DWC3_OCTL, reg);
}
void dwc3_otg_update(struct dwc3 *dwc, bool ignore_idstatus)
{
int ret;
u32 reg;
int id;
unsigned long flags;
if (dwc->dr_mode != USB_DR_MODE_OTG)
return;
/* don't do anything if debug user changed role to not OTG */
if (dwc->current_dr_role != DWC3_GCTL_PRTCAP_OTG)
return;
if (!ignore_idstatus) {
reg = dwc3_readl(dwc->regs, DWC3_OSTS);
id = !!(reg & DWC3_OSTS_CONIDSTS);
dwc->desired_otg_role = id ? DWC3_OTG_ROLE_DEVICE :
DWC3_OTG_ROLE_HOST;
}
if (dwc->desired_otg_role == dwc->current_otg_role)
return;
switch (dwc->current_otg_role) {
case DWC3_OTG_ROLE_HOST:
dwc3_host_exit(dwc);
spin_lock_irqsave(&dwc->lock, flags);
dwc3_otg_host_exit(dwc);
spin_unlock_irqrestore(&dwc->lock, flags);
break;
case DWC3_OTG_ROLE_DEVICE:
dwc3_gadget_exit(dwc);
spin_lock_irqsave(&dwc->lock, flags);
dwc3_event_buffers_cleanup(dwc);
dwc3_otg_device_exit(dwc);
spin_unlock_irqrestore(&dwc->lock, flags);
break;
default:
break;
}
spin_lock_irqsave(&dwc->lock, flags);
dwc->current_otg_role = dwc->desired_otg_role;
spin_unlock_irqrestore(&dwc->lock, flags);
switch (dwc->desired_otg_role) {
case DWC3_OTG_ROLE_HOST:
spin_lock_irqsave(&dwc->lock, flags);
dwc3_otgregs_init(dwc);
dwc3_otg_host_init(dwc);
spin_unlock_irqrestore(&dwc->lock, flags);
ret = dwc3_host_init(dwc);
if (ret) {
dev_err(dwc->dev, "failed to initialize host\n");
} else {
if (dwc->usb2_phy)
otg_set_vbus(dwc->usb2_phy->otg, true);
if (dwc->usb2_generic_phy)
phy_set_mode(dwc->usb2_generic_phy,
PHY_MODE_USB_HOST);
}
break;
case DWC3_OTG_ROLE_DEVICE:
spin_lock_irqsave(&dwc->lock, flags);
dwc3_otgregs_init(dwc);
dwc3_otg_device_init(dwc);
dwc3_event_buffers_setup(dwc);
spin_unlock_irqrestore(&dwc->lock, flags);
if (dwc->usb2_phy)
otg_set_vbus(dwc->usb2_phy->otg, false);
if (dwc->usb2_generic_phy)
phy_set_mode(dwc->usb2_generic_phy,
PHY_MODE_USB_DEVICE);
ret = dwc3_gadget_init(dwc);
if (ret)
dev_err(dwc->dev, "failed to initialize peripheral\n");
break;
default:
break;
}
}
static void dwc3_drd_update(struct dwc3 *dwc)
{
int id;
if (dwc->edev) {
id = extcon_get_state(dwc->edev, EXTCON_USB_HOST);
if (id < 0)
id = 0;
dwc3_set_mode(dwc, id ?
DWC3_GCTL_PRTCAP_HOST :
DWC3_GCTL_PRTCAP_DEVICE);
}
}
static int dwc3_drd_notifier(struct notifier_block *nb,
unsigned long event, void *ptr)
{
struct dwc3 *dwc = container_of(nb, struct dwc3, edev_nb);
dwc3_set_mode(dwc, event ?
DWC3_GCTL_PRTCAP_HOST :
DWC3_GCTL_PRTCAP_DEVICE);
return NOTIFY_DONE;
}
#if IS_ENABLED(CONFIG_USB_ROLE_SWITCH)
#define ROLE_SWITCH 1
static int dwc3_usb_role_switch_set(struct usb_role_switch *sw,
enum usb_role role)
{
struct dwc3 *dwc = usb_role_switch_get_drvdata(sw);
u32 mode;
switch (role) {
case USB_ROLE_HOST:
mode = DWC3_GCTL_PRTCAP_HOST;
break;
case USB_ROLE_DEVICE:
mode = DWC3_GCTL_PRTCAP_DEVICE;
break;
default:
if (dwc->role_switch_default_mode == USB_DR_MODE_HOST)
mode = DWC3_GCTL_PRTCAP_HOST;
else
mode = DWC3_GCTL_PRTCAP_DEVICE;
break;
}
dwc3_set_mode(dwc, mode);
return 0;
}
static enum usb_role dwc3_usb_role_switch_get(struct usb_role_switch *sw)
{
struct dwc3 *dwc = usb_role_switch_get_drvdata(sw);
unsigned long flags;
enum usb_role role;
spin_lock_irqsave(&dwc->lock, flags);
switch (dwc->current_dr_role) {
case DWC3_GCTL_PRTCAP_HOST:
role = USB_ROLE_HOST;
break;
case DWC3_GCTL_PRTCAP_DEVICE:
role = USB_ROLE_DEVICE;
break;
case DWC3_GCTL_PRTCAP_OTG:
role = dwc->current_otg_role;
break;
default:
if (dwc->role_switch_default_mode == USB_DR_MODE_HOST)
role = USB_ROLE_HOST;
else
role = USB_ROLE_DEVICE;
break;
}
spin_unlock_irqrestore(&dwc->lock, flags);
return role;
}
static int dwc3_setup_role_switch(struct dwc3 *dwc)
{
struct usb_role_switch_desc dwc3_role_switch = {NULL};
u32 mode;
dwc->role_switch_default_mode = usb_get_role_switch_default_mode(dwc->dev);
if (dwc->role_switch_default_mode == USB_DR_MODE_HOST) {
mode = DWC3_GCTL_PRTCAP_HOST;
} else {
dwc->role_switch_default_mode = USB_DR_MODE_PERIPHERAL;
mode = DWC3_GCTL_PRTCAP_DEVICE;
}
dwc3_role_switch.fwnode = dev_fwnode(dwc->dev);
dwc3_role_switch.set = dwc3_usb_role_switch_set;
dwc3_role_switch.get = dwc3_usb_role_switch_get;
dwc3_role_switch.driver_data = dwc;
dwc->role_sw = usb_role_switch_register(dwc->dev, &dwc3_role_switch);
if (IS_ERR(dwc->role_sw))
return PTR_ERR(dwc->role_sw);
if (dwc->dev->of_node) {
/* populate connector entry */
int ret = devm_of_platform_populate(dwc->dev);
if (ret) {
usb_role_switch_unregister(dwc->role_sw);
dwc->role_sw = NULL;
dev_err(dwc->dev, "DWC3 platform devices creation failed: %i\n", ret);
return ret;
}
}
dwc3_set_mode(dwc, mode);
return 0;
}
#else
#define ROLE_SWITCH 0
#define dwc3_setup_role_switch(x) 0
#endif
int dwc3_drd_init(struct dwc3 *dwc)
{
int ret, irq;
if (ROLE_SWITCH &&
device_property_read_bool(dwc->dev, "usb-role-switch"))
return dwc3_setup_role_switch(dwc);
if (dwc->edev) {
dwc->edev_nb.notifier_call = dwc3_drd_notifier;
ret = extcon_register_notifier(dwc->edev, EXTCON_USB_HOST,
&dwc->edev_nb);
if (ret < 0) {
dev_err(dwc->dev, "couldn't register cable notifier\n");
return ret;
}
dwc3_drd_update(dwc);
} else {
dwc3_set_prtcap(dwc, DWC3_GCTL_PRTCAP_OTG);
/* use OTG block to get ID event */
irq = dwc3_otg_get_irq(dwc);
if (irq < 0)
return irq;
dwc->otg_irq = irq;
/* disable all OTG IRQs */
dwc3_otg_disable_events(dwc, DWC3_OTG_ALL_EVENTS);
/* clear all events */
dwc3_otg_clear_events(dwc);
ret = request_threaded_irq(dwc->otg_irq, dwc3_otg_irq,
dwc3_otg_thread_irq,
IRQF_SHARED, "dwc3-otg", dwc);
if (ret) {
dev_err(dwc->dev, "failed to request irq #%d --> %d\n",
dwc->otg_irq, ret);
ret = -ENODEV;
return ret;
}
dwc3_otg_init(dwc);
dwc3_set_mode(dwc, DWC3_GCTL_PRTCAP_OTG);
}
return 0;
}
void dwc3_drd_exit(struct dwc3 *dwc)
{
unsigned long flags;
if (dwc->role_sw)
usb_role_switch_unregister(dwc->role_sw);
if (dwc->edev)
extcon_unregister_notifier(dwc->edev, EXTCON_USB_HOST,
&dwc->edev_nb);
cancel_work_sync(&dwc->drd_work);
/* debug user might have changed role, clean based on current role */
switch (dwc->current_dr_role) {
case DWC3_GCTL_PRTCAP_HOST:
dwc3_host_exit(dwc);
break;
case DWC3_GCTL_PRTCAP_DEVICE:
dwc3_gadget_exit(dwc);
dwc3_event_buffers_cleanup(dwc);
break;
case DWC3_GCTL_PRTCAP_OTG:
dwc3_otg_exit(dwc);
spin_lock_irqsave(&dwc->lock, flags);
dwc->desired_otg_role = DWC3_OTG_ROLE_IDLE;
spin_unlock_irqrestore(&dwc->lock, flags);
dwc3_otg_update(dwc, 1);
break;
default:
break;
}
if (dwc->otg_irq)
free_irq(dwc->otg_irq, dwc);
}
| linux-master | drivers/usb/dwc3/drd.c |
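dwc3_setup_role_switch() above is an instance of the generic usb_role_switch pattern: fill in a usb_role_switch_desc with set/get callbacks plus driver data, point it at the device's fwnode, and register it. A skeleton sketch of that pattern follows (illustration only; the example_* names are invented, and the set callback simply records the role where a real driver would reprogram the controller).

#include <linux/err.h>
#include <linux/property.h>
#include <linux/usb/role.h>

struct example_drd {
	struct device *dev;
	struct usb_role_switch *role_sw;
	enum usb_role role;
};

static int example_role_set(struct usb_role_switch *sw, enum usb_role role)
{
	struct example_drd *drd = usb_role_switch_get_drvdata(sw);

	/* a real driver would reprogram the controller here */
	drd->role = role;
	return 0;
}

static enum usb_role example_role_get(struct usb_role_switch *sw)
{
	struct example_drd *drd = usb_role_switch_get_drvdata(sw);

	return drd->role;
}

static int example_register_role_switch(struct example_drd *drd)
{
	struct usb_role_switch_desc desc = {
		.fwnode = dev_fwnode(drd->dev),
		.set = example_role_set,
		.get = example_role_get,
		.driver_data = drd,
	};

	drd->role_sw = usb_role_switch_register(drd->dev, &desc);
	return PTR_ERR_OR_ZERO(drd->role_sw);
}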
// SPDX-License-Identifier: GPL-2.0
/*
* USB Glue for Amlogic G12A SoCs
*
* Copyright (c) 2019 BayLibre, SAS
* Author: Neil Armstrong <[email protected]>
*/
/*
* The USB complex is organized as a glue layer around the DWC3 controller IP:
* - control registers for each USB2 port
* - control registers for the USB PHY layer
* - the SuperSpeed PHY can be enabled only if its port is used
* - dynamic OTG switching with an ID-change interrupt
*/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/platform_device.h>
#include <linux/clk.h>
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/pm_runtime.h>
#include <linux/regmap.h>
#include <linux/bitfield.h>
#include <linux/bitops.h>
#include <linux/reset.h>
#include <linux/phy/phy.h>
#include <linux/usb/otg.h>
#include <linux/usb/role.h>
#include <linux/regulator/consumer.h>
/* USB2 Ports Control Registers, offsets are per-port */
#define U2P_REG_SIZE 0x20
#define U2P_R0 0x0
#define U2P_R0_HOST_DEVICE BIT(0)
#define U2P_R0_POWER_OK BIT(1)
#define U2P_R0_HAST_MODE BIT(2)
#define U2P_R0_POWER_ON_RESET BIT(3)
#define U2P_R0_ID_PULLUP BIT(4)
#define U2P_R0_DRV_VBUS BIT(5)
#define U2P_R1 0x4
#define U2P_R1_PHY_READY BIT(0)
#define U2P_R1_ID_DIG BIT(1)
#define U2P_R1_OTG_SESSION_VALID BIT(2)
#define U2P_R1_VBUS_VALID BIT(3)
/* USB Glue Control Registers */
#define G12A_GLUE_OFFSET 0x80
#define USB_R0 0x00
#define USB_R0_P30_LANE0_TX2RX_LOOPBACK BIT(17)
#define USB_R0_P30_LANE0_EXT_PCLK_REQ BIT(18)
#define USB_R0_P30_PCS_RX_LOS_MASK_VAL_MASK GENMASK(28, 19)
#define USB_R0_U2D_SS_SCALEDOWN_MODE_MASK GENMASK(30, 29)
#define USB_R0_U2D_ACT BIT(31)
#define USB_R1 0x04
#define USB_R1_U3H_BIGENDIAN_GS BIT(0)
#define USB_R1_U3H_PME_ENABLE BIT(1)
#define USB_R1_U3H_HUB_PORT_OVERCURRENT_MASK GENMASK(4, 2)
#define USB_R1_U3H_HUB_PORT_PERM_ATTACH_MASK GENMASK(9, 7)
#define USB_R1_U3H_HOST_U2_PORT_DISABLE_MASK GENMASK(13, 12)
#define USB_R1_U3H_HOST_U3_PORT_DISABLE BIT(16)
#define USB_R1_U3H_HOST_PORT_POWER_CONTROL_PRESENT BIT(17)
#define USB_R1_U3H_HOST_MSI_ENABLE BIT(18)
#define USB_R1_U3H_FLADJ_30MHZ_REG_MASK GENMASK(24, 19)
#define USB_R1_P30_PCS_TX_SWING_FULL_MASK GENMASK(31, 25)
#define USB_R2 0x08
#define USB_R2_P30_PCS_TX_DEEMPH_3P5DB_MASK GENMASK(25, 20)
#define USB_R2_P30_PCS_TX_DEEMPH_6DB_MASK GENMASK(31, 26)
#define USB_R3 0x0c
#define USB_R3_P30_SSC_ENABLE BIT(0)
#define USB_R3_P30_SSC_RANGE_MASK GENMASK(3, 1)
#define USB_R3_P30_SSC_REF_CLK_SEL_MASK GENMASK(12, 4)
#define USB_R3_P30_REF_SSP_EN BIT(13)
#define USB_R4 0x10
#define USB_R4_P21_PORT_RESET_0 BIT(0)
#define USB_R4_P21_SLEEP_M0 BIT(1)
#define USB_R4_MEM_PD_MASK GENMASK(3, 2)
#define USB_R4_P21_ONLY BIT(4)
#define USB_R5 0x14
#define USB_R5_ID_DIG_SYNC BIT(0)
#define USB_R5_ID_DIG_REG BIT(1)
#define USB_R5_ID_DIG_CFG_MASK GENMASK(3, 2)
#define USB_R5_ID_DIG_EN_0 BIT(4)
#define USB_R5_ID_DIG_EN_1 BIT(5)
#define USB_R5_ID_DIG_CURR BIT(6)
#define USB_R5_ID_DIG_IRQ BIT(7)
#define USB_R5_ID_DIG_TH_MASK GENMASK(15, 8)
#define USB_R5_ID_DIG_CNT_MASK GENMASK(23, 16)
#define PHY_COUNT 3
#define USB2_OTG_PHY 1
static struct clk_bulk_data meson_gxl_clocks[] = {
{ .id = "usb_ctrl" },
{ .id = "ddr" },
};
static struct clk_bulk_data meson_g12a_clocks[] = {
{ .id = NULL },
};
static struct clk_bulk_data meson_a1_clocks[] = {
{ .id = "usb_ctrl" },
{ .id = "usb_bus" },
{ .id = "xtal_usb_ctrl" },
};
static const char * const meson_gxm_phy_names[] = {
"usb2-phy0", "usb2-phy1", "usb2-phy2",
};
static const char * const meson_g12a_phy_names[] = {
"usb2-phy0", "usb2-phy1", "usb3-phy0",
};
/*
* Amlogic A1 has a single physical PHY, in slot 1, but still has the
* two U2 PHY control register blocks like G12A.
* AXG has a similar scheme and thus needs the same tweak.
* Handling the first PHY in slot 1 would require large code changes,
* and the current management is generic enough to handle it correctly
* when only the "usb2-phy1" PHY is specified, in line with the DT
* bindings.
*/
static const char * const meson_a1_phy_names[] = {
"usb2-phy0", "usb2-phy1"
};
struct dwc3_meson_g12a;
struct dwc3_meson_g12a_drvdata {
bool otg_phy_host_port_disable;
struct clk_bulk_data *clks;
int num_clks;
const char * const *phy_names;
int num_phys;
int (*setup_regmaps)(struct dwc3_meson_g12a *priv, void __iomem *base);
int (*usb2_init_phy)(struct dwc3_meson_g12a *priv, int i,
enum phy_mode mode);
int (*set_phy_mode)(struct dwc3_meson_g12a *priv, int i,
enum phy_mode mode);
int (*usb_init)(struct dwc3_meson_g12a *priv);
int (*usb_post_init)(struct dwc3_meson_g12a *priv);
};
static int dwc3_meson_gxl_setup_regmaps(struct dwc3_meson_g12a *priv,
void __iomem *base);
static int dwc3_meson_g12a_setup_regmaps(struct dwc3_meson_g12a *priv,
void __iomem *base);
static int dwc3_meson_g12a_usb2_init_phy(struct dwc3_meson_g12a *priv, int i,
enum phy_mode mode);
static int dwc3_meson_gxl_usb2_init_phy(struct dwc3_meson_g12a *priv, int i,
enum phy_mode mode);
static int dwc3_meson_g12a_set_phy_mode(struct dwc3_meson_g12a *priv,
int i, enum phy_mode mode);
static int dwc3_meson_gxl_set_phy_mode(struct dwc3_meson_g12a *priv,
int i, enum phy_mode mode);
static int dwc3_meson_g12a_usb_init(struct dwc3_meson_g12a *priv);
static int dwc3_meson_gxl_usb_init(struct dwc3_meson_g12a *priv);
static int dwc3_meson_gxl_usb_post_init(struct dwc3_meson_g12a *priv);
/*
* For GXL and GXM SoCs:
* USB PHY muxing between the DWC2 device controller and the DWC3 host
* controller is buggy when switching from device to host while the USB
* port is unpopulated; it causes the DWC3 to hard crash.
* When populated (including OTG switching with the ID pin), the switch
* works reliably, as on the G12A platforms.
* In order to still switch from host to device on a USB Type-A port,
* a U2_PORT_DISABLE bit has been added to disconnect the DWC3 host
* controller from the port, but when it is used the DWC3 controller
* must be reset to recover use of the port.
*/
static const struct dwc3_meson_g12a_drvdata gxl_drvdata = {
.otg_phy_host_port_disable = true,
.clks = meson_gxl_clocks,
.num_clks = ARRAY_SIZE(meson_g12a_clocks),
.phy_names = meson_a1_phy_names,
.num_phys = ARRAY_SIZE(meson_a1_phy_names),
.setup_regmaps = dwc3_meson_gxl_setup_regmaps,
.usb2_init_phy = dwc3_meson_gxl_usb2_init_phy,
.set_phy_mode = dwc3_meson_gxl_set_phy_mode,
.usb_init = dwc3_meson_gxl_usb_init,
.usb_post_init = dwc3_meson_gxl_usb_post_init,
};
static const struct dwc3_meson_g12a_drvdata gxm_drvdata = {
.otg_phy_host_port_disable = true,
.clks = meson_gxl_clocks,
.num_clks = ARRAY_SIZE(meson_g12a_clocks),
.phy_names = meson_gxm_phy_names,
.num_phys = ARRAY_SIZE(meson_gxm_phy_names),
.setup_regmaps = dwc3_meson_gxl_setup_regmaps,
.usb2_init_phy = dwc3_meson_gxl_usb2_init_phy,
.set_phy_mode = dwc3_meson_gxl_set_phy_mode,
.usb_init = dwc3_meson_gxl_usb_init,
.usb_post_init = dwc3_meson_gxl_usb_post_init,
};
static const struct dwc3_meson_g12a_drvdata axg_drvdata = {
.clks = meson_gxl_clocks,
.num_clks = ARRAY_SIZE(meson_gxl_clocks),
.phy_names = meson_a1_phy_names,
.num_phys = ARRAY_SIZE(meson_a1_phy_names),
.setup_regmaps = dwc3_meson_gxl_setup_regmaps,
.usb2_init_phy = dwc3_meson_gxl_usb2_init_phy,
.set_phy_mode = dwc3_meson_gxl_set_phy_mode,
.usb_init = dwc3_meson_g12a_usb_init,
.usb_post_init = dwc3_meson_gxl_usb_post_init,
};
static const struct dwc3_meson_g12a_drvdata g12a_drvdata = {
.clks = meson_g12a_clocks,
.num_clks = ARRAY_SIZE(meson_g12a_clocks),
.phy_names = meson_g12a_phy_names,
.num_phys = ARRAY_SIZE(meson_g12a_phy_names),
.setup_regmaps = dwc3_meson_g12a_setup_regmaps,
.usb2_init_phy = dwc3_meson_g12a_usb2_init_phy,
.set_phy_mode = dwc3_meson_g12a_set_phy_mode,
.usb_init = dwc3_meson_g12a_usb_init,
};
static const struct dwc3_meson_g12a_drvdata a1_drvdata = {
.clks = meson_a1_clocks,
.num_clks = ARRAY_SIZE(meson_a1_clocks),
.phy_names = meson_a1_phy_names,
.num_phys = ARRAY_SIZE(meson_a1_phy_names),
.setup_regmaps = dwc3_meson_g12a_setup_regmaps,
.usb2_init_phy = dwc3_meson_g12a_usb2_init_phy,
.set_phy_mode = dwc3_meson_g12a_set_phy_mode,
.usb_init = dwc3_meson_g12a_usb_init,
};
struct dwc3_meson_g12a {
struct device *dev;
struct regmap *u2p_regmap[PHY_COUNT];
struct regmap *usb_glue_regmap;
struct reset_control *reset;
struct phy *phys[PHY_COUNT];
enum usb_dr_mode otg_mode;
enum phy_mode otg_phy_mode;
unsigned int usb2_ports;
unsigned int usb3_ports;
struct regulator *vbus;
struct usb_role_switch_desc switch_desc;
struct usb_role_switch *role_switch;
const struct dwc3_meson_g12a_drvdata *drvdata;
};
static int dwc3_meson_gxl_set_phy_mode(struct dwc3_meson_g12a *priv,
int i, enum phy_mode mode)
{
return phy_set_mode(priv->phys[i], mode);
}
static int dwc3_meson_gxl_usb2_init_phy(struct dwc3_meson_g12a *priv, int i,
enum phy_mode mode)
{
/* On GXL PHY must be started in device mode for DWC2 init */
return priv->drvdata->set_phy_mode(priv, i,
(i == USB2_OTG_PHY) ? PHY_MODE_USB_DEVICE
: PHY_MODE_USB_HOST);
}
static int dwc3_meson_g12a_set_phy_mode(struct dwc3_meson_g12a *priv,
int i, enum phy_mode mode)
{
if (mode == PHY_MODE_USB_HOST)
regmap_update_bits(priv->u2p_regmap[i], U2P_R0,
U2P_R0_HOST_DEVICE,
U2P_R0_HOST_DEVICE);
else
regmap_update_bits(priv->u2p_regmap[i], U2P_R0,
U2P_R0_HOST_DEVICE, 0);
return 0;
}
static int dwc3_meson_g12a_usb2_init_phy(struct dwc3_meson_g12a *priv, int i,
enum phy_mode mode)
{
int ret;
regmap_update_bits(priv->u2p_regmap[i], U2P_R0,
U2P_R0_POWER_ON_RESET,
U2P_R0_POWER_ON_RESET);
if (i == USB2_OTG_PHY) {
regmap_update_bits(priv->u2p_regmap[i], U2P_R0,
U2P_R0_ID_PULLUP | U2P_R0_DRV_VBUS,
U2P_R0_ID_PULLUP | U2P_R0_DRV_VBUS);
ret = priv->drvdata->set_phy_mode(priv, i, mode);
} else
ret = priv->drvdata->set_phy_mode(priv, i,
PHY_MODE_USB_HOST);
if (ret)
return ret;
regmap_update_bits(priv->u2p_regmap[i], U2P_R0,
U2P_R0_POWER_ON_RESET, 0);
return 0;
}
static int dwc3_meson_g12a_usb2_init(struct dwc3_meson_g12a *priv,
enum phy_mode mode)
{
int i, ret;
for (i = 0; i < priv->drvdata->num_phys; ++i) {
if (!priv->phys[i])
continue;
if (!strstr(priv->drvdata->phy_names[i], "usb2"))
continue;
ret = priv->drvdata->usb2_init_phy(priv, i, mode);
if (ret)
return ret;
}
return 0;
}
static void dwc3_meson_g12a_usb3_init(struct dwc3_meson_g12a *priv)
{
regmap_update_bits(priv->usb_glue_regmap, USB_R3,
USB_R3_P30_SSC_RANGE_MASK |
USB_R3_P30_REF_SSP_EN,
USB_R3_P30_SSC_ENABLE |
FIELD_PREP(USB_R3_P30_SSC_RANGE_MASK, 2) |
USB_R3_P30_REF_SSP_EN);
udelay(2);
regmap_update_bits(priv->usb_glue_regmap, USB_R2,
USB_R2_P30_PCS_TX_DEEMPH_3P5DB_MASK,
FIELD_PREP(USB_R2_P30_PCS_TX_DEEMPH_3P5DB_MASK, 0x15));
regmap_update_bits(priv->usb_glue_regmap, USB_R2,
USB_R2_P30_PCS_TX_DEEMPH_6DB_MASK,
FIELD_PREP(USB_R2_P30_PCS_TX_DEEMPH_6DB_MASK, 0x20));
udelay(2);
regmap_update_bits(priv->usb_glue_regmap, USB_R1,
USB_R1_U3H_HOST_PORT_POWER_CONTROL_PRESENT,
USB_R1_U3H_HOST_PORT_POWER_CONTROL_PRESENT);
regmap_update_bits(priv->usb_glue_regmap, USB_R1,
USB_R1_P30_PCS_TX_SWING_FULL_MASK,
FIELD_PREP(USB_R1_P30_PCS_TX_SWING_FULL_MASK, 127));
}
static void dwc3_meson_g12a_usb_otg_apply_mode(struct dwc3_meson_g12a *priv,
enum phy_mode mode)
{
if (mode == PHY_MODE_USB_DEVICE) {
if (priv->otg_mode != USB_DR_MODE_OTG &&
priv->drvdata->otg_phy_host_port_disable)
/* Isolate the OTG PHY port from the Host Controller */
regmap_update_bits(priv->usb_glue_regmap, USB_R1,
USB_R1_U3H_HOST_U2_PORT_DISABLE_MASK,
FIELD_PREP(USB_R1_U3H_HOST_U2_PORT_DISABLE_MASK,
BIT(USB2_OTG_PHY)));
regmap_update_bits(priv->usb_glue_regmap, USB_R0,
USB_R0_U2D_ACT, USB_R0_U2D_ACT);
regmap_update_bits(priv->usb_glue_regmap, USB_R0,
USB_R0_U2D_SS_SCALEDOWN_MODE_MASK, 0);
regmap_update_bits(priv->usb_glue_regmap, USB_R4,
USB_R4_P21_SLEEP_M0, USB_R4_P21_SLEEP_M0);
} else {
if (priv->otg_mode != USB_DR_MODE_OTG &&
priv->drvdata->otg_phy_host_port_disable) {
regmap_update_bits(priv->usb_glue_regmap, USB_R1,
USB_R1_U3H_HOST_U2_PORT_DISABLE_MASK, 0);
msleep(500);
}
regmap_update_bits(priv->usb_glue_regmap, USB_R0,
USB_R0_U2D_ACT, 0);
regmap_update_bits(priv->usb_glue_regmap, USB_R4,
USB_R4_P21_SLEEP_M0, 0);
}
}
static int dwc3_meson_g12a_usb_init_glue(struct dwc3_meson_g12a *priv,
enum phy_mode mode)
{
int ret;
ret = dwc3_meson_g12a_usb2_init(priv, mode);
if (ret)
return ret;
regmap_update_bits(priv->usb_glue_regmap, USB_R1,
USB_R1_U3H_FLADJ_30MHZ_REG_MASK,
FIELD_PREP(USB_R1_U3H_FLADJ_30MHZ_REG_MASK, 0x20));
regmap_update_bits(priv->usb_glue_regmap, USB_R5,
USB_R5_ID_DIG_EN_0,
USB_R5_ID_DIG_EN_0);
regmap_update_bits(priv->usb_glue_regmap, USB_R5,
USB_R5_ID_DIG_EN_1,
USB_R5_ID_DIG_EN_1);
regmap_update_bits(priv->usb_glue_regmap, USB_R5,
USB_R5_ID_DIG_TH_MASK,
FIELD_PREP(USB_R5_ID_DIG_TH_MASK, 0xff));
/* If we have an actual SuperSpeed port, initialize it */
if (priv->usb3_ports)
dwc3_meson_g12a_usb3_init(priv);
dwc3_meson_g12a_usb_otg_apply_mode(priv, mode);
return 0;
}
static const struct regmap_config phy_meson_g12a_usb_glue_regmap_conf = {
.name = "usb-glue",
.reg_bits = 8,
.val_bits = 32,
.reg_stride = 4,
.max_register = USB_R5,
};
static int dwc3_meson_g12a_get_phys(struct dwc3_meson_g12a *priv)
{
const char *phy_name;
int i;
for (i = 0 ; i < priv->drvdata->num_phys ; ++i) {
phy_name = priv->drvdata->phy_names[i];
priv->phys[i] = devm_phy_optional_get(priv->dev, phy_name);
if (!priv->phys[i])
continue;
if (IS_ERR(priv->phys[i]))
return PTR_ERR(priv->phys[i]);
if (strstr(phy_name, "usb3"))
priv->usb3_ports++;
else
priv->usb2_ports++;
}
dev_info(priv->dev, "USB2 ports: %d\n", priv->usb2_ports);
dev_info(priv->dev, "USB3 ports: %d\n", priv->usb3_ports);
return 0;
}
static enum phy_mode dwc3_meson_g12a_get_id(struct dwc3_meson_g12a *priv)
{
u32 reg;
regmap_read(priv->usb_glue_regmap, USB_R5, &reg);
if (reg & (USB_R5_ID_DIG_SYNC | USB_R5_ID_DIG_REG))
return PHY_MODE_USB_DEVICE;
return PHY_MODE_USB_HOST;
}
static int dwc3_meson_g12a_otg_mode_set(struct dwc3_meson_g12a *priv,
enum phy_mode mode)
{
int ret;
if (!priv->phys[USB2_OTG_PHY])
return -EINVAL;
if (mode == PHY_MODE_USB_HOST)
dev_info(priv->dev, "switching to Host Mode\n");
else
dev_info(priv->dev, "switching to Device Mode\n");
if (priv->vbus) {
if (mode == PHY_MODE_USB_DEVICE)
ret = regulator_disable(priv->vbus);
else
ret = regulator_enable(priv->vbus);
if (ret)
return ret;
}
priv->otg_phy_mode = mode;
ret = priv->drvdata->set_phy_mode(priv, USB2_OTG_PHY, mode);
if (ret)
return ret;
dwc3_meson_g12a_usb_otg_apply_mode(priv, mode);
return 0;
}
static int dwc3_meson_g12a_role_set(struct usb_role_switch *sw,
enum usb_role role)
{
struct dwc3_meson_g12a *priv = usb_role_switch_get_drvdata(sw);
enum phy_mode mode;
if (role == USB_ROLE_NONE)
return 0;
mode = (role == USB_ROLE_HOST) ? PHY_MODE_USB_HOST
: PHY_MODE_USB_DEVICE;
if (mode == priv->otg_phy_mode)
return 0;
if (priv->drvdata->otg_phy_host_port_disable)
dev_warn_once(priv->dev, "Broken manual OTG switch\n");
return dwc3_meson_g12a_otg_mode_set(priv, mode);
}
static enum usb_role dwc3_meson_g12a_role_get(struct usb_role_switch *sw)
{
struct dwc3_meson_g12a *priv = usb_role_switch_get_drvdata(sw);
return priv->otg_phy_mode == PHY_MODE_USB_HOST ?
USB_ROLE_HOST : USB_ROLE_DEVICE;
}
static irqreturn_t dwc3_meson_g12a_irq_thread(int irq, void *data)
{
struct dwc3_meson_g12a *priv = data;
enum phy_mode otg_id;
otg_id = dwc3_meson_g12a_get_id(priv);
if (otg_id != priv->otg_phy_mode) {
if (dwc3_meson_g12a_otg_mode_set(priv, otg_id))
dev_warn(priv->dev, "Failed to switch OTG mode\n");
}
regmap_update_bits(priv->usb_glue_regmap, USB_R5,
USB_R5_ID_DIG_IRQ, 0);
return IRQ_HANDLED;
}
static struct device *dwc3_meson_g12_find_child(struct device *dev,
const char *compatible)
{
struct platform_device *pdev;
struct device_node *np;
np = of_get_compatible_child(dev->of_node, compatible);
if (!np)
return NULL;
pdev = of_find_device_by_node(np);
of_node_put(np);
if (!pdev)
return NULL;
return &pdev->dev;
}
static int dwc3_meson_g12a_otg_init(struct platform_device *pdev,
struct dwc3_meson_g12a *priv)
{
enum phy_mode otg_id;
int ret, irq;
struct device *dev = &pdev->dev;
if (priv->otg_mode == USB_DR_MODE_OTG) {
/* Ack irq before registering */
regmap_update_bits(priv->usb_glue_regmap, USB_R5,
USB_R5_ID_DIG_IRQ, 0);
irq = platform_get_irq(pdev, 0);
if (irq < 0)
return irq;
ret = devm_request_threaded_irq(&pdev->dev, irq, NULL,
dwc3_meson_g12a_irq_thread,
IRQF_ONESHOT, pdev->name, priv);
if (ret)
return ret;
}
/* Setup OTG mode corresponding to the ID pin */
if (priv->otg_mode == USB_DR_MODE_OTG) {
otg_id = dwc3_meson_g12a_get_id(priv);
if (otg_id != priv->otg_phy_mode) {
if (dwc3_meson_g12a_otg_mode_set(priv, otg_id))
dev_warn(dev, "Failed to switch OTG mode\n");
}
}
/* Setup role switcher */
priv->switch_desc.usb2_port = dwc3_meson_g12_find_child(dev,
"snps,dwc3");
priv->switch_desc.udc = dwc3_meson_g12_find_child(dev, "snps,dwc2");
priv->switch_desc.allow_userspace_control = true;
priv->switch_desc.set = dwc3_meson_g12a_role_set;
priv->switch_desc.get = dwc3_meson_g12a_role_get;
priv->switch_desc.driver_data = priv;
priv->role_switch = usb_role_switch_register(dev, &priv->switch_desc);
if (IS_ERR(priv->role_switch))
dev_warn(dev, "Unable to register Role Switch\n");
return 0;
}
static int dwc3_meson_gxl_setup_regmaps(struct dwc3_meson_g12a *priv,
void __iomem *base)
{
/* GXL controls the PHY mode in the PHY registers unlike G12A */
priv->usb_glue_regmap = devm_regmap_init_mmio(priv->dev, base,
&phy_meson_g12a_usb_glue_regmap_conf);
return PTR_ERR_OR_ZERO(priv->usb_glue_regmap);
}
static int dwc3_meson_g12a_setup_regmaps(struct dwc3_meson_g12a *priv,
void __iomem *base)
{
int i;
priv->usb_glue_regmap = devm_regmap_init_mmio(priv->dev,
base + G12A_GLUE_OFFSET,
&phy_meson_g12a_usb_glue_regmap_conf);
if (IS_ERR(priv->usb_glue_regmap))
return PTR_ERR(priv->usb_glue_regmap);
/* Create a regmap for each USB2 PHY control register set */
for (i = 0; i < priv->drvdata->num_phys; i++) {
struct regmap_config u2p_regmap_config = {
.reg_bits = 8,
.val_bits = 32,
.reg_stride = 4,
.max_register = U2P_R1,
};
if (!strstr(priv->drvdata->phy_names[i], "usb2"))
continue;
u2p_regmap_config.name = devm_kasprintf(priv->dev, GFP_KERNEL,
"u2p-%d", i);
if (!u2p_regmap_config.name)
return -ENOMEM;
priv->u2p_regmap[i] = devm_regmap_init_mmio(priv->dev,
base + (i * U2P_REG_SIZE),
&u2p_regmap_config);
if (IS_ERR(priv->u2p_regmap[i]))
return PTR_ERR(priv->u2p_regmap[i]);
}
return 0;
}
static int dwc3_meson_g12a_usb_init(struct dwc3_meson_g12a *priv)
{
return dwc3_meson_g12a_usb_init_glue(priv, priv->otg_phy_mode);
}
static int dwc3_meson_gxl_usb_init(struct dwc3_meson_g12a *priv)
{
return dwc3_meson_g12a_usb_init_glue(priv, PHY_MODE_USB_DEVICE);
}
static int dwc3_meson_gxl_usb_post_init(struct dwc3_meson_g12a *priv)
{
int ret;
ret = priv->drvdata->set_phy_mode(priv, USB2_OTG_PHY,
priv->otg_phy_mode);
if (ret)
return ret;
dwc3_meson_g12a_usb_otg_apply_mode(priv, priv->otg_phy_mode);
return 0;
}
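/*
 * Probe: map the glue registers, enable the clocks, take the block out of
 * reset, set up the PHYs and the optional VBUS regulator, populate the
 * dwc3/dwc2 core child nodes and finally register the OTG machinery.
 */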
static int dwc3_meson_g12a_probe(struct platform_device *pdev)
{
struct dwc3_meson_g12a *priv;
struct device *dev = &pdev->dev;
struct device_node *np = dev->of_node;
void __iomem *base;
int ret, i;
priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(base))
return PTR_ERR(base);
priv->drvdata = of_device_get_match_data(&pdev->dev);
priv->dev = dev;
priv->vbus = devm_regulator_get_optional(dev, "vbus");
if (IS_ERR(priv->vbus)) {
if (PTR_ERR(priv->vbus) == -EPROBE_DEFER)
return PTR_ERR(priv->vbus);
priv->vbus = NULL;
}
ret = devm_clk_bulk_get(dev,
priv->drvdata->num_clks,
priv->drvdata->clks);
if (ret)
return ret;
ret = clk_bulk_prepare_enable(priv->drvdata->num_clks,
priv->drvdata->clks);
if (ret)
return ret;
platform_set_drvdata(pdev, priv);
priv->reset = devm_reset_control_get_shared(dev, NULL);
if (IS_ERR(priv->reset)) {
ret = PTR_ERR(priv->reset);
dev_err(dev, "failed to get device reset, err=%d\n", ret);
goto err_disable_clks;
}
ret = reset_control_reset(priv->reset);
if (ret)
goto err_disable_clks;
ret = dwc3_meson_g12a_get_phys(priv);
if (ret)
goto err_rearm;
ret = priv->drvdata->setup_regmaps(priv, base);
if (ret)
goto err_rearm;
if (priv->vbus) {
ret = regulator_enable(priv->vbus);
if (ret)
goto err_rearm;
}
/* Get dr_mode */
priv->otg_mode = usb_get_dr_mode(dev);
if (priv->otg_mode == USB_DR_MODE_PERIPHERAL)
priv->otg_phy_mode = PHY_MODE_USB_DEVICE;
else
priv->otg_phy_mode = PHY_MODE_USB_HOST;
ret = priv->drvdata->usb_init(priv);
if (ret)
goto err_disable_regulator;
/* Init PHYs */
for (i = 0 ; i < PHY_COUNT ; ++i) {
ret = phy_init(priv->phys[i]);
if (ret)
goto err_disable_regulator;
}
/* Set PHY Power */
for (i = 0 ; i < PHY_COUNT ; ++i) {
ret = phy_power_on(priv->phys[i]);
if (ret)
goto err_phys_exit;
}
if (priv->drvdata->usb_post_init) {
ret = priv->drvdata->usb_post_init(priv);
if (ret)
goto err_phys_power;
}
ret = of_platform_populate(np, NULL, NULL, dev);
if (ret)
goto err_phys_power;
ret = dwc3_meson_g12a_otg_init(pdev, priv);
if (ret)
goto err_plat_depopulate;
pm_runtime_set_active(dev);
pm_runtime_enable(dev);
pm_runtime_get_sync(dev);
return 0;
err_plat_depopulate:
of_platform_depopulate(dev);
err_phys_power:
for (i = 0 ; i < PHY_COUNT ; ++i)
phy_power_off(priv->phys[i]);
err_phys_exit:
for (i = 0 ; i < PHY_COUNT ; ++i)
phy_exit(priv->phys[i]);
err_disable_regulator:
if (priv->vbus)
regulator_disable(priv->vbus);
err_rearm:
reset_control_rearm(priv->reset);
err_disable_clks:
clk_bulk_disable_unprepare(priv->drvdata->num_clks,
priv->drvdata->clks);
return ret;
}
static void dwc3_meson_g12a_remove(struct platform_device *pdev)
{
struct dwc3_meson_g12a *priv = platform_get_drvdata(pdev);
struct device *dev = &pdev->dev;
int i;
usb_role_switch_unregister(priv->role_switch);
of_platform_depopulate(dev);
for (i = 0 ; i < PHY_COUNT ; ++i) {
phy_power_off(priv->phys[i]);
phy_exit(priv->phys[i]);
}
pm_runtime_disable(dev);
pm_runtime_put_noidle(dev);
pm_runtime_set_suspended(dev);
reset_control_rearm(priv->reset);
clk_bulk_disable_unprepare(priv->drvdata->num_clks,
priv->drvdata->clks);
}
static int __maybe_unused dwc3_meson_g12a_runtime_suspend(struct device *dev)
{
struct dwc3_meson_g12a *priv = dev_get_drvdata(dev);
clk_bulk_disable_unprepare(priv->drvdata->num_clks,
priv->drvdata->clks);
return 0;
}
static int __maybe_unused dwc3_meson_g12a_runtime_resume(struct device *dev)
{
struct dwc3_meson_g12a *priv = dev_get_drvdata(dev);
return clk_bulk_prepare_enable(priv->drvdata->num_clks,
priv->drvdata->clks);
}
static int __maybe_unused dwc3_meson_g12a_suspend(struct device *dev)
{
struct dwc3_meson_g12a *priv = dev_get_drvdata(dev);
int i, ret;
if (priv->vbus && priv->otg_phy_mode == PHY_MODE_USB_HOST) {
ret = regulator_disable(priv->vbus);
if (ret)
return ret;
}
for (i = 0 ; i < PHY_COUNT ; ++i) {
phy_power_off(priv->phys[i]);
phy_exit(priv->phys[i]);
}
reset_control_rearm(priv->reset);
return 0;
}
static int __maybe_unused dwc3_meson_g12a_resume(struct device *dev)
{
struct dwc3_meson_g12a *priv = dev_get_drvdata(dev);
int i, ret;
ret = reset_control_reset(priv->reset);
if (ret)
return ret;
ret = priv->drvdata->usb_init(priv);
if (ret)
return ret;
/* Init PHYs */
for (i = 0 ; i < PHY_COUNT ; ++i) {
ret = phy_init(priv->phys[i]);
if (ret)
return ret;
}
/* Set PHY Power */
for (i = 0 ; i < PHY_COUNT ; ++i) {
ret = phy_power_on(priv->phys[i]);
if (ret)
return ret;
}
if (priv->vbus && priv->otg_phy_mode == PHY_MODE_USB_HOST) {
ret = regulator_enable(priv->vbus);
if (ret)
return ret;
}
if (priv->drvdata->usb_post_init) {
ret = priv->drvdata->usb_post_init(priv);
if (ret)
return ret;
}
return 0;
}
static const struct dev_pm_ops dwc3_meson_g12a_dev_pm_ops = {
SET_SYSTEM_SLEEP_PM_OPS(dwc3_meson_g12a_suspend, dwc3_meson_g12a_resume)
SET_RUNTIME_PM_OPS(dwc3_meson_g12a_runtime_suspend,
dwc3_meson_g12a_runtime_resume, NULL)
};
static const struct of_device_id dwc3_meson_g12a_match[] = {
{
.compatible = "amlogic,meson-gxl-usb-ctrl",
.data = &gxl_drvdata,
},
{
.compatible = "amlogic,meson-gxm-usb-ctrl",
.data = &gxm_drvdata,
},
{
.compatible = "amlogic,meson-axg-usb-ctrl",
.data = &axg_drvdata,
},
{
.compatible = "amlogic,meson-g12a-usb-ctrl",
.data = &g12a_drvdata,
},
{
.compatible = "amlogic,meson-a1-usb-ctrl",
.data = &a1_drvdata,
},
{ /* Sentinel */ }
};
MODULE_DEVICE_TABLE(of, dwc3_meson_g12a_match);
static struct platform_driver dwc3_meson_g12a_driver = {
.probe = dwc3_meson_g12a_probe,
.remove_new = dwc3_meson_g12a_remove,
.driver = {
.name = "dwc3-meson-g12a",
.of_match_table = dwc3_meson_g12a_match,
.pm = &dwc3_meson_g12a_dev_pm_ops,
},
};
module_platform_driver(dwc3_meson_g12a_driver);
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Amlogic Meson G12A USB Glue Layer");
MODULE_AUTHOR("Neil Armstrong <[email protected]>");
| linux-master | drivers/usb/dwc3/dwc3-meson-g12a.c |
// SPDX-License-Identifier: GPL-2.0
/*
* dwc3-xilinx.c - Xilinx DWC3 controller specific glue driver
*
* Authors: Manish Narani <[email protected]>
* Anurag Kumar Vulisha <[email protected]>
*/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/clk.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
#include <linux/gpio/consumer.h>
#include <linux/of_platform.h>
#include <linux/pm_runtime.h>
#include <linux/reset.h>
#include <linux/of_address.h>
#include <linux/delay.h>
#include <linux/firmware/xlnx-zynqmp.h>
#include <linux/io.h>
#include <linux/phy/phy.h>
/* USB phy reset mask register */
#define XLNX_USB_PHY_RST_EN 0x001C
#define XLNX_PHY_RST_MASK 0x1
/* Xilinx USB 3.0 IP Register */
#define XLNX_USB_TRAFFIC_ROUTE_CONFIG 0x005C
#define XLNX_USB_TRAFFIC_ROUTE_FPD 0x1
/* Versal USB Reset ID */
#define VERSAL_USB_RESET_ID 0xC104036
#define XLNX_USB_FPD_PIPE_CLK 0x7c
#define PIPE_CLK_DESELECT 1
#define PIPE_CLK_SELECT 0
#define XLNX_USB_FPD_POWER_PRSNT 0x80
#define FPD_POWER_PRSNT_OPTION BIT(0)
struct dwc3_xlnx {
int num_clocks;
struct clk_bulk_data *clks;
struct device *dev;
void __iomem *regs;
int (*pltfm_init)(struct dwc3_xlnx *data);
struct phy *usb3_phy;
};
static void dwc3_xlnx_mask_phy_rst(struct dwc3_xlnx *priv_data, bool mask)
{
u32 reg;
/*
* Enable or disable ULPI PHY reset from USB Controller.
 * This does not actually reset the PHY, but only controls
 * whether the USB controller is allowed to reset the ULPI PHY.
*/
reg = readl(priv_data->regs + XLNX_USB_PHY_RST_EN);
if (mask)
reg &= ~XLNX_PHY_RST_MASK;
else
reg |= XLNX_PHY_RST_MASK;
writel(reg, priv_data->regs + XLNX_USB_PHY_RST_EN);
}
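/*
 * Versal: allow the controller to drive the ULPI PHY reset, pulse the USB
 * reset line through the platform firmware, then mask the PHY reset again.
 */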
static int dwc3_xlnx_init_versal(struct dwc3_xlnx *priv_data)
{
struct device *dev = priv_data->dev;
int ret;
dwc3_xlnx_mask_phy_rst(priv_data, false);
/* Assert and De-assert reset */
ret = zynqmp_pm_reset_assert(VERSAL_USB_RESET_ID,
PM_RESET_ACTION_ASSERT);
if (ret < 0) {
dev_err_probe(dev, ret, "failed to assert Reset\n");
return ret;
}
ret = zynqmp_pm_reset_assert(VERSAL_USB_RESET_ID,
PM_RESET_ACTION_RELEASE);
if (ret < 0) {
dev_err_probe(dev, ret, "failed to De-assert Reset\n");
return ret;
}
dwc3_xlnx_mask_phy_rst(priv_data, true);
return 0;
}
static int dwc3_xlnx_init_zynqmp(struct dwc3_xlnx *priv_data)
{
struct device *dev = priv_data->dev;
struct reset_control *crst, *hibrst, *apbrst;
struct gpio_desc *reset_gpio;
int ret = 0;
u32 reg;
priv_data->usb3_phy = devm_phy_optional_get(dev, "usb3-phy");
if (IS_ERR(priv_data->usb3_phy)) {
ret = PTR_ERR(priv_data->usb3_phy);
dev_err_probe(dev, ret,
"failed to get USB3 PHY\n");
goto err;
}
/*
* The following core resets are not required unless a USB3 PHY
* is used, and the subsequent register settings are not required
* unless a core reset is performed (they should be set properly
* by the first-stage boot loader, but may be reverted by a core
* reset). They may also break the configuration if USB3 is actually
* in use but the usb3-phy entry is missing from the device tree.
* Therefore, skip these operations in this case.
*/
if (!priv_data->usb3_phy)
goto skip_usb3_phy;
crst = devm_reset_control_get_exclusive(dev, "usb_crst");
if (IS_ERR(crst)) {
ret = PTR_ERR(crst);
dev_err_probe(dev, ret,
"failed to get core reset signal\n");
goto err;
}
hibrst = devm_reset_control_get_exclusive(dev, "usb_hibrst");
if (IS_ERR(hibrst)) {
ret = PTR_ERR(hibrst);
dev_err_probe(dev, ret,
"failed to get hibernation reset signal\n");
goto err;
}
apbrst = devm_reset_control_get_exclusive(dev, "usb_apbrst");
if (IS_ERR(apbrst)) {
ret = PTR_ERR(apbrst);
dev_err_probe(dev, ret,
"failed to get APB reset signal\n");
goto err;
}
ret = reset_control_assert(crst);
if (ret < 0) {
dev_err(dev, "Failed to assert core reset\n");
goto err;
}
ret = reset_control_assert(hibrst);
if (ret < 0) {
dev_err(dev, "Failed to assert hibernation reset\n");
goto err;
}
ret = reset_control_assert(apbrst);
if (ret < 0) {
dev_err(dev, "Failed to assert APB reset\n");
goto err;
}
ret = phy_init(priv_data->usb3_phy);
if (ret < 0) {
phy_exit(priv_data->usb3_phy);
goto err;
}
ret = reset_control_deassert(apbrst);
if (ret < 0) {
dev_err(dev, "Failed to release APB reset\n");
goto err;
}
/* Set PIPE Power Present signal in FPD Power Present Register */
writel(FPD_POWER_PRSNT_OPTION, priv_data->regs + XLNX_USB_FPD_POWER_PRSNT);
/* Set the PIPE Clock Select bit in FPD PIPE Clock register */
writel(PIPE_CLK_SELECT, priv_data->regs + XLNX_USB_FPD_PIPE_CLK);
ret = reset_control_deassert(crst);
if (ret < 0) {
dev_err(dev, "Failed to release core reset\n");
goto err;
}
ret = reset_control_deassert(hibrst);
if (ret < 0) {
dev_err(dev, "Failed to release hibernation reset\n");
goto err;
}
ret = phy_power_on(priv_data->usb3_phy);
if (ret < 0) {
phy_exit(priv_data->usb3_phy);
goto err;
}
skip_usb3_phy:
/* ulpi reset via gpio-modepin or gpio-framework driver */
reset_gpio = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_LOW);
if (IS_ERR(reset_gpio)) {
return dev_err_probe(dev, PTR_ERR(reset_gpio),
"Failed to request reset GPIO\n");
}
if (reset_gpio) {
/* Toggle the ULPI reset GPIO to reset the PHY. */
gpiod_set_value_cansleep(reset_gpio, 1);
usleep_range(5000, 10000);
gpiod_set_value_cansleep(reset_gpio, 0);
usleep_range(5000, 10000);
}
/*
* This routes the USB DMA traffic to go through FPD path instead
* of reaching DDR directly. This traffic routing is needed to
* make SMMU and CCI work with USB DMA.
*/
if (of_dma_is_coherent(dev->of_node) || device_iommu_mapped(dev)) {
reg = readl(priv_data->regs + XLNX_USB_TRAFFIC_ROUTE_CONFIG);
reg |= XLNX_USB_TRAFFIC_ROUTE_FPD;
writel(reg, priv_data->regs + XLNX_USB_TRAFFIC_ROUTE_CONFIG);
}
err:
return ret;
}
static const struct of_device_id dwc3_xlnx_of_match[] = {
{
.compatible = "xlnx,zynqmp-dwc3",
.data = &dwc3_xlnx_init_zynqmp,
},
{
.compatible = "xlnx,versal-dwc3",
.data = &dwc3_xlnx_init_versal,
},
{ /* Sentinel */ }
};
MODULE_DEVICE_TABLE(of, dwc3_xlnx_of_match);
static int dwc3_xlnx_probe(struct platform_device *pdev)
{
struct dwc3_xlnx *priv_data;
struct device *dev = &pdev->dev;
struct device_node *np = dev->of_node;
const struct of_device_id *match;
void __iomem *regs;
int ret;
priv_data = devm_kzalloc(dev, sizeof(*priv_data), GFP_KERNEL);
if (!priv_data)
return -ENOMEM;
regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(regs)) {
ret = PTR_ERR(regs);
dev_err_probe(dev, ret, "failed to map registers\n");
return ret;
}
match = of_match_node(dwc3_xlnx_of_match, pdev->dev.of_node);
priv_data->pltfm_init = match->data;
priv_data->regs = regs;
priv_data->dev = dev;
platform_set_drvdata(pdev, priv_data);
ret = devm_clk_bulk_get_all(priv_data->dev, &priv_data->clks);
if (ret < 0)
return ret;
priv_data->num_clocks = ret;
ret = clk_bulk_prepare_enable(priv_data->num_clocks, priv_data->clks);
if (ret)
return ret;
ret = priv_data->pltfm_init(priv_data);
if (ret)
goto err_clk_put;
ret = of_platform_populate(np, NULL, NULL, dev);
if (ret)
goto err_clk_put;
pm_runtime_set_active(dev);
pm_runtime_enable(dev);
pm_suspend_ignore_children(dev, false);
pm_runtime_get_sync(dev);
return 0;
err_clk_put:
clk_bulk_disable_unprepare(priv_data->num_clocks, priv_data->clks);
return ret;
}
static void dwc3_xlnx_remove(struct platform_device *pdev)
{
struct dwc3_xlnx *priv_data = platform_get_drvdata(pdev);
struct device *dev = &pdev->dev;
of_platform_depopulate(dev);
clk_bulk_disable_unprepare(priv_data->num_clocks, priv_data->clks);
priv_data->num_clocks = 0;
pm_runtime_disable(dev);
pm_runtime_put_noidle(dev);
pm_runtime_set_suspended(dev);
}
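/* Runtime PM: gate the controller clocks while the device is runtime suspended */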
static int __maybe_unused dwc3_xlnx_runtime_suspend(struct device *dev)
{
struct dwc3_xlnx *priv_data = dev_get_drvdata(dev);
clk_bulk_disable(priv_data->num_clocks, priv_data->clks);
return 0;
}
static int __maybe_unused dwc3_xlnx_runtime_resume(struct device *dev)
{
struct dwc3_xlnx *priv_data = dev_get_drvdata(dev);
return clk_bulk_enable(priv_data->num_clocks, priv_data->clks);
}
static int __maybe_unused dwc3_xlnx_runtime_idle(struct device *dev)
{
pm_runtime_mark_last_busy(dev);
pm_runtime_autosuspend(dev);
return 0;
}
static int __maybe_unused dwc3_xlnx_suspend(struct device *dev)
{
struct dwc3_xlnx *priv_data = dev_get_drvdata(dev);
phy_exit(priv_data->usb3_phy);
/* Disable the clocks */
clk_bulk_disable(priv_data->num_clocks, priv_data->clks);
return 0;
}
static int __maybe_unused dwc3_xlnx_resume(struct device *dev)
{
struct dwc3_xlnx *priv_data = dev_get_drvdata(dev);
int ret;
ret = clk_bulk_enable(priv_data->num_clocks, priv_data->clks);
if (ret)
return ret;
ret = phy_init(priv_data->usb3_phy);
if (ret < 0)
return ret;
ret = phy_power_on(priv_data->usb3_phy);
if (ret < 0) {
phy_exit(priv_data->usb3_phy);
return ret;
}
return 0;
}
static const struct dev_pm_ops dwc3_xlnx_dev_pm_ops = {
SET_SYSTEM_SLEEP_PM_OPS(dwc3_xlnx_suspend, dwc3_xlnx_resume)
SET_RUNTIME_PM_OPS(dwc3_xlnx_runtime_suspend,
dwc3_xlnx_runtime_resume, dwc3_xlnx_runtime_idle)
};
static struct platform_driver dwc3_xlnx_driver = {
.probe = dwc3_xlnx_probe,
.remove_new = dwc3_xlnx_remove,
.driver = {
.name = "dwc3-xilinx",
.of_match_table = dwc3_xlnx_of_match,
.pm = &dwc3_xlnx_dev_pm_ops,
},
};
module_platform_driver(dwc3_xlnx_driver);
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Xilinx DWC3 controller specific glue driver");
MODULE_AUTHOR("Manish Narani <[email protected]>");
MODULE_AUTHOR("Anurag Kumar Vulisha <[email protected]>");
| linux-master | drivers/usb/dwc3/dwc3-xilinx.c |
// SPDX-License-Identifier: GPL-2.0
/*
* dwc3-haps.c - Synopsys HAPS PCI Specific glue layer
*
* Copyright (C) 2018 Synopsys, Inc.
*
* Authors: Thinh Nguyen <[email protected]>,
* John Youn <[email protected]>
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
#include <linux/property.h>
/**
* struct dwc3_haps - Driver private structure
* @dwc3: child dwc3 platform_device
* @pci: our link to PCI bus
*/
struct dwc3_haps {
struct platform_device *dwc3;
struct pci_dev *pci;
};
static const struct property_entry initial_properties[] = {
PROPERTY_ENTRY_BOOL("snps,usb3_lpm_capable"),
PROPERTY_ENTRY_BOOL("snps,has-lpm-erratum"),
PROPERTY_ENTRY_BOOL("snps,dis_enblslpm_quirk"),
PROPERTY_ENTRY_BOOL("linux,sysdev_is_parent"),
{ },
};
static const struct software_node dwc3_haps_swnode = {
.properties = initial_properties,
};
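/*
 * Probe: enable the PCI device and hand its first BAR and legacy interrupt
 * to a child "dwc3" platform device, with the HAPS-specific quirks attached
 * as a software node.
 */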
static int dwc3_haps_probe(struct pci_dev *pci,
const struct pci_device_id *id)
{
struct dwc3_haps *dwc;
struct device *dev = &pci->dev;
struct resource res[2];
int ret;
ret = pcim_enable_device(pci);
if (ret) {
dev_err(dev, "failed to enable pci device\n");
return -ENODEV;
}
pci_set_master(pci);
dwc = devm_kzalloc(dev, sizeof(*dwc), GFP_KERNEL);
if (!dwc)
return -ENOMEM;
dwc->dwc3 = platform_device_alloc("dwc3", PLATFORM_DEVID_AUTO);
if (!dwc->dwc3)
return -ENOMEM;
memset(res, 0x00, sizeof(struct resource) * ARRAY_SIZE(res));
res[0].start = pci_resource_start(pci, 0);
res[0].end = pci_resource_end(pci, 0);
res[0].name = "dwc_usb3";
res[0].flags = IORESOURCE_MEM;
res[1].start = pci->irq;
res[1].name = "dwc_usb3";
res[1].flags = IORESOURCE_IRQ;
ret = platform_device_add_resources(dwc->dwc3, res, ARRAY_SIZE(res));
if (ret) {
dev_err(dev, "couldn't add resources to dwc3 device\n");
goto err;
}
dwc->pci = pci;
dwc->dwc3->dev.parent = dev;
ret = device_add_software_node(&dwc->dwc3->dev, &dwc3_haps_swnode);
if (ret)
goto err;
ret = platform_device_add(dwc->dwc3);
if (ret) {
dev_err(dev, "failed to register dwc3 device\n");
goto err;
}
pci_set_drvdata(pci, dwc);
return 0;
err:
device_remove_software_node(&dwc->dwc3->dev);
platform_device_put(dwc->dwc3);
return ret;
}
static void dwc3_haps_remove(struct pci_dev *pci)
{
struct dwc3_haps *dwc = pci_get_drvdata(pci);
device_remove_software_node(&dwc->dwc3->dev);
platform_device_unregister(dwc->dwc3);
}
static const struct pci_device_id dwc3_haps_id_table[] = {
{
PCI_DEVICE(PCI_VENDOR_ID_SYNOPSYS,
PCI_DEVICE_ID_SYNOPSYS_HAPSUSB3),
/*
 * i.MX6QP and i.MX7D platforms use a PCIe controller with the
* same VID and PID as this USB controller. The system may
* incorrectly match this driver to that PCIe controller. To
* workaround this, specifically use class type USB to prevent
* incorrect driver matching.
*/
.class = (PCI_CLASS_SERIAL_USB << 8),
.class_mask = 0xffff00,
},
{
PCI_DEVICE(PCI_VENDOR_ID_SYNOPSYS,
PCI_DEVICE_ID_SYNOPSYS_HAPSUSB3_AXI),
},
{
PCI_DEVICE(PCI_VENDOR_ID_SYNOPSYS,
PCI_DEVICE_ID_SYNOPSYS_HAPSUSB31),
},
{ } /* Terminating Entry */
};
MODULE_DEVICE_TABLE(pci, dwc3_haps_id_table);
static struct pci_driver dwc3_haps_driver = {
.name = "dwc3-haps",
.id_table = dwc3_haps_id_table,
.probe = dwc3_haps_probe,
.remove = dwc3_haps_remove,
};
MODULE_AUTHOR("Thinh Nguyen <[email protected]>");
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Synopsys HAPS PCI Glue Layer");
module_pci_driver(dwc3_haps_driver);
| linux-master | drivers/usb/dwc3/dwc3-haps.c |
// SPDX-License-Identifier: GPL-2.0+
/*
 * dwc3-st.c - Support for dwc3 platform devices on STMicroelectronics platforms
*
* This is a small driver for the dwc3 to provide the glue logic
* to configure the controller. Tested on STi platforms.
*
 * Copyright (C) 2014 STMicroelectronics
*
* Author: Giuseppe Cavallaro <[email protected]>
* Contributors: Aymen Bouattay <[email protected]>
* Peter Griffin <[email protected]>
*
* Inspired by dwc3-omap.c and dwc3-exynos.c.
*/
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/ioport.h>
#include <linux/kernel.h>
#include <linux/mfd/syscon.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/regmap.h>
#include <linux/reset.h>
#include <linux/pinctrl/consumer.h>
#include <linux/usb/of.h>
#include "core.h"
#include "io.h"
/* glue registers */
#define CLKRST_CTRL 0x00
#define AUX_CLK_EN BIT(0)
#define SW_PIPEW_RESET_N BIT(4)
#define EXT_CFG_RESET_N BIT(8)
/*
* 1'b0 : The host controller complies with the xHCI revision 0.96
* 1'b1 : The host controller complies with the xHCI revision 1.0
*/
#define XHCI_REVISION BIT(12)
#define USB2_VBUS_MNGMNT_SEL1 0x2C
/*
* For all fields in USB2_VBUS_MNGMNT_SEL1
* 2’b00 : Override value from Reg 0x30 is selected
* 2’b01 : utmiotg_<signal_name> from usb3_top is selected
* 2’b10 : pipew_<signal_name> from PIPEW instance is selected
* 2’b11 : value is 1'b0
*/
#define USB2_VBUS_REG30 0x0
#define USB2_VBUS_UTMIOTG 0x1
#define USB2_VBUS_PIPEW 0x2
#define USB2_VBUS_ZERO 0x3
#define SEL_OVERRIDE_VBUSVALID(n) (n << 0)
#define SEL_OVERRIDE_POWERPRESENT(n) (n << 4)
#define SEL_OVERRIDE_BVALID(n) (n << 8)
/* Static DRD configuration */
#define USB3_CONTROL_MASK 0xf77
#define USB3_DEVICE_NOT_HOST BIT(0)
#define USB3_FORCE_VBUSVALID BIT(1)
#define USB3_DELAY_VBUSVALID BIT(2)
#define USB3_SEL_FORCE_OPMODE BIT(4)
#define USB3_FORCE_OPMODE(n) (n << 5)
#define USB3_SEL_FORCE_DPPULLDOWN2 BIT(8)
#define USB3_FORCE_DPPULLDOWN2 BIT(9)
#define USB3_SEL_FORCE_DMPULLDOWN2 BIT(10)
#define USB3_FORCE_DMPULLDOWN2 BIT(11)
/**
* struct st_dwc3 - dwc3-st driver private structure
* @dev: device pointer
* @glue_base: ioaddr for the glue registers
* @regmap: regmap pointer for getting syscfg
* @syscfg_reg_off: usb syscfg control offset
* @dr_mode: drd static host/device config
 * @rstc_pwrdn: reset controller for powerdown signal
* @rstc_rst: reset controller for softreset signal
*/
struct st_dwc3 {
struct device *dev;
void __iomem *glue_base;
struct regmap *regmap;
int syscfg_reg_off;
enum usb_dr_mode dr_mode;
struct reset_control *rstc_pwrdn;
struct reset_control *rstc_rst;
};
static inline u32 st_dwc3_readl(void __iomem *base, u32 offset)
{
return readl_relaxed(base + offset);
}
static inline void st_dwc3_writel(void __iomem *base, u32 offset, u32 value)
{
writel_relaxed(value, base + offset);
}
/**
* st_dwc3_drd_init: program the port
* @dwc3_data: driver private structure
 * Description: this function programs the port as either host or device
 * according to the static configuration passed from the devicetree.
* OTG and dual role are not yet supported!
*/
static int st_dwc3_drd_init(struct st_dwc3 *dwc3_data)
{
u32 val;
int err;
err = regmap_read(dwc3_data->regmap, dwc3_data->syscfg_reg_off, &val);
if (err)
return err;
val &= USB3_CONTROL_MASK;
switch (dwc3_data->dr_mode) {
case USB_DR_MODE_PERIPHERAL:
val &= ~(USB3_DELAY_VBUSVALID
| USB3_SEL_FORCE_OPMODE | USB3_FORCE_OPMODE(0x3)
| USB3_SEL_FORCE_DPPULLDOWN2 | USB3_FORCE_DPPULLDOWN2
| USB3_SEL_FORCE_DMPULLDOWN2 | USB3_FORCE_DMPULLDOWN2);
/*
* USB3_PORT2_FORCE_VBUSVALID When '1' and when
* USB3_PORT2_DEVICE_NOT_HOST = 1, forces VBUSVLDEXT2 input
* of the pico PHY to 1.
*/
val |= USB3_DEVICE_NOT_HOST | USB3_FORCE_VBUSVALID;
break;
case USB_DR_MODE_HOST:
val &= ~(USB3_DEVICE_NOT_HOST | USB3_FORCE_VBUSVALID
| USB3_SEL_FORCE_OPMODE | USB3_FORCE_OPMODE(0x3)
| USB3_SEL_FORCE_DPPULLDOWN2 | USB3_FORCE_DPPULLDOWN2
| USB3_SEL_FORCE_DMPULLDOWN2 | USB3_FORCE_DMPULLDOWN2);
/*
* USB3_DELAY_VBUSVALID is ANDed with USB_C_VBUSVALID. Thus,
 * when set to '0', it can delay the arrival of VBUSVALID
* information to VBUSVLDEXT2 input of the pico PHY.
* We don't want to do that so we set the bit to '1'.
*/
val |= USB3_DELAY_VBUSVALID;
break;
default:
dev_err(dwc3_data->dev, "Unsupported mode of operation %d\n",
dwc3_data->dr_mode);
return -EINVAL;
}
return regmap_write(dwc3_data->regmap, dwc3_data->syscfg_reg_off, val);
}
/**
* st_dwc3_init: init the controller via glue logic
* @dwc3_data: driver private structure
*/
static void st_dwc3_init(struct st_dwc3 *dwc3_data)
{
u32 reg = st_dwc3_readl(dwc3_data->glue_base, CLKRST_CTRL);
reg |= AUX_CLK_EN | EXT_CFG_RESET_N | XHCI_REVISION;
reg &= ~SW_PIPEW_RESET_N;
st_dwc3_writel(dwc3_data->glue_base, CLKRST_CTRL, reg);
/* configure mux for vbus, powerpresent and bvalid signals */
reg = st_dwc3_readl(dwc3_data->glue_base, USB2_VBUS_MNGMNT_SEL1);
reg |= SEL_OVERRIDE_VBUSVALID(USB2_VBUS_UTMIOTG) |
SEL_OVERRIDE_POWERPRESENT(USB2_VBUS_UTMIOTG) |
SEL_OVERRIDE_BVALID(USB2_VBUS_UTMIOTG);
st_dwc3_writel(dwc3_data->glue_base, USB2_VBUS_MNGMNT_SEL1, reg);
reg = st_dwc3_readl(dwc3_data->glue_base, CLKRST_CTRL);
reg |= SW_PIPEW_RESET_N;
st_dwc3_writel(dwc3_data->glue_base, CLKRST_CTRL, reg);
}
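/*
 * Probe: map the glue registers and the syscfg control word, release the
 * powerdown and softreset lines, populate the dwc3 core from DT and program
 * the port according to the dr_mode found in the core node.
 */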
static int st_dwc3_probe(struct platform_device *pdev)
{
struct st_dwc3 *dwc3_data;
struct resource *res;
struct device *dev = &pdev->dev;
struct device_node *node = dev->of_node, *child;
struct platform_device *child_pdev;
struct regmap *regmap;
int ret;
dwc3_data = devm_kzalloc(dev, sizeof(*dwc3_data), GFP_KERNEL);
if (!dwc3_data)
return -ENOMEM;
dwc3_data->glue_base =
devm_platform_ioremap_resource_byname(pdev, "reg-glue");
if (IS_ERR(dwc3_data->glue_base))
return PTR_ERR(dwc3_data->glue_base);
regmap = syscon_regmap_lookup_by_phandle(node, "st,syscfg");
if (IS_ERR(regmap))
return PTR_ERR(regmap);
dwc3_data->dev = dev;
dwc3_data->regmap = regmap;
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "syscfg-reg");
if (!res) {
ret = -ENXIO;
goto undo_platform_dev_alloc;
}
dwc3_data->syscfg_reg_off = res->start;
dev_vdbg(&pdev->dev, "glue-logic addr 0x%pK, syscfg-reg offset 0x%x\n",
dwc3_data->glue_base, dwc3_data->syscfg_reg_off);
dwc3_data->rstc_pwrdn =
devm_reset_control_get_exclusive(dev, "powerdown");
if (IS_ERR(dwc3_data->rstc_pwrdn)) {
dev_err(&pdev->dev, "could not get power controller\n");
ret = PTR_ERR(dwc3_data->rstc_pwrdn);
goto undo_platform_dev_alloc;
}
/* Manage PowerDown */
reset_control_deassert(dwc3_data->rstc_pwrdn);
dwc3_data->rstc_rst =
devm_reset_control_get_shared(dev, "softreset");
if (IS_ERR(dwc3_data->rstc_rst)) {
dev_err(&pdev->dev, "could not get reset controller\n");
ret = PTR_ERR(dwc3_data->rstc_rst);
goto undo_powerdown;
}
/* Manage SoftReset */
reset_control_deassert(dwc3_data->rstc_rst);
child = of_get_compatible_child(node, "snps,dwc3");
if (!child) {
dev_err(&pdev->dev, "failed to find dwc3 core node\n");
ret = -ENODEV;
goto err_node_put;
}
/* Allocate and initialize the core */
ret = of_platform_populate(node, NULL, NULL, dev);
if (ret) {
dev_err(dev, "failed to add dwc3 core\n");
goto err_node_put;
}
child_pdev = of_find_device_by_node(child);
if (!child_pdev) {
dev_err(dev, "failed to find dwc3 core device\n");
ret = -ENODEV;
goto err_node_put;
}
dwc3_data->dr_mode = usb_get_dr_mode(&child_pdev->dev);
of_node_put(child);
platform_device_put(child_pdev);
/*
* Configure the USB port as device or host according to the static
* configuration passed from DT.
* DRD is the only mode currently supported so this will be enhanced
* as soon as OTG is available.
*/
ret = st_dwc3_drd_init(dwc3_data);
if (ret) {
dev_err(dev, "drd initialisation failed\n");
goto undo_softreset;
}
/* ST glue logic init */
st_dwc3_init(dwc3_data);
platform_set_drvdata(pdev, dwc3_data);
return 0;
err_node_put:
of_node_put(child);
undo_softreset:
reset_control_assert(dwc3_data->rstc_rst);
undo_powerdown:
reset_control_assert(dwc3_data->rstc_pwrdn);
undo_platform_dev_alloc:
platform_device_put(pdev);
return ret;
}
static void st_dwc3_remove(struct platform_device *pdev)
{
struct st_dwc3 *dwc3_data = platform_get_drvdata(pdev);
of_platform_depopulate(&pdev->dev);
reset_control_assert(dwc3_data->rstc_pwrdn);
reset_control_assert(dwc3_data->rstc_rst);
}
#ifdef CONFIG_PM_SLEEP
static int st_dwc3_suspend(struct device *dev)
{
struct st_dwc3 *dwc3_data = dev_get_drvdata(dev);
reset_control_assert(dwc3_data->rstc_pwrdn);
reset_control_assert(dwc3_data->rstc_rst);
pinctrl_pm_select_sleep_state(dev);
return 0;
}
static int st_dwc3_resume(struct device *dev)
{
struct st_dwc3 *dwc3_data = dev_get_drvdata(dev);
int ret;
pinctrl_pm_select_default_state(dev);
reset_control_deassert(dwc3_data->rstc_pwrdn);
reset_control_deassert(dwc3_data->rstc_rst);
ret = st_dwc3_drd_init(dwc3_data);
if (ret) {
dev_err(dev, "drd initialisation failed\n");
return ret;
}
/* ST glue logic init */
st_dwc3_init(dwc3_data);
return 0;
}
#endif /* CONFIG_PM_SLEEP */
static SIMPLE_DEV_PM_OPS(st_dwc3_dev_pm_ops, st_dwc3_suspend, st_dwc3_resume);
static const struct of_device_id st_dwc3_match[] = {
{ .compatible = "st,stih407-dwc3" },
{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, st_dwc3_match);
static struct platform_driver st_dwc3_driver = {
.probe = st_dwc3_probe,
.remove_new = st_dwc3_remove,
.driver = {
.name = "usb-st-dwc3",
.of_match_table = st_dwc3_match,
.pm = &st_dwc3_dev_pm_ops,
},
};
module_platform_driver(st_dwc3_driver);
MODULE_AUTHOR("Giuseppe Cavallaro <[email protected]>");
MODULE_DESCRIPTION("DesignWare USB3 STi Glue Layer");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/usb/dwc3/dwc3-st.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (c) 2011 Samsung Electronics Co., Ltd.
* http://www.samsung.com
*
* Copyright 2008 Openmoko, Inc.
* Copyright 2008 Simtec Electronics
* Ben Dooks <[email protected]>
* http://armlinux.simtec.co.uk/
*
* S3C USB2.0 High-speed / OtG driver
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
#include <linux/mutex.h>
#include <linux/seq_file.h>
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/usb/ch9.h>
#include <linux/usb/gadget.h>
#include <linux/usb/phy.h>
#include <linux/usb/composite.h>
#include "core.h"
#include "hw.h"
/* conversion functions */
static inline struct dwc2_hsotg_req *our_req(struct usb_request *req)
{
return container_of(req, struct dwc2_hsotg_req, req);
}
static inline struct dwc2_hsotg_ep *our_ep(struct usb_ep *ep)
{
return container_of(ep, struct dwc2_hsotg_ep, ep);
}
static inline struct dwc2_hsotg *to_hsotg(struct usb_gadget *gadget)
{
return container_of(gadget, struct dwc2_hsotg, gadget);
}
static inline void dwc2_set_bit(struct dwc2_hsotg *hsotg, u32 offset, u32 val)
{
dwc2_writel(hsotg, dwc2_readl(hsotg, offset) | val, offset);
}
static inline void dwc2_clear_bit(struct dwc2_hsotg *hsotg, u32 offset, u32 val)
{
dwc2_writel(hsotg, dwc2_readl(hsotg, offset) & ~val, offset);
}
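/* Map an endpoint index and direction to the corresponding dwc2_hsotg_ep */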
static inline struct dwc2_hsotg_ep *index_to_ep(struct dwc2_hsotg *hsotg,
u32 ep_index, u32 dir_in)
{
if (dir_in)
return hsotg->eps_in[ep_index];
else
return hsotg->eps_out[ep_index];
}
/* forward declaration of functions */
static void dwc2_hsotg_dump(struct dwc2_hsotg *hsotg);
/**
* using_dma - return the DMA status of the driver.
* @hsotg: The driver state.
*
* Return true if we're using DMA.
*
* Currently, we have the DMA support code worked into everywhere
* that needs it, but the AMBA DMA implementation in the hardware can
* only DMA from 32bit aligned addresses. This means that gadgets such
* as the CDC Ethernet cannot work as they often pass packets which are
* not 32bit aligned.
*
* Unfortunately the choice to use DMA or not is global to the controller
* and seems to be only settable when the controller is being put through
* a core reset. This means we either need to fix the gadgets to take
* account of DMA alignment, or add bounce buffers (yuerk).
*
* g_using_dma is set depending on dts flag.
*/
static inline bool using_dma(struct dwc2_hsotg *hsotg)
{
return hsotg->params.g_dma;
}
/*
* using_desc_dma - return the descriptor DMA status of the driver.
* @hsotg: The driver state.
*
* Return true if we're using descriptor DMA.
*/
static inline bool using_desc_dma(struct dwc2_hsotg *hsotg)
{
return hsotg->params.g_dma_desc;
}
/**
* dwc2_gadget_incr_frame_num - Increments the targeted frame number.
* @hs_ep: The endpoint
*
* This function will also check if the frame number overruns DSTS_SOFFN_LIMIT.
* If an overrun occurs it will wrap the value and set the frame_overrun flag.
*/
static inline void dwc2_gadget_incr_frame_num(struct dwc2_hsotg_ep *hs_ep)
{
struct dwc2_hsotg *hsotg = hs_ep->parent;
u16 limit = DSTS_SOFFN_LIMIT;
if (hsotg->gadget.speed != USB_SPEED_HIGH)
limit >>= 3;
hs_ep->target_frame += hs_ep->interval;
if (hs_ep->target_frame > limit) {
hs_ep->frame_overrun = true;
hs_ep->target_frame &= limit;
} else {
hs_ep->frame_overrun = false;
}
}
/**
* dwc2_gadget_dec_frame_num_by_one - Decrements the targeted frame number
* by one.
* @hs_ep: The endpoint.
*
 * This function is used in the service interval based scheduling flow to
 * calculate the descriptor frame number field value. For service interval
 * mode the frame number in the descriptor should point to the last (u)frame
 * in the interval.
*
*/
static inline void dwc2_gadget_dec_frame_num_by_one(struct dwc2_hsotg_ep *hs_ep)
{
struct dwc2_hsotg *hsotg = hs_ep->parent;
u16 limit = DSTS_SOFFN_LIMIT;
if (hsotg->gadget.speed != USB_SPEED_HIGH)
limit >>= 3;
if (hs_ep->target_frame)
hs_ep->target_frame -= 1;
else
hs_ep->target_frame = limit;
}
/**
 * dwc2_hsotg_en_gsint - enable one or more of the general interrupts
* @hsotg: The device state
* @ints: A bitmask of the interrupts to enable
*/
static void dwc2_hsotg_en_gsint(struct dwc2_hsotg *hsotg, u32 ints)
{
u32 gsintmsk = dwc2_readl(hsotg, GINTMSK);
u32 new_gsintmsk;
new_gsintmsk = gsintmsk | ints;
if (new_gsintmsk != gsintmsk) {
dev_dbg(hsotg->dev, "gsintmsk now 0x%08x\n", new_gsintmsk);
dwc2_writel(hsotg, new_gsintmsk, GINTMSK);
}
}
/**
 * dwc2_hsotg_disable_gsint - disable one or more of the general interrupts
 * @hsotg: The device state
 * @ints: A bitmask of the interrupts to disable
*/
static void dwc2_hsotg_disable_gsint(struct dwc2_hsotg *hsotg, u32 ints)
{
u32 gsintmsk = dwc2_readl(hsotg, GINTMSK);
u32 new_gsintmsk;
new_gsintmsk = gsintmsk & ~ints;
if (new_gsintmsk != gsintmsk)
dwc2_writel(hsotg, new_gsintmsk, GINTMSK);
}
/**
* dwc2_hsotg_ctrl_epint - enable/disable an endpoint irq
* @hsotg: The device state
* @ep: The endpoint index
* @dir_in: True if direction is in.
* @en: The enable value, true to enable
*
* Set or clear the mask for an individual endpoint's interrupt
* request.
*/
static void dwc2_hsotg_ctrl_epint(struct dwc2_hsotg *hsotg,
unsigned int ep, unsigned int dir_in,
unsigned int en)
{
unsigned long flags;
u32 bit = 1 << ep;
u32 daint;
if (!dir_in)
bit <<= 16;
local_irq_save(flags);
daint = dwc2_readl(hsotg, DAINTMSK);
if (en)
daint |= bit;
else
daint &= ~bit;
dwc2_writel(hsotg, daint, DAINTMSK);
local_irq_restore(flags);
}
/**
* dwc2_hsotg_tx_fifo_count - return count of TX FIFOs in device mode
*
* @hsotg: Programming view of the DWC_otg controller
*/
int dwc2_hsotg_tx_fifo_count(struct dwc2_hsotg *hsotg)
{
if (hsotg->hw_params.en_multiple_tx_fifo)
/* In dedicated FIFO mode we need count of IN EPs */
return hsotg->hw_params.num_dev_in_eps;
else
/* In shared FIFO mode we need count of Periodic IN EPs */
return hsotg->hw_params.num_dev_perio_in_ep;
}
/**
* dwc2_hsotg_tx_fifo_total_depth - return total FIFO depth available for
* device mode TX FIFOs
*
* @hsotg: Programming view of the DWC_otg controller
*/
int dwc2_hsotg_tx_fifo_total_depth(struct dwc2_hsotg *hsotg)
{
int addr;
int tx_addr_max;
u32 np_tx_fifo_size;
np_tx_fifo_size = min_t(u32, hsotg->hw_params.dev_nperio_tx_fifo_size,
hsotg->params.g_np_tx_fifo_size);
/* Get Endpoint Info Control block size in DWORDs. */
tx_addr_max = hsotg->hw_params.total_fifo_size;
addr = hsotg->params.g_rx_fifo_size + np_tx_fifo_size;
if (tx_addr_max <= addr)
return 0;
return tx_addr_max - addr;
}
/**
* dwc2_gadget_wkup_alert_handler - Handler for WKUP_ALERT interrupt
*
* @hsotg: Programming view of the DWC_otg controller
*
*/
static void dwc2_gadget_wkup_alert_handler(struct dwc2_hsotg *hsotg)
{
u32 gintsts2;
u32 gintmsk2;
gintsts2 = dwc2_readl(hsotg, GINTSTS2);
gintmsk2 = dwc2_readl(hsotg, GINTMSK2);
gintsts2 &= gintmsk2;
if (gintsts2 & GINTSTS2_WKUP_ALERT_INT) {
dev_dbg(hsotg->dev, "%s: Wkup_Alert_Int\n", __func__);
dwc2_set_bit(hsotg, GINTSTS2, GINTSTS2_WKUP_ALERT_INT);
dwc2_set_bit(hsotg, DCTL, DCTL_RMTWKUPSIG);
}
}
/**
* dwc2_hsotg_tx_fifo_average_depth - returns average depth of device mode
* TX FIFOs
*
* @hsotg: Programming view of the DWC_otg controller
*/
int dwc2_hsotg_tx_fifo_average_depth(struct dwc2_hsotg *hsotg)
{
int tx_fifo_count;
int tx_fifo_depth;
tx_fifo_depth = dwc2_hsotg_tx_fifo_total_depth(hsotg);
tx_fifo_count = dwc2_hsotg_tx_fifo_count(hsotg);
if (!tx_fifo_count)
return tx_fifo_depth;
else
return tx_fifo_depth / tx_fifo_count;
}
/**
* dwc2_hsotg_init_fifo - initialise non-periodic FIFOs
* @hsotg: The device instance.
*/
static void dwc2_hsotg_init_fifo(struct dwc2_hsotg *hsotg)
{
unsigned int ep;
unsigned int addr;
int timeout;
u32 val;
u32 *txfsz = hsotg->params.g_tx_fifo_size;
/* Reset fifo map if not correctly cleared during previous session */
WARN_ON(hsotg->fifo_map);
hsotg->fifo_map = 0;
/* set RX/NPTX FIFO sizes */
dwc2_writel(hsotg, hsotg->params.g_rx_fifo_size, GRXFSIZ);
dwc2_writel(hsotg, (hsotg->params.g_rx_fifo_size <<
FIFOSIZE_STARTADDR_SHIFT) |
(hsotg->params.g_np_tx_fifo_size << FIFOSIZE_DEPTH_SHIFT),
GNPTXFSIZ);
/*
 * arrange all the rest of the TX FIFOs, as some versions of this
* block have overlapping default addresses. This also ensures
* that if the settings have been changed, then they are set to
* known values.
*/
/* start at the end of the GNPTXFSIZ, rounded up */
addr = hsotg->params.g_rx_fifo_size + hsotg->params.g_np_tx_fifo_size;
/*
* Configure fifos sizes from provided configuration and assign
* them to endpoints dynamically according to maxpacket size value of
* given endpoint.
*/
for (ep = 1; ep < MAX_EPS_CHANNELS; ep++) {
if (!txfsz[ep])
continue;
val = addr;
val |= txfsz[ep] << FIFOSIZE_DEPTH_SHIFT;
WARN_ONCE(addr + txfsz[ep] > hsotg->fifo_mem,
"insufficient fifo memory");
addr += txfsz[ep];
dwc2_writel(hsotg, val, DPTXFSIZN(ep));
val = dwc2_readl(hsotg, DPTXFSIZN(ep));
}
dwc2_writel(hsotg, hsotg->hw_params.total_fifo_size |
addr << GDFIFOCFG_EPINFOBASE_SHIFT,
GDFIFOCFG);
/*
* according to p428 of the design guide, we need to ensure that
* all fifos are flushed before continuing
*/
dwc2_writel(hsotg, GRSTCTL_TXFNUM(0x10) | GRSTCTL_TXFFLSH |
GRSTCTL_RXFFLSH, GRSTCTL);
/* wait until the fifos are both flushed */
timeout = 100;
while (1) {
val = dwc2_readl(hsotg, GRSTCTL);
if ((val & (GRSTCTL_TXFFLSH | GRSTCTL_RXFFLSH)) == 0)
break;
if (--timeout == 0) {
dev_err(hsotg->dev,
"%s: timeout flushing fifos (GRSTCTL=%08x)\n",
__func__, val);
break;
}
udelay(1);
}
dev_dbg(hsotg->dev, "FIFOs reset, timeout at %d\n", timeout);
}
/**
 * dwc2_hsotg_ep_alloc_request - allocate a USB request structure
* @ep: USB endpoint to allocate request for.
* @flags: Allocation flags
*
* Allocate a new USB request structure appropriate for the specified endpoint
*/
static struct usb_request *dwc2_hsotg_ep_alloc_request(struct usb_ep *ep,
gfp_t flags)
{
struct dwc2_hsotg_req *req;
req = kzalloc(sizeof(*req), flags);
if (!req)
return NULL;
INIT_LIST_HEAD(&req->queue);
return &req->req;
}
/**
* is_ep_periodic - return true if the endpoint is in periodic mode.
* @hs_ep: The endpoint to query.
*
* Returns true if the endpoint is in periodic mode, meaning it is being
* used for an Interrupt or ISO transfer.
*/
static inline int is_ep_periodic(struct dwc2_hsotg_ep *hs_ep)
{
return hs_ep->periodic;
}
/**
* dwc2_hsotg_unmap_dma - unmap the DMA memory being used for the request
* @hsotg: The device state.
* @hs_ep: The endpoint for the request
* @hs_req: The request being processed.
*
* This is the reverse of dwc2_hsotg_map_dma(), called for the completion
* of a request to ensure the buffer is ready for access by the caller.
*/
static void dwc2_hsotg_unmap_dma(struct dwc2_hsotg *hsotg,
struct dwc2_hsotg_ep *hs_ep,
struct dwc2_hsotg_req *hs_req)
{
struct usb_request *req = &hs_req->req;
usb_gadget_unmap_request(&hsotg->gadget, req, hs_ep->map_dir);
}
/*
* dwc2_gadget_alloc_ctrl_desc_chains - allocate DMA descriptor chains
* for Control endpoint
* @hsotg: The device state.
*
 * This function will allocate 4 descriptor chains for EP 0: 2 for the
 * Setup stage, and one each for the IN and OUT data/status transactions.
*/
static int dwc2_gadget_alloc_ctrl_desc_chains(struct dwc2_hsotg *hsotg)
{
hsotg->setup_desc[0] =
dmam_alloc_coherent(hsotg->dev,
sizeof(struct dwc2_dma_desc),
&hsotg->setup_desc_dma[0],
GFP_KERNEL);
if (!hsotg->setup_desc[0])
goto fail;
hsotg->setup_desc[1] =
dmam_alloc_coherent(hsotg->dev,
sizeof(struct dwc2_dma_desc),
&hsotg->setup_desc_dma[1],
GFP_KERNEL);
if (!hsotg->setup_desc[1])
goto fail;
hsotg->ctrl_in_desc =
dmam_alloc_coherent(hsotg->dev,
sizeof(struct dwc2_dma_desc),
&hsotg->ctrl_in_desc_dma,
GFP_KERNEL);
if (!hsotg->ctrl_in_desc)
goto fail;
hsotg->ctrl_out_desc =
dmam_alloc_coherent(hsotg->dev,
sizeof(struct dwc2_dma_desc),
&hsotg->ctrl_out_desc_dma,
GFP_KERNEL);
if (!hsotg->ctrl_out_desc)
goto fail;
return 0;
fail:
return -ENOMEM;
}
/**
* dwc2_hsotg_write_fifo - write packet Data to the TxFIFO
* @hsotg: The controller state.
* @hs_ep: The endpoint we're going to write for.
* @hs_req: The request to write data for.
*
* This is called when the TxFIFO has some space in it to hold a new
* transmission and we have something to give it. The actual setup of
* the data size is done elsewhere, so all we have to do is to actually
* write the data.
*
* The return value is zero if there is more space (or nothing was done)
* otherwise -ENOSPC is returned if the FIFO space was used up.
*
* This routine is only needed for PIO
*/
static int dwc2_hsotg_write_fifo(struct dwc2_hsotg *hsotg,
struct dwc2_hsotg_ep *hs_ep,
struct dwc2_hsotg_req *hs_req)
{
bool periodic = is_ep_periodic(hs_ep);
u32 gnptxsts = dwc2_readl(hsotg, GNPTXSTS);
int buf_pos = hs_req->req.actual;
int to_write = hs_ep->size_loaded;
void *data;
int can_write;
int pkt_round;
int max_transfer;
to_write -= (buf_pos - hs_ep->last_load);
/* if there's nothing to write, get out early */
if (to_write == 0)
return 0;
if (periodic && !hsotg->dedicated_fifos) {
u32 epsize = dwc2_readl(hsotg, DIEPTSIZ(hs_ep->index));
int size_left;
int size_done;
/*
* work out how much data was loaded so we can calculate
* how much data is left in the fifo.
*/
size_left = DXEPTSIZ_XFERSIZE_GET(epsize);
/*
* if shared fifo, we cannot write anything until the
* previous data has been completely sent.
*/
if (hs_ep->fifo_load != 0) {
dwc2_hsotg_en_gsint(hsotg, GINTSTS_PTXFEMP);
return -ENOSPC;
}
dev_dbg(hsotg->dev, "%s: left=%d, load=%d, fifo=%d, size %d\n",
__func__, size_left,
hs_ep->size_loaded, hs_ep->fifo_load, hs_ep->fifo_size);
/* how much of the data has moved */
size_done = hs_ep->size_loaded - size_left;
/* how much data is left in the fifo */
can_write = hs_ep->fifo_load - size_done;
dev_dbg(hsotg->dev, "%s: => can_write1=%d\n",
__func__, can_write);
can_write = hs_ep->fifo_size - can_write;
dev_dbg(hsotg->dev, "%s: => can_write2=%d\n",
__func__, can_write);
if (can_write <= 0) {
dwc2_hsotg_en_gsint(hsotg, GINTSTS_PTXFEMP);
return -ENOSPC;
}
} else if (hsotg->dedicated_fifos && hs_ep->index != 0) {
can_write = dwc2_readl(hsotg,
DTXFSTS(hs_ep->fifo_index));
can_write &= 0xffff;
can_write *= 4;
} else {
if (GNPTXSTS_NP_TXQ_SPC_AVAIL_GET(gnptxsts) == 0) {
dev_dbg(hsotg->dev,
"%s: no queue slots available (0x%08x)\n",
__func__, gnptxsts);
dwc2_hsotg_en_gsint(hsotg, GINTSTS_NPTXFEMP);
return -ENOSPC;
}
can_write = GNPTXSTS_NP_TXF_SPC_AVAIL_GET(gnptxsts);
can_write *= 4; /* fifo size is in 32bit quantities. */
}
max_transfer = hs_ep->ep.maxpacket * hs_ep->mc;
dev_dbg(hsotg->dev, "%s: GNPTXSTS=%08x, can=%d, to=%d, max_transfer %d\n",
__func__, gnptxsts, can_write, to_write, max_transfer);
/*
	 * Limit to 512 bytes of data: it seems that, at least on the
	 * non-periodic FIFO, requests of more than 512 bytes cause the
	 * endpoint to get stuck with a fragment of the end of the
	 * transfer in it.
*/
if (can_write > 512 && !periodic)
can_write = 512;
/*
	 * Limit the write to one max-packet size worth of data, but allow
	 * the transfer to report that it did not run out of FIFO space
	 * while doing so.
*/
if (to_write > max_transfer) {
to_write = max_transfer;
/* it's needed only when we do not use dedicated fifos */
if (!hsotg->dedicated_fifos)
dwc2_hsotg_en_gsint(hsotg,
periodic ? GINTSTS_PTXFEMP :
GINTSTS_NPTXFEMP);
}
/* see if we can write data */
if (to_write > can_write) {
to_write = can_write;
pkt_round = to_write % max_transfer;
/*
* Round the write down to an
* exact number of packets.
*
* Note, we do not currently check to see if we can ever
* write a full packet or not to the FIFO.
*/
if (pkt_round)
to_write -= pkt_round;
/*
* enable correct FIFO interrupt to alert us when there
* is more room left.
*/
/* it's needed only when we do not use dedicated fifos */
if (!hsotg->dedicated_fifos)
dwc2_hsotg_en_gsint(hsotg,
periodic ? GINTSTS_PTXFEMP :
GINTSTS_NPTXFEMP);
}
dev_dbg(hsotg->dev, "write %d/%d, can_write %d, done %d\n",
to_write, hs_req->req.length, can_write, buf_pos);
if (to_write <= 0)
return -ENOSPC;
hs_req->req.actual = buf_pos + to_write;
hs_ep->total_data += to_write;
if (periodic)
hs_ep->fifo_load += to_write;
to_write = DIV_ROUND_UP(to_write, 4);
data = hs_req->req.buf + buf_pos;
dwc2_writel_rep(hsotg, EPFIFO(hs_ep->index), data, to_write);
return (to_write >= can_write) ? -ENOSPC : 0;
}
/**
 * get_ep_limit - get the maximum data length for this endpoint
* @hs_ep: The endpoint
*
* Return the maximum data that can be queued in one go on a given endpoint
* so that transfers that are too long can be split.
*/
static unsigned int get_ep_limit(struct dwc2_hsotg_ep *hs_ep)
{
int index = hs_ep->index;
unsigned int maxsize;
unsigned int maxpkt;
if (index != 0) {
maxsize = DXEPTSIZ_XFERSIZE_LIMIT + 1;
maxpkt = DXEPTSIZ_PKTCNT_LIMIT + 1;
} else {
maxsize = 64 + 64;
if (hs_ep->dir_in)
maxpkt = DIEPTSIZ0_PKTCNT_LIMIT + 1;
else
maxpkt = 2;
}
/* we made the constant loading easier above by using +1 */
maxpkt--;
maxsize--;
/*
* constrain by packet count if maxpkts*pktsize is greater
* than the length register size.
*/
if ((maxpkt * hs_ep->ep.maxpacket) < maxsize)
maxsize = maxpkt * hs_ep->ep.maxpacket;
return maxsize;
}
/**
* dwc2_hsotg_read_frameno - read current frame number
* @hsotg: The device instance
*
* Return the current frame number
*/
static u32 dwc2_hsotg_read_frameno(struct dwc2_hsotg *hsotg)
{
u32 dsts;
dsts = dwc2_readl(hsotg, DSTS);
dsts &= DSTS_SOFFN_MASK;
dsts >>= DSTS_SOFFN_SHIFT;
return dsts;
}
/**
* dwc2_gadget_get_chain_limit - get the maximum data payload value of the
* DMA descriptor chain prepared for specific endpoint
* @hs_ep: The endpoint
*
* Return the maximum data that can be queued in one go on a given endpoint
* depending on its descriptor chain capacity so that transfers that
* are too long can be split.
*/
static unsigned int dwc2_gadget_get_chain_limit(struct dwc2_hsotg_ep *hs_ep)
{
const struct usb_endpoint_descriptor *ep_desc = hs_ep->ep.desc;
int is_isoc = hs_ep->isochronous;
unsigned int maxsize;
u32 mps = hs_ep->ep.maxpacket;
int dir_in = hs_ep->dir_in;
if (is_isoc)
maxsize = (hs_ep->dir_in ? DEV_DMA_ISOC_TX_NBYTES_LIMIT :
DEV_DMA_ISOC_RX_NBYTES_LIMIT) *
MAX_DMA_DESC_NUM_HS_ISOC;
else
maxsize = DEV_DMA_NBYTES_LIMIT * MAX_DMA_DESC_NUM_GENERIC;
/* Interrupt OUT EP with mps not multiple of 4 */
if (hs_ep->index)
if (usb_endpoint_xfer_int(ep_desc) && !dir_in && (mps % 4))
maxsize = mps * MAX_DMA_DESC_NUM_GENERIC;
return maxsize;
}
/*
* dwc2_gadget_get_desc_params - get DMA descriptor parameters.
* @hs_ep: The endpoint
* @mask: RX/TX bytes mask to be defined
*
* Returns maximum data payload for one descriptor after analyzing endpoint
* characteristics.
* DMA descriptor transfer bytes limit depends on EP type:
* Control out - MPS,
* Isochronous - descriptor rx/tx bytes bitfield limit,
 * Control In/Bulk/Interrupt - multiple of mps. This avoids concatenating
 * data from several descriptors within a single packet.
* Interrupt OUT - if mps not multiple of 4 then a single packet corresponds
* to a single descriptor.
*
* Selects corresponding mask for RX/TX bytes as well.
*/
static u32 dwc2_gadget_get_desc_params(struct dwc2_hsotg_ep *hs_ep, u32 *mask)
{
const struct usb_endpoint_descriptor *ep_desc = hs_ep->ep.desc;
u32 mps = hs_ep->ep.maxpacket;
int dir_in = hs_ep->dir_in;
u32 desc_size = 0;
if (!hs_ep->index && !dir_in) {
desc_size = mps;
*mask = DEV_DMA_NBYTES_MASK;
} else if (hs_ep->isochronous) {
if (dir_in) {
desc_size = DEV_DMA_ISOC_TX_NBYTES_LIMIT;
*mask = DEV_DMA_ISOC_TX_NBYTES_MASK;
} else {
desc_size = DEV_DMA_ISOC_RX_NBYTES_LIMIT;
*mask = DEV_DMA_ISOC_RX_NBYTES_MASK;
}
} else {
desc_size = DEV_DMA_NBYTES_LIMIT;
*mask = DEV_DMA_NBYTES_MASK;
/* Round down desc_size to be mps multiple */
desc_size -= desc_size % mps;
}
/* Interrupt OUT EP with mps not multiple of 4 */
if (hs_ep->index)
if (usb_endpoint_xfer_int(ep_desc) && !dir_in && (mps % 4)) {
desc_size = mps;
*mask = DEV_DMA_NBYTES_MASK;
}
return desc_size;
}
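/*
 * dwc2_gadget_fill_nonisoc_xfer_ddma_one - fill descriptors for one buffer
 * @hs_ep: The endpoint
 * @desc: Pointer to the next free descriptor, advanced as entries are filled
 * @dma_buff: DMA address of the buffer
 * @len: Length of the buffer in bytes
 * @true_last: True if this buffer ends the whole transfer
 *
 * Splits the buffer into chunks no larger than the per-descriptor byte limit
 * and marks each filled entry as host-ready. The L and IOC bits are set on
 * the last entry when @true_last is set.
 */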
static void dwc2_gadget_fill_nonisoc_xfer_ddma_one(struct dwc2_hsotg_ep *hs_ep,
struct dwc2_dma_desc **desc,
dma_addr_t dma_buff,
unsigned int len,
bool true_last)
{
int dir_in = hs_ep->dir_in;
u32 mps = hs_ep->ep.maxpacket;
u32 maxsize = 0;
u32 offset = 0;
u32 mask = 0;
int i;
maxsize = dwc2_gadget_get_desc_params(hs_ep, &mask);
hs_ep->desc_count = (len / maxsize) +
((len % maxsize) ? 1 : 0);
if (len == 0)
hs_ep->desc_count = 1;
for (i = 0; i < hs_ep->desc_count; ++i) {
(*desc)->status = 0;
(*desc)->status |= (DEV_DMA_BUFF_STS_HBUSY
<< DEV_DMA_BUFF_STS_SHIFT);
if (len > maxsize) {
if (!hs_ep->index && !dir_in)
(*desc)->status |= (DEV_DMA_L | DEV_DMA_IOC);
(*desc)->status |=
maxsize << DEV_DMA_NBYTES_SHIFT & mask;
(*desc)->buf = dma_buff + offset;
len -= maxsize;
offset += maxsize;
} else {
if (true_last)
(*desc)->status |= (DEV_DMA_L | DEV_DMA_IOC);
if (dir_in)
(*desc)->status |= (len % mps) ? DEV_DMA_SHORT :
((hs_ep->send_zlp && true_last) ?
DEV_DMA_SHORT : 0);
(*desc)->status |=
len << DEV_DMA_NBYTES_SHIFT & mask;
(*desc)->buf = dma_buff + offset;
}
(*desc)->status &= ~DEV_DMA_BUFF_STS_MASK;
(*desc)->status |= (DEV_DMA_BUFF_STS_HREADY
<< DEV_DMA_BUFF_STS_SHIFT);
(*desc)++;
}
}
/*
* dwc2_gadget_config_nonisoc_xfer_ddma - prepare non ISOC DMA desc chain.
* @hs_ep: The endpoint
* @ureq: Request to transfer
* @offset: offset in bytes
* @len: Length of the transfer
*
* This function will iterate over descriptor chain and fill its entries
* with corresponding information based on transfer data.
*/
static void dwc2_gadget_config_nonisoc_xfer_ddma(struct dwc2_hsotg_ep *hs_ep,
dma_addr_t dma_buff,
unsigned int len)
{
struct usb_request *ureq = NULL;
struct dwc2_dma_desc *desc = hs_ep->desc_list;
struct scatterlist *sg;
int i;
u8 desc_count = 0;
if (hs_ep->req)
ureq = &hs_ep->req->req;
/* non-DMA sg buffer */
if (!ureq || !ureq->num_sgs) {
dwc2_gadget_fill_nonisoc_xfer_ddma_one(hs_ep, &desc,
dma_buff, len, true);
return;
}
/* DMA sg buffer */
for_each_sg(ureq->sg, sg, ureq->num_sgs, i) {
dwc2_gadget_fill_nonisoc_xfer_ddma_one(hs_ep, &desc,
sg_dma_address(sg) + sg->offset, sg_dma_len(sg),
sg_is_last(sg));
desc_count += hs_ep->desc_count;
}
hs_ep->desc_count = desc_count;
}
/*
* dwc2_gadget_fill_isoc_desc - fills next isochronous descriptor in chain.
* @hs_ep: The isochronous endpoint.
* @dma_buff: usb requests dma buffer.
* @len: usb request transfer length.
*
 * Fills the next free descriptor with the data of the arrived usb request
 * and the frame info, sets the Last and IOC bits, and increments next_desc.
 * If the filled descriptor is not the first one, removes the L bit from the
 * previous descriptor's status.
*/
static int dwc2_gadget_fill_isoc_desc(struct dwc2_hsotg_ep *hs_ep,
dma_addr_t dma_buff, unsigned int len)
{
struct dwc2_dma_desc *desc;
struct dwc2_hsotg *hsotg = hs_ep->parent;
u32 index;
u32 mask = 0;
u8 pid = 0;
dwc2_gadget_get_desc_params(hs_ep, &mask);
index = hs_ep->next_desc;
desc = &hs_ep->desc_list[index];
/* Check if descriptor chain full */
if ((desc->status >> DEV_DMA_BUFF_STS_SHIFT) ==
DEV_DMA_BUFF_STS_HREADY) {
dev_dbg(hsotg->dev, "%s: desc chain full\n", __func__);
return 1;
}
/* Clear L bit of previous desc if more than one entries in the chain */
if (hs_ep->next_desc)
hs_ep->desc_list[index - 1].status &= ~DEV_DMA_L;
dev_dbg(hsotg->dev, "%s: Filling ep %d, dir %s isoc desc # %d\n",
__func__, hs_ep->index, hs_ep->dir_in ? "in" : "out", index);
desc->status = 0;
desc->status |= (DEV_DMA_BUFF_STS_HBUSY << DEV_DMA_BUFF_STS_SHIFT);
desc->buf = dma_buff;
desc->status |= (DEV_DMA_L | DEV_DMA_IOC |
((len << DEV_DMA_NBYTES_SHIFT) & mask));
if (hs_ep->dir_in) {
if (len)
pid = DIV_ROUND_UP(len, hs_ep->ep.maxpacket);
else
pid = 1;
desc->status |= ((pid << DEV_DMA_ISOC_PID_SHIFT) &
DEV_DMA_ISOC_PID_MASK) |
((len % hs_ep->ep.maxpacket) ?
DEV_DMA_SHORT : 0) |
((hs_ep->target_frame <<
DEV_DMA_ISOC_FRNUM_SHIFT) &
DEV_DMA_ISOC_FRNUM_MASK);
}
desc->status &= ~DEV_DMA_BUFF_STS_MASK;
desc->status |= (DEV_DMA_BUFF_STS_HREADY << DEV_DMA_BUFF_STS_SHIFT);
/* Increment frame number by interval for IN */
if (hs_ep->dir_in)
dwc2_gadget_incr_frame_num(hs_ep);
/* Update index of last configured entry in the chain */
hs_ep->next_desc++;
if (hs_ep->next_desc >= MAX_DMA_DESC_NUM_HS_ISOC)
hs_ep->next_desc = 0;
return 0;
}
/*
* dwc2_gadget_start_isoc_ddma - start isochronous transfer in DDMA
* @hs_ep: The isochronous endpoint.
*
* Prepare descriptor chain for isochronous endpoints. Afterwards
* write DMA address to HW and enable the endpoint.
*/
static void dwc2_gadget_start_isoc_ddma(struct dwc2_hsotg_ep *hs_ep)
{
struct dwc2_hsotg *hsotg = hs_ep->parent;
struct dwc2_hsotg_req *hs_req, *treq;
int index = hs_ep->index;
int ret;
int i;
u32 dma_reg;
u32 depctl;
u32 ctrl;
struct dwc2_dma_desc *desc;
if (list_empty(&hs_ep->queue)) {
hs_ep->target_frame = TARGET_FRAME_INITIAL;
dev_dbg(hsotg->dev, "%s: No requests in queue\n", __func__);
return;
}
/* Initialize descriptor chain by Host Busy status */
for (i = 0; i < MAX_DMA_DESC_NUM_HS_ISOC; i++) {
desc = &hs_ep->desc_list[i];
desc->status = 0;
desc->status |= (DEV_DMA_BUFF_STS_HBUSY
<< DEV_DMA_BUFF_STS_SHIFT);
}
hs_ep->next_desc = 0;
list_for_each_entry_safe(hs_req, treq, &hs_ep->queue, queue) {
dma_addr_t dma_addr = hs_req->req.dma;
if (hs_req->req.num_sgs) {
WARN_ON(hs_req->req.num_sgs > 1);
dma_addr = sg_dma_address(hs_req->req.sg);
}
ret = dwc2_gadget_fill_isoc_desc(hs_ep, dma_addr,
hs_req->req.length);
if (ret)
break;
}
hs_ep->compl_desc = 0;
depctl = hs_ep->dir_in ? DIEPCTL(index) : DOEPCTL(index);
dma_reg = hs_ep->dir_in ? DIEPDMA(index) : DOEPDMA(index);
/* write descriptor chain address to control register */
dwc2_writel(hsotg, hs_ep->desc_list_dma, dma_reg);
ctrl = dwc2_readl(hsotg, depctl);
ctrl |= DXEPCTL_EPENA | DXEPCTL_CNAK;
dwc2_writel(hsotg, ctrl, depctl);
}
static bool dwc2_gadget_target_frame_elapsed(struct dwc2_hsotg_ep *hs_ep);
static void dwc2_hsotg_complete_request(struct dwc2_hsotg *hsotg,
struct dwc2_hsotg_ep *hs_ep,
struct dwc2_hsotg_req *hs_req,
int result);
/**
* dwc2_hsotg_start_req - start a USB request from an endpoint's queue
* @hsotg: The controller state.
* @hs_ep: The endpoint to process a request for
* @hs_req: The request to start.
* @continuing: True if we are doing more for the current request.
*
* Start the given request running by setting the endpoint registers
* appropriately, and writing any data to the FIFOs.
*/
static void dwc2_hsotg_start_req(struct dwc2_hsotg *hsotg,
struct dwc2_hsotg_ep *hs_ep,
struct dwc2_hsotg_req *hs_req,
bool continuing)
{
struct usb_request *ureq = &hs_req->req;
int index = hs_ep->index;
int dir_in = hs_ep->dir_in;
u32 epctrl_reg;
u32 epsize_reg;
u32 epsize;
u32 ctrl;
unsigned int length;
unsigned int packets;
unsigned int maxreq;
unsigned int dma_reg;
if (index != 0) {
if (hs_ep->req && !continuing) {
dev_err(hsotg->dev, "%s: active request\n", __func__);
WARN_ON(1);
return;
} else if (hs_ep->req != hs_req && continuing) {
dev_err(hsotg->dev,
"%s: continue different req\n", __func__);
WARN_ON(1);
return;
}
}
dma_reg = dir_in ? DIEPDMA(index) : DOEPDMA(index);
epctrl_reg = dir_in ? DIEPCTL(index) : DOEPCTL(index);
epsize_reg = dir_in ? DIEPTSIZ(index) : DOEPTSIZ(index);
dev_dbg(hsotg->dev, "%s: DxEPCTL=0x%08x, ep %d, dir %s\n",
__func__, dwc2_readl(hsotg, epctrl_reg), index,
hs_ep->dir_in ? "in" : "out");
/* If endpoint is stalled, we will restart request later */
ctrl = dwc2_readl(hsotg, epctrl_reg);
if (index && ctrl & DXEPCTL_STALL) {
dev_warn(hsotg->dev, "%s: ep%d is stalled\n", __func__, index);
return;
}
length = ureq->length - ureq->actual;
dev_dbg(hsotg->dev, "ureq->length:%d ureq->actual:%d\n",
ureq->length, ureq->actual);
if (!using_desc_dma(hsotg))
maxreq = get_ep_limit(hs_ep);
else
maxreq = dwc2_gadget_get_chain_limit(hs_ep);
if (length > maxreq) {
int round = maxreq % hs_ep->ep.maxpacket;
dev_dbg(hsotg->dev, "%s: length %d, max-req %d, r %d\n",
__func__, length, maxreq, round);
/* round down to multiple of packets */
if (round)
maxreq -= round;
length = maxreq;
}
if (length)
packets = DIV_ROUND_UP(length, hs_ep->ep.maxpacket);
else
packets = 1; /* send one packet if length is zero. */
if (dir_in && index != 0)
if (hs_ep->isochronous)
epsize = DXEPTSIZ_MC(packets);
else
epsize = DXEPTSIZ_MC(1);
else
epsize = 0;
/*
* zero length packet should be programmed on its own and should not
* be counted in DIEPTSIZ.PktCnt with other packets.
*/
if (dir_in && ureq->zero && !continuing) {
/* Test if zlp is actually required. */
if ((ureq->length >= hs_ep->ep.maxpacket) &&
!(ureq->length % hs_ep->ep.maxpacket))
hs_ep->send_zlp = 1;
}
epsize |= DXEPTSIZ_PKTCNT(packets);
epsize |= DXEPTSIZ_XFERSIZE(length);
dev_dbg(hsotg->dev, "%s: %d@%d/%d, 0x%08x => 0x%08x\n",
__func__, packets, length, ureq->length, epsize, epsize_reg);
/* store the request as the current one we're doing */
hs_ep->req = hs_req;
if (using_desc_dma(hsotg)) {
u32 offset = 0;
u32 mps = hs_ep->ep.maxpacket;
/* Adjust length: EP0 - MPS, other OUT EPs - multiple of MPS */
if (!dir_in) {
if (!index)
length = mps;
else if (length % mps)
length += (mps - (length % mps));
}
if (continuing)
offset = ureq->actual;
/* Fill DDMA chain entries */
dwc2_gadget_config_nonisoc_xfer_ddma(hs_ep, ureq->dma + offset,
length);
/* write descriptor chain address to control register */
dwc2_writel(hsotg, hs_ep->desc_list_dma, dma_reg);
dev_dbg(hsotg->dev, "%s: %08x pad => 0x%08x\n",
__func__, (u32)hs_ep->desc_list_dma, dma_reg);
} else {
/* write size / packets */
dwc2_writel(hsotg, epsize, epsize_reg);
if (using_dma(hsotg) && !continuing && (length != 0)) {
/*
* write DMA address to control register, buffer
* already synced by dwc2_hsotg_ep_queue().
*/
dwc2_writel(hsotg, ureq->dma, dma_reg);
dev_dbg(hsotg->dev, "%s: %pad => 0x%08x\n",
__func__, &ureq->dma, dma_reg);
}
}
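/*
* For isochronous endpoints select the even/odd (micro)frame and only
* arm the transfer if the target frame has not already elapsed;
* otherwise retire the request with -ENODATA.
*/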
if (hs_ep->isochronous) {
if (!dwc2_gadget_target_frame_elapsed(hs_ep)) {
if (hs_ep->interval == 1) {
if (hs_ep->target_frame & 0x1)
ctrl |= DXEPCTL_SETODDFR;
else
ctrl |= DXEPCTL_SETEVENFR;
}
ctrl |= DXEPCTL_CNAK;
} else {
hs_req->req.frame_number = hs_ep->target_frame;
hs_req->req.actual = 0;
dwc2_hsotg_complete_request(hsotg, hs_ep, hs_req, -ENODATA);
return;
}
}
ctrl |= DXEPCTL_EPENA; /* ensure ep enabled */
dev_dbg(hsotg->dev, "ep0 state:%d\n", hsotg->ep0_state);
/* For Setup request do not clear NAK */
if (!(index == 0 && hsotg->ep0_state == DWC2_EP0_SETUP))
ctrl |= DXEPCTL_CNAK; /* clear NAK set by core */
dev_dbg(hsotg->dev, "%s: DxEPCTL=0x%08x\n", __func__, ctrl);
dwc2_writel(hsotg, ctrl, epctrl_reg);
/*
* set these, it seems that DMA support increments past the end
* of the packet buffer so we need to calculate the length from
* this information.
*/
hs_ep->size_loaded = length;
hs_ep->last_load = ureq->actual;
if (dir_in && !using_dma(hsotg)) {
/* set these anyway, we may need them for non-periodic in */
hs_ep->fifo_load = 0;
dwc2_hsotg_write_fifo(hsotg, hs_ep, hs_req);
}
/*
* Note, trying to clear the NAK here causes problems with transmit
* on the S3C6400 ending up with the TXFIFO becoming full.
*/
/* check ep is enabled */
if (!(dwc2_readl(hsotg, epctrl_reg) & DXEPCTL_EPENA))
dev_dbg(hsotg->dev,
"ep%d: failed to become enabled (DXEPCTL=0x%08x)?\n",
index, dwc2_readl(hsotg, epctrl_reg));
dev_dbg(hsotg->dev, "%s: DXEPCTL=0x%08x\n",
__func__, dwc2_readl(hsotg, epctrl_reg));
/* enable ep interrupts */
dwc2_hsotg_ctrl_epint(hsotg, hs_ep->index, hs_ep->dir_in, 1);
}
/**
* dwc2_hsotg_map_dma - map the DMA memory being used for the request
* @hsotg: The device state.
* @hs_ep: The endpoint the request is on.
* @req: The request being processed.
*
* We've been asked to queue a request, so ensure that the memory buffer
* is correctly set up for DMA. If we've been passed an extant DMA address
* then ensure the buffer has been synced to memory. If our buffer has no
* DMA memory, then we map the memory and mark our request to allow us to
* cleanup on completion.
*/
static int dwc2_hsotg_map_dma(struct dwc2_hsotg *hsotg,
struct dwc2_hsotg_ep *hs_ep,
struct usb_request *req)
{
int ret;
hs_ep->map_dir = hs_ep->dir_in;
ret = usb_gadget_map_request(&hsotg->gadget, req, hs_ep->dir_in);
if (ret)
goto dma_error;
return 0;
dma_error:
dev_err(hsotg->dev, "%s: failed to map buffer %p, %d bytes\n",
__func__, req->buf, req->length);
return -EIO;
}
static int dwc2_hsotg_handle_unaligned_buf_start(struct dwc2_hsotg *hsotg,
struct dwc2_hsotg_ep *hs_ep,
struct dwc2_hsotg_req *hs_req)
{
void *req_buf = hs_req->req.buf;
/* If dma is not being used or buffer is aligned */
if (!using_dma(hsotg) || !((long)req_buf & 3))
return 0;
WARN_ON(hs_req->saved_req_buf);
dev_dbg(hsotg->dev, "%s: %s: buf=%p length=%d\n", __func__,
hs_ep->ep.name, req_buf, hs_req->req.length);
hs_req->req.buf = kmalloc(hs_req->req.length, GFP_ATOMIC);
if (!hs_req->req.buf) {
hs_req->req.buf = req_buf;
dev_err(hsotg->dev,
"%s: unable to allocate memory for bounce buffer\n",
__func__);
return -ENOMEM;
}
/* Save actual buffer */
hs_req->saved_req_buf = req_buf;
if (hs_ep->dir_in)
memcpy(hs_req->req.buf, req_buf, hs_req->req.length);
return 0;
}
static void
dwc2_hsotg_handle_unaligned_buf_complete(struct dwc2_hsotg *hsotg,
struct dwc2_hsotg_ep *hs_ep,
struct dwc2_hsotg_req *hs_req)
{
/* If dma is not being used or buffer was aligned */
if (!using_dma(hsotg) || !hs_req->saved_req_buf)
return;
dev_dbg(hsotg->dev, "%s: %s: status=%d actual-length=%d\n", __func__,
hs_ep->ep.name, hs_req->req.status, hs_req->req.actual);
/* Copy data from bounce buffer on successful out transfer */
if (!hs_ep->dir_in && !hs_req->req.status)
memcpy(hs_req->saved_req_buf, hs_req->req.buf,
hs_req->req.actual);
/* Free bounce buffer */
kfree(hs_req->req.buf);
hs_req->req.buf = hs_req->saved_req_buf;
hs_req->saved_req_buf = NULL;
}
/**
* dwc2_gadget_target_frame_elapsed - Checks target frame
* @hs_ep: The driver endpoint to check
*
* Returns true if the target frame has elapsed. If it has, the
* corresponding transfer must be dropped.
*/
static bool dwc2_gadget_target_frame_elapsed(struct dwc2_hsotg_ep *hs_ep)
{
struct dwc2_hsotg *hsotg = hs_ep->parent;
u32 target_frame = hs_ep->target_frame;
u32 current_frame = hsotg->frame_number;
bool frame_overrun = hs_ep->frame_overrun;
u16 limit = DSTS_SOFFN_LIMIT;
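/*
* At full/low speed the frame counter advances per frame rather than
* per microframe, so scale the wrap-around limit down accordingly.
*/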
if (hsotg->gadget.speed != USB_SPEED_HIGH)
limit >>= 3;
if (!frame_overrun && current_frame >= target_frame)
return true;
if (frame_overrun && current_frame >= target_frame &&
((current_frame - target_frame) < limit / 2))
return true;
return false;
}
/*
* dwc2_gadget_set_ep0_desc_chain - Set EP's desc chain pointers
* @hsotg: The driver state
* @hs_ep: the ep descriptor chain is for
*
* Called to update the EP0 structure's descriptor pointers depending on
* the stage of the control transfer.
*/
static int dwc2_gadget_set_ep0_desc_chain(struct dwc2_hsotg *hsotg,
struct dwc2_hsotg_ep *hs_ep)
{
switch (hsotg->ep0_state) {
case DWC2_EP0_SETUP:
case DWC2_EP0_STATUS_OUT:
hs_ep->desc_list = hsotg->setup_desc[0];
hs_ep->desc_list_dma = hsotg->setup_desc_dma[0];
break;
case DWC2_EP0_DATA_IN:
case DWC2_EP0_STATUS_IN:
hs_ep->desc_list = hsotg->ctrl_in_desc;
hs_ep->desc_list_dma = hsotg->ctrl_in_desc_dma;
break;
case DWC2_EP0_DATA_OUT:
hs_ep->desc_list = hsotg->ctrl_out_desc;
hs_ep->desc_list_dma = hsotg->ctrl_out_desc_dma;
break;
default:
dev_err(hsotg->dev, "invalid EP 0 state in queue %d\n",
hsotg->ep0_state);
return -EINVAL;
}
return 0;
}
static int dwc2_hsotg_ep_queue(struct usb_ep *ep, struct usb_request *req,
gfp_t gfp_flags)
{
struct dwc2_hsotg_req *hs_req = our_req(req);
struct dwc2_hsotg_ep *hs_ep = our_ep(ep);
struct dwc2_hsotg *hs = hs_ep->parent;
bool first;
int ret;
u32 maxsize = 0;
u32 mask = 0;
dev_dbg(hs->dev, "%s: req %p: %d@%p, noi=%d, zero=%d, snok=%d\n",
ep->name, req, req->length, req->buf, req->no_interrupt,
req->zero, req->short_not_ok);
/* Prevent new request submission when controller is suspended */
if (hs->lx_state != DWC2_L0) {
dev_dbg(hs->dev, "%s: submit request only in active state\n",
__func__);
return -EAGAIN;
}
/* initialise status of the request */
INIT_LIST_HEAD(&hs_req->queue);
req->actual = 0;
req->status = -EINPROGRESS;
/* Don't queue ISOC request if length greater than mps*mc */
if (hs_ep->isochronous &&
req->length > (hs_ep->mc * hs_ep->ep.maxpacket)) {
dev_err(hs->dev, "req length > maxpacket*mc\n");
return -EINVAL;
}
/* In DDMA mode, don't queue an ISOC request whose length exceeds
* the descriptor limits.
*/
if (using_desc_dma(hs) && hs_ep->isochronous) {
maxsize = dwc2_gadget_get_desc_params(hs_ep, &mask);
if (hs_ep->dir_in && req->length > maxsize) {
dev_err(hs->dev, "wrong length %d (maxsize=%d)\n",
req->length, maxsize);
return -EINVAL;
}
if (!hs_ep->dir_in && req->length > hs_ep->ep.maxpacket) {
dev_err(hs->dev, "ISOC OUT: wrong length %d (mps=%d)\n",
req->length, hs_ep->ep.maxpacket);
return -EINVAL;
}
}
ret = dwc2_hsotg_handle_unaligned_buf_start(hs, hs_ep, hs_req);
if (ret)
return ret;
/* if we're using DMA, sync the buffers as necessary */
if (using_dma(hs)) {
ret = dwc2_hsotg_map_dma(hs, hs_ep, req);
if (ret)
return ret;
}
/* If using descriptor DMA configure EP0 descriptor chain pointers */
if (using_desc_dma(hs) && !hs_ep->index) {
ret = dwc2_gadget_set_ep0_desc_chain(hs, hs_ep);
if (ret)
return ret;
}
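/*
* Remember whether the queue was empty before adding this request; if
* it was, we have to kick off the transfer ourselves below.
*/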
first = list_empty(&hs_ep->queue);
list_add_tail(&hs_req->queue, &hs_ep->queue);
/*
* Handle DDMA isochronous transfers separately - just add new entry
* to the descriptor chain.
* Transfer will be started once SW gets either one of NAK or
* OutTknEpDis interrupts.
*/
if (using_desc_dma(hs) && hs_ep->isochronous) {
if (hs_ep->target_frame != TARGET_FRAME_INITIAL) {
dma_addr_t dma_addr = hs_req->req.dma;
if (hs_req->req.num_sgs) {
WARN_ON(hs_req->req.num_sgs > 1);
dma_addr = sg_dma_address(hs_req->req.sg);
}
dwc2_gadget_fill_isoc_desc(hs_ep, dma_addr,
hs_req->req.length);
}
return 0;
}
/* Change EP direction if status phase request is after data out */
if (!hs_ep->index && !req->length && !hs_ep->dir_in &&
hs->ep0_state == DWC2_EP0_DATA_OUT)
hs_ep->dir_in = 1;
if (first) {
if (!hs_ep->isochronous) {
dwc2_hsotg_start_req(hs, hs_ep, hs_req, false);
return 0;
}
/* Update current frame number value. */
hs->frame_number = dwc2_hsotg_read_frameno(hs);
while (dwc2_gadget_target_frame_elapsed(hs_ep)) {
dwc2_gadget_incr_frame_num(hs_ep);
/* Update current frame number value once more as it
* changes here.
*/
hs->frame_number = dwc2_hsotg_read_frameno(hs);
}
if (hs_ep->target_frame != TARGET_FRAME_INITIAL)
dwc2_hsotg_start_req(hs, hs_ep, hs_req, false);
}
return 0;
}
static int dwc2_hsotg_ep_queue_lock(struct usb_ep *ep, struct usb_request *req,
gfp_t gfp_flags)
{
struct dwc2_hsotg_ep *hs_ep = our_ep(ep);
struct dwc2_hsotg *hs = hs_ep->parent;
unsigned long flags;
int ret;
spin_lock_irqsave(&hs->lock, flags);
ret = dwc2_hsotg_ep_queue(ep, req, gfp_flags);
spin_unlock_irqrestore(&hs->lock, flags);
return ret;
}
static void dwc2_hsotg_ep_free_request(struct usb_ep *ep,
struct usb_request *req)
{
struct dwc2_hsotg_req *hs_req = our_req(req);
kfree(hs_req);
}
/**
* dwc2_hsotg_complete_oursetup - setup completion callback
* @ep: The endpoint the request was on.
* @req: The request completed.
*
* Called on completion of any requests the driver itself
* submitted that need cleaning up.
*/
static void dwc2_hsotg_complete_oursetup(struct usb_ep *ep,
struct usb_request *req)
{
struct dwc2_hsotg_ep *hs_ep = our_ep(ep);
struct dwc2_hsotg *hsotg = hs_ep->parent;
dev_dbg(hsotg->dev, "%s: ep %p, req %p\n", __func__, ep, req);
dwc2_hsotg_ep_free_request(ep, req);
}
/**
* ep_from_windex - convert control wIndex value to endpoint
* @hsotg: The driver state.
* @windex: The control request wIndex field (in host order).
*
* Convert the given wIndex into a pointer to a driver endpoint
* structure, or return NULL if it is not a valid endpoint.
*/
static struct dwc2_hsotg_ep *ep_from_windex(struct dwc2_hsotg *hsotg,
u32 windex)
{
int dir = (windex & USB_DIR_IN) ? 1 : 0;
int idx = windex & 0x7F;
if (windex >= 0x100)
return NULL;
if (idx > hsotg->num_of_eps)
return NULL;
return index_to_ep(hsotg, idx, dir);
}
/**
* dwc2_hsotg_set_test_mode - Enable USB test modes
* @hsotg: The driver state.
* @testmode: requested USB test mode
* Enable the USB test mode requested by the host.
*/
int dwc2_hsotg_set_test_mode(struct dwc2_hsotg *hsotg, int testmode)
{
int dctl = dwc2_readl(hsotg, DCTL);
dctl &= ~DCTL_TSTCTL_MASK;
switch (testmode) {
case USB_TEST_J:
case USB_TEST_K:
case USB_TEST_SE0_NAK:
case USB_TEST_PACKET:
case USB_TEST_FORCE_ENABLE:
dctl |= testmode << DCTL_TSTCTL_SHIFT;
break;
default:
return -EINVAL;
}
dwc2_writel(hsotg, dctl, DCTL);
return 0;
}
/**
* dwc2_hsotg_send_reply - send reply to control request
* @hsotg: The device state
* @ep: Endpoint 0
* @buff: Buffer for request
* @length: Length of reply.
*
* Create a request and queue it on the given endpoint. This is useful as
* an internal method of sending replies to certain control requests, etc.
*/
static int dwc2_hsotg_send_reply(struct dwc2_hsotg *hsotg,
struct dwc2_hsotg_ep *ep,
void *buff,
int length)
{
struct usb_request *req;
int ret;
dev_dbg(hsotg->dev, "%s: buff %p, len %d\n", __func__, buff, length);
req = dwc2_hsotg_ep_alloc_request(&ep->ep, GFP_ATOMIC);
hsotg->ep0_reply = req;
if (!req) {
dev_warn(hsotg->dev, "%s: cannot alloc req\n", __func__);
return -ENOMEM;
}
req->buf = hsotg->ep0_buff;
req->length = length;
/*
* zero flag is for sending zlp in DATA IN stage. It has no impact on
* STATUS stage.
*/
req->zero = 0;
req->complete = dwc2_hsotg_complete_oursetup;
if (length)
memcpy(req->buf, buff, length);
ret = dwc2_hsotg_ep_queue(&ep->ep, req, GFP_ATOMIC);
if (ret) {
dev_warn(hsotg->dev, "%s: cannot queue req\n", __func__);
return ret;
}
return 0;
}
/**
* dwc2_hsotg_process_req_status - process request GET_STATUS
* @hsotg: The device state
* @ctrl: USB control request
*/
static int dwc2_hsotg_process_req_status(struct dwc2_hsotg *hsotg,
struct usb_ctrlrequest *ctrl)
{
struct dwc2_hsotg_ep *ep0 = hsotg->eps_out[0];
struct dwc2_hsotg_ep *ep;
__le16 reply;
u16 status;
int ret;
dev_dbg(hsotg->dev, "%s: USB_REQ_GET_STATUS\n", __func__);
if (!ep0->dir_in) {
dev_warn(hsotg->dev, "%s: direction out?\n", __func__);
return -EINVAL;
}
switch (ctrl->bRequestType & USB_RECIP_MASK) {
case USB_RECIP_DEVICE:
status = hsotg->gadget.is_selfpowered <<
USB_DEVICE_SELF_POWERED;
status |= hsotg->remote_wakeup_allowed <<
USB_DEVICE_REMOTE_WAKEUP;
reply = cpu_to_le16(status);
break;
case USB_RECIP_INTERFACE:
/* currently, the data result should be zero */
reply = cpu_to_le16(0);
break;
case USB_RECIP_ENDPOINT:
ep = ep_from_windex(hsotg, le16_to_cpu(ctrl->wIndex));
if (!ep)
return -ENOENT;
reply = cpu_to_le16(ep->halted ? 1 : 0);
break;
default:
return 0;
}
if (le16_to_cpu(ctrl->wLength) != 2)
return -EINVAL;
ret = dwc2_hsotg_send_reply(hsotg, ep0, &reply, 2);
if (ret) {
dev_err(hsotg->dev, "%s: failed to send reply\n", __func__);
return ret;
}
return 1;
}
static int dwc2_hsotg_ep_sethalt(struct usb_ep *ep, int value, bool now);
/**
* get_ep_head - return the first request on the endpoint
* @hs_ep: The controller endpoint to get
*
* Get the first request on the endpoint.
*/
static struct dwc2_hsotg_req *get_ep_head(struct dwc2_hsotg_ep *hs_ep)
{
return list_first_entry_or_null(&hs_ep->queue, struct dwc2_hsotg_req,
queue);
}
/**
* dwc2_gadget_start_next_request - Starts next request from ep queue
* @hs_ep: Endpoint structure
*
* Starts the request at the head of the endpoint's queue, if any. For an
* ISOC endpoint with an empty queue this only logs that no more requests
* are pending; resynchronization is then restarted from the NAK or
* OUTTKNEPDIS interrupt handlers.
*/
static void dwc2_gadget_start_next_request(struct dwc2_hsotg_ep *hs_ep)
{
struct dwc2_hsotg *hsotg = hs_ep->parent;
int dir_in = hs_ep->dir_in;
struct dwc2_hsotg_req *hs_req;
if (!list_empty(&hs_ep->queue)) {
hs_req = get_ep_head(hs_ep);
dwc2_hsotg_start_req(hsotg, hs_ep, hs_req, false);
return;
}
if (!hs_ep->isochronous)
return;
if (dir_in) {
dev_dbg(hsotg->dev, "%s: No more ISOC-IN requests\n",
__func__);
} else {
dev_dbg(hsotg->dev, "%s: No more ISOC-OUT requests\n",
__func__);
}
}
/**
* dwc2_hsotg_process_req_feature - process request {SET,CLEAR}_FEATURE
* @hsotg: The device state
* @ctrl: USB control request
*/
static int dwc2_hsotg_process_req_feature(struct dwc2_hsotg *hsotg,
struct usb_ctrlrequest *ctrl)
{
struct dwc2_hsotg_ep *ep0 = hsotg->eps_out[0];
struct dwc2_hsotg_req *hs_req;
bool set = (ctrl->bRequest == USB_REQ_SET_FEATURE);
struct dwc2_hsotg_ep *ep;
int ret;
bool halted;
u32 recip;
u32 wValue;
u32 wIndex;
dev_dbg(hsotg->dev, "%s: %s_FEATURE\n",
__func__, set ? "SET" : "CLEAR");
wValue = le16_to_cpu(ctrl->wValue);
wIndex = le16_to_cpu(ctrl->wIndex);
recip = ctrl->bRequestType & USB_RECIP_MASK;
switch (recip) {
case USB_RECIP_DEVICE:
switch (wValue) {
case USB_DEVICE_REMOTE_WAKEUP:
if (set)
hsotg->remote_wakeup_allowed = 1;
else
hsotg->remote_wakeup_allowed = 0;
break;
case USB_DEVICE_TEST_MODE:
if ((wIndex & 0xff) != 0)
return -EINVAL;
if (!set)
return -EINVAL;
hsotg->test_mode = wIndex >> 8;
break;
default:
return -ENOENT;
}
ret = dwc2_hsotg_send_reply(hsotg, ep0, NULL, 0);
if (ret) {
dev_err(hsotg->dev,
"%s: failed to send reply\n", __func__);
return ret;
}
break;
case USB_RECIP_ENDPOINT:
ep = ep_from_windex(hsotg, wIndex);
if (!ep) {
dev_dbg(hsotg->dev, "%s: no endpoint for 0x%04x\n",
__func__, wIndex);
return -ENOENT;
}
switch (wValue) {
case USB_ENDPOINT_HALT:
halted = ep->halted;
if (!ep->wedged)
dwc2_hsotg_ep_sethalt(&ep->ep, set, true);
ret = dwc2_hsotg_send_reply(hsotg, ep0, NULL, 0);
if (ret) {
dev_err(hsotg->dev,
"%s: failed to send reply\n", __func__);
return ret;
}
/*
* we have to complete all requests for ep if it was
* halted, and the halt was cleared by CLEAR_FEATURE
*/
if (!set && halted) {
/*
* If we have request in progress,
* then complete it
*/
if (ep->req) {
hs_req = ep->req;
ep->req = NULL;
list_del_init(&hs_req->queue);
if (hs_req->req.complete) {
spin_unlock(&hsotg->lock);
usb_gadget_giveback_request(
&ep->ep, &hs_req->req);
spin_lock(&hsotg->lock);
}
}
/* If we have pending request, then start it */
if (!ep->req)
dwc2_gadget_start_next_request(ep);
}
break;
default:
return -ENOENT;
}
break;
default:
return -ENOENT;
}
return 1;
}
static void dwc2_hsotg_enqueue_setup(struct dwc2_hsotg *hsotg);
/**
* dwc2_hsotg_stall_ep0 - stall ep0
* @hsotg: The device state
*
* Set stall for ep0 as response for setup request.
*/
static void dwc2_hsotg_stall_ep0(struct dwc2_hsotg *hsotg)
{
struct dwc2_hsotg_ep *ep0 = hsotg->eps_out[0];
u32 reg;
u32 ctrl;
dev_dbg(hsotg->dev, "ep0 stall (dir=%d)\n", ep0->dir_in);
reg = (ep0->dir_in) ? DIEPCTL0 : DOEPCTL0;
/*
* DxEPCTL_Stall will be cleared by EP once it has
* taken effect, so no need to clear later.
*/
ctrl = dwc2_readl(hsotg, reg);
ctrl |= DXEPCTL_STALL;
ctrl |= DXEPCTL_CNAK;
dwc2_writel(hsotg, ctrl, reg);
dev_dbg(hsotg->dev,
"written DXEPCTL=0x%08x to %08x (DXEPCTL=0x%08x)\n",
ctrl, reg, dwc2_readl(hsotg, reg));
/*
* complete won't be called, so we enqueue
* setup request here
*/
dwc2_hsotg_enqueue_setup(hsotg);
}
/**
* dwc2_hsotg_process_control - process a control request
* @hsotg: The device state
* @ctrl: The control request received
*
* The controller has received the SETUP phase of a control request, and
* needs to work out what to do next (and whether to pass it on to the
* gadget driver).
*/
static void dwc2_hsotg_process_control(struct dwc2_hsotg *hsotg,
struct usb_ctrlrequest *ctrl)
{
struct dwc2_hsotg_ep *ep0 = hsotg->eps_out[0];
int ret = 0;
u32 dcfg;
dev_dbg(hsotg->dev,
"ctrl Type=%02x, Req=%02x, V=%04x, I=%04x, L=%04x\n",
ctrl->bRequestType, ctrl->bRequest, ctrl->wValue,
ctrl->wIndex, ctrl->wLength);
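/*
* Work out the EP0 data-stage direction and the next ep0_state from
* the SETUP packet.
*/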
if (ctrl->wLength == 0) {
ep0->dir_in = 1;
hsotg->ep0_state = DWC2_EP0_STATUS_IN;
} else if (ctrl->bRequestType & USB_DIR_IN) {
ep0->dir_in = 1;
hsotg->ep0_state = DWC2_EP0_DATA_IN;
} else {
ep0->dir_in = 0;
hsotg->ep0_state = DWC2_EP0_DATA_OUT;
}
if ((ctrl->bRequestType & USB_TYPE_MASK) == USB_TYPE_STANDARD) {
switch (ctrl->bRequest) {
case USB_REQ_SET_ADDRESS:
hsotg->connected = 1;
dcfg = dwc2_readl(hsotg, DCFG);
dcfg &= ~DCFG_DEVADDR_MASK;
dcfg |= (le16_to_cpu(ctrl->wValue) <<
DCFG_DEVADDR_SHIFT) & DCFG_DEVADDR_MASK;
dwc2_writel(hsotg, dcfg, DCFG);
dev_info(hsotg->dev, "new address %d\n", ctrl->wValue);
ret = dwc2_hsotg_send_reply(hsotg, ep0, NULL, 0);
return;
case USB_REQ_GET_STATUS:
ret = dwc2_hsotg_process_req_status(hsotg, ctrl);
break;
case USB_REQ_CLEAR_FEATURE:
case USB_REQ_SET_FEATURE:
ret = dwc2_hsotg_process_req_feature(hsotg, ctrl);
break;
}
}
/* as a fallback, try delivering it to the driver to deal with */
if (ret == 0 && hsotg->driver) {
spin_unlock(&hsotg->lock);
ret = hsotg->driver->setup(&hsotg->gadget, ctrl);
spin_lock(&hsotg->lock);
if (ret < 0)
dev_dbg(hsotg->dev, "driver->setup() ret %d\n", ret);
}
hsotg->delayed_status = false;
if (ret == USB_GADGET_DELAYED_STATUS)
hsotg->delayed_status = true;
/*
* the request is either unhandlable, or is not formatted correctly
* so respond with a STALL for the status stage to indicate failure.
*/
if (ret < 0)
dwc2_hsotg_stall_ep0(hsotg);
}
/**
* dwc2_hsotg_complete_setup - completion of a setup transfer
* @ep: The endpoint the request was on.
* @req: The request completed.
*
* Called on completion of any requests the driver itself submitted for
* EP0 setup packets
*/
static void dwc2_hsotg_complete_setup(struct usb_ep *ep,
struct usb_request *req)
{
struct dwc2_hsotg_ep *hs_ep = our_ep(ep);
struct dwc2_hsotg *hsotg = hs_ep->parent;
if (req->status < 0) {
dev_dbg(hsotg->dev, "%s: failed %d\n", __func__, req->status);
return;
}
spin_lock(&hsotg->lock);
if (req->actual == 0)
dwc2_hsotg_enqueue_setup(hsotg);
else
dwc2_hsotg_process_control(hsotg, req->buf);
spin_unlock(&hsotg->lock);
}
/**
* dwc2_hsotg_enqueue_setup - start a request for EP0 packets
* @hsotg: The device state.
*
* Enqueue a request on EP0 if necessary so that any SETUP packets
* received from the host can be handled.
*/
static void dwc2_hsotg_enqueue_setup(struct dwc2_hsotg *hsotg)
{
struct usb_request *req = hsotg->ctrl_req;
struct dwc2_hsotg_req *hs_req = our_req(req);
int ret;
dev_dbg(hsotg->dev, "%s: queueing setup request\n", __func__);
req->zero = 0;
req->length = 8;
req->buf = hsotg->ctrl_buff;
req->complete = dwc2_hsotg_complete_setup;
if (!list_empty(&hs_req->queue)) {
dev_dbg(hsotg->dev, "%s already queued???\n", __func__);
return;
}
hsotg->eps_out[0]->dir_in = 0;
hsotg->eps_out[0]->send_zlp = 0;
hsotg->ep0_state = DWC2_EP0_SETUP;
ret = dwc2_hsotg_ep_queue(&hsotg->eps_out[0]->ep, req, GFP_ATOMIC);
if (ret < 0) {
dev_err(hsotg->dev, "%s: failed queue (%d)\n", __func__, ret);
/*
* Don't think there's much we can do other than watch the
* driver fail.
*/
}
}
static void dwc2_hsotg_program_zlp(struct dwc2_hsotg *hsotg,
struct dwc2_hsotg_ep *hs_ep)
{
u32 ctrl;
u8 index = hs_ep->index;
u32 epctl_reg = hs_ep->dir_in ? DIEPCTL(index) : DOEPCTL(index);
u32 epsiz_reg = hs_ep->dir_in ? DIEPTSIZ(index) : DOEPTSIZ(index);
if (hs_ep->dir_in)
dev_dbg(hsotg->dev, "Sending zero-length packet on ep%d\n",
index);
else
dev_dbg(hsotg->dev, "Receiving zero-length packet on ep%d\n",
index);
if (using_desc_dma(hsotg)) {
/* No specific buffer needed for ep0 ZLP */
dma_addr_t dma = hs_ep->desc_list_dma;
if (!index)
dwc2_gadget_set_ep0_desc_chain(hsotg, hs_ep);
dwc2_gadget_config_nonisoc_xfer_ddma(hs_ep, dma, 0);
} else {
dwc2_writel(hsotg, DXEPTSIZ_MC(1) | DXEPTSIZ_PKTCNT(1) |
DXEPTSIZ_XFERSIZE(0),
epsiz_reg);
}
ctrl = dwc2_readl(hsotg, epctl_reg);
ctrl |= DXEPCTL_CNAK; /* clear NAK set by core */
ctrl |= DXEPCTL_EPENA; /* ensure ep enabled */
ctrl |= DXEPCTL_USBACTEP;
dwc2_writel(hsotg, ctrl, epctl_reg);
}
/**
* dwc2_hsotg_complete_request - complete a request given to us
* @hsotg: The device state.
* @hs_ep: The endpoint the request was on.
* @hs_req: The request to complete.
* @result: The result code (0 => Ok, otherwise errno)
*
* The given request has finished, so call the necessary completion
* if it has one and then look to see if we can start a new request
* on the endpoint.
*
* Note, expects the ep to already be locked as appropriate.
*/
static void dwc2_hsotg_complete_request(struct dwc2_hsotg *hsotg,
struct dwc2_hsotg_ep *hs_ep,
struct dwc2_hsotg_req *hs_req,
int result)
{
if (!hs_req) {
dev_dbg(hsotg->dev, "%s: nothing to complete?\n", __func__);
return;
}
dev_dbg(hsotg->dev, "complete: ep %p %s, req %p, %d => %p\n",
hs_ep, hs_ep->ep.name, hs_req, result, hs_req->req.complete);
/*
* only replace the status if we've not already set an error
* from a previous transaction
*/
if (hs_req->req.status == -EINPROGRESS)
hs_req->req.status = result;
if (using_dma(hsotg))
dwc2_hsotg_unmap_dma(hsotg, hs_ep, hs_req);
dwc2_hsotg_handle_unaligned_buf_complete(hsotg, hs_ep, hs_req);
hs_ep->req = NULL;
list_del_init(&hs_req->queue);
/*
* call the complete request with the locks off, just in case the
* request tries to queue more work for this endpoint.
*/
if (hs_req->req.complete) {
spin_unlock(&hsotg->lock);
usb_gadget_giveback_request(&hs_ep->ep, &hs_req->req);
spin_lock(&hsotg->lock);
}
/* In DDMA don't need to proceed to starting of next ISOC request */
if (using_desc_dma(hsotg) && hs_ep->isochronous)
return;
/*
* Look to see if there is anything else to do. Note, the completion
* of the previous request may have caused a new request to be started
* so be careful when doing this.
*/
if (!hs_ep->req && result >= 0)
dwc2_gadget_start_next_request(hs_ep);
}
/*
* dwc2_gadget_complete_isoc_request_ddma - complete an isoc request in DDMA
* @hs_ep: The endpoint the request was on.
*
* Get the first request from the EP queue and determine the descriptor on
* which the completion happened. SW works out which descriptor is currently
* in use by HW and calculates the index of the completed descriptor from the
* value of the DEPDMA register, then updates the request's actual length and
* gives it back to the gadget.
*/
static void dwc2_gadget_complete_isoc_request_ddma(struct dwc2_hsotg_ep *hs_ep)
{
struct dwc2_hsotg *hsotg = hs_ep->parent;
struct dwc2_hsotg_req *hs_req;
struct usb_request *ureq;
u32 desc_sts;
u32 mask;
desc_sts = hs_ep->desc_list[hs_ep->compl_desc].status;
/* Process only descriptors with buffer status set to DMA done */
while ((desc_sts & DEV_DMA_BUFF_STS_MASK) >>
DEV_DMA_BUFF_STS_SHIFT == DEV_DMA_BUFF_STS_DMADONE) {
hs_req = get_ep_head(hs_ep);
if (!hs_req) {
dev_warn(hsotg->dev, "%s: ISOC EP queue empty\n", __func__);
return;
}
ureq = &hs_req->req;
/* Check completion status */
if ((desc_sts & DEV_DMA_STS_MASK) >> DEV_DMA_STS_SHIFT ==
DEV_DMA_STS_SUCC) {
mask = hs_ep->dir_in ? DEV_DMA_ISOC_TX_NBYTES_MASK :
DEV_DMA_ISOC_RX_NBYTES_MASK;
ureq->actual = ureq->length - ((desc_sts & mask) >>
DEV_DMA_ISOC_NBYTES_SHIFT);
/* Adjust actual length for ISOC OUT if the length is
* not a multiple of 4
*/
if (!hs_ep->dir_in && ureq->length & 0x3)
ureq->actual += 4 - (ureq->length & 0x3);
/* Set actual frame number for completed transfers */
ureq->frame_number =
(desc_sts & DEV_DMA_ISOC_FRNUM_MASK) >>
DEV_DMA_ISOC_FRNUM_SHIFT;
}
dwc2_hsotg_complete_request(hsotg, hs_ep, hs_req, 0);
hs_ep->compl_desc++;
if (hs_ep->compl_desc > (MAX_DMA_DESC_NUM_HS_ISOC - 1))
hs_ep->compl_desc = 0;
desc_sts = hs_ep->desc_list[hs_ep->compl_desc].status;
}
}
/*
* dwc2_gadget_handle_isoc_bna - handle BNA interrupt for ISOC.
* @hs_ep: The isochronous endpoint.
*
* If the EP is ISOC-OUT, the RX FIFO must be flushed to remove the source of
* the BNA interrupt. Reset target_frame and next_desc so that ISOC transfers
* can be restarted from the NAK interrupt (IN direction) or the OUTTKNEPDIS
* interrupt (OUT direction).
*/
static void dwc2_gadget_handle_isoc_bna(struct dwc2_hsotg_ep *hs_ep)
{
struct dwc2_hsotg *hsotg = hs_ep->parent;
if (!hs_ep->dir_in)
dwc2_flush_rx_fifo(hsotg);
dwc2_hsotg_complete_request(hsotg, hs_ep, get_ep_head(hs_ep), 0);
hs_ep->target_frame = TARGET_FRAME_INITIAL;
hs_ep->next_desc = 0;
hs_ep->compl_desc = 0;
}
/**
* dwc2_hsotg_rx_data - receive data from the FIFO for an endpoint
* @hsotg: The device state.
* @ep_idx: The endpoint index for the data
* @size: The size of data in the fifo, in bytes
*
* The FIFO status shows there is data to read from the FIFO for a given
* endpoint, so sort out whether we need to read the data into a request
* that has been made for that endpoint.
*/
static void dwc2_hsotg_rx_data(struct dwc2_hsotg *hsotg, int ep_idx, int size)
{
struct dwc2_hsotg_ep *hs_ep = hsotg->eps_out[ep_idx];
struct dwc2_hsotg_req *hs_req = hs_ep->req;
int to_read;
int max_req;
int read_ptr;
if (!hs_req) {
u32 epctl = dwc2_readl(hsotg, DOEPCTL(ep_idx));
int ptr;
dev_dbg(hsotg->dev,
"%s: FIFO %d bytes on ep%d but no req (DXEPCTl=0x%08x)\n",
__func__, size, ep_idx, epctl);
/* dump the data from the FIFO, we've nothing we can do */
for (ptr = 0; ptr < size; ptr += 4)
(void)dwc2_readl(hsotg, EPFIFO(ep_idx));
return;
}
to_read = size;
read_ptr = hs_req->req.actual;
max_req = hs_req->req.length - read_ptr;
dev_dbg(hsotg->dev, "%s: read %d/%d, done %d/%d\n",
__func__, to_read, max_req, read_ptr, hs_req->req.length);
if (to_read > max_req) {
/*
* more data appeared than we were willing
* to deal with in this request.
*/
/* currently we don't deal with this */
WARN_ON_ONCE(1);
}
hs_ep->total_data += to_read;
hs_req->req.actual += to_read;
to_read = DIV_ROUND_UP(to_read, 4);
/*
* note, we might over-write the buffer end by 3 bytes depending on
* alignment of the data.
*/
dwc2_readl_rep(hsotg, EPFIFO(ep_idx),
hs_req->req.buf + read_ptr, to_read);
}
/**
* dwc2_hsotg_ep0_zlp - send/receive zero-length packet on control endpoint
* @hsotg: The device instance
* @dir_in: If IN zlp
*
* Generate a zero-length IN packet request for terminating a SETUP
* transaction.
*
* Note, since we don't write any data to the TxFIFO, then it is
* currently believed that we do not need to wait for any space in
* the TxFIFO.
*/
static void dwc2_hsotg_ep0_zlp(struct dwc2_hsotg *hsotg, bool dir_in)
{
/* eps_out[0] is used in both directions */
hsotg->eps_out[0]->dir_in = dir_in;
hsotg->ep0_state = dir_in ? DWC2_EP0_STATUS_IN : DWC2_EP0_STATUS_OUT;
dwc2_hsotg_program_zlp(hsotg, hsotg->eps_out[0]);
}
/*
* dwc2_gadget_get_xfersize_ddma - get bytes remaining from the descriptors
* @hs_ep: The endpoint the transfer went on
*
* Iterate over the endpoint's descriptor chain and sum the bytes remaining
* in the DMA descriptors after the transfer has completed. Used for non-ISOC
* EPs.
*/
static unsigned int dwc2_gadget_get_xfersize_ddma(struct dwc2_hsotg_ep *hs_ep)
{
const struct usb_endpoint_descriptor *ep_desc = hs_ep->ep.desc;
struct dwc2_hsotg *hsotg = hs_ep->parent;
unsigned int bytes_rem = 0;
unsigned int bytes_rem_correction = 0;
struct dwc2_dma_desc *desc = hs_ep->desc_list;
int i;
u32 status;
u32 mps = hs_ep->ep.maxpacket;
int dir_in = hs_ep->dir_in;
if (!desc)
return -EINVAL;
/* Interrupt OUT EP with mps not multiple of 4 */
if (hs_ep->index)
if (usb_endpoint_xfer_int(ep_desc) && !dir_in && (mps % 4))
bytes_rem_correction = 4 - (mps % 4);
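/*
* Walk the descriptor chain, summing the remaining byte counts, until
* the descriptor marked as last (DEV_DMA_L) is reached.
*/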
for (i = 0; i < hs_ep->desc_count; ++i) {
status = desc->status;
bytes_rem += status & DEV_DMA_NBYTES_MASK;
bytes_rem -= bytes_rem_correction;
if (status & DEV_DMA_STS_MASK)
dev_err(hsotg->dev, "descriptor %d closed with %x\n",
i, status & DEV_DMA_STS_MASK);
if (status & DEV_DMA_L)
break;
desc++;
}
return bytes_rem;
}
/**
* dwc2_hsotg_handle_outdone - handle receiving OutDone/SetupDone from RXFIFO
* @hsotg: The device instance
* @epnum: The endpoint received from
*
* The RXFIFO has delivered an OutDone event, which means that the data
* transfer for an OUT endpoint has been completed, either by a short
* packet or by the finish of a transfer.
*/
static void dwc2_hsotg_handle_outdone(struct dwc2_hsotg *hsotg, int epnum)
{
u32 epsize = dwc2_readl(hsotg, DOEPTSIZ(epnum));
struct dwc2_hsotg_ep *hs_ep = hsotg->eps_out[epnum];
struct dwc2_hsotg_req *hs_req = hs_ep->req;
struct usb_request *req = &hs_req->req;
unsigned int size_left = DXEPTSIZ_XFERSIZE_GET(epsize);
int result = 0;
if (!hs_req) {
dev_dbg(hsotg->dev, "%s: no request active\n", __func__);
return;
}
if (epnum == 0 && hsotg->ep0_state == DWC2_EP0_STATUS_OUT) {
dev_dbg(hsotg->dev, "zlp packet received\n");
dwc2_hsotg_complete_request(hsotg, hs_ep, hs_req, 0);
dwc2_hsotg_enqueue_setup(hsotg);
return;
}
if (using_desc_dma(hsotg))
size_left = dwc2_gadget_get_xfersize_ddma(hs_ep);
if (using_dma(hsotg)) {
unsigned int size_done;
/*
* Calculate the size of the transfer by checking how much
* is left in the endpoint size register and then working it
* out from the amount we loaded for the transfer.
*
* We need to do this as DMA pointers are always 32bit aligned
* so may overshoot/undershoot the transfer.
*/
size_done = hs_ep->size_loaded - size_left;
size_done += hs_ep->last_load;
req->actual = size_done;
}
/* if there is more request to do, schedule new transfer */
if (req->actual < req->length && size_left == 0) {
dwc2_hsotg_start_req(hsotg, hs_ep, hs_req, true);
return;
}
if (req->actual < req->length && req->short_not_ok) {
dev_dbg(hsotg->dev, "%s: got %d/%d (short not ok) => error\n",
__func__, req->actual, req->length);
/*
* todo - what should we return here? there's no one else
* even bothering to check the status.
*/
}
/* DDMA IN status phase will start from StsPhseRcvd interrupt */
if (!using_desc_dma(hsotg) && epnum == 0 &&
hsotg->ep0_state == DWC2_EP0_DATA_OUT) {
/* Move to STATUS IN */
if (!hsotg->delayed_status)
dwc2_hsotg_ep0_zlp(hsotg, true);
}
/* Set actual frame number for completed transfers */
if (!using_desc_dma(hsotg) && hs_ep->isochronous) {
req->frame_number = hs_ep->target_frame;
dwc2_gadget_incr_frame_num(hs_ep);
}
dwc2_hsotg_complete_request(hsotg, hs_ep, hs_req, result);
}
/**
* dwc2_hsotg_handle_rx - RX FIFO has data
* @hsotg: The device instance
*
* The IRQ handler has detected that the RX FIFO has some data in it
* that requires processing, so find out what is in there and do the
* appropriate read.
*
* The RXFIFO is a true FIFO, the packets coming out are still in packet
* chunks, so if you have x packets received on an endpoint you'll get x
* FIFO events delivered, each with a packet's worth of data in it.
*
* When using DMA, we should not be processing events from the RXFIFO
* as the actual data should be sent to the memory directly and we turn
* on the completion interrupts to get notifications of transfer completion.
*/
static void dwc2_hsotg_handle_rx(struct dwc2_hsotg *hsotg)
{
u32 grxstsr = dwc2_readl(hsotg, GRXSTSP);
u32 epnum, status, size;
WARN_ON(using_dma(hsotg));
epnum = grxstsr & GRXSTS_EPNUM_MASK;
status = grxstsr & GRXSTS_PKTSTS_MASK;
size = grxstsr & GRXSTS_BYTECNT_MASK;
size >>= GRXSTS_BYTECNT_SHIFT;
dev_dbg(hsotg->dev, "%s: GRXSTSP=0x%08x (%d@%d)\n",
__func__, grxstsr, size, epnum);
switch ((status & GRXSTS_PKTSTS_MASK) >> GRXSTS_PKTSTS_SHIFT) {
case GRXSTS_PKTSTS_GLOBALOUTNAK:
dev_dbg(hsotg->dev, "GLOBALOUTNAK\n");
break;
case GRXSTS_PKTSTS_OUTDONE:
dev_dbg(hsotg->dev, "OutDone (Frame=0x%08x)\n",
dwc2_hsotg_read_frameno(hsotg));
if (!using_dma(hsotg))
dwc2_hsotg_handle_outdone(hsotg, epnum);
break;
case GRXSTS_PKTSTS_SETUPDONE:
dev_dbg(hsotg->dev,
"SetupDone (Frame=0x%08x, DOPEPCTL=0x%08x)\n",
dwc2_hsotg_read_frameno(hsotg),
dwc2_readl(hsotg, DOEPCTL(0)));
/*
* Call dwc2_hsotg_handle_outdone here if it was not called from
* GRXSTS_PKTSTS_OUTDONE. That is, if the core didn't
* generate GRXSTS_PKTSTS_OUTDONE for setup packet.
*/
if (hsotg->ep0_state == DWC2_EP0_SETUP)
dwc2_hsotg_handle_outdone(hsotg, epnum);
break;
case GRXSTS_PKTSTS_OUTRX:
dwc2_hsotg_rx_data(hsotg, epnum, size);
break;
case GRXSTS_PKTSTS_SETUPRX:
dev_dbg(hsotg->dev,
"SetupRX (Frame=0x%08x, DOPEPCTL=0x%08x)\n",
dwc2_hsotg_read_frameno(hsotg),
dwc2_readl(hsotg, DOEPCTL(0)));
WARN_ON(hsotg->ep0_state != DWC2_EP0_SETUP);
dwc2_hsotg_rx_data(hsotg, epnum, size);
break;
default:
dev_warn(hsotg->dev, "%s: unknown status %08x\n",
__func__, grxstsr);
dwc2_hsotg_dump(hsotg);
break;
}
}
/**
* dwc2_hsotg_ep0_mps - turn max packet size into register setting
* @mps: The maximum packet size in bytes.
*/
static u32 dwc2_hsotg_ep0_mps(unsigned int mps)
{
switch (mps) {
case 64:
return D0EPCTL_MPS_64;
case 32:
return D0EPCTL_MPS_32;
case 16:
return D0EPCTL_MPS_16;
case 8:
return D0EPCTL_MPS_8;
}
/* bad max packet size, warn and return invalid result */
WARN_ON(1);
return (u32)-1;
}
/**
* dwc2_hsotg_set_ep_maxpacket - set endpoint's max-packet field
* @hsotg: The driver state.
* @ep: The index number of the endpoint
* @mps: The maximum packet size in bytes
* @mc: The multicount value
* @dir_in: True if direction is in.
*
* Configure the maximum packet size for the given endpoint, updating
* the hardware control registers to reflect this.
*/
static void dwc2_hsotg_set_ep_maxpacket(struct dwc2_hsotg *hsotg,
unsigned int ep, unsigned int mps,
unsigned int mc, unsigned int dir_in)
{
struct dwc2_hsotg_ep *hs_ep;
u32 reg;
hs_ep = index_to_ep(hsotg, ep, dir_in);
if (!hs_ep)
return;
if (ep == 0) {
u32 mps_bytes = mps;
/* EP0 is a special case */
mps = dwc2_hsotg_ep0_mps(mps_bytes);
if (mps > 3)
goto bad_mps;
hs_ep->ep.maxpacket = mps_bytes;
hs_ep->mc = 1;
} else {
if (mps > 1024)
goto bad_mps;
hs_ep->mc = mc;
if (mc > 3)
goto bad_mps;
hs_ep->ep.maxpacket = mps;
}
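/*
* Update the MPS field of the endpoint control register for the
* programmed direction.
*/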
if (dir_in) {
reg = dwc2_readl(hsotg, DIEPCTL(ep));
reg &= ~DXEPCTL_MPS_MASK;
reg |= mps;
dwc2_writel(hsotg, reg, DIEPCTL(ep));
} else {
reg = dwc2_readl(hsotg, DOEPCTL(ep));
reg &= ~DXEPCTL_MPS_MASK;
reg |= mps;
dwc2_writel(hsotg, reg, DOEPCTL(ep));
}
return;
bad_mps:
dev_err(hsotg->dev, "ep%d: bad mps of %d\n", ep, mps);
}
/**
* dwc2_hsotg_txfifo_flush - flush Tx FIFO
* @hsotg: The driver state
* @idx: The index for the endpoint (0..15)
*/
static void dwc2_hsotg_txfifo_flush(struct dwc2_hsotg *hsotg, unsigned int idx)
{
dwc2_writel(hsotg, GRSTCTL_TXFNUM(idx) | GRSTCTL_TXFFLSH,
GRSTCTL);
/* wait until the fifo is flushed */
if (dwc2_hsotg_wait_bit_clear(hsotg, GRSTCTL, GRSTCTL_TXFFLSH, 100))
dev_warn(hsotg->dev, "%s: timeout flushing fifo GRSTCTL_TXFFLSH\n",
__func__);
}
/**
* dwc2_hsotg_trytx - check to see if anything needs transmitting
* @hsotg: The driver state
* @hs_ep: The driver endpoint to check.
*
* Check to see if there is a request that has data to send, and if so
* make an attempt to write data into the FIFO.
*/
static int dwc2_hsotg_trytx(struct dwc2_hsotg *hsotg,
struct dwc2_hsotg_ep *hs_ep)
{
struct dwc2_hsotg_req *hs_req = hs_ep->req;
if (!hs_ep->dir_in || !hs_req) {
/*
* if the request is not enqueued, we disable interrupts
* for the endpoint, except for ep0
*/
if (hs_ep->index != 0)
dwc2_hsotg_ctrl_epint(hsotg, hs_ep->index,
hs_ep->dir_in, 0);
return 0;
}
if (hs_req->req.actual < hs_req->req.length) {
dev_dbg(hsotg->dev, "trying to write more for ep%d\n",
hs_ep->index);
return dwc2_hsotg_write_fifo(hsotg, hs_ep, hs_req);
}
return 0;
}
/**
* dwc2_hsotg_complete_in - complete IN transfer
* @hsotg: The device state.
* @hs_ep: The endpoint that has just completed.
*
* An IN transfer has been completed, update the transfer's state and then
* call the relevant completion routines.
*/
static void dwc2_hsotg_complete_in(struct dwc2_hsotg *hsotg,
struct dwc2_hsotg_ep *hs_ep)
{
struct dwc2_hsotg_req *hs_req = hs_ep->req;
u32 epsize = dwc2_readl(hsotg, DIEPTSIZ(hs_ep->index));
int size_left, size_done;
if (!hs_req) {
dev_dbg(hsotg->dev, "XferCompl but no req\n");
return;
}
/* Finish ZLP handling for IN EP0 transactions */
if (hs_ep->index == 0 && hsotg->ep0_state == DWC2_EP0_STATUS_IN) {
dev_dbg(hsotg->dev, "zlp packet sent\n");
/*
* While sending the zlp for DWC2_EP0_STATUS_IN, the EP direction was
* changed to IN. Change it back to complete the OUT transfer request.
*/
hs_ep->dir_in = 0;
dwc2_hsotg_complete_request(hsotg, hs_ep, hs_req, 0);
if (hsotg->test_mode) {
int ret;
ret = dwc2_hsotg_set_test_mode(hsotg, hsotg->test_mode);
if (ret < 0) {
dev_dbg(hsotg->dev, "Invalid Test #%d\n",
hsotg->test_mode);
dwc2_hsotg_stall_ep0(hsotg);
return;
}
}
dwc2_hsotg_enqueue_setup(hsotg);
return;
}
/*
* Calculate the size of the transfer by checking how much is left
* in the endpoint size register and then working it out from
* the amount we loaded for the transfer.
*
* We do this even for DMA, as the transfer may have incremented
* past the end of the buffer (DMA transfers are always 32bit
* aligned).
*/
if (using_desc_dma(hsotg)) {
size_left = dwc2_gadget_get_xfersize_ddma(hs_ep);
if (size_left < 0)
dev_err(hsotg->dev, "error parsing DDMA results %d\n",
size_left);
} else {
size_left = DXEPTSIZ_XFERSIZE_GET(epsize);
}
size_done = hs_ep->size_loaded - size_left;
size_done += hs_ep->last_load;
if (hs_req->req.actual != size_done)
dev_dbg(hsotg->dev, "%s: adjusting size done %d => %d\n",
__func__, hs_req->req.actual, size_done);
hs_req->req.actual = size_done;
dev_dbg(hsotg->dev, "req->length:%d req->actual:%d req->zero:%d\n",
hs_req->req.length, hs_req->req.actual, hs_req->req.zero);
if (!size_left && hs_req->req.actual < hs_req->req.length) {
dev_dbg(hsotg->dev, "%s trying more for req...\n", __func__);
dwc2_hsotg_start_req(hsotg, hs_ep, hs_req, true);
return;
}
/* Zlp for all endpoints in non DDMA, for ep0 only in DATA IN stage */
if (hs_ep->send_zlp) {
hs_ep->send_zlp = 0;
if (!using_desc_dma(hsotg)) {
dwc2_hsotg_program_zlp(hsotg, hs_ep);
/* transfer will be completed on next complete interrupt */
return;
}
}
if (hs_ep->index == 0 && hsotg->ep0_state == DWC2_EP0_DATA_IN) {
/* Move to STATUS OUT */
dwc2_hsotg_ep0_zlp(hsotg, false);
return;
}
/* Set actual frame number for completed transfers */
if (!using_desc_dma(hsotg) && hs_ep->isochronous) {
hs_req->req.frame_number = hs_ep->target_frame;
dwc2_gadget_incr_frame_num(hs_ep);
}
dwc2_hsotg_complete_request(hsotg, hs_ep, hs_req, 0);
}
/**
* dwc2_gadget_read_ep_interrupts - reads interrupts for given ep
* @hsotg: The device state.
* @idx: Index of ep.
* @dir_in: Endpoint direction 1-in 0-out.
*
* Read the pending interrupts for the endpoint with the given index and
* direction, masking epint_reg with the corresponding interrupt mask.
*/
static u32 dwc2_gadget_read_ep_interrupts(struct dwc2_hsotg *hsotg,
unsigned int idx, int dir_in)
{
u32 epmsk_reg = dir_in ? DIEPMSK : DOEPMSK;
u32 epint_reg = dir_in ? DIEPINT(idx) : DOEPINT(idx);
u32 ints;
u32 mask;
u32 diepempmsk;
mask = dwc2_readl(hsotg, epmsk_reg);
diepempmsk = dwc2_readl(hsotg, DIEPEMPMSK);
mask |= ((diepempmsk >> idx) & 0x1) ? DIEPMSK_TXFIFOEMPTY : 0;
mask |= DXEPINT_SETUP_RCVD;
ints = dwc2_readl(hsotg, epint_reg);
ints &= mask;
return ints;
}
/**
* dwc2_gadget_handle_ep_disabled - handle DXEPINT_EPDISBLD
* @hs_ep: The endpoint on which interrupt is asserted.
*
* This interrupt indicates that the endpoint has been disabled per the
* application's request.
*
* For IN endpoints the TxFIFO is flushed and, for stalled BULK endpoints,
* DCTL_CGNPINNAK is set to clear the global non-periodic IN NAK.
*
* For ISOC endpoints, every queued request whose target frame has already
* elapsed is completed with -ENODATA.
*/
static void dwc2_gadget_handle_ep_disabled(struct dwc2_hsotg_ep *hs_ep)
{
struct dwc2_hsotg *hsotg = hs_ep->parent;
struct dwc2_hsotg_req *hs_req;
unsigned char idx = hs_ep->index;
int dir_in = hs_ep->dir_in;
u32 epctl_reg = dir_in ? DIEPCTL(idx) : DOEPCTL(idx);
int dctl = dwc2_readl(hsotg, DCTL);
dev_dbg(hsotg->dev, "%s: EPDisbld\n", __func__);
if (dir_in) {
int epctl = dwc2_readl(hsotg, epctl_reg);
dwc2_hsotg_txfifo_flush(hsotg, hs_ep->fifo_index);
if ((epctl & DXEPCTL_STALL) && (epctl & DXEPCTL_EPTYPE_BULK)) {
int dctl = dwc2_readl(hsotg, DCTL);
dctl |= DCTL_CGNPINNAK;
dwc2_writel(hsotg, dctl, DCTL);
}
} else {
if (dctl & DCTL_GOUTNAKSTS) {
dctl |= DCTL_CGOUTNAK;
dwc2_writel(hsotg, dctl, DCTL);
}
}
if (!hs_ep->isochronous)
return;
if (list_empty(&hs_ep->queue)) {
dev_dbg(hsotg->dev, "%s: complete_ep 0x%p, ep->queue empty!\n",
__func__, hs_ep);
return;
}
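/*
* Retire every queued request whose target frame has already elapsed,
* completing each with -ENODATA.
*/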
do {
hs_req = get_ep_head(hs_ep);
if (hs_req) {
hs_req->req.frame_number = hs_ep->target_frame;
hs_req->req.actual = 0;
dwc2_hsotg_complete_request(hsotg, hs_ep, hs_req,
-ENODATA);
}
dwc2_gadget_incr_frame_num(hs_ep);
/* Update current frame number value. */
hsotg->frame_number = dwc2_hsotg_read_frameno(hsotg);
} while (dwc2_gadget_target_frame_elapsed(hs_ep));
}
/**
* dwc2_gadget_handle_out_token_ep_disabled - handle DXEPINT_OUTTKNEPDIS
* @ep: The endpoint on which interrupt is asserted.
*
* This is the starting point for an ISOC-OUT transfer; synchronization is done
* with the first OUT token received from the host while the corresponding EP
* is disabled.
*
* The device does not know the initial frame in which the OUT token will
* arrive, so the HW generates OUTTKNEPDIS (OUT token received while the EP is
* disabled). On this interrupt, SW calculates the frame for the next transfer.
*/
static void dwc2_gadget_handle_out_token_ep_disabled(struct dwc2_hsotg_ep *ep)
{
struct dwc2_hsotg *hsotg = ep->parent;
struct dwc2_hsotg_req *hs_req;
int dir_in = ep->dir_in;
if (dir_in || !ep->isochronous)
return;
if (using_desc_dma(hsotg)) {
if (ep->target_frame == TARGET_FRAME_INITIAL) {
/* Start first ISO Out */
ep->target_frame = hsotg->frame_number;
dwc2_gadget_start_isoc_ddma(ep);
}
return;
}
if (ep->target_frame == TARGET_FRAME_INITIAL) {
u32 ctrl;
ep->target_frame = hsotg->frame_number;
if (ep->interval > 1) {
ctrl = dwc2_readl(hsotg, DOEPCTL(ep->index));
if (ep->target_frame & 0x1)
ctrl |= DXEPCTL_SETODDFR;
else
ctrl |= DXEPCTL_SETEVENFR;
dwc2_writel(hsotg, ctrl, DOEPCTL(ep->index));
}
}
while (dwc2_gadget_target_frame_elapsed(ep)) {
hs_req = get_ep_head(ep);
if (hs_req) {
hs_req->req.frame_number = ep->target_frame;
hs_req->req.actual = 0;
dwc2_hsotg_complete_request(hsotg, ep, hs_req, -ENODATA);
}
dwc2_gadget_incr_frame_num(ep);
/* Update current frame number value. */
hsotg->frame_number = dwc2_hsotg_read_frameno(hsotg);
}
if (!ep->req)
dwc2_gadget_start_next_request(ep);
}
static void dwc2_hsotg_ep_stop_xfr(struct dwc2_hsotg *hsotg,
struct dwc2_hsotg_ep *hs_ep);
/**
* dwc2_gadget_handle_nak - handle NAK interrupt
* @hs_ep: The endpoint on which interrupt is asserted.
*
* This is the starting point for an ISOC-IN transfer; synchronization is done
* with the first IN token received from the host while the corresponding EP
* is disabled.
*
* The device does not know when the first IN token will arrive from the host.
* On its arrival the HW generates two interrupts: 'IN token received while
* FIFO empty' and 'NAK'. For ISOC-IN, the NAK interrupt means the token has
* arrived and a ZLP was sent in response because there was no data in the
* FIFO. SW uses this interrupt to obtain the frame in which the token arrived
* and then, based on the interval, calculates the frame for the next transfer.
*/
static void dwc2_gadget_handle_nak(struct dwc2_hsotg_ep *hs_ep)
{
struct dwc2_hsotg *hsotg = hs_ep->parent;
struct dwc2_hsotg_req *hs_req;
int dir_in = hs_ep->dir_in;
u32 ctrl;
if (!dir_in || !hs_ep->isochronous)
return;
if (hs_ep->target_frame == TARGET_FRAME_INITIAL) {
if (using_desc_dma(hsotg)) {
hs_ep->target_frame = hsotg->frame_number;
dwc2_gadget_incr_frame_num(hs_ep);
/* In service interval mode target_frame must
* be set to last (u)frame of the service interval.
*/
if (hsotg->params.service_interval) {
/* Set target_frame to the first (u)frame of
* the service interval
*/
hs_ep->target_frame &= ~hs_ep->interval + 1;
/* Set target_frame to the last (u)frame of
* the service interval
*/
dwc2_gadget_incr_frame_num(hs_ep);
dwc2_gadget_dec_frame_num_by_one(hs_ep);
}
dwc2_gadget_start_isoc_ddma(hs_ep);
return;
}
hs_ep->target_frame = hsotg->frame_number;
if (hs_ep->interval > 1) {
u32 ctrl = dwc2_readl(hsotg,
DIEPCTL(hs_ep->index));
if (hs_ep->target_frame & 0x1)
ctrl |= DXEPCTL_SETODDFR;
else
ctrl |= DXEPCTL_SETEVENFR;
dwc2_writel(hsotg, ctrl, DIEPCTL(hs_ep->index));
}
}
if (using_desc_dma(hsotg))
return;
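/*
* Non-DDMA path: stop any transfer that is still armed (or just flush
* the TxFIFO), then retire requests whose target frame has elapsed.
*/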
ctrl = dwc2_readl(hsotg, DIEPCTL(hs_ep->index));
if (ctrl & DXEPCTL_EPENA)
dwc2_hsotg_ep_stop_xfr(hsotg, hs_ep);
else
dwc2_hsotg_txfifo_flush(hsotg, hs_ep->fifo_index);
while (dwc2_gadget_target_frame_elapsed(hs_ep)) {
hs_req = get_ep_head(hs_ep);
if (hs_req) {
hs_req->req.frame_number = hs_ep->target_frame;
hs_req->req.actual = 0;
dwc2_hsotg_complete_request(hsotg, hs_ep, hs_req, -ENODATA);
}
dwc2_gadget_incr_frame_num(hs_ep);
/* Update current frame number value. */
hsotg->frame_number = dwc2_hsotg_read_frameno(hsotg);
}
if (!hs_ep->req)
dwc2_gadget_start_next_request(hs_ep);
}
/**
* dwc2_hsotg_epint - handle an in/out endpoint interrupt
* @hsotg: The driver state
* @idx: The index for the endpoint (0..15)
* @dir_in: Set if this is an IN endpoint
*
* Process and clear any interrupt pending for an individual endpoint
*/
static void dwc2_hsotg_epint(struct dwc2_hsotg *hsotg, unsigned int idx,
int dir_in)
{
struct dwc2_hsotg_ep *hs_ep = index_to_ep(hsotg, idx, dir_in);
u32 epint_reg = dir_in ? DIEPINT(idx) : DOEPINT(idx);
u32 epctl_reg = dir_in ? DIEPCTL(idx) : DOEPCTL(idx);
u32 epsiz_reg = dir_in ? DIEPTSIZ(idx) : DOEPTSIZ(idx);
u32 ints;
ints = dwc2_gadget_read_ep_interrupts(hsotg, idx, dir_in);
/* Clear endpoint interrupts */
dwc2_writel(hsotg, ints, epint_reg);
if (!hs_ep) {
dev_err(hsotg->dev, "%s:Interrupt for unconfigured ep%d(%s)\n",
__func__, idx, dir_in ? "in" : "out");
return;
}
dev_dbg(hsotg->dev, "%s: ep%d(%s) DxEPINT=0x%08x\n",
__func__, idx, dir_in ? "in" : "out", ints);
/* Don't process XferCompl interrupt if it is a setup packet */
if (idx == 0 && (ints & (DXEPINT_SETUP | DXEPINT_SETUP_RCVD)))
ints &= ~DXEPINT_XFERCOMPL;
/*
* Don't process XferCompl interrupt in DDMA if EP0 is still in SETUP
* stage and xfercomplete was generated without SETUP phase done
* interrupt. SW should parse received setup packet only after host's
* exit from setup phase of control transfer.
*/
if (using_desc_dma(hsotg) && idx == 0 && !hs_ep->dir_in &&
hsotg->ep0_state == DWC2_EP0_SETUP && !(ints & DXEPINT_SETUP))
ints &= ~DXEPINT_XFERCOMPL;
if (ints & DXEPINT_XFERCOMPL) {
dev_dbg(hsotg->dev,
"%s: XferCompl: DxEPCTL=0x%08x, DXEPTSIZ=%08x\n",
__func__, dwc2_readl(hsotg, epctl_reg),
dwc2_readl(hsotg, epsiz_reg));
/* In DDMA handle isochronous requests separately */
if (using_desc_dma(hsotg) && hs_ep->isochronous) {
dwc2_gadget_complete_isoc_request_ddma(hs_ep);
} else if (dir_in) {
/*
* We get OutDone from the FIFO, so we only
* need to look at completing IN requests here
* if operating slave mode
*/
if (!hs_ep->isochronous || !(ints & DXEPINT_NAKINTRPT))
dwc2_hsotg_complete_in(hsotg, hs_ep);
if (idx == 0 && !hs_ep->req)
dwc2_hsotg_enqueue_setup(hsotg);
} else if (using_dma(hsotg)) {
/*
* We're using DMA, we need to fire an OutDone here
* as we ignore the RXFIFO.
*/
if (!hs_ep->isochronous || !(ints & DXEPINT_OUTTKNEPDIS))
dwc2_hsotg_handle_outdone(hsotg, idx);
}
}
if (ints & DXEPINT_EPDISBLD)
dwc2_gadget_handle_ep_disabled(hs_ep);
if (ints & DXEPINT_OUTTKNEPDIS)
dwc2_gadget_handle_out_token_ep_disabled(hs_ep);
if (ints & DXEPINT_NAKINTRPT)
dwc2_gadget_handle_nak(hs_ep);
if (ints & DXEPINT_AHBERR)
dev_dbg(hsotg->dev, "%s: AHBErr\n", __func__);
if (ints & DXEPINT_SETUP) { /* Setup or Timeout */
dev_dbg(hsotg->dev, "%s: Setup/Timeout\n", __func__);
if (using_dma(hsotg) && idx == 0) {
/*
* this is the notification we've received a
* setup packet. In non-DMA mode we'd get this
* from the RXFIFO, instead we need to process
* the setup here.
*/
if (dir_in)
WARN_ON_ONCE(1);
else
dwc2_hsotg_handle_outdone(hsotg, 0);
}
}
if (ints & DXEPINT_STSPHSERCVD) {
dev_dbg(hsotg->dev, "%s: StsPhseRcvd\n", __func__);
/* Safety check EP0 state when STSPHSERCVD asserted */
if (hsotg->ep0_state == DWC2_EP0_DATA_OUT) {
/* Move to STATUS IN for DDMA */
if (using_desc_dma(hsotg)) {
if (!hsotg->delayed_status)
dwc2_hsotg_ep0_zlp(hsotg, true);
else
/* In the case of a 3-stage Control Write with
* delayed status, when the Status IN transfer
* started before STSPHSERCVD was asserted, the
* NAKSTS bit is not cleared by CNAK in
* dwc2_hsotg_start_req(). Clear NAKSTS now to
* allow the transfer to complete.
*/
dwc2_set_bit(hsotg, DIEPCTL(0),
DXEPCTL_CNAK);
}
}
}
if (ints & DXEPINT_BACK2BACKSETUP)
dev_dbg(hsotg->dev, "%s: B2BSetup/INEPNakEff\n", __func__);
if (ints & DXEPINT_BNAINTR) {
dev_dbg(hsotg->dev, "%s: BNA interrupt\n", __func__);
if (hs_ep->isochronous)
dwc2_gadget_handle_isoc_bna(hs_ep);
}
if (dir_in && !hs_ep->isochronous) {
/* not sure if this is important, but we'll clear it anyway */
if (ints & DXEPINT_INTKNTXFEMP) {
dev_dbg(hsotg->dev, "%s: ep%d: INTknTXFEmpMsk\n",
__func__, idx);
}
/* this probably means something bad is happening */
if (ints & DXEPINT_INTKNEPMIS) {
dev_warn(hsotg->dev, "%s: ep%d: INTknEP\n",
__func__, idx);
}
/* FIFO has space or is empty (see GAHBCFG) */
if (hsotg->dedicated_fifos &&
ints & DXEPINT_TXFEMP) {
dev_dbg(hsotg->dev, "%s: ep%d: TxFIFOEmpty\n",
__func__, idx);
if (!using_dma(hsotg))
dwc2_hsotg_trytx(hsotg, hs_ep);
}
}
}
/**
* dwc2_hsotg_irq_enumdone - Handle EnumDone interrupt (enumeration done)
* @hsotg: The device state.
*
* Handle updating the device settings after the enumeration phase has
* been completed.
*/
static void dwc2_hsotg_irq_enumdone(struct dwc2_hsotg *hsotg)
{
u32 dsts = dwc2_readl(hsotg, DSTS);
int ep0_mps = 0, ep_mps = 8;
/*
* This should signal the finish of the enumeration phase
* of the USB handshaking, so we should now know what rate
* we connected at.
*/
dev_dbg(hsotg->dev, "EnumDone (DSTS=0x%08x)\n", dsts);
/*
* note, since we're limited by the size of transfer on EP0, and
* it seems IN transfers must be an even number of packets, we do
* not advertise a 64-byte MPS on EP0.
*/
/* catch both EnumSpd_FS and EnumSpd_FS48 */
switch ((dsts & DSTS_ENUMSPD_MASK) >> DSTS_ENUMSPD_SHIFT) {
case DSTS_ENUMSPD_FS:
case DSTS_ENUMSPD_FS48:
hsotg->gadget.speed = USB_SPEED_FULL;
ep0_mps = EP0_MPS_LIMIT;
ep_mps = 1023;
break;
case DSTS_ENUMSPD_HS:
hsotg->gadget.speed = USB_SPEED_HIGH;
ep0_mps = EP0_MPS_LIMIT;
ep_mps = 1024;
break;
case DSTS_ENUMSPD_LS:
hsotg->gadget.speed = USB_SPEED_LOW;
ep0_mps = 8;
ep_mps = 8;
/*
* note, we don't actually support LS in this driver at the
* moment, and the documentation seems to imply that it isn't
* supported by the PHYs on some of the devices.
*/
break;
}
dev_info(hsotg->dev, "new device is %s\n",
usb_speed_string(hsotg->gadget.speed));
/*
* we should now know the maximum packet size for an
* endpoint, so set the endpoints to a default value.
*/
if (ep0_mps) {
int i;
/* Initialize ep0 for both in and out directions */
dwc2_hsotg_set_ep_maxpacket(hsotg, 0, ep0_mps, 0, 1);
dwc2_hsotg_set_ep_maxpacket(hsotg, 0, ep0_mps, 0, 0);
for (i = 1; i < hsotg->num_of_eps; i++) {
if (hsotg->eps_in[i])
dwc2_hsotg_set_ep_maxpacket(hsotg, i, ep_mps,
0, 1);
if (hsotg->eps_out[i])
dwc2_hsotg_set_ep_maxpacket(hsotg, i, ep_mps,
0, 0);
}
}
/* ensure after enumeration our EP0 is active */
dwc2_hsotg_enqueue_setup(hsotg);
dev_dbg(hsotg->dev, "EP0: DIEPCTL0=0x%08x, DOEPCTL0=0x%08x\n",
dwc2_readl(hsotg, DIEPCTL0),
dwc2_readl(hsotg, DOEPCTL0));
}
/**
* kill_all_requests - remove all requests from the endpoint's queue
* @hsotg: The device state.
* @ep: The endpoint the requests may be on.
* @result: The result code to use.
*
* Go through the requests on the given endpoint and mark them
* completed with the given result code.
*/
static void kill_all_requests(struct dwc2_hsotg *hsotg,
struct dwc2_hsotg_ep *ep,
int result)
{
unsigned int size;
ep->req = NULL;
while (!list_empty(&ep->queue)) {
struct dwc2_hsotg_req *req = get_ep_head(ep);
dwc2_hsotg_complete_request(hsotg, ep, req, result);
}
if (!hsotg->dedicated_fifos)
return;
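/*
* With dedicated FIFOs, flush the endpoint's TxFIFO if it is not
* already empty.
*/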
size = (dwc2_readl(hsotg, DTXFSTS(ep->fifo_index)) & 0xffff) * 4;
if (size < ep->fifo_size)
dwc2_hsotg_txfifo_flush(hsotg, ep->fifo_index);
}
/**
* dwc2_hsotg_disconnect - disconnect service
* @hsotg: The device state.
*
* The device has been disconnected. Remove all current
* transactions and signal the gadget driver that this
* has happened.
*/
void dwc2_hsotg_disconnect(struct dwc2_hsotg *hsotg)
{
unsigned int ep;
if (!hsotg->connected)
return;
hsotg->connected = 0;
hsotg->test_mode = 0;
/* all endpoints should be shutdown */
for (ep = 0; ep < hsotg->num_of_eps; ep++) {
if (hsotg->eps_in[ep])
kill_all_requests(hsotg, hsotg->eps_in[ep],
-ESHUTDOWN);
if (hsotg->eps_out[ep])
kill_all_requests(hsotg, hsotg->eps_out[ep],
-ESHUTDOWN);
}
call_gadget(hsotg, disconnect);
hsotg->lx_state = DWC2_L3;
usb_gadget_set_state(&hsotg->gadget, USB_STATE_NOTATTACHED);
}
/**
* dwc2_hsotg_irq_fifoempty - TX FIFO empty interrupt handler
* @hsotg: The device state.
* @periodic: True if this is a periodic FIFO interrupt
*/
static void dwc2_hsotg_irq_fifoempty(struct dwc2_hsotg *hsotg, bool periodic)
{
struct dwc2_hsotg_ep *ep;
int epno, ret;
/* look through for any more data to transmit */
for (epno = 0; epno < hsotg->num_of_eps; epno++) {
ep = index_to_ep(hsotg, epno, 1);
if (!ep)
continue;
if (!ep->dir_in)
continue;
if ((periodic && !ep->periodic) ||
(!periodic && ep->periodic))
continue;
ret = dwc2_hsotg_trytx(hsotg, ep);
if (ret < 0)
break;
}
}
/* IRQ flags which will trigger a retry around the IRQ loop */
#define IRQ_RETRY_MASK (GINTSTS_NPTXFEMP | \
GINTSTS_PTXFEMP | \
GINTSTS_RXFLVL)
static int dwc2_hsotg_ep_disable(struct usb_ep *ep);
/**
* dwc2_hsotg_core_init_disconnected - issue softreset to the core
* @hsotg: The device state
* @is_usb_reset: Usb resetting flag
*
* Issue a soft reset to the core, and await the core finishing it.
*/
void dwc2_hsotg_core_init_disconnected(struct dwc2_hsotg *hsotg,
bool is_usb_reset)
{
u32 intmsk;
u32 val;
u32 usbcfg;
u32 dcfg = 0;
int ep;
/* Kill any ep0 requests as controller will be reinitialized */
kill_all_requests(hsotg, hsotg->eps_out[0], -ECONNRESET);
if (!is_usb_reset) {
if (dwc2_core_reset(hsotg, true))
return;
} else {
/* all endpoints should be shutdown */
for (ep = 1; ep < hsotg->num_of_eps; ep++) {
if (hsotg->eps_in[ep])
dwc2_hsotg_ep_disable(&hsotg->eps_in[ep]->ep);
if (hsotg->eps_out[ep])
dwc2_hsotg_ep_disable(&hsotg->eps_out[ep]->ep);
}
}
/*
* we must now enable ep0 ready for host detection and then
* set configuration.
*/
/* keep other bits untouched (so e.g. forced modes are not lost) */
usbcfg = dwc2_readl(hsotg, GUSBCFG);
usbcfg &= ~GUSBCFG_TOUTCAL_MASK;
usbcfg |= GUSBCFG_TOUTCAL(7);
/* remove the HNP/SRP and set the PHY */
usbcfg &= ~(GUSBCFG_SRPCAP | GUSBCFG_HNPCAP);
dwc2_writel(hsotg, usbcfg, GUSBCFG);
dwc2_phy_init(hsotg, true);
dwc2_hsotg_init_fifo(hsotg);
if (!is_usb_reset)
dwc2_set_bit(hsotg, DCTL, DCTL_SFTDISCON);
dcfg |= DCFG_EPMISCNT(1);
switch (hsotg->params.speed) {
case DWC2_SPEED_PARAM_LOW:
dcfg |= DCFG_DEVSPD_LS;
break;
case DWC2_SPEED_PARAM_FULL:
if (hsotg->params.phy_type == DWC2_PHY_TYPE_PARAM_FS)
dcfg |= DCFG_DEVSPD_FS48;
else
dcfg |= DCFG_DEVSPD_FS;
break;
default:
dcfg |= DCFG_DEVSPD_HS;
}
if (hsotg->params.ipg_isoc_en)
dcfg |= DCFG_IPG_ISOC_SUPPORDED;
dwc2_writel(hsotg, dcfg, DCFG);
/* Clear any pending OTG interrupts */
dwc2_writel(hsotg, 0xffffffff, GOTGINT);
/* Clear any pending interrupts */
dwc2_writel(hsotg, 0xffffffff, GINTSTS);
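	/* Unmask the core interrupts needed for device mode operation */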
intmsk = GINTSTS_ERLYSUSP | GINTSTS_SESSREQINT |
GINTSTS_GOUTNAKEFF | GINTSTS_GINNAKEFF |
GINTSTS_USBRST | GINTSTS_RESETDET |
GINTSTS_ENUMDONE | GINTSTS_OTGINT |
GINTSTS_USBSUSP | GINTSTS_WKUPINT |
GINTSTS_LPMTRANRCVD;
if (!using_desc_dma(hsotg))
intmsk |= GINTSTS_INCOMPL_SOIN | GINTSTS_INCOMPL_SOOUT;
if (!hsotg->params.external_id_pin_ctl)
intmsk |= GINTSTS_CONIDSTSCHNG;
dwc2_writel(hsotg, intmsk, GINTMSK);
if (using_dma(hsotg)) {
dwc2_writel(hsotg, GAHBCFG_GLBL_INTR_EN | GAHBCFG_DMA_EN |
hsotg->params.ahbcfg,
GAHBCFG);
/* Set DDMA mode support in the core if needed */
if (using_desc_dma(hsotg))
dwc2_set_bit(hsotg, DCFG, DCFG_DESCDMA_EN);
} else {
dwc2_writel(hsotg, ((hsotg->dedicated_fifos) ?
(GAHBCFG_NP_TXF_EMP_LVL |
GAHBCFG_P_TXF_EMP_LVL) : 0) |
GAHBCFG_GLBL_INTR_EN, GAHBCFG);
}
/*
* If INTknTXFEmpMsk is enabled, it's important to disable ep interrupts
	 * when we have no data to transfer. Otherwise we get flooded by
* interrupts.
*/
dwc2_writel(hsotg, ((hsotg->dedicated_fifos && !using_dma(hsotg)) ?
DIEPMSK_TXFIFOEMPTY | DIEPMSK_INTKNTXFEMPMSK : 0) |
DIEPMSK_EPDISBLDMSK | DIEPMSK_XFERCOMPLMSK |
DIEPMSK_TIMEOUTMSK | DIEPMSK_AHBERRMSK,
DIEPMSK);
/*
* don't need XferCompl, we get that from RXFIFO in slave mode. In
* DMA mode we may need this and StsPhseRcvd.
*/
dwc2_writel(hsotg, (using_dma(hsotg) ? (DIEPMSK_XFERCOMPLMSK |
DOEPMSK_STSPHSERCVDMSK) : 0) |
DOEPMSK_EPDISBLDMSK | DOEPMSK_AHBERRMSK |
DOEPMSK_SETUPMSK,
DOEPMSK);
/* Enable BNA interrupt for DDMA */
if (using_desc_dma(hsotg)) {
dwc2_set_bit(hsotg, DOEPMSK, DOEPMSK_BNAMSK);
dwc2_set_bit(hsotg, DIEPMSK, DIEPMSK_BNAININTRMSK);
}
/* Enable Service Interval mode if supported */
if (using_desc_dma(hsotg) && hsotg->params.service_interval)
dwc2_set_bit(hsotg, DCTL, DCTL_SERVICE_INTERVAL_SUPPORTED);
dwc2_writel(hsotg, 0, DAINTMSK);
dev_dbg(hsotg->dev, "EP0: DIEPCTL0=0x%08x, DOEPCTL0=0x%08x\n",
dwc2_readl(hsotg, DIEPCTL0),
dwc2_readl(hsotg, DOEPCTL0));
/* enable in and out endpoint interrupts */
dwc2_hsotg_en_gsint(hsotg, GINTSTS_OEPINT | GINTSTS_IEPINT);
/*
* Enable the RXFIFO when in slave mode, as this is how we collect
* the data. In DMA mode, we get events from the FIFO but also
* things we cannot process, so do not use it.
*/
if (!using_dma(hsotg))
dwc2_hsotg_en_gsint(hsotg, GINTSTS_RXFLVL);
/* Enable interrupts for EP0 in and out */
dwc2_hsotg_ctrl_epint(hsotg, 0, 0, 1);
dwc2_hsotg_ctrl_epint(hsotg, 0, 1, 1);
if (!is_usb_reset) {
dwc2_set_bit(hsotg, DCTL, DCTL_PWRONPRGDONE);
udelay(10); /* see openiboot */
dwc2_clear_bit(hsotg, DCTL, DCTL_PWRONPRGDONE);
}
dev_dbg(hsotg->dev, "DCTL=0x%08x\n", dwc2_readl(hsotg, DCTL));
/*
* DxEPCTL_USBActEp says RO in manual, but seems to be set by
	 * writing to the EPCTL register.
*/
/* set to read 1 8byte packet */
dwc2_writel(hsotg, DXEPTSIZ_MC(1) | DXEPTSIZ_PKTCNT(1) |
DXEPTSIZ_XFERSIZE(8), DOEPTSIZ0);
dwc2_writel(hsotg, dwc2_hsotg_ep0_mps(hsotg->eps_out[0]->ep.maxpacket) |
DXEPCTL_CNAK | DXEPCTL_EPENA |
DXEPCTL_USBACTEP,
DOEPCTL0);
/* enable, but don't activate EP0in */
dwc2_writel(hsotg, dwc2_hsotg_ep0_mps(hsotg->eps_out[0]->ep.maxpacket) |
DXEPCTL_USBACTEP, DIEPCTL0);
/* clear global NAKs */
val = DCTL_CGOUTNAK | DCTL_CGNPINNAK;
if (!is_usb_reset)
val |= DCTL_SFTDISCON;
dwc2_set_bit(hsotg, DCTL, val);
/* configure the core to support LPM */
dwc2_gadget_init_lpm(hsotg);
/* program GREFCLK register if needed */
if (using_desc_dma(hsotg) && hsotg->params.service_interval)
dwc2_gadget_program_ref_clk(hsotg);
	/* must be at least 3ms to allow the bus to see the disconnect */
mdelay(3);
hsotg->lx_state = DWC2_L0;
dwc2_hsotg_enqueue_setup(hsotg);
dev_dbg(hsotg->dev, "EP0: DIEPCTL0=0x%08x, DOEPCTL0=0x%08x\n",
dwc2_readl(hsotg, DIEPCTL0),
dwc2_readl(hsotg, DOEPCTL0));
}
void dwc2_hsotg_core_disconnect(struct dwc2_hsotg *hsotg)
{
/* set the soft-disconnect bit */
dwc2_set_bit(hsotg, DCTL, DCTL_SFTDISCON);
}
void dwc2_hsotg_core_connect(struct dwc2_hsotg *hsotg)
{
/* remove the soft-disconnect and let's go */
if (!hsotg->role_sw || (dwc2_readl(hsotg, GOTGCTL) & GOTGCTL_BSESVLD))
dwc2_clear_bit(hsotg, DCTL, DCTL_SFTDISCON);
}
/**
* dwc2_gadget_handle_incomplete_isoc_in - handle incomplete ISO IN Interrupt.
 * @hsotg: The device state.
*
* This interrupt indicates one of the following conditions occurred while
* transmitting an ISOC transaction.
* - Corrupted IN Token for ISOC EP.
* - Packet not complete in FIFO.
*
* The following actions will be taken:
* - Determine the EP
 * - Disable the EP; when the 'Endpoint Disabled' interrupt is received, flush the FIFO
*/
static void dwc2_gadget_handle_incomplete_isoc_in(struct dwc2_hsotg *hsotg)
{
struct dwc2_hsotg_ep *hs_ep;
u32 epctrl;
u32 daintmsk;
u32 idx;
dev_dbg(hsotg->dev, "Incomplete isoc in interrupt received:\n");
daintmsk = dwc2_readl(hsotg, DAINTMSK);
for (idx = 1; idx < hsotg->num_of_eps; idx++) {
hs_ep = hsotg->eps_in[idx];
		/* Process only unmasked ISOC EPs */
if ((BIT(idx) & ~daintmsk) || !hs_ep->isochronous)
continue;
epctrl = dwc2_readl(hsotg, DIEPCTL(idx));
if ((epctrl & DXEPCTL_EPENA) &&
dwc2_gadget_target_frame_elapsed(hs_ep)) {
epctrl |= DXEPCTL_SNAK;
epctrl |= DXEPCTL_EPDIS;
dwc2_writel(hsotg, epctrl, DIEPCTL(idx));
}
}
/* Clear interrupt */
dwc2_writel(hsotg, GINTSTS_INCOMPL_SOIN, GINTSTS);
}
/**
* dwc2_gadget_handle_incomplete_isoc_out - handle incomplete ISO OUT Interrupt
 * @hsotg: The device state.
*
* This interrupt indicates one of the following conditions occurred while
* transmitting an ISOC transaction.
* - Corrupted OUT Token for ISOC EP.
* - Packet not complete in FIFO.
*
* The following actions will be taken:
* - Determine the EP
* - Set DCTL_SGOUTNAK and unmask GOUTNAKEFF if target frame elapsed.
*/
static void dwc2_gadget_handle_incomplete_isoc_out(struct dwc2_hsotg *hsotg)
{
u32 gintsts;
u32 gintmsk;
u32 daintmsk;
u32 epctrl;
struct dwc2_hsotg_ep *hs_ep;
int idx;
dev_dbg(hsotg->dev, "%s: GINTSTS_INCOMPL_SOOUT\n", __func__);
daintmsk = dwc2_readl(hsotg, DAINTMSK);
daintmsk >>= DAINT_OUTEP_SHIFT;
for (idx = 1; idx < hsotg->num_of_eps; idx++) {
hs_ep = hsotg->eps_out[idx];
		/* Process only unmasked ISOC EPs */
if ((BIT(idx) & ~daintmsk) || !hs_ep->isochronous)
continue;
epctrl = dwc2_readl(hsotg, DOEPCTL(idx));
if ((epctrl & DXEPCTL_EPENA) &&
dwc2_gadget_target_frame_elapsed(hs_ep)) {
/* Unmask GOUTNAKEFF interrupt */
gintmsk = dwc2_readl(hsotg, GINTMSK);
gintmsk |= GINTSTS_GOUTNAKEFF;
dwc2_writel(hsotg, gintmsk, GINTMSK);
gintsts = dwc2_readl(hsotg, GINTSTS);
if (!(gintsts & GINTSTS_GOUTNAKEFF)) {
dwc2_set_bit(hsotg, DCTL, DCTL_SGOUTNAK);
break;
}
}
}
/* Clear interrupt */
dwc2_writel(hsotg, GINTSTS_INCOMPL_SOOUT, GINTSTS);
}
/**
* dwc2_hsotg_irq - handle device interrupt
* @irq: The IRQ number triggered
 * @pw: The pw value supplied when the handler was registered.
*/
static irqreturn_t dwc2_hsotg_irq(int irq, void *pw)
{
struct dwc2_hsotg *hsotg = pw;
int retry_count = 8;
u32 gintsts;
u32 gintmsk;
if (!dwc2_is_device_mode(hsotg))
return IRQ_NONE;
spin_lock(&hsotg->lock);
irq_retry:
gintsts = dwc2_readl(hsotg, GINTSTS);
gintmsk = dwc2_readl(hsotg, GINTMSK);
dev_dbg(hsotg->dev, "%s: %08x %08x (%08x) retry %d\n",
__func__, gintsts, gintsts & gintmsk, gintmsk, retry_count);
gintsts &= gintmsk;
if (gintsts & GINTSTS_RESETDET) {
dev_dbg(hsotg->dev, "%s: USBRstDet\n", __func__);
dwc2_writel(hsotg, GINTSTS_RESETDET, GINTSTS);
/* This event must be used only if controller is suspended */
if (hsotg->in_ppd && hsotg->lx_state == DWC2_L2)
dwc2_exit_partial_power_down(hsotg, 0, true);
hsotg->lx_state = DWC2_L0;
}
if (gintsts & (GINTSTS_USBRST | GINTSTS_RESETDET)) {
u32 usb_status = dwc2_readl(hsotg, GOTGCTL);
u32 connected = hsotg->connected;
dev_dbg(hsotg->dev, "%s: USBRst\n", __func__);
dev_dbg(hsotg->dev, "GNPTXSTS=%08x\n",
dwc2_readl(hsotg, GNPTXSTS));
dwc2_writel(hsotg, GINTSTS_USBRST, GINTSTS);
/* Report disconnection if it is not already done. */
dwc2_hsotg_disconnect(hsotg);
/* Reset device address to zero */
dwc2_clear_bit(hsotg, DCFG, DCFG_DEVADDR_MASK);
if (usb_status & GOTGCTL_BSESVLD && connected)
dwc2_hsotg_core_init_disconnected(hsotg, true);
}
if (gintsts & GINTSTS_ENUMDONE) {
dwc2_writel(hsotg, GINTSTS_ENUMDONE, GINTSTS);
dwc2_hsotg_irq_enumdone(hsotg);
}
if (gintsts & (GINTSTS_OEPINT | GINTSTS_IEPINT)) {
u32 daint = dwc2_readl(hsotg, DAINT);
u32 daintmsk = dwc2_readl(hsotg, DAINTMSK);
u32 daint_out, daint_in;
int ep;
daint &= daintmsk;
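		/* DAINT: OUT endpoint bits in the upper half, IN bits in the lower half */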
daint_out = daint >> DAINT_OUTEP_SHIFT;
daint_in = daint & ~(daint_out << DAINT_OUTEP_SHIFT);
dev_dbg(hsotg->dev, "%s: daint=%08x\n", __func__, daint);
for (ep = 0; ep < hsotg->num_of_eps && daint_out;
ep++, daint_out >>= 1) {
if (daint_out & 1)
dwc2_hsotg_epint(hsotg, ep, 0);
}
for (ep = 0; ep < hsotg->num_of_eps && daint_in;
ep++, daint_in >>= 1) {
if (daint_in & 1)
dwc2_hsotg_epint(hsotg, ep, 1);
}
}
/* check both FIFOs */
if (gintsts & GINTSTS_NPTXFEMP) {
dev_dbg(hsotg->dev, "NPTxFEmp\n");
/*
* Disable the interrupt to stop it happening again
* unless one of these endpoint routines decides that
* it needs re-enabling
*/
dwc2_hsotg_disable_gsint(hsotg, GINTSTS_NPTXFEMP);
dwc2_hsotg_irq_fifoempty(hsotg, false);
}
if (gintsts & GINTSTS_PTXFEMP) {
dev_dbg(hsotg->dev, "PTxFEmp\n");
/* See note in GINTSTS_NPTxFEmp */
dwc2_hsotg_disable_gsint(hsotg, GINTSTS_PTXFEMP);
dwc2_hsotg_irq_fifoempty(hsotg, true);
}
if (gintsts & GINTSTS_RXFLVL) {
/*
* note, since GINTSTS_RxFLvl doubles as FIFO-not-empty,
* we need to retry dwc2_hsotg_handle_rx if this is still
* set.
*/
dwc2_hsotg_handle_rx(hsotg);
}
if (gintsts & GINTSTS_ERLYSUSP) {
dev_dbg(hsotg->dev, "GINTSTS_ErlySusp\n");
dwc2_writel(hsotg, GINTSTS_ERLYSUSP, GINTSTS);
}
/*
	 * these next two seem to crop up occasionally, causing the core
	 * to shut down the USB transfer, so try clearing them and logging
* the occurrence.
*/
if (gintsts & GINTSTS_GOUTNAKEFF) {
u8 idx;
u32 epctrl;
u32 gintmsk;
u32 daintmsk;
struct dwc2_hsotg_ep *hs_ep;
daintmsk = dwc2_readl(hsotg, DAINTMSK);
daintmsk >>= DAINT_OUTEP_SHIFT;
/* Mask this interrupt */
gintmsk = dwc2_readl(hsotg, GINTMSK);
gintmsk &= ~GINTSTS_GOUTNAKEFF;
dwc2_writel(hsotg, gintmsk, GINTMSK);
dev_dbg(hsotg->dev, "GOUTNakEff triggered\n");
for (idx = 1; idx < hsotg->num_of_eps; idx++) {
hs_ep = hsotg->eps_out[idx];
			/* Process only unmasked ISOC EPs */
if (BIT(idx) & ~daintmsk)
continue;
epctrl = dwc2_readl(hsotg, DOEPCTL(idx));
			/* ISOC EPs only */
if ((epctrl & DXEPCTL_EPENA) && hs_ep->isochronous) {
epctrl |= DXEPCTL_SNAK;
epctrl |= DXEPCTL_EPDIS;
dwc2_writel(hsotg, epctrl, DOEPCTL(idx));
continue;
}
			/* Non-ISOC EPs */
if (hs_ep->halted) {
if (!(epctrl & DXEPCTL_EPENA))
epctrl |= DXEPCTL_EPENA;
epctrl |= DXEPCTL_EPDIS;
epctrl |= DXEPCTL_STALL;
dwc2_writel(hsotg, epctrl, DOEPCTL(idx));
}
}
/* This interrupt bit is cleared in DXEPINT_EPDISBLD handler */
}
if (gintsts & GINTSTS_GINNAKEFF) {
dev_info(hsotg->dev, "GINNakEff triggered\n");
dwc2_set_bit(hsotg, DCTL, DCTL_CGNPINNAK);
dwc2_hsotg_dump(hsotg);
}
if (gintsts & GINTSTS_INCOMPL_SOIN)
dwc2_gadget_handle_incomplete_isoc_in(hsotg);
if (gintsts & GINTSTS_INCOMPL_SOOUT)
dwc2_gadget_handle_incomplete_isoc_out(hsotg);
/*
* if we've had fifo events, we should try and go around the
* loop again to see if there's any point in returning yet.
*/
if (gintsts & IRQ_RETRY_MASK && --retry_count > 0)
goto irq_retry;
/* Check WKUP_ALERT interrupt*/
if (hsotg->params.service_interval)
dwc2_gadget_wkup_alert_handler(hsotg);
spin_unlock(&hsotg->lock);
return IRQ_HANDLED;
}
static void dwc2_hsotg_ep_stop_xfr(struct dwc2_hsotg *hsotg,
struct dwc2_hsotg_ep *hs_ep)
{
u32 epctrl_reg;
u32 epint_reg;
epctrl_reg = hs_ep->dir_in ? DIEPCTL(hs_ep->index) :
DOEPCTL(hs_ep->index);
epint_reg = hs_ep->dir_in ? DIEPINT(hs_ep->index) :
DOEPINT(hs_ep->index);
dev_dbg(hsotg->dev, "%s: stopping transfer on %s\n", __func__,
hs_ep->name);
if (hs_ep->dir_in) {
if (hsotg->dedicated_fifos || hs_ep->periodic) {
dwc2_set_bit(hsotg, epctrl_reg, DXEPCTL_SNAK);
/* Wait for Nak effect */
if (dwc2_hsotg_wait_bit_set(hsotg, epint_reg,
DXEPINT_INEPNAKEFF, 100))
dev_warn(hsotg->dev,
"%s: timeout DIEPINT.NAKEFF\n",
__func__);
} else {
dwc2_set_bit(hsotg, DCTL, DCTL_SGNPINNAK);
/* Wait for Nak effect */
if (dwc2_hsotg_wait_bit_set(hsotg, GINTSTS,
GINTSTS_GINNAKEFF, 100))
dev_warn(hsotg->dev,
"%s: timeout GINTSTS.GINNAKEFF\n",
__func__);
}
} else {
/* Mask GINTSTS_GOUTNAKEFF interrupt */
dwc2_hsotg_disable_gsint(hsotg, GINTSTS_GOUTNAKEFF);
if (!(dwc2_readl(hsotg, GINTSTS) & GINTSTS_GOUTNAKEFF))
dwc2_set_bit(hsotg, DCTL, DCTL_SGOUTNAK);
if (!using_dma(hsotg)) {
/* Wait for GINTSTS_RXFLVL interrupt */
if (dwc2_hsotg_wait_bit_set(hsotg, GINTSTS,
GINTSTS_RXFLVL, 100)) {
dev_warn(hsotg->dev, "%s: timeout GINTSTS.RXFLVL\n",
__func__);
} else {
/*
* Pop GLOBAL OUT NAK status packet from RxFIFO
* to assert GOUTNAKEFF interrupt
*/
dwc2_readl(hsotg, GRXSTSP);
}
}
/* Wait for global nak to take effect */
if (dwc2_hsotg_wait_bit_set(hsotg, GINTSTS,
GINTSTS_GOUTNAKEFF, 100))
dev_warn(hsotg->dev, "%s: timeout GINTSTS.GOUTNAKEFF\n",
__func__);
}
/* Disable ep */
dwc2_set_bit(hsotg, epctrl_reg, DXEPCTL_EPDIS | DXEPCTL_SNAK);
/* Wait for ep to be disabled */
if (dwc2_hsotg_wait_bit_set(hsotg, epint_reg, DXEPINT_EPDISBLD, 100))
dev_warn(hsotg->dev,
"%s: timeout DOEPCTL.EPDisable\n", __func__);
/* Clear EPDISBLD interrupt */
dwc2_set_bit(hsotg, epint_reg, DXEPINT_EPDISBLD);
if (hs_ep->dir_in) {
unsigned short fifo_index;
if (hsotg->dedicated_fifos || hs_ep->periodic)
fifo_index = hs_ep->fifo_index;
else
fifo_index = 0;
/* Flush TX FIFO */
dwc2_flush_tx_fifo(hsotg, fifo_index);
/* Clear Global In NP NAK in Shared FIFO for non periodic ep */
if (!hsotg->dedicated_fifos && !hs_ep->periodic)
dwc2_set_bit(hsotg, DCTL, DCTL_CGNPINNAK);
} else {
/* Remove global NAKs */
dwc2_set_bit(hsotg, DCTL, DCTL_CGOUTNAK);
}
}
/**
* dwc2_hsotg_ep_enable - enable the given endpoint
 * @ep: The USB endpoint to configure
* @desc: The USB endpoint descriptor to configure with.
*
* This is called from the USB gadget code's usb_ep_enable().
*/
static int dwc2_hsotg_ep_enable(struct usb_ep *ep,
const struct usb_endpoint_descriptor *desc)
{
struct dwc2_hsotg_ep *hs_ep = our_ep(ep);
struct dwc2_hsotg *hsotg = hs_ep->parent;
unsigned long flags;
unsigned int index = hs_ep->index;
u32 epctrl_reg;
u32 epctrl;
u32 mps;
u32 mc;
u32 mask;
unsigned int dir_in;
unsigned int i, val, size;
int ret = 0;
unsigned char ep_type;
int desc_num;
dev_dbg(hsotg->dev,
"%s: ep %s: a 0x%02x, attr 0x%02x, mps 0x%04x, intr %d\n",
__func__, ep->name, desc->bEndpointAddress, desc->bmAttributes,
desc->wMaxPacketSize, desc->bInterval);
/* not to be called for EP0 */
if (index == 0) {
dev_err(hsotg->dev, "%s: called for EP 0\n", __func__);
return -EINVAL;
}
dir_in = (desc->bEndpointAddress & USB_ENDPOINT_DIR_MASK) ? 1 : 0;
if (dir_in != hs_ep->dir_in) {
dev_err(hsotg->dev, "%s: direction mismatch!\n", __func__);
return -EINVAL;
}
ep_type = desc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK;
mps = usb_endpoint_maxp(desc);
mc = usb_endpoint_maxp_mult(desc);
	/* For ISOC IN in DDMA mode, bInterval values up to 10 are supported */
if (using_desc_dma(hsotg) && ep_type == USB_ENDPOINT_XFER_ISOC &&
dir_in && desc->bInterval > 10) {
dev_err(hsotg->dev,
"%s: ISOC IN, DDMA: bInterval>10 not supported!\n", __func__);
return -EINVAL;
}
/* High bandwidth ISOC OUT in DDMA not supported */
if (using_desc_dma(hsotg) && ep_type == USB_ENDPOINT_XFER_ISOC &&
!dir_in && mc > 1) {
dev_err(hsotg->dev,
"%s: ISOC OUT, DDMA: HB not supported!\n", __func__);
return -EINVAL;
}
/* note, we handle this here instead of dwc2_hsotg_set_ep_maxpacket */
epctrl_reg = dir_in ? DIEPCTL(index) : DOEPCTL(index);
epctrl = dwc2_readl(hsotg, epctrl_reg);
dev_dbg(hsotg->dev, "%s: read DxEPCTL=0x%08x from 0x%08x\n",
__func__, epctrl, epctrl_reg);
if (using_desc_dma(hsotg) && ep_type == USB_ENDPOINT_XFER_ISOC)
desc_num = MAX_DMA_DESC_NUM_HS_ISOC;
else
desc_num = MAX_DMA_DESC_NUM_GENERIC;
/* Allocate DMA descriptor chain for non-ctrl endpoints */
if (using_desc_dma(hsotg) && !hs_ep->desc_list) {
hs_ep->desc_list = dmam_alloc_coherent(hsotg->dev,
desc_num * sizeof(struct dwc2_dma_desc),
&hs_ep->desc_list_dma, GFP_ATOMIC);
if (!hs_ep->desc_list) {
ret = -ENOMEM;
goto error2;
}
}
spin_lock_irqsave(&hsotg->lock, flags);
epctrl &= ~(DXEPCTL_EPTYPE_MASK | DXEPCTL_MPS_MASK);
epctrl |= DXEPCTL_MPS(mps);
/*
* mark the endpoint as active, otherwise the core may ignore
* transactions entirely for this endpoint
*/
epctrl |= DXEPCTL_USBACTEP;
/* update the endpoint state */
dwc2_hsotg_set_ep_maxpacket(hsotg, hs_ep->index, mps, mc, dir_in);
/* default, set to non-periodic */
hs_ep->isochronous = 0;
hs_ep->periodic = 0;
hs_ep->halted = 0;
hs_ep->wedged = 0;
hs_ep->interval = desc->bInterval;
switch (ep_type) {
case USB_ENDPOINT_XFER_ISOC:
epctrl |= DXEPCTL_EPTYPE_ISO;
epctrl |= DXEPCTL_SETEVENFR;
hs_ep->isochronous = 1;
hs_ep->interval = 1 << (desc->bInterval - 1);
hs_ep->target_frame = TARGET_FRAME_INITIAL;
hs_ep->next_desc = 0;
hs_ep->compl_desc = 0;
if (dir_in) {
hs_ep->periodic = 1;
mask = dwc2_readl(hsotg, DIEPMSK);
mask |= DIEPMSK_NAKMSK;
dwc2_writel(hsotg, mask, DIEPMSK);
} else {
epctrl |= DXEPCTL_SNAK;
mask = dwc2_readl(hsotg, DOEPMSK);
mask |= DOEPMSK_OUTTKNEPDISMSK;
dwc2_writel(hsotg, mask, DOEPMSK);
}
break;
case USB_ENDPOINT_XFER_BULK:
epctrl |= DXEPCTL_EPTYPE_BULK;
break;
case USB_ENDPOINT_XFER_INT:
if (dir_in)
hs_ep->periodic = 1;
if (hsotg->gadget.speed == USB_SPEED_HIGH)
hs_ep->interval = 1 << (desc->bInterval - 1);
epctrl |= DXEPCTL_EPTYPE_INTERRUPT;
break;
case USB_ENDPOINT_XFER_CONTROL:
epctrl |= DXEPCTL_EPTYPE_CONTROL;
break;
}
/*
* if the hardware has dedicated fifos, we must give each IN EP
* a unique tx-fifo even if it is non-periodic.
*/
if (dir_in && hsotg->dedicated_fifos) {
unsigned fifo_count = dwc2_hsotg_tx_fifo_count(hsotg);
u32 fifo_index = 0;
u32 fifo_size = UINT_MAX;
size = hs_ep->ep.maxpacket * hs_ep->mc;
for (i = 1; i <= fifo_count; ++i) {
if (hsotg->fifo_map & (1 << i))
continue;
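			/* FIFO depth is reported in 32-bit words; convert to bytes */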
val = dwc2_readl(hsotg, DPTXFSIZN(i));
val = (val >> FIFOSIZE_DEPTH_SHIFT) * 4;
if (val < size)
continue;
/* Search for smallest acceptable fifo */
if (val < fifo_size) {
fifo_size = val;
fifo_index = i;
}
}
if (!fifo_index) {
dev_err(hsotg->dev,
"%s: No suitable fifo found\n", __func__);
ret = -ENOMEM;
goto error1;
}
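		/* Claim the chosen FIFO and program its number into DxEPCTL */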
epctrl &= ~(DXEPCTL_TXFNUM_LIMIT << DXEPCTL_TXFNUM_SHIFT);
hsotg->fifo_map |= 1 << fifo_index;
epctrl |= DXEPCTL_TXFNUM(fifo_index);
hs_ep->fifo_index = fifo_index;
hs_ep->fifo_size = fifo_size;
}
/* for non control endpoints, set PID to D0 */
if (index && !hs_ep->isochronous)
epctrl |= DXEPCTL_SETD0PID;
	/* WA for Full-speed ISOC IN in DDMA mode.
	 * By clearing the NAK status of the EP, the core will send a ZLP
	 * in response to the IN token and assert the NAK interrupt, relying
	 * on TxFIFO status only.
	 */
if (hsotg->gadget.speed == USB_SPEED_FULL &&
hs_ep->isochronous && dir_in) {
/* The WA applies only to core versions from 2.72a
* to 4.00a (including both). Also for FS_IOT_1.00a
* and HS_IOT_1.00a.
*/
u32 gsnpsid = dwc2_readl(hsotg, GSNPSID);
if ((gsnpsid >= DWC2_CORE_REV_2_72a &&
gsnpsid <= DWC2_CORE_REV_4_00a) ||
gsnpsid == DWC2_FS_IOT_REV_1_00a ||
gsnpsid == DWC2_HS_IOT_REV_1_00a)
epctrl |= DXEPCTL_CNAK;
}
dev_dbg(hsotg->dev, "%s: write DxEPCTL=0x%08x\n",
__func__, epctrl);
dwc2_writel(hsotg, epctrl, epctrl_reg);
dev_dbg(hsotg->dev, "%s: read DxEPCTL=0x%08x\n",
__func__, dwc2_readl(hsotg, epctrl_reg));
/* enable the endpoint interrupt */
dwc2_hsotg_ctrl_epint(hsotg, index, dir_in, 1);
error1:
spin_unlock_irqrestore(&hsotg->lock, flags);
error2:
if (ret && using_desc_dma(hsotg) && hs_ep->desc_list) {
dmam_free_coherent(hsotg->dev, desc_num *
sizeof(struct dwc2_dma_desc),
hs_ep->desc_list, hs_ep->desc_list_dma);
hs_ep->desc_list = NULL;
}
return ret;
}
/**
* dwc2_hsotg_ep_disable - disable given endpoint
* @ep: The endpoint to disable.
*/
static int dwc2_hsotg_ep_disable(struct usb_ep *ep)
{
struct dwc2_hsotg_ep *hs_ep = our_ep(ep);
struct dwc2_hsotg *hsotg = hs_ep->parent;
int dir_in = hs_ep->dir_in;
int index = hs_ep->index;
u32 epctrl_reg;
u32 ctrl;
dev_dbg(hsotg->dev, "%s(ep %p)\n", __func__, ep);
if (ep == &hsotg->eps_out[0]->ep) {
dev_err(hsotg->dev, "%s: called for ep0\n", __func__);
return -EINVAL;
}
if (hsotg->op_state != OTG_STATE_B_PERIPHERAL) {
dev_err(hsotg->dev, "%s: called in host mode?\n", __func__);
return -EINVAL;
}
epctrl_reg = dir_in ? DIEPCTL(index) : DOEPCTL(index);
ctrl = dwc2_readl(hsotg, epctrl_reg);
if (ctrl & DXEPCTL_EPENA)
dwc2_hsotg_ep_stop_xfr(hsotg, hs_ep);
ctrl &= ~DXEPCTL_EPENA;
ctrl &= ~DXEPCTL_USBACTEP;
ctrl |= DXEPCTL_SNAK;
dev_dbg(hsotg->dev, "%s: DxEPCTL=0x%08x\n", __func__, ctrl);
dwc2_writel(hsotg, ctrl, epctrl_reg);
/* disable endpoint interrupts */
dwc2_hsotg_ctrl_epint(hsotg, hs_ep->index, hs_ep->dir_in, 0);
/* terminate all requests with shutdown */
kill_all_requests(hsotg, hs_ep, -ESHUTDOWN);
hsotg->fifo_map &= ~(1 << hs_ep->fifo_index);
hs_ep->fifo_index = 0;
hs_ep->fifo_size = 0;
return 0;
}
static int dwc2_hsotg_ep_disable_lock(struct usb_ep *ep)
{
struct dwc2_hsotg_ep *hs_ep = our_ep(ep);
struct dwc2_hsotg *hsotg = hs_ep->parent;
unsigned long flags;
int ret;
spin_lock_irqsave(&hsotg->lock, flags);
ret = dwc2_hsotg_ep_disable(ep);
spin_unlock_irqrestore(&hsotg->lock, flags);
return ret;
}
/**
 * on_list - check if a request is on the given endpoint's queue
* @ep: The endpoint to check.
* @test: The request to test if it is on the endpoint.
*/
static bool on_list(struct dwc2_hsotg_ep *ep, struct dwc2_hsotg_req *test)
{
struct dwc2_hsotg_req *req, *treq;
list_for_each_entry_safe(req, treq, &ep->queue, queue) {
if (req == test)
return true;
}
return false;
}
/**
 * dwc2_hsotg_ep_dequeue - dequeue a request from the given endpoint
* @ep: The endpoint to dequeue.
* @req: The request to be removed from a queue.
*/
static int dwc2_hsotg_ep_dequeue(struct usb_ep *ep, struct usb_request *req)
{
struct dwc2_hsotg_req *hs_req = our_req(req);
struct dwc2_hsotg_ep *hs_ep = our_ep(ep);
struct dwc2_hsotg *hs = hs_ep->parent;
unsigned long flags;
dev_dbg(hs->dev, "ep_dequeue(%p,%p)\n", ep, req);
spin_lock_irqsave(&hs->lock, flags);
if (!on_list(hs_ep, hs_req)) {
spin_unlock_irqrestore(&hs->lock, flags);
return -EINVAL;
}
/* Dequeue already started request */
if (req == &hs_ep->req->req)
dwc2_hsotg_ep_stop_xfr(hs, hs_ep);
dwc2_hsotg_complete_request(hs, hs_ep, hs_req, -ECONNRESET);
spin_unlock_irqrestore(&hs->lock, flags);
return 0;
}
/**
* dwc2_gadget_ep_set_wedge - set wedge on a given endpoint
* @ep: The endpoint to be wedged.
*
*/
static int dwc2_gadget_ep_set_wedge(struct usb_ep *ep)
{
struct dwc2_hsotg_ep *hs_ep = our_ep(ep);
struct dwc2_hsotg *hs = hs_ep->parent;
unsigned long flags;
int ret;
spin_lock_irqsave(&hs->lock, flags);
hs_ep->wedged = 1;
ret = dwc2_hsotg_ep_sethalt(ep, 1, false);
spin_unlock_irqrestore(&hs->lock, flags);
return ret;
}
/**
* dwc2_hsotg_ep_sethalt - set halt on a given endpoint
* @ep: The endpoint to set halt.
* @value: Set or unset the halt.
* @now: If true, stall the endpoint now. Otherwise return -EAGAIN if
* the endpoint is busy processing requests.
*
 * We need to stall the endpoint immediately if the request comes from the
 * SetFeature protocol command handler.
*/
static int dwc2_hsotg_ep_sethalt(struct usb_ep *ep, int value, bool now)
{
struct dwc2_hsotg_ep *hs_ep = our_ep(ep);
struct dwc2_hsotg *hs = hs_ep->parent;
int index = hs_ep->index;
u32 epreg;
u32 epctl;
u32 xfertype;
dev_info(hs->dev, "%s(ep %p %s, %d)\n", __func__, ep, ep->name, value);
if (index == 0) {
if (value)
dwc2_hsotg_stall_ep0(hs);
else
dev_warn(hs->dev,
"%s: can't clear halt on ep0\n", __func__);
return 0;
}
if (hs_ep->isochronous) {
dev_err(hs->dev, "%s is Isochronous Endpoint\n", ep->name);
return -EINVAL;
}
if (!now && value && !list_empty(&hs_ep->queue)) {
dev_dbg(hs->dev, "%s request is pending, cannot halt\n",
ep->name);
return -EAGAIN;
}
if (hs_ep->dir_in) {
epreg = DIEPCTL(index);
epctl = dwc2_readl(hs, epreg);
if (value) {
epctl |= DXEPCTL_STALL | DXEPCTL_SNAK;
if (epctl & DXEPCTL_EPENA)
epctl |= DXEPCTL_EPDIS;
} else {
epctl &= ~DXEPCTL_STALL;
hs_ep->wedged = 0;
xfertype = epctl & DXEPCTL_EPTYPE_MASK;
if (xfertype == DXEPCTL_EPTYPE_BULK ||
xfertype == DXEPCTL_EPTYPE_INTERRUPT)
epctl |= DXEPCTL_SETD0PID;
}
dwc2_writel(hs, epctl, epreg);
} else {
epreg = DOEPCTL(index);
epctl = dwc2_readl(hs, epreg);
if (value) {
/* Unmask GOUTNAKEFF interrupt */
dwc2_hsotg_en_gsint(hs, GINTSTS_GOUTNAKEFF);
if (!(dwc2_readl(hs, GINTSTS) & GINTSTS_GOUTNAKEFF))
dwc2_set_bit(hs, DCTL, DCTL_SGOUTNAK);
// STALL bit will be set in GOUTNAKEFF interrupt handler
} else {
epctl &= ~DXEPCTL_STALL;
hs_ep->wedged = 0;
xfertype = epctl & DXEPCTL_EPTYPE_MASK;
if (xfertype == DXEPCTL_EPTYPE_BULK ||
xfertype == DXEPCTL_EPTYPE_INTERRUPT)
epctl |= DXEPCTL_SETD0PID;
dwc2_writel(hs, epctl, epreg);
}
}
hs_ep->halted = value;
return 0;
}
/**
* dwc2_hsotg_ep_sethalt_lock - set halt on a given endpoint with lock held
* @ep: The endpoint to set halt.
* @value: Set or unset the halt.
*/
static int dwc2_hsotg_ep_sethalt_lock(struct usb_ep *ep, int value)
{
struct dwc2_hsotg_ep *hs_ep = our_ep(ep);
struct dwc2_hsotg *hs = hs_ep->parent;
unsigned long flags;
int ret;
spin_lock_irqsave(&hs->lock, flags);
ret = dwc2_hsotg_ep_sethalt(ep, value, false);
spin_unlock_irqrestore(&hs->lock, flags);
return ret;
}
static const struct usb_ep_ops dwc2_hsotg_ep_ops = {
.enable = dwc2_hsotg_ep_enable,
.disable = dwc2_hsotg_ep_disable_lock,
.alloc_request = dwc2_hsotg_ep_alloc_request,
.free_request = dwc2_hsotg_ep_free_request,
.queue = dwc2_hsotg_ep_queue_lock,
.dequeue = dwc2_hsotg_ep_dequeue,
.set_halt = dwc2_hsotg_ep_sethalt_lock,
.set_wedge = dwc2_gadget_ep_set_wedge,
/* note, don't believe we have any call for the fifo routines */
};
/**
* dwc2_hsotg_init - initialize the usb core
* @hsotg: The driver state
*/
static void dwc2_hsotg_init(struct dwc2_hsotg *hsotg)
{
/* unmask subset of endpoint interrupts */
dwc2_writel(hsotg, DIEPMSK_TIMEOUTMSK | DIEPMSK_AHBERRMSK |
DIEPMSK_EPDISBLDMSK | DIEPMSK_XFERCOMPLMSK,
DIEPMSK);
dwc2_writel(hsotg, DOEPMSK_SETUPMSK | DOEPMSK_AHBERRMSK |
DOEPMSK_EPDISBLDMSK | DOEPMSK_XFERCOMPLMSK,
DOEPMSK);
dwc2_writel(hsotg, 0, DAINTMSK);
/* Be in disconnected state until gadget is registered */
dwc2_set_bit(hsotg, DCTL, DCTL_SFTDISCON);
/* setup fifos */
dev_dbg(hsotg->dev, "GRXFSIZ=0x%08x, GNPTXFSIZ=0x%08x\n",
dwc2_readl(hsotg, GRXFSIZ),
dwc2_readl(hsotg, GNPTXFSIZ));
dwc2_hsotg_init_fifo(hsotg);
if (using_dma(hsotg))
dwc2_set_bit(hsotg, GAHBCFG, GAHBCFG_DMA_EN);
}
/**
* dwc2_hsotg_udc_start - prepare the udc for work
* @gadget: The usb gadget state
* @driver: The usb gadget driver
*
 * Perform the initialization needed to prepare the udc device and
 * driver for operation.
*/
static int dwc2_hsotg_udc_start(struct usb_gadget *gadget,
struct usb_gadget_driver *driver)
{
struct dwc2_hsotg *hsotg = to_hsotg(gadget);
unsigned long flags;
int ret;
if (!hsotg) {
pr_err("%s: called with no device\n", __func__);
return -ENODEV;
}
if (!driver) {
dev_err(hsotg->dev, "%s: no driver\n", __func__);
return -EINVAL;
}
if (driver->max_speed < USB_SPEED_FULL)
dev_err(hsotg->dev, "%s: bad speed\n", __func__);
if (!driver->setup) {
dev_err(hsotg->dev, "%s: missing entry points\n", __func__);
return -EINVAL;
}
WARN_ON(hsotg->driver);
hsotg->driver = driver;
hsotg->gadget.dev.of_node = hsotg->dev->of_node;
hsotg->gadget.speed = USB_SPEED_UNKNOWN;
if (hsotg->dr_mode == USB_DR_MODE_PERIPHERAL) {
ret = dwc2_lowlevel_hw_enable(hsotg);
if (ret)
goto err;
}
if (!IS_ERR_OR_NULL(hsotg->uphy))
otg_set_peripheral(hsotg->uphy->otg, &hsotg->gadget);
spin_lock_irqsave(&hsotg->lock, flags);
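	/* If the controller is currently in device mode, (re)initialise the core */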
if (dwc2_hw_is_device(hsotg)) {
dwc2_hsotg_init(hsotg);
dwc2_hsotg_core_init_disconnected(hsotg, false);
}
hsotg->enabled = 0;
spin_unlock_irqrestore(&hsotg->lock, flags);
gadget->sg_supported = using_desc_dma(hsotg);
dev_info(hsotg->dev, "bound driver %s\n", driver->driver.name);
return 0;
err:
hsotg->driver = NULL;
return ret;
}
/**
* dwc2_hsotg_udc_stop - stop the udc
* @gadget: The usb gadget state
*
 * Stop the udc hw block and stay tuned for future transmissions
*/
static int dwc2_hsotg_udc_stop(struct usb_gadget *gadget)
{
struct dwc2_hsotg *hsotg = to_hsotg(gadget);
unsigned long flags;
int ep;
if (!hsotg)
return -ENODEV;
/* all endpoints should be shutdown */
for (ep = 1; ep < hsotg->num_of_eps; ep++) {
if (hsotg->eps_in[ep])
dwc2_hsotg_ep_disable_lock(&hsotg->eps_in[ep]->ep);
if (hsotg->eps_out[ep])
dwc2_hsotg_ep_disable_lock(&hsotg->eps_out[ep]->ep);
}
spin_lock_irqsave(&hsotg->lock, flags);
hsotg->driver = NULL;
hsotg->gadget.speed = USB_SPEED_UNKNOWN;
hsotg->enabled = 0;
spin_unlock_irqrestore(&hsotg->lock, flags);
if (!IS_ERR_OR_NULL(hsotg->uphy))
otg_set_peripheral(hsotg->uphy->otg, NULL);
if (hsotg->dr_mode == USB_DR_MODE_PERIPHERAL)
dwc2_lowlevel_hw_disable(hsotg);
return 0;
}
/**
* dwc2_hsotg_gadget_getframe - read the frame number
* @gadget: The usb gadget state
*
* Read the {micro} frame number
*/
static int dwc2_hsotg_gadget_getframe(struct usb_gadget *gadget)
{
return dwc2_hsotg_read_frameno(to_hsotg(gadget));
}
/**
* dwc2_hsotg_set_selfpowered - set if device is self/bus powered
* @gadget: The usb gadget state
* @is_selfpowered: Whether the device is self-powered
*
* Set if the device is self or bus powered.
*/
static int dwc2_hsotg_set_selfpowered(struct usb_gadget *gadget,
int is_selfpowered)
{
struct dwc2_hsotg *hsotg = to_hsotg(gadget);
unsigned long flags;
spin_lock_irqsave(&hsotg->lock, flags);
gadget->is_selfpowered = !!is_selfpowered;
spin_unlock_irqrestore(&hsotg->lock, flags);
return 0;
}
/**
* dwc2_hsotg_pullup - connect/disconnect the USB PHY
* @gadget: The usb gadget state
* @is_on: Current state of the USB PHY
*
* Connect/Disconnect the USB PHY pullup
*/
static int dwc2_hsotg_pullup(struct usb_gadget *gadget, int is_on)
{
struct dwc2_hsotg *hsotg = to_hsotg(gadget);
unsigned long flags;
dev_dbg(hsotg->dev, "%s: is_on: %d op_state: %d\n", __func__, is_on,
hsotg->op_state);
/* Don't modify pullup state while in host mode */
if (hsotg->op_state != OTG_STATE_B_PERIPHERAL) {
hsotg->enabled = is_on;
return 0;
}
spin_lock_irqsave(&hsotg->lock, flags);
if (is_on) {
hsotg->enabled = 1;
dwc2_hsotg_core_init_disconnected(hsotg, false);
		/* Enable ACG feature in device mode, if supported */
dwc2_enable_acg(hsotg);
dwc2_hsotg_core_connect(hsotg);
} else {
dwc2_hsotg_core_disconnect(hsotg);
dwc2_hsotg_disconnect(hsotg);
hsotg->enabled = 0;
}
hsotg->gadget.speed = USB_SPEED_UNKNOWN;
spin_unlock_irqrestore(&hsotg->lock, flags);
return 0;
}
static int dwc2_hsotg_vbus_session(struct usb_gadget *gadget, int is_active)
{
struct dwc2_hsotg *hsotg = to_hsotg(gadget);
unsigned long flags;
dev_dbg(hsotg->dev, "%s: is_active: %d\n", __func__, is_active);
spin_lock_irqsave(&hsotg->lock, flags);
/*
* If controller is in partial power down state, it must exit from
* that state before being initialized / de-initialized
*/
if (hsotg->lx_state == DWC2_L2 && hsotg->in_ppd)
/*
* No need to check the return value as
* registers are not being restored.
*/
dwc2_exit_partial_power_down(hsotg, 0, false);
if (is_active) {
hsotg->op_state = OTG_STATE_B_PERIPHERAL;
dwc2_hsotg_core_init_disconnected(hsotg, false);
if (hsotg->enabled) {
			/* Enable ACG feature in device mode, if supported */
dwc2_enable_acg(hsotg);
dwc2_hsotg_core_connect(hsotg);
}
} else {
dwc2_hsotg_core_disconnect(hsotg);
dwc2_hsotg_disconnect(hsotg);
}
spin_unlock_irqrestore(&hsotg->lock, flags);
return 0;
}
/**
* dwc2_hsotg_vbus_draw - report bMaxPower field
* @gadget: The usb gadget state
* @mA: Amount of current
*
* Report how much power the device may consume to the phy.
*/
static int dwc2_hsotg_vbus_draw(struct usb_gadget *gadget, unsigned int mA)
{
struct dwc2_hsotg *hsotg = to_hsotg(gadget);
if (IS_ERR_OR_NULL(hsotg->uphy))
return -ENOTSUPP;
return usb_phy_set_power(hsotg->uphy, mA);
}
static void dwc2_gadget_set_speed(struct usb_gadget *g, enum usb_device_speed speed)
{
struct dwc2_hsotg *hsotg = to_hsotg(g);
unsigned long flags;
spin_lock_irqsave(&hsotg->lock, flags);
switch (speed) {
case USB_SPEED_HIGH:
hsotg->params.speed = DWC2_SPEED_PARAM_HIGH;
break;
case USB_SPEED_FULL:
hsotg->params.speed = DWC2_SPEED_PARAM_FULL;
break;
case USB_SPEED_LOW:
hsotg->params.speed = DWC2_SPEED_PARAM_LOW;
break;
default:
dev_err(hsotg->dev, "invalid speed (%d)\n", speed);
}
spin_unlock_irqrestore(&hsotg->lock, flags);
}
static const struct usb_gadget_ops dwc2_hsotg_gadget_ops = {
.get_frame = dwc2_hsotg_gadget_getframe,
.set_selfpowered = dwc2_hsotg_set_selfpowered,
.udc_start = dwc2_hsotg_udc_start,
.udc_stop = dwc2_hsotg_udc_stop,
.pullup = dwc2_hsotg_pullup,
.udc_set_speed = dwc2_gadget_set_speed,
.vbus_session = dwc2_hsotg_vbus_session,
.vbus_draw = dwc2_hsotg_vbus_draw,
};
/**
* dwc2_hsotg_initep - initialise a single endpoint
* @hsotg: The device state.
* @hs_ep: The endpoint to be initialised.
* @epnum: The endpoint number
* @dir_in: True if direction is in.
*
* Initialise the given endpoint (as part of the probe and device state
 * creation) to give to the gadget driver. Set up the endpoint name, any
* direction information and other state that may be required.
*/
static void dwc2_hsotg_initep(struct dwc2_hsotg *hsotg,
struct dwc2_hsotg_ep *hs_ep,
int epnum,
bool dir_in)
{
char *dir;
if (epnum == 0)
dir = "";
else if (dir_in)
dir = "in";
else
dir = "out";
hs_ep->dir_in = dir_in;
hs_ep->index = epnum;
snprintf(hs_ep->name, sizeof(hs_ep->name), "ep%d%s", epnum, dir);
INIT_LIST_HEAD(&hs_ep->queue);
INIT_LIST_HEAD(&hs_ep->ep.ep_list);
/* add to the list of endpoints known by the gadget driver */
if (epnum)
list_add_tail(&hs_ep->ep.ep_list, &hsotg->gadget.ep_list);
hs_ep->parent = hsotg;
hs_ep->ep.name = hs_ep->name;
if (hsotg->params.speed == DWC2_SPEED_PARAM_LOW)
usb_ep_set_maxpacket_limit(&hs_ep->ep, 8);
else
usb_ep_set_maxpacket_limit(&hs_ep->ep,
epnum ? 1024 : EP0_MPS_LIMIT);
hs_ep->ep.ops = &dwc2_hsotg_ep_ops;
if (epnum == 0) {
hs_ep->ep.caps.type_control = true;
} else {
if (hsotg->params.speed != DWC2_SPEED_PARAM_LOW) {
hs_ep->ep.caps.type_iso = true;
hs_ep->ep.caps.type_bulk = true;
}
hs_ep->ep.caps.type_int = true;
}
if (dir_in)
hs_ep->ep.caps.dir_in = true;
else
hs_ep->ep.caps.dir_out = true;
/*
* if we're using dma, we need to set the next-endpoint pointer
* to be something valid.
*/
if (using_dma(hsotg)) {
u32 next = DXEPCTL_NEXTEP((epnum + 1) % 15);
if (dir_in)
dwc2_writel(hsotg, next, DIEPCTL(epnum));
else
dwc2_writel(hsotg, next, DOEPCTL(epnum));
}
}
/**
* dwc2_hsotg_hw_cfg - read HW configuration registers
* @hsotg: Programming view of the DWC_otg controller
*
* Read the USB core HW configuration registers
*/
static int dwc2_hsotg_hw_cfg(struct dwc2_hsotg *hsotg)
{
u32 cfg;
u32 ep_type;
u32 i;
/* check hardware configuration */
hsotg->num_of_eps = hsotg->hw_params.num_dev_ep;
/* Add ep0 */
hsotg->num_of_eps++;
hsotg->eps_in[0] = devm_kzalloc(hsotg->dev,
sizeof(struct dwc2_hsotg_ep),
GFP_KERNEL);
if (!hsotg->eps_in[0])
return -ENOMEM;
/* Same dwc2_hsotg_ep is used in both directions for ep0 */
hsotg->eps_out[0] = hsotg->eps_in[0];
cfg = hsotg->hw_params.dev_ep_dirs;
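	/* Two direction bits per EP: 0 = bidirectional, 1 = IN only, 2 = OUT only */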
for (i = 1, cfg >>= 2; i < hsotg->num_of_eps; i++, cfg >>= 2) {
ep_type = cfg & 3;
/* Direction in or both */
if (!(ep_type & 2)) {
hsotg->eps_in[i] = devm_kzalloc(hsotg->dev,
sizeof(struct dwc2_hsotg_ep), GFP_KERNEL);
if (!hsotg->eps_in[i])
return -ENOMEM;
}
/* Direction out or both */
if (!(ep_type & 1)) {
hsotg->eps_out[i] = devm_kzalloc(hsotg->dev,
sizeof(struct dwc2_hsotg_ep), GFP_KERNEL);
if (!hsotg->eps_out[i])
return -ENOMEM;
}
}
hsotg->fifo_mem = hsotg->hw_params.total_fifo_size;
hsotg->dedicated_fifos = hsotg->hw_params.en_multiple_tx_fifo;
dev_info(hsotg->dev, "EPs: %d, %s fifos, %d entries in SPRAM\n",
hsotg->num_of_eps,
hsotg->dedicated_fifos ? "dedicated" : "shared",
hsotg->fifo_mem);
return 0;
}
/**
* dwc2_hsotg_dump - dump state of the udc
* @hsotg: Programming view of the DWC_otg controller
*
*/
static void dwc2_hsotg_dump(struct dwc2_hsotg *hsotg)
{
#ifdef DEBUG
struct device *dev = hsotg->dev;
u32 val;
int idx;
dev_info(dev, "DCFG=0x%08x, DCTL=0x%08x, DIEPMSK=%08x\n",
dwc2_readl(hsotg, DCFG), dwc2_readl(hsotg, DCTL),
dwc2_readl(hsotg, DIEPMSK));
dev_info(dev, "GAHBCFG=0x%08x, GHWCFG1=0x%08x\n",
dwc2_readl(hsotg, GAHBCFG), dwc2_readl(hsotg, GHWCFG1));
dev_info(dev, "GRXFSIZ=0x%08x, GNPTXFSIZ=0x%08x\n",
dwc2_readl(hsotg, GRXFSIZ), dwc2_readl(hsotg, GNPTXFSIZ));
/* show periodic fifo settings */
for (idx = 1; idx < hsotg->num_of_eps; idx++) {
val = dwc2_readl(hsotg, DPTXFSIZN(idx));
dev_info(dev, "DPTx[%d] FSize=%d, StAddr=0x%08x\n", idx,
val >> FIFOSIZE_DEPTH_SHIFT,
val & FIFOSIZE_STARTADDR_MASK);
}
for (idx = 0; idx < hsotg->num_of_eps; idx++) {
dev_info(dev,
"ep%d-in: EPCTL=0x%08x, SIZ=0x%08x, DMA=0x%08x\n", idx,
dwc2_readl(hsotg, DIEPCTL(idx)),
dwc2_readl(hsotg, DIEPTSIZ(idx)),
dwc2_readl(hsotg, DIEPDMA(idx)));
val = dwc2_readl(hsotg, DOEPCTL(idx));
dev_info(dev,
"ep%d-out: EPCTL=0x%08x, SIZ=0x%08x, DMA=0x%08x\n",
idx, dwc2_readl(hsotg, DOEPCTL(idx)),
dwc2_readl(hsotg, DOEPTSIZ(idx)),
dwc2_readl(hsotg, DOEPDMA(idx)));
}
dev_info(dev, "DVBUSDIS=0x%08x, DVBUSPULSE=%08x\n",
dwc2_readl(hsotg, DVBUSDIS), dwc2_readl(hsotg, DVBUSPULSE));
#endif
}
/**
* dwc2_gadget_init - init function for gadget
* @hsotg: Programming view of the DWC_otg controller
*
*/
int dwc2_gadget_init(struct dwc2_hsotg *hsotg)
{
struct device *dev = hsotg->dev;
int epnum;
int ret;
/* Dump fifo information */
dev_dbg(dev, "NonPeriodic TXFIFO size: %d\n",
hsotg->params.g_np_tx_fifo_size);
dev_dbg(dev, "RXFIFO size: %d\n", hsotg->params.g_rx_fifo_size);
switch (hsotg->params.speed) {
case DWC2_SPEED_PARAM_LOW:
hsotg->gadget.max_speed = USB_SPEED_LOW;
break;
case DWC2_SPEED_PARAM_FULL:
hsotg->gadget.max_speed = USB_SPEED_FULL;
break;
default:
hsotg->gadget.max_speed = USB_SPEED_HIGH;
break;
}
hsotg->gadget.ops = &dwc2_hsotg_gadget_ops;
hsotg->gadget.name = dev_name(dev);
hsotg->gadget.otg_caps = &hsotg->params.otg_caps;
hsotg->remote_wakeup_allowed = 0;
if (hsotg->params.lpm)
hsotg->gadget.lpm_capable = true;
if (hsotg->dr_mode == USB_DR_MODE_OTG)
hsotg->gadget.is_otg = 1;
else if (hsotg->dr_mode == USB_DR_MODE_PERIPHERAL)
hsotg->op_state = OTG_STATE_B_PERIPHERAL;
ret = dwc2_hsotg_hw_cfg(hsotg);
if (ret) {
dev_err(hsotg->dev, "Hardware configuration failed: %d\n", ret);
return ret;
}
hsotg->ctrl_buff = devm_kzalloc(hsotg->dev,
DWC2_CTRL_BUFF_SIZE, GFP_KERNEL);
if (!hsotg->ctrl_buff)
return -ENOMEM;
hsotg->ep0_buff = devm_kzalloc(hsotg->dev,
DWC2_CTRL_BUFF_SIZE, GFP_KERNEL);
if (!hsotg->ep0_buff)
return -ENOMEM;
if (using_desc_dma(hsotg)) {
ret = dwc2_gadget_alloc_ctrl_desc_chains(hsotg);
if (ret < 0)
return ret;
}
ret = devm_request_irq(hsotg->dev, hsotg->irq, dwc2_hsotg_irq,
IRQF_SHARED, dev_name(hsotg->dev), hsotg);
if (ret < 0) {
dev_err(dev, "cannot claim IRQ for gadget\n");
return ret;
}
/* hsotg->num_of_eps holds number of EPs other than ep0 */
if (hsotg->num_of_eps == 0) {
dev_err(dev, "wrong number of EPs (zero)\n");
return -EINVAL;
}
/* setup endpoint information */
INIT_LIST_HEAD(&hsotg->gadget.ep_list);
hsotg->gadget.ep0 = &hsotg->eps_out[0]->ep;
/* allocate EP0 request */
hsotg->ctrl_req = dwc2_hsotg_ep_alloc_request(&hsotg->eps_out[0]->ep,
GFP_KERNEL);
if (!hsotg->ctrl_req) {
dev_err(dev, "failed to allocate ctrl req\n");
return -ENOMEM;
}
/* initialise the endpoints now the core has been initialised */
for (epnum = 0; epnum < hsotg->num_of_eps; epnum++) {
if (hsotg->eps_in[epnum])
dwc2_hsotg_initep(hsotg, hsotg->eps_in[epnum],
epnum, 1);
if (hsotg->eps_out[epnum])
dwc2_hsotg_initep(hsotg, hsotg->eps_out[epnum],
epnum, 0);
}
dwc2_hsotg_dump(hsotg);
return 0;
}
/**
* dwc2_hsotg_remove - remove function for hsotg driver
* @hsotg: Programming view of the DWC_otg controller
*
*/
int dwc2_hsotg_remove(struct dwc2_hsotg *hsotg)
{
usb_del_gadget_udc(&hsotg->gadget);
dwc2_hsotg_ep_free_request(&hsotg->eps_out[0]->ep, hsotg->ctrl_req);
return 0;
}
int dwc2_hsotg_suspend(struct dwc2_hsotg *hsotg)
{
unsigned long flags;
if (hsotg->lx_state != DWC2_L0)
return 0;
if (hsotg->driver) {
int ep;
dev_info(hsotg->dev, "suspending usb gadget %s\n",
hsotg->driver->driver.name);
spin_lock_irqsave(&hsotg->lock, flags);
if (hsotg->enabled)
dwc2_hsotg_core_disconnect(hsotg);
dwc2_hsotg_disconnect(hsotg);
hsotg->gadget.speed = USB_SPEED_UNKNOWN;
spin_unlock_irqrestore(&hsotg->lock, flags);
for (ep = 1; ep < hsotg->num_of_eps; ep++) {
if (hsotg->eps_in[ep])
dwc2_hsotg_ep_disable_lock(&hsotg->eps_in[ep]->ep);
if (hsotg->eps_out[ep])
dwc2_hsotg_ep_disable_lock(&hsotg->eps_out[ep]->ep);
}
}
return 0;
}
int dwc2_hsotg_resume(struct dwc2_hsotg *hsotg)
{
unsigned long flags;
if (hsotg->lx_state == DWC2_L2)
return 0;
if (hsotg->driver) {
dev_info(hsotg->dev, "resuming usb gadget %s\n",
hsotg->driver->driver.name);
spin_lock_irqsave(&hsotg->lock, flags);
dwc2_hsotg_core_init_disconnected(hsotg, false);
if (hsotg->enabled) {
			/* Enable ACG feature in device mode, if supported */
dwc2_enable_acg(hsotg);
dwc2_hsotg_core_connect(hsotg);
}
spin_unlock_irqrestore(&hsotg->lock, flags);
}
return 0;
}
/**
* dwc2_backup_device_registers() - Backup controller device registers.
 * When suspending the usb bus, the device registers need to be backed up
 * if controller power is disabled once suspended.
*
* @hsotg: Programming view of the DWC_otg controller
*/
int dwc2_backup_device_registers(struct dwc2_hsotg *hsotg)
{
struct dwc2_dregs_backup *dr;
int i;
dev_dbg(hsotg->dev, "%s\n", __func__);
/* Backup dev regs */
dr = &hsotg->dr_backup;
dr->dcfg = dwc2_readl(hsotg, DCFG);
dr->dctl = dwc2_readl(hsotg, DCTL);
dr->daintmsk = dwc2_readl(hsotg, DAINTMSK);
dr->diepmsk = dwc2_readl(hsotg, DIEPMSK);
dr->doepmsk = dwc2_readl(hsotg, DOEPMSK);
for (i = 0; i < hsotg->num_of_eps; i++) {
/* Backup IN EPs */
dr->diepctl[i] = dwc2_readl(hsotg, DIEPCTL(i));
/* Ensure DATA PID is correctly configured */
if (dr->diepctl[i] & DXEPCTL_DPID)
dr->diepctl[i] |= DXEPCTL_SETD1PID;
else
dr->diepctl[i] |= DXEPCTL_SETD0PID;
dr->dieptsiz[i] = dwc2_readl(hsotg, DIEPTSIZ(i));
dr->diepdma[i] = dwc2_readl(hsotg, DIEPDMA(i));
/* Backup OUT EPs */
dr->doepctl[i] = dwc2_readl(hsotg, DOEPCTL(i));
/* Ensure DATA PID is correctly configured */
if (dr->doepctl[i] & DXEPCTL_DPID)
dr->doepctl[i] |= DXEPCTL_SETD1PID;
else
dr->doepctl[i] |= DXEPCTL_SETD0PID;
dr->doeptsiz[i] = dwc2_readl(hsotg, DOEPTSIZ(i));
dr->doepdma[i] = dwc2_readl(hsotg, DOEPDMA(i));
dr->dtxfsiz[i] = dwc2_readl(hsotg, DPTXFSIZN(i));
}
dr->valid = true;
return 0;
}
/**
* dwc2_restore_device_registers() - Restore controller device registers.
 * When resuming the usb bus, the device registers need to be restored
 * if controller power was disabled.
*
* @hsotg: Programming view of the DWC_otg controller
* @remote_wakeup: Indicates whether resume is initiated by Device or Host.
*
* Return: 0 if successful, negative error code otherwise
*/
int dwc2_restore_device_registers(struct dwc2_hsotg *hsotg, int remote_wakeup)
{
struct dwc2_dregs_backup *dr;
int i;
dev_dbg(hsotg->dev, "%s\n", __func__);
/* Restore dev regs */
dr = &hsotg->dr_backup;
if (!dr->valid) {
dev_err(hsotg->dev, "%s: no device registers to restore\n",
__func__);
return -EINVAL;
}
dr->valid = false;
if (!remote_wakeup)
dwc2_writel(hsotg, dr->dctl, DCTL);
dwc2_writel(hsotg, dr->daintmsk, DAINTMSK);
dwc2_writel(hsotg, dr->diepmsk, DIEPMSK);
dwc2_writel(hsotg, dr->doepmsk, DOEPMSK);
for (i = 0; i < hsotg->num_of_eps; i++) {
/* Restore IN EPs */
dwc2_writel(hsotg, dr->dieptsiz[i], DIEPTSIZ(i));
dwc2_writel(hsotg, dr->diepdma[i], DIEPDMA(i));
dwc2_writel(hsotg, dr->doeptsiz[i], DOEPTSIZ(i));
		/* WA for enabled IN EPs in DDMA mode. On entering
		 * hibernation a wrong value is read and saved from DIEPDMAx;
		 * as a result a BNA interrupt is asserted on hibernation exit
		 * when restoring from the saved area.
		 */
if (using_desc_dma(hsotg) &&
(dr->diepctl[i] & DXEPCTL_EPENA))
dr->diepdma[i] = hsotg->eps_in[i]->desc_list_dma;
dwc2_writel(hsotg, dr->dtxfsiz[i], DPTXFSIZN(i));
dwc2_writel(hsotg, dr->diepctl[i], DIEPCTL(i));
/* Restore OUT EPs */
dwc2_writel(hsotg, dr->doeptsiz[i], DOEPTSIZ(i));
		/* WA for enabled OUT EPs in DDMA mode. On entering
		 * hibernation a wrong value is read and saved from DOEPDMAx;
		 * as a result a BNA interrupt is asserted on hibernation exit
		 * when restoring from the saved area.
		 */
if (using_desc_dma(hsotg) &&
(dr->doepctl[i] & DXEPCTL_EPENA))
dr->doepdma[i] = hsotg->eps_out[i]->desc_list_dma;
dwc2_writel(hsotg, dr->doepdma[i], DOEPDMA(i));
dwc2_writel(hsotg, dr->doepctl[i], DOEPCTL(i));
}
return 0;
}
/**
* dwc2_gadget_init_lpm - Configure the core to support LPM in device mode
*
* @hsotg: Programming view of DWC_otg controller
*
*/
void dwc2_gadget_init_lpm(struct dwc2_hsotg *hsotg)
{
u32 val;
if (!hsotg->params.lpm)
return;
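	/* Advertise LPM capability and program HIRD threshold/BESL options */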
val = GLPMCFG_LPMCAP | GLPMCFG_APPL1RES;
val |= hsotg->params.hird_threshold_en ? GLPMCFG_HIRD_THRES_EN : 0;
val |= hsotg->params.lpm_clock_gating ? GLPMCFG_ENBLSLPM : 0;
val |= hsotg->params.hird_threshold << GLPMCFG_HIRD_THRES_SHIFT;
val |= hsotg->params.besl ? GLPMCFG_ENBESL : 0;
val |= GLPMCFG_LPM_REJECT_CTRL_CONTROL;
val |= GLPMCFG_LPM_ACCEPT_CTRL_ISOC;
dwc2_writel(hsotg, val, GLPMCFG);
dev_dbg(hsotg->dev, "GLPMCFG=0x%08x\n", dwc2_readl(hsotg, GLPMCFG));
/* Unmask WKUP_ALERT Interrupt */
if (hsotg->params.service_interval)
dwc2_set_bit(hsotg, GINTMSK2, GINTMSK2_WKUP_ALERT_INT_MSK);
}
/**
* dwc2_gadget_program_ref_clk - Program GREFCLK register in device mode
*
* @hsotg: Programming view of DWC_otg controller
*
*/
void dwc2_gadget_program_ref_clk(struct dwc2_hsotg *hsotg)
{
u32 val = 0;
val |= GREFCLK_REF_CLK_MODE;
val |= hsotg->params.ref_clk_per << GREFCLK_REFCLKPER_SHIFT;
val |= hsotg->params.sof_cnt_wkup_alert <<
GREFCLK_SOF_CNT_WKUP_ALERT_SHIFT;
dwc2_writel(hsotg, val, GREFCLK);
dev_dbg(hsotg->dev, "GREFCLK=0x%08x\n", dwc2_readl(hsotg, GREFCLK));
}
/**
* dwc2_gadget_enter_hibernation() - Put controller in Hibernation.
*
* @hsotg: Programming view of the DWC_otg controller
*
 * Return non-zero if the controller failed to enter hibernation.
*/
int dwc2_gadget_enter_hibernation(struct dwc2_hsotg *hsotg)
{
u32 gpwrdn;
int ret = 0;
/* Change to L2(suspend) state */
hsotg->lx_state = DWC2_L2;
dev_dbg(hsotg->dev, "Start of hibernation completed\n");
ret = dwc2_backup_global_registers(hsotg);
if (ret) {
dev_err(hsotg->dev, "%s: failed to backup global registers\n",
__func__);
return ret;
}
ret = dwc2_backup_device_registers(hsotg);
if (ret) {
dev_err(hsotg->dev, "%s: failed to backup device registers\n",
__func__);
return ret;
}
gpwrdn = GPWRDN_PWRDNRSTN;
gpwrdn |= GPWRDN_PMUACTV;
dwc2_writel(hsotg, gpwrdn, GPWRDN);
udelay(10);
/* Set flag to indicate that we are in hibernation */
hsotg->hibernated = 1;
/* Enable interrupts from wake up logic */
gpwrdn = dwc2_readl(hsotg, GPWRDN);
gpwrdn |= GPWRDN_PMUINTSEL;
dwc2_writel(hsotg, gpwrdn, GPWRDN);
udelay(10);
/* Unmask device mode interrupts in GPWRDN */
gpwrdn = dwc2_readl(hsotg, GPWRDN);
gpwrdn |= GPWRDN_RST_DET_MSK;
gpwrdn |= GPWRDN_LNSTSCHG_MSK;
gpwrdn |= GPWRDN_STS_CHGINT_MSK;
dwc2_writel(hsotg, gpwrdn, GPWRDN);
udelay(10);
/* Enable Power Down Clamp */
gpwrdn = dwc2_readl(hsotg, GPWRDN);
gpwrdn |= GPWRDN_PWRDNCLMP;
dwc2_writel(hsotg, gpwrdn, GPWRDN);
udelay(10);
/* Switch off VDD */
gpwrdn = dwc2_readl(hsotg, GPWRDN);
gpwrdn |= GPWRDN_PWRDNSWTCH;
dwc2_writel(hsotg, gpwrdn, GPWRDN);
udelay(10);
/* Save gpwrdn register for further usage if stschng interrupt */
hsotg->gr_backup.gpwrdn = dwc2_readl(hsotg, GPWRDN);
dev_dbg(hsotg->dev, "Hibernation completed\n");
return ret;
}
/**
* dwc2_gadget_exit_hibernation()
 * This function is for exiting from device mode hibernation by host-initiated
 * resume/reset or device-initiated remote wakeup.
*
* @hsotg: Programming view of the DWC_otg controller
* @rem_wakeup: indicates whether resume is initiated by Device or Host.
* @reset: indicates whether resume is initiated by Reset.
*
 * Return non-zero if the controller failed to exit hibernation.
*/
int dwc2_gadget_exit_hibernation(struct dwc2_hsotg *hsotg,
int rem_wakeup, int reset)
{
u32 pcgcctl;
u32 gpwrdn;
u32 dctl;
int ret = 0;
struct dwc2_gregs_backup *gr;
struct dwc2_dregs_backup *dr;
gr = &hsotg->gr_backup;
dr = &hsotg->dr_backup;
if (!hsotg->hibernated) {
dev_dbg(hsotg->dev, "Already exited from Hibernation\n");
return 1;
}
dev_dbg(hsotg->dev,
"%s: called with rem_wakeup = %d reset = %d\n",
__func__, rem_wakeup, reset);
dwc2_hib_restore_common(hsotg, rem_wakeup, 0);
if (!reset) {
		/* Clear all pending interrupts */
dwc2_writel(hsotg, 0xffffffff, GINTSTS);
}
/* De-assert Restore */
gpwrdn = dwc2_readl(hsotg, GPWRDN);
gpwrdn &= ~GPWRDN_RESTORE;
dwc2_writel(hsotg, gpwrdn, GPWRDN);
udelay(10);
if (!rem_wakeup) {
pcgcctl = dwc2_readl(hsotg, PCGCTL);
pcgcctl &= ~PCGCTL_RSTPDWNMODULE;
dwc2_writel(hsotg, pcgcctl, PCGCTL);
}
/* Restore GUSBCFG, DCFG and DCTL */
dwc2_writel(hsotg, gr->gusbcfg, GUSBCFG);
dwc2_writel(hsotg, dr->dcfg, DCFG);
dwc2_writel(hsotg, dr->dctl, DCTL);
/* On USB Reset, reset device address to zero */
if (reset)
dwc2_clear_bit(hsotg, DCFG, DCFG_DEVADDR_MASK);
/* De-assert Wakeup Logic */
gpwrdn = dwc2_readl(hsotg, GPWRDN);
gpwrdn &= ~GPWRDN_PMUACTV;
dwc2_writel(hsotg, gpwrdn, GPWRDN);
if (rem_wakeup) {
udelay(10);
/* Start Remote Wakeup Signaling */
dwc2_writel(hsotg, dr->dctl | DCTL_RMTWKUPSIG, DCTL);
} else {
udelay(50);
/* Set Device programming done bit */
dctl = dwc2_readl(hsotg, DCTL);
dctl |= DCTL_PWRONPRGDONE;
dwc2_writel(hsotg, dctl, DCTL);
}
/* Wait for interrupts which must be cleared */
mdelay(2);
	/* Clear all pending interrupts */
dwc2_writel(hsotg, 0xffffffff, GINTSTS);
/* Restore global registers */
ret = dwc2_restore_global_registers(hsotg);
if (ret) {
dev_err(hsotg->dev, "%s: failed to restore registers\n",
__func__);
return ret;
}
/* Restore device registers */
ret = dwc2_restore_device_registers(hsotg, rem_wakeup);
if (ret) {
dev_err(hsotg->dev, "%s: failed to restore device registers\n",
__func__);
return ret;
}
if (rem_wakeup) {
mdelay(10);
dctl = dwc2_readl(hsotg, DCTL);
dctl &= ~DCTL_RMTWKUPSIG;
dwc2_writel(hsotg, dctl, DCTL);
}
hsotg->hibernated = 0;
hsotg->lx_state = DWC2_L0;
dev_dbg(hsotg->dev, "Hibernation recovery completes here\n");
return ret;
}
/**
* dwc2_gadget_enter_partial_power_down() - Put controller in partial
* power down.
*
* @hsotg: Programming view of the DWC_otg controller
*
* Return: non-zero if failed to enter device partial power down.
*
* This function is for entering device mode partial power down.
*/
int dwc2_gadget_enter_partial_power_down(struct dwc2_hsotg *hsotg)
{
u32 pcgcctl;
int ret = 0;
dev_dbg(hsotg->dev, "Entering device partial power down started.\n");
/* Backup all registers */
ret = dwc2_backup_global_registers(hsotg);
if (ret) {
dev_err(hsotg->dev, "%s: failed to backup global registers\n",
__func__);
return ret;
}
ret = dwc2_backup_device_registers(hsotg);
if (ret) {
dev_err(hsotg->dev, "%s: failed to backup device registers\n",
__func__);
return ret;
}
/*
* Clear any pending interrupts since dwc2 will not be able to
* clear them after entering partial_power_down.
*/
dwc2_writel(hsotg, 0xffffffff, GINTSTS);
/* Put the controller in low power state */
pcgcctl = dwc2_readl(hsotg, PCGCTL);
pcgcctl |= PCGCTL_PWRCLMP;
dwc2_writel(hsotg, pcgcctl, PCGCTL);
udelay(5);
pcgcctl |= PCGCTL_RSTPDWNMODULE;
dwc2_writel(hsotg, pcgcctl, PCGCTL);
udelay(5);
pcgcctl |= PCGCTL_STOPPCLK;
dwc2_writel(hsotg, pcgcctl, PCGCTL);
/* Set in_ppd flag to 1 as here core enters suspend. */
hsotg->in_ppd = 1;
hsotg->lx_state = DWC2_L2;
dev_dbg(hsotg->dev, "Entering device partial power down completed.\n");
return ret;
}
/*
* dwc2_gadget_exit_partial_power_down() - Exit controller from device partial
* power down.
*
* @hsotg: Programming view of the DWC_otg controller
* @restore: indicates whether need to restore the registers or not.
*
* Return: non-zero if failed to exit device partial power down.
*
* This function is for exiting from device mode partial power down.
*/
int dwc2_gadget_exit_partial_power_down(struct dwc2_hsotg *hsotg,
bool restore)
{
u32 pcgcctl;
u32 dctl;
struct dwc2_dregs_backup *dr;
int ret = 0;
dr = &hsotg->dr_backup;
dev_dbg(hsotg->dev, "Exiting device partial Power Down started.\n");
pcgcctl = dwc2_readl(hsotg, PCGCTL);
pcgcctl &= ~PCGCTL_STOPPCLK;
dwc2_writel(hsotg, pcgcctl, PCGCTL);
pcgcctl = dwc2_readl(hsotg, PCGCTL);
pcgcctl &= ~PCGCTL_PWRCLMP;
dwc2_writel(hsotg, pcgcctl, PCGCTL);
pcgcctl = dwc2_readl(hsotg, PCGCTL);
pcgcctl &= ~PCGCTL_RSTPDWNMODULE;
dwc2_writel(hsotg, pcgcctl, PCGCTL);
udelay(100);
if (restore) {
ret = dwc2_restore_global_registers(hsotg);
if (ret) {
dev_err(hsotg->dev, "%s: failed to restore registers\n",
__func__);
return ret;
}
/* Restore DCFG */
dwc2_writel(hsotg, dr->dcfg, DCFG);
ret = dwc2_restore_device_registers(hsotg, 0);
if (ret) {
dev_err(hsotg->dev, "%s: failed to restore device registers\n",
__func__);
return ret;
}
}
/* Set the Power-On Programming done bit */
dctl = dwc2_readl(hsotg, DCTL);
dctl |= DCTL_PWRONPRGDONE;
dwc2_writel(hsotg, dctl, DCTL);
/* Set in_ppd flag to 0 as here core exits from suspend. */
hsotg->in_ppd = 0;
hsotg->lx_state = DWC2_L0;
dev_dbg(hsotg->dev, "Exiting device partial Power Down completed.\n");
return ret;
}
/**
* dwc2_gadget_enter_clock_gating() - Put controller in clock gating.
*
* @hsotg: Programming view of the DWC_otg controller
*
* This function is for entering device mode clock gating.
*/
void dwc2_gadget_enter_clock_gating(struct dwc2_hsotg *hsotg)
{
u32 pcgctl;
dev_dbg(hsotg->dev, "Entering device clock gating.\n");
/* Set the Phy Clock bit as suspend is received. */
pcgctl = dwc2_readl(hsotg, PCGCTL);
pcgctl |= PCGCTL_STOPPCLK;
dwc2_writel(hsotg, pcgctl, PCGCTL);
udelay(5);
/* Set the Gate hclk as suspend is received. */
pcgctl = dwc2_readl(hsotg, PCGCTL);
pcgctl |= PCGCTL_GATEHCLK;
dwc2_writel(hsotg, pcgctl, PCGCTL);
udelay(5);
hsotg->lx_state = DWC2_L2;
hsotg->bus_suspended = true;
}
/*
* dwc2_gadget_exit_clock_gating() - Exit controller from device clock gating.
*
* @hsotg: Programming view of the DWC_otg controller
* @rem_wakeup: indicates whether remote wake up is enabled.
*
* This function is for exiting from device mode clock gating.
*/
void dwc2_gadget_exit_clock_gating(struct dwc2_hsotg *hsotg, int rem_wakeup)
{
u32 pcgctl;
u32 dctl;
dev_dbg(hsotg->dev, "Exiting device clock gating.\n");
/* Clear the Gate hclk. */
pcgctl = dwc2_readl(hsotg, PCGCTL);
pcgctl &= ~PCGCTL_GATEHCLK;
dwc2_writel(hsotg, pcgctl, PCGCTL);
udelay(5);
/* Phy Clock bit. */
pcgctl = dwc2_readl(hsotg, PCGCTL);
pcgctl &= ~PCGCTL_STOPPCLK;
dwc2_writel(hsotg, pcgctl, PCGCTL);
udelay(5);
if (rem_wakeup) {
/* Set Remote Wakeup Signaling */
dctl = dwc2_readl(hsotg, DCTL);
dctl |= DCTL_RMTWKUPSIG;
dwc2_writel(hsotg, dctl, DCTL);
}
/* Change to L0 state */
call_gadget(hsotg, resume);
hsotg->lx_state = DWC2_L0;
hsotg->bus_suspended = false;
}
| linux-master | drivers/usb/dwc2/gadget.c |
// SPDX-License-Identifier: GPL-2.0
/*
* debugfs.c - Designware USB2 DRD controller debugfs
*
* Copyright (C) 2015 Intel Corporation
* Mian Yousaf Kaukab <[email protected]>
*/
#include <linux/spinlock.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <linux/uaccess.h>
#include "core.h"
#include "debug.h"
#if IS_ENABLED(CONFIG_USB_DWC2_PERIPHERAL) || \
IS_ENABLED(CONFIG_USB_DWC2_DUAL_ROLE)
/**
* testmode_write() - change usb test mode state.
* @file: The file to write to.
* @ubuf: The buffer where user wrote.
* @count: The ubuf size.
* @ppos: Unused parameter.
*/
static ssize_t testmode_write(struct file *file, const char __user *ubuf, size_t
count, loff_t *ppos)
{
struct seq_file *s = file->private_data;
struct dwc2_hsotg *hsotg = s->private;
unsigned long flags;
u32 testmode = 0;
char buf[32];
if (copy_from_user(&buf, ubuf, min_t(size_t, sizeof(buf) - 1, count)))
return -EFAULT;
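	/* Map the test mode name written by userspace to a USB test mode selector */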
if (!strncmp(buf, "test_j", 6))
testmode = USB_TEST_J;
else if (!strncmp(buf, "test_k", 6))
testmode = USB_TEST_K;
else if (!strncmp(buf, "test_se0_nak", 12))
testmode = USB_TEST_SE0_NAK;
else if (!strncmp(buf, "test_packet", 11))
testmode = USB_TEST_PACKET;
else if (!strncmp(buf, "test_force_enable", 17))
testmode = USB_TEST_FORCE_ENABLE;
else
testmode = 0;
spin_lock_irqsave(&hsotg->lock, flags);
dwc2_hsotg_set_test_mode(hsotg, testmode);
spin_unlock_irqrestore(&hsotg->lock, flags);
return count;
}
/**
* testmode_show() - debugfs: show usb test mode state
* @s: The seq file to write to.
* @unused: Unused parameter.
*
* This debugfs entry shows which usb test mode is currently enabled.
*/
static int testmode_show(struct seq_file *s, void *unused)
{
struct dwc2_hsotg *hsotg = s->private;
unsigned long flags;
int dctl;
spin_lock_irqsave(&hsotg->lock, flags);
dctl = dwc2_readl(hsotg, DCTL);
dctl &= DCTL_TSTCTL_MASK;
dctl >>= DCTL_TSTCTL_SHIFT;
spin_unlock_irqrestore(&hsotg->lock, flags);
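	/* The TstCtl field of DCTL encodes the currently active test mode. */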
switch (dctl) {
case 0:
seq_puts(s, "no test\n");
break;
case USB_TEST_J:
seq_puts(s, "test_j\n");
break;
case USB_TEST_K:
seq_puts(s, "test_k\n");
break;
case USB_TEST_SE0_NAK:
seq_puts(s, "test_se0_nak\n");
break;
case USB_TEST_PACKET:
seq_puts(s, "test_packet\n");
break;
case USB_TEST_FORCE_ENABLE:
seq_puts(s, "test_force_enable\n");
break;
default:
seq_printf(s, "UNKNOWN %d\n", dctl);
}
return 0;
}
static int testmode_open(struct inode *inode, struct file *file)
{
return single_open(file, testmode_show, inode->i_private);
}
static const struct file_operations testmode_fops = {
.owner = THIS_MODULE,
.open = testmode_open,
.write = testmode_write,
.read = seq_read,
.llseek = seq_lseek,
.release = single_release,
};
/**
* state_show - debugfs: show overall driver and device state.
* @seq: The seq file to write to.
* @v: Unused parameter.
*
* This debugfs entry shows the overall state of the hardware and
* some general information about each of the endpoints available
* to the system.
*/
static int state_show(struct seq_file *seq, void *v)
{
struct dwc2_hsotg *hsotg = seq->private;
int idx;
seq_printf(seq, "DCFG=0x%08x, DCTL=0x%08x, DSTS=0x%08x\n",
dwc2_readl(hsotg, DCFG),
dwc2_readl(hsotg, DCTL),
dwc2_readl(hsotg, DSTS));
seq_printf(seq, "DIEPMSK=0x%08x, DOEPMASK=0x%08x\n",
dwc2_readl(hsotg, DIEPMSK), dwc2_readl(hsotg, DOEPMSK));
seq_printf(seq, "GINTMSK=0x%08x, GINTSTS=0x%08x\n",
dwc2_readl(hsotg, GINTMSK),
dwc2_readl(hsotg, GINTSTS));
seq_printf(seq, "DAINTMSK=0x%08x, DAINT=0x%08x\n",
dwc2_readl(hsotg, DAINTMSK),
dwc2_readl(hsotg, DAINT));
seq_printf(seq, "GNPTXSTS=0x%08x, GRXSTSR=%08x\n",
dwc2_readl(hsotg, GNPTXSTS),
dwc2_readl(hsotg, GRXSTSR));
seq_puts(seq, "\nEndpoint status:\n");
for (idx = 0; idx < hsotg->num_of_eps; idx++) {
u32 in, out;
in = dwc2_readl(hsotg, DIEPCTL(idx));
out = dwc2_readl(hsotg, DOEPCTL(idx));
seq_printf(seq, "ep%d: DIEPCTL=0x%08x, DOEPCTL=0x%08x",
idx, in, out);
in = dwc2_readl(hsotg, DIEPTSIZ(idx));
out = dwc2_readl(hsotg, DOEPTSIZ(idx));
seq_printf(seq, ", DIEPTSIZ=0x%08x, DOEPTSIZ=0x%08x",
in, out);
seq_puts(seq, "\n");
}
return 0;
}
DEFINE_SHOW_ATTRIBUTE(state);
/**
* fifo_show - debugfs: show the fifo information
* @seq: The seq_file to write data to.
* @v: Unused parameter.
*
* Show the FIFO information for the overall fifo and all the
* periodic transmission FIFOs.
*/
static int fifo_show(struct seq_file *seq, void *v)
{
struct dwc2_hsotg *hsotg = seq->private;
int fifo_count = dwc2_hsotg_tx_fifo_count(hsotg);
u32 val;
int idx;
seq_puts(seq, "Non-periodic FIFOs:\n");
seq_printf(seq, "RXFIFO: Size %d\n", dwc2_readl(hsotg, GRXFSIZ));
val = dwc2_readl(hsotg, GNPTXFSIZ);
seq_printf(seq, "NPTXFIFO: Size %d, Start 0x%08x\n",
val >> FIFOSIZE_DEPTH_SHIFT,
val & FIFOSIZE_STARTADDR_MASK);
seq_puts(seq, "\nPeriodic TXFIFOs:\n");
for (idx = 1; idx <= fifo_count; idx++) {
val = dwc2_readl(hsotg, DPTXFSIZN(idx));
seq_printf(seq, "\tDPTXFIFO%2d: Size %d, Start 0x%08x\n", idx,
val >> FIFOSIZE_DEPTH_SHIFT,
val & FIFOSIZE_STARTADDR_MASK);
}
return 0;
}
DEFINE_SHOW_ATTRIBUTE(fifo);
static const char *decode_direction(int is_in)
{
return is_in ? "in" : "out";
}
/**
* ep_show - debugfs: show the state of an endpoint.
* @seq: The seq_file to write data to.
* @v: Unused parameter.
*
* This debugfs entry shows the state of the given endpoint (one is
* registered for each available).
*/
static int ep_show(struct seq_file *seq, void *v)
{
struct dwc2_hsotg_ep *ep = seq->private;
struct dwc2_hsotg *hsotg = ep->parent;
struct dwc2_hsotg_req *req;
int index = ep->index;
int show_limit = 15;
unsigned long flags;
seq_printf(seq, "Endpoint index %d, named %s, dir %s:\n",
ep->index, ep->ep.name, decode_direction(ep->dir_in));
/* first show the register state */
seq_printf(seq, "\tDIEPCTL=0x%08x, DOEPCTL=0x%08x\n",
dwc2_readl(hsotg, DIEPCTL(index)),
dwc2_readl(hsotg, DOEPCTL(index)));
seq_printf(seq, "\tDIEPDMA=0x%08x, DOEPDMA=0x%08x\n",
dwc2_readl(hsotg, DIEPDMA(index)),
dwc2_readl(hsotg, DOEPDMA(index)));
seq_printf(seq, "\tDIEPINT=0x%08x, DOEPINT=0x%08x\n",
dwc2_readl(hsotg, DIEPINT(index)),
dwc2_readl(hsotg, DOEPINT(index)));
seq_printf(seq, "\tDIEPTSIZ=0x%08x, DOEPTSIZ=0x%08x\n",
dwc2_readl(hsotg, DIEPTSIZ(index)),
dwc2_readl(hsotg, DOEPTSIZ(index)));
seq_puts(seq, "\n");
seq_printf(seq, "mps %d\n", ep->ep.maxpacket);
seq_printf(seq, "total_data=%ld\n", ep->total_data);
seq_printf(seq, "request list (%p,%p):\n",
ep->queue.next, ep->queue.prev);
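	/* Walk the request queue under the lock, capping output at show_limit entries. */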
spin_lock_irqsave(&hsotg->lock, flags);
list_for_each_entry(req, &ep->queue, queue) {
if (--show_limit < 0) {
seq_puts(seq, "not showing more requests...\n");
break;
}
seq_printf(seq, "%c req %p: %d bytes @%p, ",
req == ep->req ? '*' : ' ',
req, req->req.length, req->req.buf);
seq_printf(seq, "%d done, res %d\n",
req->req.actual, req->req.status);
}
spin_unlock_irqrestore(&hsotg->lock, flags);
return 0;
}
DEFINE_SHOW_ATTRIBUTE(ep);
/**
* dwc2_hsotg_create_debug - create debugfs directory and files
* @hsotg: The driver state
*
* Create the debugfs files to allow the user to get information
* about the state of the system. The directory name is created
* with the same name as the device itself, in case we end up
* with multiple blocks in future systems.
*/
static void dwc2_hsotg_create_debug(struct dwc2_hsotg *hsotg)
{
struct dentry *root;
unsigned int epidx;
root = hsotg->debug_root;
/* create general state file */
debugfs_create_file("state", 0444, root, hsotg, &state_fops);
debugfs_create_file("testmode", 0644, root, hsotg, &testmode_fops);
debugfs_create_file("fifo", 0444, root, hsotg, &fifo_fops);
/* Create one file for each out endpoint */
for (epidx = 0; epidx < hsotg->num_of_eps; epidx++) {
struct dwc2_hsotg_ep *ep;
ep = hsotg->eps_out[epidx];
if (ep)
debugfs_create_file(ep->name, 0444, root, ep, &ep_fops);
}
/* Create one file for each in endpoint. EP0 is handled with out eps */
for (epidx = 1; epidx < hsotg->num_of_eps; epidx++) {
struct dwc2_hsotg_ep *ep;
ep = hsotg->eps_in[epidx];
if (ep)
debugfs_create_file(ep->name, 0444, root, ep, &ep_fops);
}
}
#else
static inline void dwc2_hsotg_create_debug(struct dwc2_hsotg *hsotg) {}
#endif
/* dwc2_hsotg_delete_debug is removed as cleanup is done in dwc2_debugfs_exit */
#define dump_register(nm) \
{ \
.name = #nm, \
.offset = nm, \
}
static const struct debugfs_reg32 dwc2_regs[] = {
/*
* Accessing registers like this can trigger mode mismatch interrupt.
* However, according to dwc2 databook, the register access, in this
* case, is completed on the processor bus but is ignored by the core
* and does not affect its operation.
*/
dump_register(GOTGCTL),
dump_register(GOTGINT),
dump_register(GAHBCFG),
dump_register(GUSBCFG),
dump_register(GRSTCTL),
dump_register(GINTSTS),
dump_register(GINTMSK),
dump_register(GRXSTSR),
/* Omit GRXSTSP */
dump_register(GRXFSIZ),
dump_register(GNPTXFSIZ),
dump_register(GNPTXSTS),
dump_register(GI2CCTL),
dump_register(GPVNDCTL),
dump_register(GGPIO),
dump_register(GUID),
dump_register(GSNPSID),
dump_register(GHWCFG1),
dump_register(GHWCFG2),
dump_register(GHWCFG3),
dump_register(GHWCFG4),
dump_register(GLPMCFG),
dump_register(GPWRDN),
dump_register(GDFIFOCFG),
dump_register(ADPCTL),
dump_register(HPTXFSIZ),
dump_register(DPTXFSIZN(1)),
dump_register(DPTXFSIZN(2)),
dump_register(DPTXFSIZN(3)),
dump_register(DPTXFSIZN(4)),
dump_register(DPTXFSIZN(5)),
dump_register(DPTXFSIZN(6)),
dump_register(DPTXFSIZN(7)),
dump_register(DPTXFSIZN(8)),
dump_register(DPTXFSIZN(9)),
dump_register(DPTXFSIZN(10)),
dump_register(DPTXFSIZN(11)),
dump_register(DPTXFSIZN(12)),
dump_register(DPTXFSIZN(13)),
dump_register(DPTXFSIZN(14)),
dump_register(DPTXFSIZN(15)),
dump_register(DCFG),
dump_register(DCTL),
dump_register(DSTS),
dump_register(DIEPMSK),
dump_register(DOEPMSK),
dump_register(DAINT),
dump_register(DAINTMSK),
dump_register(DTKNQR1),
dump_register(DTKNQR2),
dump_register(DTKNQR3),
dump_register(DTKNQR4),
dump_register(DVBUSDIS),
dump_register(DVBUSPULSE),
dump_register(DIEPCTL(0)),
dump_register(DIEPCTL(1)),
dump_register(DIEPCTL(2)),
dump_register(DIEPCTL(3)),
dump_register(DIEPCTL(4)),
dump_register(DIEPCTL(5)),
dump_register(DIEPCTL(6)),
dump_register(DIEPCTL(7)),
dump_register(DIEPCTL(8)),
dump_register(DIEPCTL(9)),
dump_register(DIEPCTL(10)),
dump_register(DIEPCTL(11)),
dump_register(DIEPCTL(12)),
dump_register(DIEPCTL(13)),
dump_register(DIEPCTL(14)),
dump_register(DIEPCTL(15)),
dump_register(DOEPCTL(0)),
dump_register(DOEPCTL(1)),
dump_register(DOEPCTL(2)),
dump_register(DOEPCTL(3)),
dump_register(DOEPCTL(4)),
dump_register(DOEPCTL(5)),
dump_register(DOEPCTL(6)),
dump_register(DOEPCTL(7)),
dump_register(DOEPCTL(8)),
dump_register(DOEPCTL(9)),
dump_register(DOEPCTL(10)),
dump_register(DOEPCTL(11)),
dump_register(DOEPCTL(12)),
dump_register(DOEPCTL(13)),
dump_register(DOEPCTL(14)),
dump_register(DOEPCTL(15)),
dump_register(DIEPINT(0)),
dump_register(DIEPINT(1)),
dump_register(DIEPINT(2)),
dump_register(DIEPINT(3)),
dump_register(DIEPINT(4)),
dump_register(DIEPINT(5)),
dump_register(DIEPINT(6)),
dump_register(DIEPINT(7)),
dump_register(DIEPINT(8)),
dump_register(DIEPINT(9)),
dump_register(DIEPINT(10)),
dump_register(DIEPINT(11)),
dump_register(DIEPINT(12)),
dump_register(DIEPINT(13)),
dump_register(DIEPINT(14)),
dump_register(DIEPINT(15)),
dump_register(DOEPINT(0)),
dump_register(DOEPINT(1)),
dump_register(DOEPINT(2)),
dump_register(DOEPINT(3)),
dump_register(DOEPINT(4)),
dump_register(DOEPINT(5)),
dump_register(DOEPINT(6)),
dump_register(DOEPINT(7)),
dump_register(DOEPINT(8)),
dump_register(DOEPINT(9)),
dump_register(DOEPINT(10)),
dump_register(DOEPINT(11)),
dump_register(DOEPINT(12)),
dump_register(DOEPINT(13)),
dump_register(DOEPINT(14)),
dump_register(DOEPINT(15)),
dump_register(DIEPTSIZ(0)),
dump_register(DIEPTSIZ(1)),
dump_register(DIEPTSIZ(2)),
dump_register(DIEPTSIZ(3)),
dump_register(DIEPTSIZ(4)),
dump_register(DIEPTSIZ(5)),
dump_register(DIEPTSIZ(6)),
dump_register(DIEPTSIZ(7)),
dump_register(DIEPTSIZ(8)),
dump_register(DIEPTSIZ(9)),
dump_register(DIEPTSIZ(10)),
dump_register(DIEPTSIZ(11)),
dump_register(DIEPTSIZ(12)),
dump_register(DIEPTSIZ(13)),
dump_register(DIEPTSIZ(14)),
dump_register(DIEPTSIZ(15)),
dump_register(DOEPTSIZ(0)),
dump_register(DOEPTSIZ(1)),
dump_register(DOEPTSIZ(2)),
dump_register(DOEPTSIZ(3)),
dump_register(DOEPTSIZ(4)),
dump_register(DOEPTSIZ(5)),
dump_register(DOEPTSIZ(6)),
dump_register(DOEPTSIZ(7)),
dump_register(DOEPTSIZ(8)),
dump_register(DOEPTSIZ(9)),
dump_register(DOEPTSIZ(10)),
dump_register(DOEPTSIZ(11)),
dump_register(DOEPTSIZ(12)),
dump_register(DOEPTSIZ(13)),
dump_register(DOEPTSIZ(14)),
dump_register(DOEPTSIZ(15)),
dump_register(DIEPDMA(0)),
dump_register(DIEPDMA(1)),
dump_register(DIEPDMA(2)),
dump_register(DIEPDMA(3)),
dump_register(DIEPDMA(4)),
dump_register(DIEPDMA(5)),
dump_register(DIEPDMA(6)),
dump_register(DIEPDMA(7)),
dump_register(DIEPDMA(8)),
dump_register(DIEPDMA(9)),
dump_register(DIEPDMA(10)),
dump_register(DIEPDMA(11)),
dump_register(DIEPDMA(12)),
dump_register(DIEPDMA(13)),
dump_register(DIEPDMA(14)),
dump_register(DIEPDMA(15)),
dump_register(DOEPDMA(0)),
dump_register(DOEPDMA(1)),
dump_register(DOEPDMA(2)),
dump_register(DOEPDMA(3)),
dump_register(DOEPDMA(4)),
dump_register(DOEPDMA(5)),
dump_register(DOEPDMA(6)),
dump_register(DOEPDMA(7)),
dump_register(DOEPDMA(8)),
dump_register(DOEPDMA(9)),
dump_register(DOEPDMA(10)),
dump_register(DOEPDMA(11)),
dump_register(DOEPDMA(12)),
dump_register(DOEPDMA(13)),
dump_register(DOEPDMA(14)),
dump_register(DOEPDMA(15)),
dump_register(DTXFSTS(0)),
dump_register(DTXFSTS(1)),
dump_register(DTXFSTS(2)),
dump_register(DTXFSTS(3)),
dump_register(DTXFSTS(4)),
dump_register(DTXFSTS(5)),
dump_register(DTXFSTS(6)),
dump_register(DTXFSTS(7)),
dump_register(DTXFSTS(8)),
dump_register(DTXFSTS(9)),
dump_register(DTXFSTS(10)),
dump_register(DTXFSTS(11)),
dump_register(DTXFSTS(12)),
dump_register(DTXFSTS(13)),
dump_register(DTXFSTS(14)),
dump_register(DTXFSTS(15)),
dump_register(PCGCTL),
dump_register(HCFG),
dump_register(HFIR),
dump_register(HFNUM),
dump_register(HPTXSTS),
dump_register(HAINT),
dump_register(HAINTMSK),
dump_register(HFLBADDR),
dump_register(HPRT0),
dump_register(HCCHAR(0)),
dump_register(HCCHAR(1)),
dump_register(HCCHAR(2)),
dump_register(HCCHAR(3)),
dump_register(HCCHAR(4)),
dump_register(HCCHAR(5)),
dump_register(HCCHAR(6)),
dump_register(HCCHAR(7)),
dump_register(HCCHAR(8)),
dump_register(HCCHAR(9)),
dump_register(HCCHAR(10)),
dump_register(HCCHAR(11)),
dump_register(HCCHAR(12)),
dump_register(HCCHAR(13)),
dump_register(HCCHAR(14)),
dump_register(HCCHAR(15)),
dump_register(HCSPLT(0)),
dump_register(HCSPLT(1)),
dump_register(HCSPLT(2)),
dump_register(HCSPLT(3)),
dump_register(HCSPLT(4)),
dump_register(HCSPLT(5)),
dump_register(HCSPLT(6)),
dump_register(HCSPLT(7)),
dump_register(HCSPLT(8)),
dump_register(HCSPLT(9)),
dump_register(HCSPLT(10)),
dump_register(HCSPLT(11)),
dump_register(HCSPLT(12)),
dump_register(HCSPLT(13)),
dump_register(HCSPLT(14)),
dump_register(HCSPLT(15)),
dump_register(HCINT(0)),
dump_register(HCINT(1)),
dump_register(HCINT(2)),
dump_register(HCINT(3)),
dump_register(HCINT(4)),
dump_register(HCINT(5)),
dump_register(HCINT(6)),
dump_register(HCINT(7)),
dump_register(HCINT(8)),
dump_register(HCINT(9)),
dump_register(HCINT(10)),
dump_register(HCINT(11)),
dump_register(HCINT(12)),
dump_register(HCINT(13)),
dump_register(HCINT(14)),
dump_register(HCINT(15)),
dump_register(HCINTMSK(0)),
dump_register(HCINTMSK(1)),
dump_register(HCINTMSK(2)),
dump_register(HCINTMSK(3)),
dump_register(HCINTMSK(4)),
dump_register(HCINTMSK(5)),
dump_register(HCINTMSK(6)),
dump_register(HCINTMSK(7)),
dump_register(HCINTMSK(8)),
dump_register(HCINTMSK(9)),
dump_register(HCINTMSK(10)),
dump_register(HCINTMSK(11)),
dump_register(HCINTMSK(12)),
dump_register(HCINTMSK(13)),
dump_register(HCINTMSK(14)),
dump_register(HCINTMSK(15)),
dump_register(HCTSIZ(0)),
dump_register(HCTSIZ(1)),
dump_register(HCTSIZ(2)),
dump_register(HCTSIZ(3)),
dump_register(HCTSIZ(4)),
dump_register(HCTSIZ(5)),
dump_register(HCTSIZ(6)),
dump_register(HCTSIZ(7)),
dump_register(HCTSIZ(8)),
dump_register(HCTSIZ(9)),
dump_register(HCTSIZ(10)),
dump_register(HCTSIZ(11)),
dump_register(HCTSIZ(12)),
dump_register(HCTSIZ(13)),
dump_register(HCTSIZ(14)),
dump_register(HCTSIZ(15)),
dump_register(HCDMA(0)),
dump_register(HCDMA(1)),
dump_register(HCDMA(2)),
dump_register(HCDMA(3)),
dump_register(HCDMA(4)),
dump_register(HCDMA(5)),
dump_register(HCDMA(6)),
dump_register(HCDMA(7)),
dump_register(HCDMA(8)),
dump_register(HCDMA(9)),
dump_register(HCDMA(10)),
dump_register(HCDMA(11)),
dump_register(HCDMA(12)),
dump_register(HCDMA(13)),
dump_register(HCDMA(14)),
dump_register(HCDMA(15)),
dump_register(HCDMAB(0)),
dump_register(HCDMAB(1)),
dump_register(HCDMAB(2)),
dump_register(HCDMAB(3)),
dump_register(HCDMAB(4)),
dump_register(HCDMAB(5)),
dump_register(HCDMAB(6)),
dump_register(HCDMAB(7)),
dump_register(HCDMAB(8)),
dump_register(HCDMAB(9)),
dump_register(HCDMAB(10)),
dump_register(HCDMAB(11)),
dump_register(HCDMAB(12)),
dump_register(HCDMAB(13)),
dump_register(HCDMAB(14)),
dump_register(HCDMAB(15)),
};
#define print_param(_seq, _ptr, _param) \
seq_printf((_seq), "%-30s: %d\n", #_param, (_ptr)->_param)
#define print_param_hex(_seq, _ptr, _param) \
seq_printf((_seq), "%-30s: 0x%x\n", #_param, (_ptr)->_param)
static int params_show(struct seq_file *seq, void *v)
{
struct dwc2_hsotg *hsotg = seq->private;
struct dwc2_core_params *p = &hsotg->params;
int i;
print_param(seq, p, otg_caps.hnp_support);
print_param(seq, p, otg_caps.srp_support);
print_param(seq, p, otg_caps.otg_rev);
print_param(seq, p, dma_desc_enable);
print_param(seq, p, dma_desc_fs_enable);
print_param(seq, p, speed);
print_param(seq, p, enable_dynamic_fifo);
print_param(seq, p, en_multiple_tx_fifo);
print_param(seq, p, host_rx_fifo_size);
print_param(seq, p, host_nperio_tx_fifo_size);
print_param(seq, p, host_perio_tx_fifo_size);
print_param(seq, p, max_transfer_size);
print_param(seq, p, max_packet_count);
print_param(seq, p, host_channels);
print_param(seq, p, phy_type);
print_param(seq, p, phy_utmi_width);
print_param(seq, p, phy_ulpi_ddr);
print_param(seq, p, phy_ulpi_ext_vbus);
print_param(seq, p, i2c_enable);
print_param(seq, p, ipg_isoc_en);
print_param(seq, p, ulpi_fs_ls);
print_param(seq, p, host_support_fs_ls_low_power);
print_param(seq, p, host_ls_low_power_phy_clk);
print_param(seq, p, activate_stm_fs_transceiver);
print_param(seq, p, activate_stm_id_vb_detection);
print_param(seq, p, ts_dline);
print_param(seq, p, reload_ctl);
print_param_hex(seq, p, ahbcfg);
print_param(seq, p, uframe_sched);
print_param(seq, p, external_id_pin_ctl);
print_param(seq, p, power_down);
print_param(seq, p, lpm);
print_param(seq, p, lpm_clock_gating);
print_param(seq, p, besl);
print_param(seq, p, hird_threshold_en);
print_param(seq, p, hird_threshold);
print_param(seq, p, service_interval);
print_param(seq, p, host_dma);
print_param(seq, p, g_dma);
print_param(seq, p, g_dma_desc);
print_param(seq, p, g_rx_fifo_size);
print_param(seq, p, g_np_tx_fifo_size);
for (i = 0; i < MAX_EPS_CHANNELS; i++) {
char str[32];
snprintf(str, 32, "g_tx_fifo_size[%d]", i);
seq_printf(seq, "%-30s: %d\n", str, p->g_tx_fifo_size[i]);
}
return 0;
}
DEFINE_SHOW_ATTRIBUTE(params);
static int hw_params_show(struct seq_file *seq, void *v)
{
struct dwc2_hsotg *hsotg = seq->private;
struct dwc2_hw_params *hw = &hsotg->hw_params;
print_param(seq, hw, op_mode);
print_param(seq, hw, arch);
print_param(seq, hw, dma_desc_enable);
print_param(seq, hw, enable_dynamic_fifo);
print_param(seq, hw, en_multiple_tx_fifo);
print_param(seq, hw, rx_fifo_size);
print_param(seq, hw, host_nperio_tx_fifo_size);
print_param(seq, hw, dev_nperio_tx_fifo_size);
print_param(seq, hw, host_perio_tx_fifo_size);
print_param(seq, hw, nperio_tx_q_depth);
print_param(seq, hw, host_perio_tx_q_depth);
print_param(seq, hw, dev_token_q_depth);
print_param(seq, hw, max_transfer_size);
print_param(seq, hw, max_packet_count);
print_param(seq, hw, host_channels);
print_param(seq, hw, hs_phy_type);
print_param(seq, hw, fs_phy_type);
print_param(seq, hw, i2c_enable);
print_param(seq, hw, num_dev_ep);
print_param(seq, hw, num_dev_perio_in_ep);
print_param(seq, hw, total_fifo_size);
print_param(seq, hw, power_optimized);
print_param(seq, hw, utmi_phy_data_width);
print_param_hex(seq, hw, snpsid);
print_param_hex(seq, hw, dev_ep_dirs);
return 0;
}
DEFINE_SHOW_ATTRIBUTE(hw_params);
static int dr_mode_show(struct seq_file *seq, void *v)
{
struct dwc2_hsotg *hsotg = seq->private;
const char *dr_mode = "";
device_property_read_string(hsotg->dev, "dr_mode", &dr_mode);
seq_printf(seq, "%s\n", dr_mode);
return 0;
}
DEFINE_SHOW_ATTRIBUTE(dr_mode);
int dwc2_debugfs_init(struct dwc2_hsotg *hsotg)
{
int ret;
struct dentry *root;
root = debugfs_create_dir(dev_name(hsotg->dev), usb_debug_root);
hsotg->debug_root = root;
debugfs_create_file("params", 0444, root, hsotg, ¶ms_fops);
debugfs_create_file("hw_params", 0444, root, hsotg, &hw_params_fops);
debugfs_create_file("dr_mode", 0444, root, hsotg, &dr_mode_fops);
/* Add gadget debugfs nodes */
dwc2_hsotg_create_debug(hsotg);
hsotg->regset = devm_kzalloc(hsotg->dev, sizeof(*hsotg->regset),
GFP_KERNEL);
if (!hsotg->regset) {
ret = -ENOMEM;
goto err;
}
hsotg->regset->regs = dwc2_regs;
hsotg->regset->nregs = ARRAY_SIZE(dwc2_regs);
hsotg->regset->base = hsotg->regs;
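	/* Expose a raw register dump through the generic debugfs regset32 helper. */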
debugfs_create_regset32("regdump", 0444, root, hsotg->regset);
return 0;
err:
debugfs_remove_recursive(hsotg->debug_root);
return ret;
}
void dwc2_debugfs_exit(struct dwc2_hsotg *hsotg)
{
debugfs_remove_recursive(hsotg->debug_root);
hsotg->debug_root = NULL;
}
| linux-master | drivers/usb/dwc2/debugfs.c |
// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
/*
* pci.c - DesignWare HS OTG Controller PCI driver
*
* Copyright (C) 2004-2013 Synopsys, Inc.
*/
/*
* Provides the initialization and cleanup entry points for the DWC_otg PCI
* driver
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/pci.h>
#include <linux/usb.h>
#include <linux/usb/hcd.h>
#include <linux/usb/ch11.h>
#include <linux/platform_device.h>
#include <linux/usb/usb_phy_generic.h>
#include "core.h"
static const char dwc2_driver_name[] = "dwc2-pci";
struct dwc2_pci_glue {
struct platform_device *dwc2;
struct platform_device *phy;
};
/**
* dwc2_pci_remove() - Provides the cleanup entry points for the DWC_otg PCI
* driver
*
* @pci: The programming view of DWC_otg PCI
*/
static void dwc2_pci_remove(struct pci_dev *pci)
{
struct dwc2_pci_glue *glue = pci_get_drvdata(pci);
platform_device_unregister(glue->dwc2);
usb_phy_generic_unregister(glue->phy);
pci_set_drvdata(pci, NULL);
}
static int dwc2_pci_probe(struct pci_dev *pci,
const struct pci_device_id *id)
{
struct resource res[2];
struct platform_device *dwc2;
struct platform_device *phy;
int ret;
struct device *dev = &pci->dev;
struct dwc2_pci_glue *glue;
ret = pcim_enable_device(pci);
if (ret) {
dev_err(dev, "failed to enable pci device\n");
return -ENODEV;
}
pci_set_master(pci);
phy = usb_phy_generic_register();
if (IS_ERR(phy)) {
dev_err(dev, "error registering generic PHY (%ld)\n",
PTR_ERR(phy));
return PTR_ERR(phy);
}
dwc2 = platform_device_alloc("dwc2", PLATFORM_DEVID_AUTO);
if (!dwc2) {
dev_err(dev, "couldn't allocate dwc2 device\n");
ret = -ENOMEM;
goto err;
}
memset(res, 0x00, sizeof(struct resource) * ARRAY_SIZE(res));
res[0].start = pci_resource_start(pci, 0);
res[0].end = pci_resource_end(pci, 0);
res[0].name = "dwc2";
res[0].flags = IORESOURCE_MEM;
res[1].start = pci->irq;
res[1].name = "dwc2";
res[1].flags = IORESOURCE_IRQ;
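	/* Hand PCI BAR 0 and the legacy interrupt line to the "dwc2" platform device. */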
ret = platform_device_add_resources(dwc2, res, ARRAY_SIZE(res));
if (ret) {
dev_err(dev, "couldn't add resources to dwc2 device\n");
goto err;
}
dwc2->dev.parent = dev;
glue = devm_kzalloc(dev, sizeof(*glue), GFP_KERNEL);
if (!glue) {
ret = -ENOMEM;
goto err;
}
ret = platform_device_add(dwc2);
if (ret) {
dev_err(dev, "failed to register dwc2 device\n");
goto err;
}
glue->phy = phy;
glue->dwc2 = dwc2;
pci_set_drvdata(pci, glue);
return 0;
err:
usb_phy_generic_unregister(phy);
platform_device_put(dwc2);
return ret;
}
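/*
 * PCI ID table referenced by the driver structure below. The entries here are
 * a reconstruction (the Synopsys HAPS platform and the STMicroelectronics OTG
 * controller are the devices this glue is known to target); adjust if the IDs
 * in your tree differ.
 */
#define PCI_PRODUCT_ID_HAPS_HSOTG	0xabc0
static const struct pci_device_id dwc2_pci_ids[] = {
	{
		PCI_DEVICE(PCI_VENDOR_ID_SYNOPSYS, PCI_PRODUCT_ID_HAPS_HSOTG),
	},
	{
		PCI_DEVICE(PCI_VENDOR_ID_STMICRO,
			   PCI_DEVICE_ID_STMICRO_USB_OTG),
	},
	{ /* end: all zeroes */ }
};
MODULE_DEVICE_TABLE(pci, dwc2_pci_ids);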
static struct pci_driver dwc2_pci_driver = {
.name = dwc2_driver_name,
.id_table = dwc2_pci_ids,
.probe = dwc2_pci_probe,
.remove = dwc2_pci_remove,
};
module_pci_driver(dwc2_pci_driver);
MODULE_DESCRIPTION("DESIGNWARE HS OTG PCI Bus Glue");
MODULE_AUTHOR("Synopsys, Inc.");
MODULE_LICENSE("Dual BSD/GPL");
| linux-master | drivers/usb/dwc2/pci.c |
// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
/*
* platform.c - DesignWare HS OTG Controller platform driver
*
* Copyright (C) Matthijs Kooijman <[email protected]>
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/clk.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/of.h>
#include <linux/mutex.h>
#include <linux/platform_device.h>
#include <linux/phy/phy.h>
#include <linux/platform_data/s3c-hsotg.h>
#include <linux/reset.h>
#include <linux/usb/of.h>
#include "core.h"
#include "hcd.h"
#include "debug.h"
static const char dwc2_driver_name[] = "dwc2";
/*
* Check the dr_mode against the module configuration and hardware
* capabilities.
*
* The hardware, module, and dr_mode, can each be set to host, device,
* or otg. Check that all these values are compatible and adjust the
* value of dr_mode if possible.
*
* actual
* HW MOD dr_mode dr_mode
* ------------------------------
* HST HST any : HST
* HST DEV any : ---
* HST OTG any : HST
*
* DEV HST any : ---
* DEV DEV any : DEV
* DEV OTG any : DEV
*
* OTG HST any : HST
* OTG DEV any : DEV
* OTG OTG any : dr_mode
*/
static int dwc2_get_dr_mode(struct dwc2_hsotg *hsotg)
{
enum usb_dr_mode mode;
hsotg->dr_mode = usb_get_dr_mode(hsotg->dev);
if (hsotg->dr_mode == USB_DR_MODE_UNKNOWN)
hsotg->dr_mode = USB_DR_MODE_OTG;
mode = hsotg->dr_mode;
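	/* Clamp dr_mode to what the hardware and the Kconfig selection support. */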
if (dwc2_hw_is_device(hsotg)) {
if (IS_ENABLED(CONFIG_USB_DWC2_HOST)) {
dev_err(hsotg->dev,
"Controller does not support host mode.\n");
return -EINVAL;
}
mode = USB_DR_MODE_PERIPHERAL;
} else if (dwc2_hw_is_host(hsotg)) {
if (IS_ENABLED(CONFIG_USB_DWC2_PERIPHERAL)) {
dev_err(hsotg->dev,
"Controller does not support device mode.\n");
return -EINVAL;
}
mode = USB_DR_MODE_HOST;
} else {
if (IS_ENABLED(CONFIG_USB_DWC2_HOST))
mode = USB_DR_MODE_HOST;
else if (IS_ENABLED(CONFIG_USB_DWC2_PERIPHERAL))
mode = USB_DR_MODE_PERIPHERAL;
}
if (mode != hsotg->dr_mode) {
dev_warn(hsotg->dev,
"Configuration mismatch. dr_mode forced to %s\n",
mode == USB_DR_MODE_HOST ? "host" : "device");
hsotg->dr_mode = mode;
}
return 0;
}
static int __dwc2_lowlevel_hw_enable(struct dwc2_hsotg *hsotg)
{
struct platform_device *pdev = to_platform_device(hsotg->dev);
int ret;
ret = regulator_bulk_enable(ARRAY_SIZE(hsotg->supplies),
hsotg->supplies);
if (ret)
return ret;
if (hsotg->utmi_clk) {
ret = clk_prepare_enable(hsotg->utmi_clk);
if (ret)
goto err_dis_reg;
}
if (hsotg->clk) {
ret = clk_prepare_enable(hsotg->clk);
if (ret)
goto err_dis_utmi_clk;
}
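	/*
	 * Bring up whichever PHY abstraction is present: a legacy USB PHY,
	 * platform callbacks, or the generic PHY framework.
	 */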
if (hsotg->uphy) {
ret = usb_phy_init(hsotg->uphy);
} else if (hsotg->plat && hsotg->plat->phy_init) {
ret = hsotg->plat->phy_init(pdev, hsotg->plat->phy_type);
} else {
ret = phy_init(hsotg->phy);
if (ret == 0) {
ret = phy_power_on(hsotg->phy);
if (ret)
phy_exit(hsotg->phy);
}
}
if (ret)
goto err_dis_clk;
return 0;
err_dis_clk:
if (hsotg->clk)
clk_disable_unprepare(hsotg->clk);
err_dis_utmi_clk:
if (hsotg->utmi_clk)
clk_disable_unprepare(hsotg->utmi_clk);
err_dis_reg:
regulator_bulk_disable(ARRAY_SIZE(hsotg->supplies), hsotg->supplies);
return ret;
}
/**
* dwc2_lowlevel_hw_enable - enable platform lowlevel hw resources
* @hsotg: The driver state
*
* A wrapper for platform code responsible for controlling
* low-level USB platform resources (phy, clock, regulators)
*/
int dwc2_lowlevel_hw_enable(struct dwc2_hsotg *hsotg)
{
int ret = __dwc2_lowlevel_hw_enable(hsotg);
if (ret == 0)
hsotg->ll_hw_enabled = true;
return ret;
}
static int __dwc2_lowlevel_hw_disable(struct dwc2_hsotg *hsotg)
{
struct platform_device *pdev = to_platform_device(hsotg->dev);
int ret = 0;
if (hsotg->uphy) {
usb_phy_shutdown(hsotg->uphy);
} else if (hsotg->plat && hsotg->plat->phy_exit) {
ret = hsotg->plat->phy_exit(pdev, hsotg->plat->phy_type);
} else {
ret = phy_power_off(hsotg->phy);
if (ret == 0)
ret = phy_exit(hsotg->phy);
}
if (ret)
return ret;
if (hsotg->clk)
clk_disable_unprepare(hsotg->clk);
if (hsotg->utmi_clk)
clk_disable_unprepare(hsotg->utmi_clk);
return regulator_bulk_disable(ARRAY_SIZE(hsotg->supplies), hsotg->supplies);
}
/**
* dwc2_lowlevel_hw_disable - disable platform lowlevel hw resources
* @hsotg: The driver state
*
* A wrapper for platform code responsible for controlling
* low-level USB platform resources (phy, clock, regulators)
*/
int dwc2_lowlevel_hw_disable(struct dwc2_hsotg *hsotg)
{
int ret = __dwc2_lowlevel_hw_disable(hsotg);
if (ret == 0)
hsotg->ll_hw_enabled = false;
return ret;
}
static void dwc2_reset_control_assert(void *data)
{
reset_control_assert(data);
}
static int dwc2_lowlevel_hw_init(struct dwc2_hsotg *hsotg)
{
int i, ret;
hsotg->reset = devm_reset_control_get_optional(hsotg->dev, "dwc2");
if (IS_ERR(hsotg->reset))
return dev_err_probe(hsotg->dev, PTR_ERR(hsotg->reset),
"error getting reset control\n");
reset_control_deassert(hsotg->reset);
ret = devm_add_action_or_reset(hsotg->dev, dwc2_reset_control_assert,
hsotg->reset);
if (ret)
return ret;
hsotg->reset_ecc = devm_reset_control_get_optional(hsotg->dev, "dwc2-ecc");
if (IS_ERR(hsotg->reset_ecc))
return dev_err_probe(hsotg->dev, PTR_ERR(hsotg->reset_ecc),
"error getting reset control for ecc\n");
reset_control_deassert(hsotg->reset_ecc);
ret = devm_add_action_or_reset(hsotg->dev, dwc2_reset_control_assert,
hsotg->reset_ecc);
if (ret)
return ret;
/*
* Attempt to find a generic PHY, then look for an old style
* USB PHY and then fall back to pdata
*/
hsotg->phy = devm_phy_get(hsotg->dev, "usb2-phy");
if (IS_ERR(hsotg->phy)) {
ret = PTR_ERR(hsotg->phy);
switch (ret) {
case -ENODEV:
case -ENOSYS:
hsotg->phy = NULL;
break;
default:
return dev_err_probe(hsotg->dev, ret, "error getting phy\n");
}
}
if (!hsotg->phy) {
hsotg->uphy = devm_usb_get_phy(hsotg->dev, USB_PHY_TYPE_USB2);
if (IS_ERR(hsotg->uphy)) {
ret = PTR_ERR(hsotg->uphy);
switch (ret) {
case -ENODEV:
case -ENXIO:
hsotg->uphy = NULL;
break;
default:
return dev_err_probe(hsotg->dev, ret, "error getting usb phy\n");
}
}
}
hsotg->plat = dev_get_platdata(hsotg->dev);
/* Clock */
hsotg->clk = devm_clk_get_optional(hsotg->dev, "otg");
if (IS_ERR(hsotg->clk))
return dev_err_probe(hsotg->dev, PTR_ERR(hsotg->clk), "cannot get otg clock\n");
hsotg->utmi_clk = devm_clk_get_optional(hsotg->dev, "utmi");
if (IS_ERR(hsotg->utmi_clk))
return dev_err_probe(hsotg->dev, PTR_ERR(hsotg->utmi_clk),
"cannot get utmi clock\n");
/* Regulators */
for (i = 0; i < ARRAY_SIZE(hsotg->supplies); i++)
hsotg->supplies[i].supply = dwc2_hsotg_supply_names[i];
ret = devm_regulator_bulk_get(hsotg->dev, ARRAY_SIZE(hsotg->supplies),
hsotg->supplies);
if (ret)
return dev_err_probe(hsotg->dev, ret, "failed to request supplies\n");
return 0;
}
/**
* dwc2_driver_remove() - Called when the DWC_otg core is unregistered with the
* DWC_otg driver
*
* @dev: Platform device
*
* This routine is called, for example, when the rmmod command is executed. The
* device may or may not be electrically present. If it is present, the driver
* stops device processing. Any resources used on behalf of this device are
* freed.
*/
static void dwc2_driver_remove(struct platform_device *dev)
{
struct dwc2_hsotg *hsotg = platform_get_drvdata(dev);
struct dwc2_gregs_backup *gr;
int ret = 0;
gr = &hsotg->gr_backup;
/* Exit Hibernation when driver is removed. */
if (hsotg->hibernated) {
if (gr->gotgctl & GOTGCTL_CURMODE_HOST)
ret = dwc2_exit_hibernation(hsotg, 0, 0, 1);
else
ret = dwc2_exit_hibernation(hsotg, 0, 0, 0);
if (ret)
dev_err(hsotg->dev,
"exit hibernation failed.\n");
}
/* Exit Partial Power Down when driver is removed. */
if (hsotg->in_ppd) {
ret = dwc2_exit_partial_power_down(hsotg, 0, true);
if (ret)
dev_err(hsotg->dev,
"exit partial_power_down failed\n");
}
/* Exit clock gating when driver is removed. */
if (hsotg->params.power_down == DWC2_POWER_DOWN_PARAM_NONE &&
hsotg->bus_suspended) {
if (dwc2_is_device_mode(hsotg))
dwc2_gadget_exit_clock_gating(hsotg, 0);
else
dwc2_host_exit_clock_gating(hsotg, 0);
}
dwc2_debugfs_exit(hsotg);
if (hsotg->hcd_enabled)
dwc2_hcd_remove(hsotg);
if (hsotg->gadget_enabled)
dwc2_hsotg_remove(hsotg);
dwc2_drd_exit(hsotg);
if (hsotg->params.activate_stm_id_vb_detection)
regulator_disable(hsotg->usb33d);
if (hsotg->ll_hw_enabled)
dwc2_lowlevel_hw_disable(hsotg);
}
/**
* dwc2_driver_shutdown() - Called on device shutdown
*
* @dev: Platform device
*
* In specific conditions (involving usb hubs) dwc2 devices can create a
* lot of interrupts, even to the point of overwhelming devices running
* at low frequencies. Some devices need to do special clock handling
* at shutdown-time which may bring the system clock below the threshold
* of being able to handle the dwc2 interrupts. Disabling dwc2-irqs
* prevents reboots/poweroffs from getting stuck in such cases.
*/
static void dwc2_driver_shutdown(struct platform_device *dev)
{
struct dwc2_hsotg *hsotg = platform_get_drvdata(dev);
dwc2_disable_global_interrupts(hsotg);
synchronize_irq(hsotg->irq);
}
/**
* dwc2_check_core_endianness() - Returns true if core and AHB have
* opposite endianness.
* @hsotg: Programming view of the DWC_otg controller.
*/
static bool dwc2_check_core_endianness(struct dwc2_hsotg *hsotg)
{
u32 snpsid;
snpsid = ioread32(hsotg->regs + GSNPSID);
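	/* If GSNPSID reads back as a known ID, core and AHB endianness match. */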
if ((snpsid & GSNPSID_ID_MASK) == DWC2_OTG_ID ||
(snpsid & GSNPSID_ID_MASK) == DWC2_FS_IOT_ID ||
(snpsid & GSNPSID_ID_MASK) == DWC2_HS_IOT_ID)
return false;
return true;
}
/**
* dwc2_check_core_version() - Check core version
*
* @hsotg: Programming view of the DWC_otg controller
*
*/
int dwc2_check_core_version(struct dwc2_hsotg *hsotg)
{
struct dwc2_hw_params *hw = &hsotg->hw_params;
/*
* Attempt to ensure this device is really a DWC_otg Controller.
* Read and verify the GSNPSID register contents. The value should be
* 0x45f4xxxx, 0x5531xxxx or 0x5532xxxx
*/
hw->snpsid = dwc2_readl(hsotg, GSNPSID);
if ((hw->snpsid & GSNPSID_ID_MASK) != DWC2_OTG_ID &&
(hw->snpsid & GSNPSID_ID_MASK) != DWC2_FS_IOT_ID &&
(hw->snpsid & GSNPSID_ID_MASK) != DWC2_HS_IOT_ID) {
dev_err(hsotg->dev, "Bad value for GSNPSID: 0x%08x\n",
hw->snpsid);
return -ENODEV;
}
dev_dbg(hsotg->dev, "Core Release: %1x.%1x%1x%1x (snpsid=%x)\n",
hw->snpsid >> 12 & 0xf, hw->snpsid >> 8 & 0xf,
hw->snpsid >> 4 & 0xf, hw->snpsid & 0xf, hw->snpsid);
return 0;
}
/**
* dwc2_driver_probe() - Called when the DWC_otg core is bound to the DWC_otg
* driver
*
* @dev: Platform device
*
* This routine creates the driver components required to control the device
* (core, HCD, and PCD) and initializes the device. The driver components are
* stored in a dwc2_hsotg structure. A reference to the dwc2_hsotg is saved
* in the device private data. This allows the driver to access the dwc2_hsotg
* structure on subsequent calls to driver methods for this device.
*/
static int dwc2_driver_probe(struct platform_device *dev)
{
struct dwc2_hsotg *hsotg;
struct resource *res;
int retval;
hsotg = devm_kzalloc(&dev->dev, sizeof(*hsotg), GFP_KERNEL);
if (!hsotg)
return -ENOMEM;
hsotg->dev = &dev->dev;
/*
* Use reasonable defaults so platforms don't have to provide these.
*/
if (!dev->dev.dma_mask)
dev->dev.dma_mask = &dev->dev.coherent_dma_mask;
retval = dma_set_coherent_mask(&dev->dev, DMA_BIT_MASK(32));
if (retval) {
dev_err(&dev->dev, "can't set coherent DMA mask: %d\n", retval);
return retval;
}
hsotg->regs = devm_platform_get_and_ioremap_resource(dev, 0, &res);
if (IS_ERR(hsotg->regs))
return PTR_ERR(hsotg->regs);
dev_dbg(&dev->dev, "mapped PA %08lx to VA %p\n",
(unsigned long)res->start, hsotg->regs);
retval = dwc2_lowlevel_hw_init(hsotg);
if (retval)
return retval;
spin_lock_init(&hsotg->lock);
hsotg->irq = platform_get_irq(dev, 0);
if (hsotg->irq < 0)
return hsotg->irq;
dev_dbg(hsotg->dev, "registering common handler for irq%d\n",
hsotg->irq);
retval = devm_request_irq(hsotg->dev, hsotg->irq,
dwc2_handle_common_intr, IRQF_SHARED,
dev_name(hsotg->dev), hsotg);
if (retval)
return retval;
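	/* The vbus supply is optional; ignore -ENODEV and continue without it. */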
hsotg->vbus_supply = devm_regulator_get_optional(hsotg->dev, "vbus");
if (IS_ERR(hsotg->vbus_supply)) {
retval = PTR_ERR(hsotg->vbus_supply);
hsotg->vbus_supply = NULL;
if (retval != -ENODEV)
return retval;
}
retval = dwc2_lowlevel_hw_enable(hsotg);
if (retval)
return retval;
hsotg->needs_byte_swap = dwc2_check_core_endianness(hsotg);
retval = dwc2_get_dr_mode(hsotg);
if (retval)
goto error;
hsotg->need_phy_for_wake =
of_property_read_bool(dev->dev.of_node,
"snps,need-phy-for-wake");
/*
* Before performing any core related operations
* check core version.
*/
retval = dwc2_check_core_version(hsotg);
if (retval)
goto error;
/*
	 * Reset before dwc2_get_hwparams() so it reads the true power-on
	 * reset values from the registers.
*/
retval = dwc2_core_reset(hsotg, false);
if (retval)
goto error;
/* Detect config values from hardware */
retval = dwc2_get_hwparams(hsotg);
if (retval)
goto error;
/*
* For OTG cores, set the force mode bits to reflect the value
* of dr_mode. Force mode bits should not be touched at any
* other time after this.
*/
dwc2_force_dr_mode(hsotg);
retval = dwc2_init_params(hsotg);
if (retval)
goto error;
if (hsotg->params.activate_stm_id_vb_detection) {
u32 ggpio;
hsotg->usb33d = devm_regulator_get(hsotg->dev, "usb33d");
if (IS_ERR(hsotg->usb33d)) {
retval = PTR_ERR(hsotg->usb33d);
dev_err_probe(hsotg->dev, retval, "failed to request usb33d supply\n");
goto error;
}
retval = regulator_enable(hsotg->usb33d);
if (retval) {
dev_err_probe(hsotg->dev, retval, "failed to enable usb33d supply\n");
goto error;
}
ggpio = dwc2_readl(hsotg, GGPIO);
ggpio |= GGPIO_STM32_OTG_GCCFG_IDEN;
ggpio |= GGPIO_STM32_OTG_GCCFG_VBDEN;
dwc2_writel(hsotg, ggpio, GGPIO);
/* ID/VBUS detection startup time */
usleep_range(5000, 7000);
}
retval = dwc2_drd_init(hsotg);
if (retval) {
dev_err_probe(hsotg->dev, retval, "failed to initialize dual-role\n");
goto error_init;
}
if (hsotg->dr_mode != USB_DR_MODE_HOST) {
retval = dwc2_gadget_init(hsotg);
if (retval)
goto error_drd;
hsotg->gadget_enabled = 1;
}
/*
* If we need PHY for wakeup we must be wakeup capable.
* When we have a device that can wake without the PHY we
* can adjust this condition.
*/
if (hsotg->need_phy_for_wake)
device_set_wakeup_capable(&dev->dev, true);
hsotg->reset_phy_on_wake =
of_property_read_bool(dev->dev.of_node,
"snps,reset-phy-on-wake");
if (hsotg->reset_phy_on_wake && !hsotg->phy) {
dev_warn(hsotg->dev,
"Quirk reset-phy-on-wake only supports generic PHYs\n");
hsotg->reset_phy_on_wake = false;
}
if (hsotg->dr_mode != USB_DR_MODE_PERIPHERAL) {
retval = dwc2_hcd_init(hsotg);
if (retval) {
if (hsotg->gadget_enabled)
dwc2_hsotg_remove(hsotg);
goto error_drd;
}
hsotg->hcd_enabled = 1;
}
platform_set_drvdata(dev, hsotg);
hsotg->hibernated = 0;
dwc2_debugfs_init(hsotg);
/* Gadget code manages lowlevel hw on its own */
if (hsotg->dr_mode == USB_DR_MODE_PERIPHERAL)
dwc2_lowlevel_hw_disable(hsotg);
#if IS_ENABLED(CONFIG_USB_DWC2_PERIPHERAL) || \
IS_ENABLED(CONFIG_USB_DWC2_DUAL_ROLE)
/* Postponed adding a new gadget to the udc class driver list */
if (hsotg->gadget_enabled) {
retval = usb_add_gadget_udc(hsotg->dev, &hsotg->gadget);
if (retval) {
hsotg->gadget.udc = NULL;
dwc2_hsotg_remove(hsotg);
goto error_debugfs;
}
}
#endif /* CONFIG_USB_DWC2_PERIPHERAL || CONFIG_USB_DWC2_DUAL_ROLE */
return 0;
#if IS_ENABLED(CONFIG_USB_DWC2_PERIPHERAL) || \
IS_ENABLED(CONFIG_USB_DWC2_DUAL_ROLE)
error_debugfs:
dwc2_debugfs_exit(hsotg);
if (hsotg->hcd_enabled)
dwc2_hcd_remove(hsotg);
#endif
error_drd:
dwc2_drd_exit(hsotg);
error_init:
if (hsotg->params.activate_stm_id_vb_detection)
regulator_disable(hsotg->usb33d);
error:
if (hsotg->ll_hw_enabled)
dwc2_lowlevel_hw_disable(hsotg);
return retval;
}
static int __maybe_unused dwc2_suspend(struct device *dev)
{
struct dwc2_hsotg *dwc2 = dev_get_drvdata(dev);
bool is_device_mode = dwc2_is_device_mode(dwc2);
int ret = 0;
if (is_device_mode)
dwc2_hsotg_suspend(dwc2);
dwc2_drd_suspend(dwc2);
if (dwc2->params.activate_stm_id_vb_detection) {
unsigned long flags;
u32 ggpio, gotgctl;
/*
* Need to force the mode to the current mode to avoid Mode
* Mismatch Interrupt when ID detection will be disabled.
*/
dwc2_force_mode(dwc2, !is_device_mode);
spin_lock_irqsave(&dwc2->lock, flags);
gotgctl = dwc2_readl(dwc2, GOTGCTL);
/* bypass debounce filter, enable overrides */
gotgctl |= GOTGCTL_DBNCE_FLTR_BYPASS;
gotgctl |= GOTGCTL_BVALOEN | GOTGCTL_AVALOEN;
/* Force A / B session if needed */
if (gotgctl & GOTGCTL_ASESVLD)
gotgctl |= GOTGCTL_AVALOVAL;
if (gotgctl & GOTGCTL_BSESVLD)
gotgctl |= GOTGCTL_BVALOVAL;
dwc2_writel(dwc2, gotgctl, GOTGCTL);
spin_unlock_irqrestore(&dwc2->lock, flags);
ggpio = dwc2_readl(dwc2, GGPIO);
ggpio &= ~GGPIO_STM32_OTG_GCCFG_IDEN;
ggpio &= ~GGPIO_STM32_OTG_GCCFG_VBDEN;
dwc2_writel(dwc2, ggpio, GGPIO);
regulator_disable(dwc2->usb33d);
}
if (dwc2->ll_hw_enabled &&
(is_device_mode || dwc2_host_can_poweroff_phy(dwc2))) {
ret = __dwc2_lowlevel_hw_disable(dwc2);
dwc2->phy_off_for_suspend = true;
}
return ret;
}
static int __maybe_unused dwc2_resume(struct device *dev)
{
struct dwc2_hsotg *dwc2 = dev_get_drvdata(dev);
int ret = 0;
if (dwc2->phy_off_for_suspend && dwc2->ll_hw_enabled) {
ret = __dwc2_lowlevel_hw_enable(dwc2);
if (ret)
return ret;
}
dwc2->phy_off_for_suspend = false;
if (dwc2->params.activate_stm_id_vb_detection) {
unsigned long flags;
u32 ggpio, gotgctl;
ret = regulator_enable(dwc2->usb33d);
if (ret)
return ret;
ggpio = dwc2_readl(dwc2, GGPIO);
ggpio |= GGPIO_STM32_OTG_GCCFG_IDEN;
ggpio |= GGPIO_STM32_OTG_GCCFG_VBDEN;
dwc2_writel(dwc2, ggpio, GGPIO);
/* ID/VBUS detection startup time */
usleep_range(5000, 7000);
spin_lock_irqsave(&dwc2->lock, flags);
gotgctl = dwc2_readl(dwc2, GOTGCTL);
gotgctl &= ~GOTGCTL_DBNCE_FLTR_BYPASS;
gotgctl &= ~(GOTGCTL_BVALOEN | GOTGCTL_AVALOEN |
GOTGCTL_BVALOVAL | GOTGCTL_AVALOVAL);
dwc2_writel(dwc2, gotgctl, GOTGCTL);
spin_unlock_irqrestore(&dwc2->lock, flags);
}
if (!dwc2->role_sw) {
/* Need to restore FORCEDEVMODE/FORCEHOSTMODE */
dwc2_force_dr_mode(dwc2);
} else {
dwc2_drd_resume(dwc2);
}
if (dwc2_is_device_mode(dwc2))
ret = dwc2_hsotg_resume(dwc2);
return ret;
}
static const struct dev_pm_ops dwc2_dev_pm_ops = {
SET_SYSTEM_SLEEP_PM_OPS(dwc2_suspend, dwc2_resume)
};
static struct platform_driver dwc2_platform_driver = {
.driver = {
.name = dwc2_driver_name,
.of_match_table = dwc2_of_match_table,
.acpi_match_table = ACPI_PTR(dwc2_acpi_match),
.pm = &dwc2_dev_pm_ops,
},
.probe = dwc2_driver_probe,
.remove_new = dwc2_driver_remove,
.shutdown = dwc2_driver_shutdown,
};
module_platform_driver(dwc2_platform_driver);
| linux-master | drivers/usb/dwc2/platform.c |
// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
/*
* core.c - DesignWare HS OTG Controller common routines
*
* Copyright (C) 2004-2013 Synopsys, Inc.
*/
/*
* The Core code provides basic services for accessing and managing the
* DWC_otg hardware. These services are used by both the Host Controller
* Driver and the Peripheral Controller Driver.
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/dma-mapping.h>
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/usb.h>
#include <linux/usb/hcd.h>
#include <linux/usb/ch11.h>
#include "core.h"
#include "hcd.h"
/**
* dwc2_backup_global_registers() - Backup global controller registers.
 * When suspending the usb bus, the global registers need to be backed up
 * if controller power is disabled once suspended.
*
* @hsotg: Programming view of the DWC_otg controller
*/
int dwc2_backup_global_registers(struct dwc2_hsotg *hsotg)
{
struct dwc2_gregs_backup *gr;
dev_dbg(hsotg->dev, "%s\n", __func__);
/* Backup global regs */
gr = &hsotg->gr_backup;
gr->gotgctl = dwc2_readl(hsotg, GOTGCTL);
gr->gintmsk = dwc2_readl(hsotg, GINTMSK);
gr->gahbcfg = dwc2_readl(hsotg, GAHBCFG);
gr->gusbcfg = dwc2_readl(hsotg, GUSBCFG);
gr->grxfsiz = dwc2_readl(hsotg, GRXFSIZ);
gr->gnptxfsiz = dwc2_readl(hsotg, GNPTXFSIZ);
gr->gdfifocfg = dwc2_readl(hsotg, GDFIFOCFG);
gr->pcgcctl1 = dwc2_readl(hsotg, PCGCCTL1);
gr->glpmcfg = dwc2_readl(hsotg, GLPMCFG);
gr->gi2cctl = dwc2_readl(hsotg, GI2CCTL);
gr->pcgcctl = dwc2_readl(hsotg, PCGCTL);
gr->valid = true;
return 0;
}
/**
* dwc2_restore_global_registers() - Restore controller global registers.
 * When resuming the usb bus, the global registers need to be restored
 * if controller power was disabled.
*
* @hsotg: Programming view of the DWC_otg controller
*/
int dwc2_restore_global_registers(struct dwc2_hsotg *hsotg)
{
struct dwc2_gregs_backup *gr;
dev_dbg(hsotg->dev, "%s\n", __func__);
/* Restore global regs */
gr = &hsotg->gr_backup;
if (!gr->valid) {
dev_err(hsotg->dev, "%s: no global registers to restore\n",
__func__);
return -EINVAL;
}
gr->valid = false;
dwc2_writel(hsotg, 0xffffffff, GINTSTS);
dwc2_writel(hsotg, gr->gotgctl, GOTGCTL);
dwc2_writel(hsotg, gr->gintmsk, GINTMSK);
dwc2_writel(hsotg, gr->gusbcfg, GUSBCFG);
dwc2_writel(hsotg, gr->gahbcfg, GAHBCFG);
dwc2_writel(hsotg, gr->grxfsiz, GRXFSIZ);
dwc2_writel(hsotg, gr->gnptxfsiz, GNPTXFSIZ);
dwc2_writel(hsotg, gr->gdfifocfg, GDFIFOCFG);
dwc2_writel(hsotg, gr->pcgcctl1, PCGCCTL1);
dwc2_writel(hsotg, gr->glpmcfg, GLPMCFG);
dwc2_writel(hsotg, gr->pcgcctl, PCGCTL);
dwc2_writel(hsotg, gr->gi2cctl, GI2CCTL);
return 0;
}
/**
* dwc2_exit_partial_power_down() - Exit controller from Partial Power Down.
*
* @hsotg: Programming view of the DWC_otg controller
* @rem_wakeup: indicates whether resume is initiated by Reset.
* @restore: Controller registers need to be restored
*/
int dwc2_exit_partial_power_down(struct dwc2_hsotg *hsotg, int rem_wakeup,
bool restore)
{
struct dwc2_gregs_backup *gr;
gr = &hsotg->gr_backup;
/*
	 * Restore host or device registers for the same mode the core was in
	 * when it entered partial power down, determined from the
	 * "GOTGCTL_CURMODE_HOST" bit in the backed-up "gotgctl" register.
*/
if (gr->gotgctl & GOTGCTL_CURMODE_HOST)
return dwc2_host_exit_partial_power_down(hsotg, rem_wakeup,
restore);
else
return dwc2_gadget_exit_partial_power_down(hsotg, restore);
}
/**
* dwc2_enter_partial_power_down() - Put controller in Partial Power Down.
*
* @hsotg: Programming view of the DWC_otg controller
*/
int dwc2_enter_partial_power_down(struct dwc2_hsotg *hsotg)
{
if (dwc2_is_host_mode(hsotg))
return dwc2_host_enter_partial_power_down(hsotg);
else
return dwc2_gadget_enter_partial_power_down(hsotg);
}
/**
 * dwc2_restore_essential_regs() - Restore essential regs of core.
*
* @hsotg: Programming view of the DWC_otg controller
* @rmode: Restore mode, enabled in case of remote-wakeup.
* @is_host: Host or device mode.
*/
static void dwc2_restore_essential_regs(struct dwc2_hsotg *hsotg, int rmode,
int is_host)
{
u32 pcgcctl;
struct dwc2_gregs_backup *gr;
struct dwc2_dregs_backup *dr;
struct dwc2_hregs_backup *hr;
gr = &hsotg->gr_backup;
dr = &hsotg->dr_backup;
hr = &hsotg->hr_backup;
dev_dbg(hsotg->dev, "%s: restoring essential regs\n", __func__);
/* Load restore values for [31:14] bits */
pcgcctl = (gr->pcgcctl & 0xffffc000);
/* If High Speed */
if (is_host) {
if (!(pcgcctl & PCGCTL_P2HD_PRT_SPD_MASK))
pcgcctl |= BIT(17);
} else {
if (!(pcgcctl & PCGCTL_P2HD_DEV_ENUM_SPD_MASK))
pcgcctl |= BIT(17);
}
dwc2_writel(hsotg, pcgcctl, PCGCTL);
	/* Unmask the global interrupt in GAHBCFG and restore it */
	dwc2_writel(hsotg, gr->gahbcfg | GAHBCFG_GLBL_INTR_EN, GAHBCFG);
	/* Clear all pending interrupts */
dwc2_writel(hsotg, 0xffffffff, GINTSTS);
/* Unmask restore done interrupt */
dwc2_writel(hsotg, GINTSTS_RESTOREDONE, GINTMSK);
/* Restore GUSBCFG and HCFG/DCFG */
dwc2_writel(hsotg, gr->gusbcfg, GUSBCFG);
if (is_host) {
dwc2_writel(hsotg, hr->hcfg, HCFG);
if (rmode)
pcgcctl |= PCGCTL_RESTOREMODE;
dwc2_writel(hsotg, pcgcctl, PCGCTL);
udelay(10);
pcgcctl |= PCGCTL_ESS_REG_RESTORED;
dwc2_writel(hsotg, pcgcctl, PCGCTL);
udelay(10);
} else {
dwc2_writel(hsotg, dr->dcfg, DCFG);
if (!rmode)
pcgcctl |= PCGCTL_RESTOREMODE | PCGCTL_RSTPDWNMODULE;
dwc2_writel(hsotg, pcgcctl, PCGCTL);
udelay(10);
pcgcctl |= PCGCTL_ESS_REG_RESTORED;
dwc2_writel(hsotg, pcgcctl, PCGCTL);
udelay(10);
}
}
/**
* dwc2_hib_restore_common() - Common part of restore routine.
*
* @hsotg: Programming view of the DWC_otg controller
* @rem_wakeup: Remote-wakeup, enabled in case of remote-wakeup.
* @is_host: Host or device mode.
*/
void dwc2_hib_restore_common(struct dwc2_hsotg *hsotg, int rem_wakeup,
int is_host)
{
u32 gpwrdn;
/* Switch-on voltage to the core */
gpwrdn = dwc2_readl(hsotg, GPWRDN);
gpwrdn &= ~GPWRDN_PWRDNSWTCH;
dwc2_writel(hsotg, gpwrdn, GPWRDN);
udelay(10);
/* Reset core */
gpwrdn = dwc2_readl(hsotg, GPWRDN);
gpwrdn &= ~GPWRDN_PWRDNRSTN;
dwc2_writel(hsotg, gpwrdn, GPWRDN);
udelay(10);
/* Enable restore from PMU */
gpwrdn = dwc2_readl(hsotg, GPWRDN);
gpwrdn |= GPWRDN_RESTORE;
dwc2_writel(hsotg, gpwrdn, GPWRDN);
udelay(10);
/* Disable Power Down Clamp */
gpwrdn = dwc2_readl(hsotg, GPWRDN);
gpwrdn &= ~GPWRDN_PWRDNCLMP;
dwc2_writel(hsotg, gpwrdn, GPWRDN);
udelay(50);
if (!is_host && rem_wakeup)
udelay(70);
/* Deassert reset core */
gpwrdn = dwc2_readl(hsotg, GPWRDN);
gpwrdn |= GPWRDN_PWRDNRSTN;
dwc2_writel(hsotg, gpwrdn, GPWRDN);
udelay(10);
/* Disable PMU interrupt */
gpwrdn = dwc2_readl(hsotg, GPWRDN);
gpwrdn &= ~GPWRDN_PMUINTSEL;
dwc2_writel(hsotg, gpwrdn, GPWRDN);
udelay(10);
/* Set Restore Essential Regs bit in PCGCCTL register */
dwc2_restore_essential_regs(hsotg, rem_wakeup, is_host);
/*
	 * Wait for the Restore_done interrupt. This mechanism of polling the
	 * interrupt is used to avoid any possible race conditions.
*/
if (dwc2_hsotg_wait_bit_set(hsotg, GINTSTS, GINTSTS_RESTOREDONE,
20000)) {
dev_dbg(hsotg->dev,
"%s: Restore Done wasn't generated here\n",
__func__);
} else {
dev_dbg(hsotg->dev, "restore done generated here\n");
/*
* To avoid restore done interrupt storm after restore is
* generated clear GINTSTS_RESTOREDONE bit.
*/
dwc2_writel(hsotg, GINTSTS_RESTOREDONE, GINTSTS);
}
}
/**
* dwc2_wait_for_mode() - Waits for the controller mode.
* @hsotg: Programming view of the DWC_otg controller.
* @host_mode: If true, waits for host mode, otherwise device mode.
*/
static void dwc2_wait_for_mode(struct dwc2_hsotg *hsotg,
bool host_mode)
{
ktime_t start;
ktime_t end;
unsigned int timeout = 110;
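	/* Poll for up to ~110 ms to allow the IDDIG debounce filter to settle. */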
dev_vdbg(hsotg->dev, "Waiting for %s mode\n",
host_mode ? "host" : "device");
start = ktime_get();
while (1) {
s64 ms;
if (dwc2_is_host_mode(hsotg) == host_mode) {
dev_vdbg(hsotg->dev, "%s mode set\n",
host_mode ? "Host" : "Device");
break;
}
end = ktime_get();
ms = ktime_to_ms(ktime_sub(end, start));
if (ms >= (s64)timeout) {
dev_warn(hsotg->dev, "%s: Couldn't set %s mode\n",
__func__, host_mode ? "host" : "device");
break;
}
usleep_range(1000, 2000);
}
}
/**
* dwc2_iddig_filter_enabled() - Returns true if the IDDIG debounce
* filter is enabled.
*
* @hsotg: Programming view of DWC_otg controller
*/
static bool dwc2_iddig_filter_enabled(struct dwc2_hsotg *hsotg)
{
u32 gsnpsid;
u32 ghwcfg4;
if (!dwc2_hw_is_otg(hsotg))
return false;
/* Check if core configuration includes the IDDIG filter. */
ghwcfg4 = dwc2_readl(hsotg, GHWCFG4);
if (!(ghwcfg4 & GHWCFG4_IDDIG_FILT_EN))
return false;
/*
* Check if the IDDIG debounce filter is bypassed. Available
* in core version >= 3.10a.
*/
gsnpsid = dwc2_readl(hsotg, GSNPSID);
if (gsnpsid >= DWC2_CORE_REV_3_10a) {
u32 gotgctl = dwc2_readl(hsotg, GOTGCTL);
if (gotgctl & GOTGCTL_DBNCE_FLTR_BYPASS)
return false;
}
return true;
}
/*
* dwc2_enter_hibernation() - Common function to enter hibernation.
*
* @hsotg: Programming view of the DWC_otg controller
* @is_host: True if core is in host mode.
*
* Return: 0 if successful, negative error code otherwise
*/
int dwc2_enter_hibernation(struct dwc2_hsotg *hsotg, int is_host)
{
if (is_host)
return dwc2_host_enter_hibernation(hsotg);
else
return dwc2_gadget_enter_hibernation(hsotg);
}
/*
* dwc2_exit_hibernation() - Common function to exit from hibernation.
*
* @hsotg: Programming view of the DWC_otg controller
* @rem_wakeup: Remote-wakeup, enabled in case of remote-wakeup.
* @reset: Enabled in case of restore with reset.
* @is_host: True if core is in host mode.
*
* Return: 0 if successful, negative error code otherwise
*/
int dwc2_exit_hibernation(struct dwc2_hsotg *hsotg, int rem_wakeup,
int reset, int is_host)
{
if (is_host)
return dwc2_host_exit_hibernation(hsotg, rem_wakeup, reset);
else
return dwc2_gadget_exit_hibernation(hsotg, rem_wakeup, reset);
}
/*
 * Do a soft reset of the core. Be careful with this because it
* resets all the internal state machines of the core.
*/
int dwc2_core_reset(struct dwc2_hsotg *hsotg, bool skip_wait)
{
u32 greset;
bool wait_for_host_mode = false;
dev_vdbg(hsotg->dev, "%s()\n", __func__);
/*
* If the current mode is host, either due to the force mode
* bit being set (which persists after core reset) or the
* connector id pin, a core soft reset will temporarily reset
* the mode to device. A delay from the IDDIG debounce filter
* will occur before going back to host mode.
*
* Determine whether we will go back into host mode after a
* reset and account for this delay after the reset.
*/
if (dwc2_iddig_filter_enabled(hsotg)) {
u32 gotgctl = dwc2_readl(hsotg, GOTGCTL);
u32 gusbcfg = dwc2_readl(hsotg, GUSBCFG);
if (!(gotgctl & GOTGCTL_CONID_B) ||
(gusbcfg & GUSBCFG_FORCEHOSTMODE)) {
wait_for_host_mode = true;
}
}
/* Core Soft Reset */
greset = dwc2_readl(hsotg, GRSTCTL);
greset |= GRSTCTL_CSFTRST;
dwc2_writel(hsotg, greset, GRSTCTL);
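	/*
	 * Cores before 4.20a clear CSFTRST by themselves; newer cores signal
	 * completion through CSFTRST_DONE, which must then be acknowledged.
	 */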
if ((hsotg->hw_params.snpsid & DWC2_CORE_REV_MASK) <
(DWC2_CORE_REV_4_20a & DWC2_CORE_REV_MASK)) {
if (dwc2_hsotg_wait_bit_clear(hsotg, GRSTCTL,
GRSTCTL_CSFTRST, 10000)) {
dev_warn(hsotg->dev, "%s: HANG! Soft Reset timeout GRSTCTL_CSFTRST\n",
__func__);
return -EBUSY;
}
} else {
if (dwc2_hsotg_wait_bit_set(hsotg, GRSTCTL,
GRSTCTL_CSFTRST_DONE, 10000)) {
dev_warn(hsotg->dev, "%s: HANG! Soft Reset timeout GRSTCTL_CSFTRST_DONE\n",
__func__);
return -EBUSY;
}
greset = dwc2_readl(hsotg, GRSTCTL);
greset &= ~GRSTCTL_CSFTRST;
greset |= GRSTCTL_CSFTRST_DONE;
dwc2_writel(hsotg, greset, GRSTCTL);
}
/*
	 * Switching from device mode to host mode by disconnecting the device
	 * cable makes the core enter and exit hibernation, but the FIFO map
	 * is not cleared. That leads to a WARNING (WARNING: CPU: 5 PID: 0 at
	 * drivers/usb/dwc2/gadget.c:307 dwc2_hsotg_init_fifo+0x12/0x152
	 * [dwc2]) when the micro A-to-B host cable is disconnected in host
	 * mode, because a core reset occurs.
	 * To avoid the WARNING, fifo_map is cleared here in dwc2_core_reset(),
	 * taking the configuration into account: it must be cleared only if
	 * the driver is configured in "CONFIG_USB_DWC2_PERIPHERAL" or
	 * "CONFIG_USB_DWC2_DUAL_ROLE" mode.
*/
dwc2_clear_fifo_map(hsotg);
/* Wait for AHB master IDLE state */
if (dwc2_hsotg_wait_bit_set(hsotg, GRSTCTL, GRSTCTL_AHBIDLE, 10000)) {
dev_warn(hsotg->dev, "%s: HANG! AHB Idle timeout GRSTCTL GRSTCTL_AHBIDLE\n",
__func__);
return -EBUSY;
}
if (wait_for_host_mode && !skip_wait)
dwc2_wait_for_mode(hsotg, true);
return 0;
}
/**
* dwc2_force_mode() - Force the mode of the controller.
*
* Forcing the mode is needed for two cases:
*
* 1) If the dr_mode is set to either HOST or PERIPHERAL we force the
* controller to stay in a particular mode regardless of ID pin
* changes. We do this once during probe.
*
* 2) During probe we want to read reset values of the hw
* configuration registers that are only available in either host or
* device mode. We may need to force the mode if the current mode does
* not allow us to access the register in the mode that we want.
*
* In either case it only makes sense to force the mode if the
* controller hardware is OTG capable.
*
* Checks are done in this function to determine whether doing a force
* would be valid or not.
*
 * If a force is done, it requires an IDDIG debounce filter delay if
* the filter is configured and enabled. We poll the current mode of
* the controller to account for this delay.
*
* @hsotg: Programming view of DWC_otg controller
* @host: Host mode flag
*/
void dwc2_force_mode(struct dwc2_hsotg *hsotg, bool host)
{
u32 gusbcfg;
u32 set;
u32 clear;
dev_dbg(hsotg->dev, "Forcing mode to %s\n", host ? "host" : "device");
/*
* Force mode has no effect if the hardware is not OTG.
*/
if (!dwc2_hw_is_otg(hsotg))
return;
/*
* If dr_mode is either peripheral or host only, there is no
* need to ever force the mode to the opposite mode.
*/
if (WARN_ON(host && hsotg->dr_mode == USB_DR_MODE_PERIPHERAL))
return;
if (WARN_ON(!host && hsotg->dr_mode == USB_DR_MODE_HOST))
return;
gusbcfg = dwc2_readl(hsotg, GUSBCFG);
set = host ? GUSBCFG_FORCEHOSTMODE : GUSBCFG_FORCEDEVMODE;
clear = host ? GUSBCFG_FORCEDEVMODE : GUSBCFG_FORCEHOSTMODE;
gusbcfg &= ~clear;
gusbcfg |= set;
dwc2_writel(hsotg, gusbcfg, GUSBCFG);
dwc2_wait_for_mode(hsotg, host);
return;
}
/**
* dwc2_clear_force_mode() - Clears the force mode bits.
*
* After clearing the bits, wait up to 100 ms to account for any
* potential IDDIG filter delay. We can't know if we expect this delay
* or not because the value of the connector ID status is affected by
* the force mode. We only need to call this once during probe if
* dr_mode == OTG.
*
* @hsotg: Programming view of DWC_otg controller
*/
static void dwc2_clear_force_mode(struct dwc2_hsotg *hsotg)
{
u32 gusbcfg;
if (!dwc2_hw_is_otg(hsotg))
return;
dev_dbg(hsotg->dev, "Clearing force mode bits\n");
gusbcfg = dwc2_readl(hsotg, GUSBCFG);
gusbcfg &= ~GUSBCFG_FORCEHOSTMODE;
gusbcfg &= ~GUSBCFG_FORCEDEVMODE;
dwc2_writel(hsotg, gusbcfg, GUSBCFG);
if (dwc2_iddig_filter_enabled(hsotg))
msleep(100);
}
/*
* Sets or clears force mode based on the dr_mode parameter.
*/
void dwc2_force_dr_mode(struct dwc2_hsotg *hsotg)
{
switch (hsotg->dr_mode) {
case USB_DR_MODE_HOST:
dwc2_force_mode(hsotg, true);
/*
* NOTE: This is required for some Rockchip SoC based
* platforms on their host-only dwc2.
*/
if (!dwc2_hw_is_otg(hsotg))
msleep(50);
break;
case USB_DR_MODE_PERIPHERAL:
dwc2_force_mode(hsotg, false);
break;
case USB_DR_MODE_OTG:
dwc2_clear_force_mode(hsotg);
break;
default:
dev_warn(hsotg->dev, "%s() Invalid dr_mode=%d\n",
__func__, hsotg->dr_mode);
break;
}
}
/*
* dwc2_enable_acg - enable active clock gating feature
*/
void dwc2_enable_acg(struct dwc2_hsotg *hsotg)
{
if (hsotg->params.acg_enable) {
u32 pcgcctl1 = dwc2_readl(hsotg, PCGCCTL1);
dev_dbg(hsotg->dev, "Enabling Active Clock Gating\n");
pcgcctl1 |= PCGCCTL1_GATEEN;
dwc2_writel(hsotg, pcgcctl1, PCGCCTL1);
}
}
/**
* dwc2_dump_host_registers() - Prints the host registers
*
* @hsotg: Programming view of DWC_otg controller
*
* NOTE: This function will be removed once the peripheral controller code
* is integrated and the driver is stable
*/
void dwc2_dump_host_registers(struct dwc2_hsotg *hsotg)
{
#ifdef DEBUG
u32 __iomem *addr;
int i;
dev_dbg(hsotg->dev, "Host Global Registers\n");
addr = hsotg->regs + HCFG;
dev_dbg(hsotg->dev, "HCFG @0x%08lX : 0x%08X\n",
(unsigned long)addr, dwc2_readl(hsotg, HCFG));
addr = hsotg->regs + HFIR;
dev_dbg(hsotg->dev, "HFIR @0x%08lX : 0x%08X\n",
(unsigned long)addr, dwc2_readl(hsotg, HFIR));
addr = hsotg->regs + HFNUM;
dev_dbg(hsotg->dev, "HFNUM @0x%08lX : 0x%08X\n",
(unsigned long)addr, dwc2_readl(hsotg, HFNUM));
addr = hsotg->regs + HPTXSTS;
dev_dbg(hsotg->dev, "HPTXSTS @0x%08lX : 0x%08X\n",
(unsigned long)addr, dwc2_readl(hsotg, HPTXSTS));
addr = hsotg->regs + HAINT;
dev_dbg(hsotg->dev, "HAINT @0x%08lX : 0x%08X\n",
(unsigned long)addr, dwc2_readl(hsotg, HAINT));
addr = hsotg->regs + HAINTMSK;
dev_dbg(hsotg->dev, "HAINTMSK @0x%08lX : 0x%08X\n",
(unsigned long)addr, dwc2_readl(hsotg, HAINTMSK));
if (hsotg->params.dma_desc_enable) {
addr = hsotg->regs + HFLBADDR;
dev_dbg(hsotg->dev, "HFLBADDR @0x%08lX : 0x%08X\n",
(unsigned long)addr, dwc2_readl(hsotg, HFLBADDR));
}
addr = hsotg->regs + HPRT0;
dev_dbg(hsotg->dev, "HPRT0 @0x%08lX : 0x%08X\n",
(unsigned long)addr, dwc2_readl(hsotg, HPRT0));
for (i = 0; i < hsotg->params.host_channels; i++) {
dev_dbg(hsotg->dev, "Host Channel %d Specific Registers\n", i);
addr = hsotg->regs + HCCHAR(i);
dev_dbg(hsotg->dev, "HCCHAR @0x%08lX : 0x%08X\n",
(unsigned long)addr, dwc2_readl(hsotg, HCCHAR(i)));
addr = hsotg->regs + HCSPLT(i);
dev_dbg(hsotg->dev, "HCSPLT @0x%08lX : 0x%08X\n",
(unsigned long)addr, dwc2_readl(hsotg, HCSPLT(i)));
addr = hsotg->regs + HCINT(i);
dev_dbg(hsotg->dev, "HCINT @0x%08lX : 0x%08X\n",
(unsigned long)addr, dwc2_readl(hsotg, HCINT(i)));
addr = hsotg->regs + HCINTMSK(i);
dev_dbg(hsotg->dev, "HCINTMSK @0x%08lX : 0x%08X\n",
(unsigned long)addr, dwc2_readl(hsotg, HCINTMSK(i)));
addr = hsotg->regs + HCTSIZ(i);
dev_dbg(hsotg->dev, "HCTSIZ @0x%08lX : 0x%08X\n",
(unsigned long)addr, dwc2_readl(hsotg, HCTSIZ(i)));
addr = hsotg->regs + HCDMA(i);
dev_dbg(hsotg->dev, "HCDMA @0x%08lX : 0x%08X\n",
(unsigned long)addr, dwc2_readl(hsotg, HCDMA(i)));
if (hsotg->params.dma_desc_enable) {
addr = hsotg->regs + HCDMAB(i);
dev_dbg(hsotg->dev, "HCDMAB @0x%08lX : 0x%08X\n",
(unsigned long)addr, dwc2_readl(hsotg,
HCDMAB(i)));
}
}
#endif
}
/**
* dwc2_dump_global_registers() - Prints the core global registers
*
* @hsotg: Programming view of DWC_otg controller
*
* NOTE: This function will be removed once the peripheral controller code
* is integrated and the driver is stable
*/
void dwc2_dump_global_registers(struct dwc2_hsotg *hsotg)
{
#ifdef DEBUG
u32 __iomem *addr;
dev_dbg(hsotg->dev, "Core Global Registers\n");
addr = hsotg->regs + GOTGCTL;
dev_dbg(hsotg->dev, "GOTGCTL @0x%08lX : 0x%08X\n",
(unsigned long)addr, dwc2_readl(hsotg, GOTGCTL));
addr = hsotg->regs + GOTGINT;
dev_dbg(hsotg->dev, "GOTGINT @0x%08lX : 0x%08X\n",
(unsigned long)addr, dwc2_readl(hsotg, GOTGINT));
addr = hsotg->regs + GAHBCFG;
dev_dbg(hsotg->dev, "GAHBCFG @0x%08lX : 0x%08X\n",
(unsigned long)addr, dwc2_readl(hsotg, GAHBCFG));
addr = hsotg->regs + GUSBCFG;
dev_dbg(hsotg->dev, "GUSBCFG @0x%08lX : 0x%08X\n",
(unsigned long)addr, dwc2_readl(hsotg, GUSBCFG));
addr = hsotg->regs + GRSTCTL;
dev_dbg(hsotg->dev, "GRSTCTL @0x%08lX : 0x%08X\n",
(unsigned long)addr, dwc2_readl(hsotg, GRSTCTL));
addr = hsotg->regs + GINTSTS;
dev_dbg(hsotg->dev, "GINTSTS @0x%08lX : 0x%08X\n",
(unsigned long)addr, dwc2_readl(hsotg, GINTSTS));
addr = hsotg->regs + GINTMSK;
dev_dbg(hsotg->dev, "GINTMSK @0x%08lX : 0x%08X\n",
(unsigned long)addr, dwc2_readl(hsotg, GINTMSK));
addr = hsotg->regs + GRXSTSR;
dev_dbg(hsotg->dev, "GRXSTSR @0x%08lX : 0x%08X\n",
(unsigned long)addr, dwc2_readl(hsotg, GRXSTSR));
addr = hsotg->regs + GRXFSIZ;
dev_dbg(hsotg->dev, "GRXFSIZ @0x%08lX : 0x%08X\n",
(unsigned long)addr, dwc2_readl(hsotg, GRXFSIZ));
addr = hsotg->regs + GNPTXFSIZ;
dev_dbg(hsotg->dev, "GNPTXFSIZ @0x%08lX : 0x%08X\n",
(unsigned long)addr, dwc2_readl(hsotg, GNPTXFSIZ));
addr = hsotg->regs + GNPTXSTS;
dev_dbg(hsotg->dev, "GNPTXSTS @0x%08lX : 0x%08X\n",
(unsigned long)addr, dwc2_readl(hsotg, GNPTXSTS));
addr = hsotg->regs + GI2CCTL;
dev_dbg(hsotg->dev, "GI2CCTL @0x%08lX : 0x%08X\n",
(unsigned long)addr, dwc2_readl(hsotg, GI2CCTL));
addr = hsotg->regs + GPVNDCTL;
dev_dbg(hsotg->dev, "GPVNDCTL @0x%08lX : 0x%08X\n",
(unsigned long)addr, dwc2_readl(hsotg, GPVNDCTL));
addr = hsotg->regs + GGPIO;
dev_dbg(hsotg->dev, "GGPIO @0x%08lX : 0x%08X\n",
(unsigned long)addr, dwc2_readl(hsotg, GGPIO));
addr = hsotg->regs + GUID;
dev_dbg(hsotg->dev, "GUID @0x%08lX : 0x%08X\n",
(unsigned long)addr, dwc2_readl(hsotg, GUID));
addr = hsotg->regs + GSNPSID;
dev_dbg(hsotg->dev, "GSNPSID @0x%08lX : 0x%08X\n",
(unsigned long)addr, dwc2_readl(hsotg, GSNPSID));
addr = hsotg->regs + GHWCFG1;
dev_dbg(hsotg->dev, "GHWCFG1 @0x%08lX : 0x%08X\n",
(unsigned long)addr, dwc2_readl(hsotg, GHWCFG1));
addr = hsotg->regs + GHWCFG2;
dev_dbg(hsotg->dev, "GHWCFG2 @0x%08lX : 0x%08X\n",
(unsigned long)addr, dwc2_readl(hsotg, GHWCFG2));
addr = hsotg->regs + GHWCFG3;
dev_dbg(hsotg->dev, "GHWCFG3 @0x%08lX : 0x%08X\n",
(unsigned long)addr, dwc2_readl(hsotg, GHWCFG3));
addr = hsotg->regs + GHWCFG4;
dev_dbg(hsotg->dev, "GHWCFG4 @0x%08lX : 0x%08X\n",
(unsigned long)addr, dwc2_readl(hsotg, GHWCFG4));
addr = hsotg->regs + GLPMCFG;
dev_dbg(hsotg->dev, "GLPMCFG @0x%08lX : 0x%08X\n",
(unsigned long)addr, dwc2_readl(hsotg, GLPMCFG));
addr = hsotg->regs + GPWRDN;
dev_dbg(hsotg->dev, "GPWRDN @0x%08lX : 0x%08X\n",
(unsigned long)addr, dwc2_readl(hsotg, GPWRDN));
addr = hsotg->regs + GDFIFOCFG;
dev_dbg(hsotg->dev, "GDFIFOCFG @0x%08lX : 0x%08X\n",
(unsigned long)addr, dwc2_readl(hsotg, GDFIFOCFG));
addr = hsotg->regs + HPTXFSIZ;
dev_dbg(hsotg->dev, "HPTXFSIZ @0x%08lX : 0x%08X\n",
(unsigned long)addr, dwc2_readl(hsotg, HPTXFSIZ));
addr = hsotg->regs + PCGCTL;
dev_dbg(hsotg->dev, "PCGCTL @0x%08lX : 0x%08X\n",
(unsigned long)addr, dwc2_readl(hsotg, PCGCTL));
#endif
}
/**
* dwc2_flush_tx_fifo() - Flushes a Tx FIFO
*
* @hsotg: Programming view of DWC_otg controller
* @num: Tx FIFO to flush
*/
void dwc2_flush_tx_fifo(struct dwc2_hsotg *hsotg, const int num)
{
u32 greset;
dev_vdbg(hsotg->dev, "Flush Tx FIFO %d\n", num);
/* Wait for AHB master IDLE state */
if (dwc2_hsotg_wait_bit_set(hsotg, GRSTCTL, GRSTCTL_AHBIDLE, 10000))
dev_warn(hsotg->dev, "%s: HANG! AHB Idle GRSCTL\n",
__func__);
greset = GRSTCTL_TXFFLSH;
greset |= num << GRSTCTL_TXFNUM_SHIFT & GRSTCTL_TXFNUM_MASK;
dwc2_writel(hsotg, greset, GRSTCTL);
if (dwc2_hsotg_wait_bit_clear(hsotg, GRSTCTL, GRSTCTL_TXFFLSH, 10000))
dev_warn(hsotg->dev, "%s: HANG! timeout GRSTCTL GRSTCTL_TXFFLSH\n",
__func__);
/* Wait for at least 3 PHY Clocks */
udelay(1);
}
/**
* dwc2_flush_rx_fifo() - Flushes the Rx FIFO
*
* @hsotg: Programming view of DWC_otg controller
*/
void dwc2_flush_rx_fifo(struct dwc2_hsotg *hsotg)
{
u32 greset;
dev_vdbg(hsotg->dev, "%s()\n", __func__);
/* Wait for AHB master IDLE state */
if (dwc2_hsotg_wait_bit_set(hsotg, GRSTCTL, GRSTCTL_AHBIDLE, 10000))
dev_warn(hsotg->dev, "%s: HANG! AHB Idle GRSCTL\n",
__func__);
greset = GRSTCTL_RXFFLSH;
dwc2_writel(hsotg, greset, GRSTCTL);
/* Wait for RxFIFO flush done */
if (dwc2_hsotg_wait_bit_clear(hsotg, GRSTCTL, GRSTCTL_RXFFLSH, 10000))
dev_warn(hsotg->dev, "%s: HANG! timeout GRSTCTL GRSTCTL_RXFFLSH\n",
__func__);
/* Wait for at least 3 PHY Clocks */
udelay(1);
}
bool dwc2_is_controller_alive(struct dwc2_hsotg *hsotg)
{
if (dwc2_readl(hsotg, GSNPSID) == 0xffffffff)
return false;
else
return true;
}
/**
* dwc2_enable_global_interrupts() - Enables the controller's Global
* Interrupt in the AHB Config register
*
* @hsotg: Programming view of DWC_otg controller
*/
void dwc2_enable_global_interrupts(struct dwc2_hsotg *hsotg)
{
u32 ahbcfg = dwc2_readl(hsotg, GAHBCFG);
ahbcfg |= GAHBCFG_GLBL_INTR_EN;
dwc2_writel(hsotg, ahbcfg, GAHBCFG);
}
/**
* dwc2_disable_global_interrupts() - Disables the controller's Global
* Interrupt in the AHB Config register
*
* @hsotg: Programming view of DWC_otg controller
*/
void dwc2_disable_global_interrupts(struct dwc2_hsotg *hsotg)
{
u32 ahbcfg = dwc2_readl(hsotg, GAHBCFG);
ahbcfg &= ~GAHBCFG_GLBL_INTR_EN;
dwc2_writel(hsotg, ahbcfg, GAHBCFG);
}
/* Returns the controller's GHWCFG2.OTG_MODE. */
unsigned int dwc2_op_mode(struct dwc2_hsotg *hsotg)
{
u32 ghwcfg2 = dwc2_readl(hsotg, GHWCFG2);
return (ghwcfg2 & GHWCFG2_OP_MODE_MASK) >>
GHWCFG2_OP_MODE_SHIFT;
}
/* Returns true if the controller is capable of DRD. */
bool dwc2_hw_is_otg(struct dwc2_hsotg *hsotg)
{
unsigned int op_mode = dwc2_op_mode(hsotg);
return (op_mode == GHWCFG2_OP_MODE_HNP_SRP_CAPABLE) ||
(op_mode == GHWCFG2_OP_MODE_SRP_ONLY_CAPABLE) ||
(op_mode == GHWCFG2_OP_MODE_NO_HNP_SRP_CAPABLE);
}
/* Returns true if the controller is host-only. */
bool dwc2_hw_is_host(struct dwc2_hsotg *hsotg)
{
unsigned int op_mode = dwc2_op_mode(hsotg);
return (op_mode == GHWCFG2_OP_MODE_SRP_CAPABLE_HOST) ||
(op_mode == GHWCFG2_OP_MODE_NO_SRP_CAPABLE_HOST);
}
/* Returns true if the controller is device-only. */
bool dwc2_hw_is_device(struct dwc2_hsotg *hsotg)
{
unsigned int op_mode = dwc2_op_mode(hsotg);
return (op_mode == GHWCFG2_OP_MODE_SRP_CAPABLE_DEVICE) ||
(op_mode == GHWCFG2_OP_MODE_NO_SRP_CAPABLE_DEVICE);
}
/**
* dwc2_hsotg_wait_bit_set - Waits for bit to be set.
* @hsotg: Programming view of DWC_otg controller.
* @offset: Register's offset where bit/bits must be set.
* @mask: Mask of the bit/bits which must be set.
* @timeout: Timeout to wait.
*
* Return: 0 if bit/bits are set or -ETIMEDOUT in case of timeout.
*/
int dwc2_hsotg_wait_bit_set(struct dwc2_hsotg *hsotg, u32 offset, u32 mask,
u32 timeout)
{
u32 i;
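/* Busy-wait poll: each iteration delays 1 us, so @timeout is effectively in microseconds */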
for (i = 0; i < timeout; i++) {
if (dwc2_readl(hsotg, offset) & mask)
return 0;
udelay(1);
}
return -ETIMEDOUT;
}
/**
* dwc2_hsotg_wait_bit_clear - Waits for bit to be clear.
* @hsotg: Programming view of DWC_otg controller.
* @offset: Register's offset where bit/bits must be cleared.
* @mask: Mask of the bit/bits which must be cleared.
* @timeout: Timeout to wait.
*
* Return: 0 if bit/bits are cleared or -ETIMEDOUT in case of timeout.
*/
int dwc2_hsotg_wait_bit_clear(struct dwc2_hsotg *hsotg, u32 offset, u32 mask,
u32 timeout)
{
u32 i;
for (i = 0; i < timeout; i++) {
if (!(dwc2_readl(hsotg, offset) & mask))
return 0;
udelay(1);
}
return -ETIMEDOUT;
}
/*
* Initializes the FSLSPClkSel field of the HCFG register depending on the
* PHY type
*/
void dwc2_init_fs_ls_pclk_sel(struct dwc2_hsotg *hsotg)
{
u32 hcfg, val;
if ((hsotg->hw_params.hs_phy_type == GHWCFG2_HS_PHY_TYPE_ULPI &&
hsotg->hw_params.fs_phy_type == GHWCFG2_FS_PHY_TYPE_DEDICATED &&
hsotg->params.ulpi_fs_ls) ||
hsotg->params.phy_type == DWC2_PHY_TYPE_PARAM_FS) {
/* Full speed PHY */
val = HCFG_FSLSPCLKSEL_48_MHZ;
} else {
/* High speed PHY running at full speed or high speed */
val = HCFG_FSLSPCLKSEL_30_60_MHZ;
}
dev_dbg(hsotg->dev, "Initializing HCFG.FSLSPClkSel to %08x\n", val);
hcfg = dwc2_readl(hsotg, HCFG);
hcfg &= ~HCFG_FSLSPCLKSEL_MASK;
hcfg |= val << HCFG_FSLSPCLKSEL_SHIFT;
dwc2_writel(hsotg, hcfg, HCFG);
}
static int dwc2_fs_phy_init(struct dwc2_hsotg *hsotg, bool select_phy)
{
u32 usbcfg, ggpio, i2cctl;
int retval = 0;
/*
* core_init() is now called on every switch so only call the
* following for the first time through
*/
if (select_phy) {
dev_dbg(hsotg->dev, "FS PHY selected\n");
usbcfg = dwc2_readl(hsotg, GUSBCFG);
if (!(usbcfg & GUSBCFG_PHYSEL)) {
usbcfg |= GUSBCFG_PHYSEL;
dwc2_writel(hsotg, usbcfg, GUSBCFG);
/* Reset after a PHY select */
retval = dwc2_core_reset(hsotg, false);
if (retval) {
dev_err(hsotg->dev,
"%s: Reset failed, aborting", __func__);
return retval;
}
}
if (hsotg->params.activate_stm_fs_transceiver) {
ggpio = dwc2_readl(hsotg, GGPIO);
if (!(ggpio & GGPIO_STM32_OTG_GCCFG_PWRDWN)) {
dev_dbg(hsotg->dev, "Activating transceiver\n");
/*
* STM32F4x9 uses the GGPIO register as general
* core configuration register.
*/
ggpio |= GGPIO_STM32_OTG_GCCFG_PWRDWN;
dwc2_writel(hsotg, ggpio, GGPIO);
}
}
}
/*
* Program DCFG.DevSpd or HCFG.FSLSPclkSel to 48 MHz in FS. Also
* do this on HNP Dev/Host mode switches (done in dev_init and
* host_init).
*/
if (dwc2_is_host_mode(hsotg))
dwc2_init_fs_ls_pclk_sel(hsotg);
if (hsotg->params.i2c_enable) {
dev_dbg(hsotg->dev, "FS PHY enabling I2C\n");
/* Program GUSBCFG.OtgUtmiFsSel to I2C */
usbcfg = dwc2_readl(hsotg, GUSBCFG);
usbcfg |= GUSBCFG_OTG_UTMI_FS_SEL;
dwc2_writel(hsotg, usbcfg, GUSBCFG);
/* Program GI2CCTL.I2CEn */
i2cctl = dwc2_readl(hsotg, GI2CCTL);
i2cctl &= ~GI2CCTL_I2CDEVADDR_MASK;
i2cctl |= 1 << GI2CCTL_I2CDEVADDR_SHIFT;
i2cctl &= ~GI2CCTL_I2CEN;
dwc2_writel(hsotg, i2cctl, GI2CCTL);
i2cctl |= GI2CCTL_I2CEN;
dwc2_writel(hsotg, i2cctl, GI2CCTL);
}
return retval;
}
static int dwc2_hs_phy_init(struct dwc2_hsotg *hsotg, bool select_phy)
{
u32 usbcfg, usbcfg_old;
int retval = 0;
if (!select_phy)
return 0;
usbcfg = dwc2_readl(hsotg, GUSBCFG);
usbcfg_old = usbcfg;
/*
* HS PHY parameters. These parameters are preserved during soft reset
* so only program the first time. Do a soft reset immediately after
* setting phyif.
*/
switch (hsotg->params.phy_type) {
case DWC2_PHY_TYPE_PARAM_ULPI:
/* ULPI interface */
dev_dbg(hsotg->dev, "HS ULPI PHY selected\n");
usbcfg |= GUSBCFG_ULPI_UTMI_SEL;
usbcfg &= ~(GUSBCFG_PHYIF16 | GUSBCFG_DDRSEL);
if (hsotg->params.phy_ulpi_ddr)
usbcfg |= GUSBCFG_DDRSEL;
/* Set external VBUS indicator as needed. */
if (hsotg->params.oc_disable)
usbcfg |= (GUSBCFG_ULPI_INT_VBUS_IND |
GUSBCFG_INDICATORPASSTHROUGH);
break;
case DWC2_PHY_TYPE_PARAM_UTMI:
/* UTMI+ interface */
dev_dbg(hsotg->dev, "HS UTMI+ PHY selected\n");
usbcfg &= ~(GUSBCFG_ULPI_UTMI_SEL | GUSBCFG_PHYIF16);
if (hsotg->params.phy_utmi_width == 16)
usbcfg |= GUSBCFG_PHYIF16;
break;
default:
dev_err(hsotg->dev, "FS PHY selected at HS!\n");
break;
}
if (usbcfg != usbcfg_old) {
dwc2_writel(hsotg, usbcfg, GUSBCFG);
/* Reset after setting the PHY parameters */
retval = dwc2_core_reset(hsotg, false);
if (retval) {
dev_err(hsotg->dev,
"%s: Reset failed, aborting", __func__);
return retval;
}
}
return retval;
}
static void dwc2_set_turnaround_time(struct dwc2_hsotg *hsotg)
{
u32 usbcfg;
if (hsotg->params.phy_type != DWC2_PHY_TYPE_PARAM_UTMI)
return;
usbcfg = dwc2_readl(hsotg, GUSBCFG);
usbcfg &= ~GUSBCFG_USBTRDTIM_MASK;
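/* USBTrdTim is programmed in PHY clocks: 5 for a 16-bit UTMI+ data width, 9 for an 8-bit width */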
if (hsotg->params.phy_utmi_width == 16)
usbcfg |= 5 << GUSBCFG_USBTRDTIM_SHIFT;
else
usbcfg |= 9 << GUSBCFG_USBTRDTIM_SHIFT;
dwc2_writel(hsotg, usbcfg, GUSBCFG);
}
int dwc2_phy_init(struct dwc2_hsotg *hsotg, bool select_phy)
{
u32 usbcfg;
u32 otgctl;
int retval = 0;
if ((hsotg->params.speed == DWC2_SPEED_PARAM_FULL ||
hsotg->params.speed == DWC2_SPEED_PARAM_LOW) &&
hsotg->params.phy_type == DWC2_PHY_TYPE_PARAM_FS) {
/* If FS/LS mode with FS/LS PHY */
retval = dwc2_fs_phy_init(hsotg, select_phy);
if (retval)
return retval;
} else {
/* High speed PHY */
retval = dwc2_hs_phy_init(hsotg, select_phy);
if (retval)
return retval;
if (dwc2_is_device_mode(hsotg))
dwc2_set_turnaround_time(hsotg);
}
if (hsotg->hw_params.hs_phy_type == GHWCFG2_HS_PHY_TYPE_ULPI &&
hsotg->hw_params.fs_phy_type == GHWCFG2_FS_PHY_TYPE_DEDICATED &&
hsotg->params.ulpi_fs_ls) {
dev_dbg(hsotg->dev, "Setting ULPI FSLS\n");
usbcfg = dwc2_readl(hsotg, GUSBCFG);
usbcfg |= GUSBCFG_ULPI_FS_LS;
usbcfg |= GUSBCFG_ULPI_CLK_SUSP_M;
dwc2_writel(hsotg, usbcfg, GUSBCFG);
} else {
usbcfg = dwc2_readl(hsotg, GUSBCFG);
usbcfg &= ~GUSBCFG_ULPI_FS_LS;
usbcfg &= ~GUSBCFG_ULPI_CLK_SUSP_M;
dwc2_writel(hsotg, usbcfg, GUSBCFG);
}
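/*
* Unless board-level (Ingenic) overcurrent detection is used, override
* the VBUS-valid signal in host mode so the core treats VBUS as valid.
*/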
if (!hsotg->params.activate_ingenic_overcurrent_detection) {
if (dwc2_is_host_mode(hsotg)) {
otgctl = readl(hsotg->regs + GOTGCTL);
otgctl |= GOTGCTL_VBVALOEN | GOTGCTL_VBVALOVAL;
writel(otgctl, hsotg->regs + GOTGCTL);
}
}
return retval;
}
MODULE_DESCRIPTION("DESIGNWARE HS OTG Core");
MODULE_AUTHOR("Synopsys, Inc.");
MODULE_LICENSE("Dual BSD/GPL");
| linux-master | drivers/usb/dwc2/core.c |
// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
/*
* hcd_ddma.c - DesignWare HS OTG Controller descriptor DMA routines
*
* Copyright (C) 2004-2013 Synopsys, Inc.
*/
/*
* This file contains the Descriptor DMA implementation for Host mode
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/dma-mapping.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/usb.h>
#include <linux/usb/hcd.h>
#include <linux/usb/ch11.h>
#include "core.h"
#include "hcd.h"
static u16 dwc2_frame_list_idx(u16 frame)
{
return frame & (FRLISTEN_64_SIZE - 1);
}
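/*
* Advance (or, in the _dec variant below, rewind) a descriptor list
* index, wrapping at the speed-dependent list size, which is a power
* of two.
*/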
static u16 dwc2_desclist_idx_inc(u16 idx, u16 inc, u8 speed)
{
return (idx + inc) &
((speed == USB_SPEED_HIGH ? MAX_DMA_DESC_NUM_HS_ISOC :
MAX_DMA_DESC_NUM_GENERIC) - 1);
}
static u16 dwc2_desclist_idx_dec(u16 idx, u16 inc, u8 speed)
{
return (idx - inc) &
((speed == USB_SPEED_HIGH ? MAX_DMA_DESC_NUM_HS_ISOC :
MAX_DMA_DESC_NUM_GENERIC) - 1);
}
static u16 dwc2_max_desc_num(struct dwc2_qh *qh)
{
return (qh->ep_type == USB_ENDPOINT_XFER_ISOC &&
qh->dev_speed == USB_SPEED_HIGH) ?
MAX_DMA_DESC_NUM_HS_ISOC : MAX_DMA_DESC_NUM_GENERIC;
}
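/*
* Interval between frame list entries for this QH. For HS the host
* interval is counted in microframes, so round up to whole frames.
*/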
static u16 dwc2_frame_incr_val(struct dwc2_qh *qh)
{
return qh->dev_speed == USB_SPEED_HIGH ?
(qh->host_interval + 8 - 1) / 8 : qh->host_interval;
}
static int dwc2_desc_list_alloc(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh,
gfp_t flags)
{
struct kmem_cache *desc_cache;
if (qh->ep_type == USB_ENDPOINT_XFER_ISOC &&
qh->dev_speed == USB_SPEED_HIGH)
desc_cache = hsotg->desc_hsisoc_cache;
else
desc_cache = hsotg->desc_gen_cache;
qh->desc_list_sz = sizeof(struct dwc2_dma_desc) *
dwc2_max_desc_num(qh);
qh->desc_list = kmem_cache_zalloc(desc_cache, flags | GFP_DMA);
if (!qh->desc_list)
return -ENOMEM;
qh->desc_list_dma = dma_map_single(hsotg->dev, qh->desc_list,
qh->desc_list_sz,
DMA_TO_DEVICE);
qh->n_bytes = kcalloc(dwc2_max_desc_num(qh), sizeof(u32), flags);
if (!qh->n_bytes) {
dma_unmap_single(hsotg->dev, qh->desc_list_dma,
qh->desc_list_sz,
DMA_FROM_DEVICE);
kmem_cache_free(desc_cache, qh->desc_list);
qh->desc_list = NULL;
return -ENOMEM;
}
return 0;
}
static void dwc2_desc_list_free(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh)
{
struct kmem_cache *desc_cache;
if (qh->ep_type == USB_ENDPOINT_XFER_ISOC &&
qh->dev_speed == USB_SPEED_HIGH)
desc_cache = hsotg->desc_hsisoc_cache;
else
desc_cache = hsotg->desc_gen_cache;
if (qh->desc_list) {
dma_unmap_single(hsotg->dev, qh->desc_list_dma,
qh->desc_list_sz, DMA_FROM_DEVICE);
kmem_cache_free(desc_cache, qh->desc_list);
qh->desc_list = NULL;
}
kfree(qh->n_bytes);
qh->n_bytes = NULL;
}
static int dwc2_frame_list_alloc(struct dwc2_hsotg *hsotg, gfp_t mem_flags)
{
if (hsotg->frame_list)
return 0;
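/*
* One 32-bit entry per frame list slot; each entry is a bitmask of
* host channels to service in that (micro)frame.
*/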
hsotg->frame_list_sz = 4 * FRLISTEN_64_SIZE;
hsotg->frame_list = kzalloc(hsotg->frame_list_sz, GFP_ATOMIC | GFP_DMA);
if (!hsotg->frame_list)
return -ENOMEM;
hsotg->frame_list_dma = dma_map_single(hsotg->dev, hsotg->frame_list,
hsotg->frame_list_sz,
DMA_TO_DEVICE);
return 0;
}
static void dwc2_frame_list_free(struct dwc2_hsotg *hsotg)
{
unsigned long flags;
spin_lock_irqsave(&hsotg->lock, flags);
if (!hsotg->frame_list) {
spin_unlock_irqrestore(&hsotg->lock, flags);
return;
}
dma_unmap_single(hsotg->dev, hsotg->frame_list_dma,
hsotg->frame_list_sz, DMA_FROM_DEVICE);
kfree(hsotg->frame_list);
hsotg->frame_list = NULL;
spin_unlock_irqrestore(&hsotg->lock, flags);
}
static void dwc2_per_sched_enable(struct dwc2_hsotg *hsotg, u32 fr_list_en)
{
u32 hcfg;
unsigned long flags;
spin_lock_irqsave(&hsotg->lock, flags);
hcfg = dwc2_readl(hsotg, HCFG);
if (hcfg & HCFG_PERSCHEDENA) {
/* already enabled */
spin_unlock_irqrestore(&hsotg->lock, flags);
return;
}
dwc2_writel(hsotg, hsotg->frame_list_dma, HFLBADDR);
hcfg &= ~HCFG_FRLISTEN_MASK;
hcfg |= fr_list_en | HCFG_PERSCHEDENA;
dev_vdbg(hsotg->dev, "Enabling Periodic schedule\n");
dwc2_writel(hsotg, hcfg, HCFG);
spin_unlock_irqrestore(&hsotg->lock, flags);
}
static void dwc2_per_sched_disable(struct dwc2_hsotg *hsotg)
{
u32 hcfg;
unsigned long flags;
spin_lock_irqsave(&hsotg->lock, flags);
hcfg = dwc2_readl(hsotg, HCFG);
if (!(hcfg & HCFG_PERSCHEDENA)) {
/* already disabled */
spin_unlock_irqrestore(&hsotg->lock, flags);
return;
}
hcfg &= ~HCFG_PERSCHEDENA;
dev_vdbg(hsotg->dev, "Disabling Periodic schedule\n");
dwc2_writel(hsotg, hcfg, HCFG);
spin_unlock_irqrestore(&hsotg->lock, flags);
}
/*
* Activates/Deactivates FrameList entries for the channel based on endpoint
* servicing period
*/
static void dwc2_update_frame_list(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh,
int enable)
{
struct dwc2_host_chan *chan;
u16 i, j, inc;
if (!hsotg) {
pr_err("hsotg = %p\n", hsotg);
return;
}
if (!qh->channel) {
dev_err(hsotg->dev, "qh->channel = %p\n", qh->channel);
return;
}
if (!hsotg->frame_list) {
dev_err(hsotg->dev, "hsotg->frame_list = %p\n",
hsotg->frame_list);
return;
}
chan = qh->channel;
inc = dwc2_frame_incr_val(qh);
if (qh->ep_type == USB_ENDPOINT_XFER_ISOC)
i = dwc2_frame_list_idx(qh->next_active_frame);
else
i = 0;
j = i;
do {
if (enable)
hsotg->frame_list[j] |= 1 << chan->hc_num;
else
hsotg->frame_list[j] &= ~(1 << chan->hc_num);
j = (j + inc) & (FRLISTEN_64_SIZE - 1);
} while (j != i);
/*
* Sync frame list since controller will access it if periodic
* channel is currently enabled.
*/
dma_sync_single_for_device(hsotg->dev,
hsotg->frame_list_dma,
hsotg->frame_list_sz,
DMA_TO_DEVICE);
if (!enable)
return;
chan->schinfo = 0;
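/*
* Build the schedule info bitmap: for HS periodic endpoints set one
* bit per serviced microframe (every host_interval microframes),
* otherwise allow servicing in any microframe.
*/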
if (chan->speed == USB_SPEED_HIGH && qh->host_interval) {
j = 1;
/* TODO - check this */
inc = (8 + qh->host_interval - 1) / qh->host_interval;
for (i = 0; i < inc; i++) {
chan->schinfo |= j;
j = j << qh->host_interval;
}
} else {
chan->schinfo = 0xff;
}
}
static void dwc2_release_channel_ddma(struct dwc2_hsotg *hsotg,
struct dwc2_qh *qh)
{
struct dwc2_host_chan *chan = qh->channel;
if (dwc2_qh_is_non_per(qh)) {
if (hsotg->params.uframe_sched)
hsotg->available_host_channels++;
else
hsotg->non_periodic_channels--;
} else {
dwc2_update_frame_list(hsotg, qh, 0);
hsotg->available_host_channels++;
}
/*
* The check prevents a double cleanup attempt in case of a device
* disconnect. See channel cleanup in dwc2_hcd_disconnect().
*/
if (chan->qh) {
if (!list_empty(&chan->hc_list_entry))
list_del(&chan->hc_list_entry);
dwc2_hc_cleanup(hsotg, chan);
list_add_tail(&chan->hc_list_entry, &hsotg->free_hc_list);
chan->qh = NULL;
}
qh->channel = NULL;
qh->ntd = 0;
if (qh->desc_list)
memset(qh->desc_list, 0, sizeof(struct dwc2_dma_desc) *
dwc2_max_desc_num(qh));
}
/**
* dwc2_hcd_qh_init_ddma() - Initializes a QH structure's Descriptor DMA
* related members
*
* @hsotg: The HCD state structure for the DWC OTG controller
* @qh: The QH to init
* @mem_flags: Indicates the type of memory allocation
*
* Return: 0 if successful, negative error code otherwise
*
* Allocates memory for the descriptor list. For the first periodic QH,
* allocates memory for the FrameList and enables periodic scheduling.
*/
int dwc2_hcd_qh_init_ddma(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh,
gfp_t mem_flags)
{
int retval;
if (qh->do_split) {
dev_err(hsotg->dev,
"SPLIT Transfers are not supported in Descriptor DMA mode.\n");
retval = -EINVAL;
goto err0;
}
retval = dwc2_desc_list_alloc(hsotg, qh, mem_flags);
if (retval)
goto err0;
if (qh->ep_type == USB_ENDPOINT_XFER_ISOC ||
qh->ep_type == USB_ENDPOINT_XFER_INT) {
if (!hsotg->frame_list) {
retval = dwc2_frame_list_alloc(hsotg, mem_flags);
if (retval)
goto err1;
/* Enable periodic schedule on first periodic QH */
dwc2_per_sched_enable(hsotg, HCFG_FRLISTEN_64);
}
}
qh->ntd = 0;
return 0;
err1:
dwc2_desc_list_free(hsotg, qh);
err0:
return retval;
}
/**
* dwc2_hcd_qh_free_ddma() - Frees a QH structure's Descriptor DMA related
* members
*
* @hsotg: The HCD state structure for the DWC OTG controller
* @qh: The QH to free
*
* Frees descriptor list memory associated with the QH. If QH is periodic and
* the last, frees FrameList memory and disables periodic scheduling.
*/
void dwc2_hcd_qh_free_ddma(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh)
{
unsigned long flags;
dwc2_desc_list_free(hsotg, qh);
/*
* The channel may still be assigned for some reason. This has been
* seen on isoc URB dequeue: the channel is halted but no subsequent
* ChHalted interrupt arrives to release it, so when we later get here
* from the endpoint disable routine the channel is still assigned.
*/
spin_lock_irqsave(&hsotg->lock, flags);
if (qh->channel)
dwc2_release_channel_ddma(hsotg, qh);
spin_unlock_irqrestore(&hsotg->lock, flags);
if ((qh->ep_type == USB_ENDPOINT_XFER_ISOC ||
qh->ep_type == USB_ENDPOINT_XFER_INT) &&
(hsotg->params.uframe_sched ||
!hsotg->periodic_channels) && hsotg->frame_list) {
dwc2_per_sched_disable(hsotg);
dwc2_frame_list_free(hsotg);
}
}
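/*
* Map a frame list index to a starting descriptor index. HS isoc uses
* sets of 8 descriptors (one per microframe), FS uses one descriptor
* per frame.
*/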
static u8 dwc2_frame_to_desc_idx(struct dwc2_qh *qh, u16 frame_idx)
{
if (qh->dev_speed == USB_SPEED_HIGH)
/* Descriptor set (8 descriptors) index which is 8-aligned */
return (frame_idx & ((MAX_DMA_DESC_NUM_HS_ISOC / 8) - 1)) * 8;
else
return frame_idx & (MAX_DMA_DESC_NUM_GENERIC - 1);
}
/*
* Determine starting frame for Isochronous transfer.
* A few frames are skipped to prevent a race condition with the HC.
*/
static u16 dwc2_calc_starting_frame(struct dwc2_hsotg *hsotg,
struct dwc2_qh *qh, u16 *skip_frames)
{
u16 frame;
hsotg->frame_number = dwc2_hcd_get_frame_number(hsotg);
/*
* next_active_frame is always frame number (not uFrame) both in FS
* and HS!
*/
/*
* skip_frames is used to limit the number of activated descriptors,
* to avoid the situation where the HC services the last activated
* descriptor first.
* Example for FS:
* The current frame is 1 and the scheduled frame is 3. Since the HC
* always fetches the descriptor corresponding to curr_frame+1, the
* descriptor corresponding to frame 2 will be fetched. If the number
* of descriptors is max=64 (or greater) the list will be fully
* programmed with Active descriptors and it is a possible (rare) case
* that the latest descriptor (considering rollback) corresponding to
* frame 2 will be serviced first. The HS case is more probable
* because, in fact, up to 11 uframes (16 in the code) may be skipped.
*/
if (qh->dev_speed == USB_SPEED_HIGH) {
/*
* Consider uframe counter also, to start xfer asap. If half of
* the frame elapsed skip 2 frames otherwise just 1 frame.
* Starting descriptor index must be 8-aligned, so if the
* current frame is near to complete the next one is skipped as
* well.
*/
if (dwc2_micro_frame_num(hsotg->frame_number) >= 5) {
*skip_frames = 2 * 8;
frame = dwc2_frame_num_inc(hsotg->frame_number,
*skip_frames);
} else {
*skip_frames = 1 * 8;
frame = dwc2_frame_num_inc(hsotg->frame_number,
*skip_frames);
}
frame = dwc2_full_frame_num(frame);
} else {
/*
* Two frames are skipped for FS - the current and the next.
* But for descriptor programming, 1 frame (descriptor) is
* enough, see example above.
*/
*skip_frames = 1;
frame = dwc2_frame_num_inc(hsotg->frame_number, 2);
}
return frame;
}
/*
* Calculate initial descriptor index for isochronous transfer based on
* scheduled frame
*/
static u16 dwc2_recalc_initial_desc_idx(struct dwc2_hsotg *hsotg,
struct dwc2_qh *qh)
{
u16 frame, fr_idx, fr_idx_tmp, skip_frames;
/*
* With the current ISOC processing algorithm the channel is released
* when there are no more QTDs in the list (qh->ntd == 0). Thus this
* function is called only when qh->ntd == 0 and qh->channel == 0.
*
* So the qh->channel != NULL branch is not used; it is kept in the
* source file for another possible approach: do not disable and
* release the channel when the ISOC session completes, just move the
* QH to the inactive schedule until a new QTD arrives. On a new QTD,
* the QH is moved back to the 'ready' schedule, and the starting frame
* and therefore the starting desc_index are recalculated. In that case
* the channel is released only on ep_disable.
*/
/*
* Calculate starting descriptor index. For INTERRUPT endpoint it is
* always 0.
*/
if (qh->channel) {
frame = dwc2_calc_starting_frame(hsotg, qh, &skip_frames);
/*
* Calculate initial descriptor index based on FrameList current
* bitmap and servicing period
*/
fr_idx_tmp = dwc2_frame_list_idx(frame);
fr_idx = (FRLISTEN_64_SIZE +
dwc2_frame_list_idx(qh->next_active_frame) -
fr_idx_tmp) % dwc2_frame_incr_val(qh);
fr_idx = (fr_idx + fr_idx_tmp) % FRLISTEN_64_SIZE;
} else {
qh->next_active_frame = dwc2_calc_starting_frame(hsotg, qh,
&skip_frames);
fr_idx = dwc2_frame_list_idx(qh->next_active_frame);
}
qh->td_first = qh->td_last = dwc2_frame_to_desc_idx(qh, fr_idx);
return skip_frames;
}
#define ISOC_URB_GIVEBACK_ASAP
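/* Maximum isoc payload per descriptor: 1023 bytes for FS, 3 * 1024 bytes for HS high-bandwidth endpoints */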
#define MAX_ISOC_XFER_SIZE_FS 1023
#define MAX_ISOC_XFER_SIZE_HS 3072
#define DESCNUM_THRESHOLD 4
static void dwc2_fill_host_isoc_dma_desc(struct dwc2_hsotg *hsotg,
struct dwc2_qtd *qtd,
struct dwc2_qh *qh, u32 max_xfer_size,
u16 idx)
{
struct dwc2_dma_desc *dma_desc = &qh->desc_list[idx];
struct dwc2_hcd_iso_packet_desc *frame_desc;
memset(dma_desc, 0, sizeof(*dma_desc));
frame_desc = &qtd->urb->iso_descs[qtd->isoc_frame_index_last];
if (frame_desc->length > max_xfer_size)
qh->n_bytes[idx] = max_xfer_size;
else
qh->n_bytes[idx] = frame_desc->length;
dma_desc->buf = (u32)(qtd->urb->dma + frame_desc->offset);
dma_desc->status = qh->n_bytes[idx] << HOST_DMA_ISOC_NBYTES_SHIFT &
HOST_DMA_ISOC_NBYTES_MASK;
/* Set active bit */
dma_desc->status |= HOST_DMA_A;
qh->ntd++;
qtd->isoc_frame_index_last++;
#ifdef ISOC_URB_GIVEBACK_ASAP
/* Set IOC for each descriptor corresponding to last frame of URB */
if (qtd->isoc_frame_index_last == qtd->urb->packet_count)
dma_desc->status |= HOST_DMA_IOC;
#endif
dma_sync_single_for_device(hsotg->dev,
qh->desc_list_dma +
(idx * sizeof(struct dwc2_dma_desc)),
sizeof(struct dwc2_dma_desc),
DMA_TO_DEVICE);
}
static void dwc2_init_isoc_dma_desc(struct dwc2_hsotg *hsotg,
struct dwc2_qh *qh, u16 skip_frames)
{
struct dwc2_qtd *qtd;
u32 max_xfer_size;
u16 idx, inc, n_desc = 0, ntd_max = 0;
u16 cur_idx;
u16 next_idx;
idx = qh->td_last;
inc = qh->host_interval;
hsotg->frame_number = dwc2_hcd_get_frame_number(hsotg);
cur_idx = dwc2_frame_list_idx(hsotg->frame_number);
next_idx = dwc2_desclist_idx_inc(qh->td_last, inc, qh->dev_speed);
/*
* Ensure the current frame number didn't overstep the last scheduled
* descriptor. If it did, the only way to recover is to move
* qh->td_last to the current frame number + 1, so that the next isoc
* descriptor is scheduled on frame number + 1 and not on a past frame.
*/
if (dwc2_frame_idx_num_gt(cur_idx, next_idx) || (cur_idx == next_idx)) {
if (inc < 32) {
dev_vdbg(hsotg->dev,
"current frame number overstep last descriptor\n");
qh->td_last = dwc2_desclist_idx_inc(cur_idx, inc,
qh->dev_speed);
idx = qh->td_last;
}
}
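/*
* ntd_max: how many descriptors may be active at once, i.e. the
* descriptor list length divided by the servicing interval, minus any
* initially skipped frames when starting on a fresh channel.
*/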
if (qh->host_interval) {
ntd_max = (dwc2_max_desc_num(qh) + qh->host_interval - 1) /
qh->host_interval;
if (skip_frames && !qh->channel)
ntd_max -= skip_frames / qh->host_interval;
}
max_xfer_size = qh->dev_speed == USB_SPEED_HIGH ?
MAX_ISOC_XFER_SIZE_HS : MAX_ISOC_XFER_SIZE_FS;
list_for_each_entry(qtd, &qh->qtd_list, qtd_list_entry) {
if (qtd->in_process &&
qtd->isoc_frame_index_last ==
qtd->urb->packet_count)
continue;
qtd->isoc_td_first = idx;
while (qh->ntd < ntd_max && qtd->isoc_frame_index_last <
qtd->urb->packet_count) {
dwc2_fill_host_isoc_dma_desc(hsotg, qtd, qh,
max_xfer_size, idx);
idx = dwc2_desclist_idx_inc(idx, inc, qh->dev_speed);
n_desc++;
}
qtd->isoc_td_last = idx;
qtd->in_process = 1;
}
qh->td_last = idx;
#ifdef ISOC_URB_GIVEBACK_ASAP
/* Set IOC for last descriptor if descriptor list is full */
if (qh->ntd == ntd_max) {
idx = dwc2_desclist_idx_dec(qh->td_last, inc, qh->dev_speed);
qh->desc_list[idx].status |= HOST_DMA_IOC;
dma_sync_single_for_device(hsotg->dev,
qh->desc_list_dma + (idx *
sizeof(struct dwc2_dma_desc)),
sizeof(struct dwc2_dma_desc),
DMA_TO_DEVICE);
}
#else
/*
* Set IOC bit only for one descriptor. Always try to be ahead of HW
* processing, i.e. on IOC generation driver activates next descriptor
* but core continues to process descriptors following the one with IOC
* set.
*/
if (n_desc > DESCNUM_THRESHOLD)
/*
* Move IOC "up". Required even if there is only one QTD
* in the list, because QTDs might continue to be queued,
* but during the activation it was only one queued.
* Actually more than one QTD might be in the list if this
* function called from XferCompletion - QTDs was queued during
* HW processing of the previous descriptor chunk.
*/
idx = dwc2_desclist_idx_dec(idx, inc * ((qh->ntd + 1) / 2),
qh->dev_speed);
else
/*
* Set the IOC for the latest descriptor if either number of
* descriptors is not greater than threshold or no more new
* descriptors activated
*/
idx = dwc2_desclist_idx_dec(qh->td_last, inc, qh->dev_speed);
qh->desc_list[idx].status |= HOST_DMA_IOC;
dma_sync_single_for_device(hsotg->dev,
qh->desc_list_dma +
(idx * sizeof(struct dwc2_dma_desc)),
sizeof(struct dwc2_dma_desc),
DMA_TO_DEVICE);
#endif
}
static void dwc2_fill_host_dma_desc(struct dwc2_hsotg *hsotg,
struct dwc2_host_chan *chan,
struct dwc2_qtd *qtd, struct dwc2_qh *qh,
int n_desc)
{
struct dwc2_dma_desc *dma_desc = &qh->desc_list[n_desc];
int len = chan->xfer_len;
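/*
* Clamp the length so that, after IN transfers are rounded up to a
* whole number of packets below, it still fits in the descriptor
* NBYTES field.
*/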
if (len > HOST_DMA_NBYTES_LIMIT - (chan->max_packet - 1))
len = HOST_DMA_NBYTES_LIMIT - (chan->max_packet - 1);
if (chan->ep_is_in) {
int num_packets;
if (len > 0 && chan->max_packet)
num_packets = (len + chan->max_packet - 1)
/ chan->max_packet;
else
/* Need 1 packet for transfer length of 0 */
num_packets = 1;
/* Always program an integral # of packets for IN transfers */
len = num_packets * chan->max_packet;
}
dma_desc->status = len << HOST_DMA_NBYTES_SHIFT & HOST_DMA_NBYTES_MASK;
qh->n_bytes[n_desc] = len;
if (qh->ep_type == USB_ENDPOINT_XFER_CONTROL &&
qtd->control_phase == DWC2_CONTROL_SETUP)
dma_desc->status |= HOST_DMA_SUP;
dma_desc->buf = (u32)chan->xfer_dma;
dma_sync_single_for_device(hsotg->dev,
qh->desc_list_dma +
(n_desc * sizeof(struct dwc2_dma_desc)),
sizeof(struct dwc2_dma_desc),
DMA_TO_DEVICE);
/*
* Last (or only) descriptor of IN transfer with actual size less
* than MaxPacket
*/
if (len > chan->xfer_len) {
chan->xfer_len = 0;
} else {
chan->xfer_dma += len;
chan->xfer_len -= len;
}
}
static void dwc2_init_non_isoc_dma_desc(struct dwc2_hsotg *hsotg,
struct dwc2_qh *qh)
{
struct dwc2_qtd *qtd;
struct dwc2_host_chan *chan = qh->channel;
int n_desc = 0;
dev_vdbg(hsotg->dev, "%s(): qh=%p dma=%08lx len=%d\n", __func__, qh,
(unsigned long)chan->xfer_dma, chan->xfer_len);
/*
* Start with chan->xfer_dma initialized in assign_and_init_hc(); then,
* if the SG transfer consists of multiple URBs, this pointer is
* re-assigned to the buffer of the currently processed QTD. For a
* non-SG request there is always one QTD active.
*/
list_for_each_entry(qtd, &qh->qtd_list, qtd_list_entry) {
dev_vdbg(hsotg->dev, "qtd=%p\n", qtd);
if (n_desc) {
/* SG request - more than 1 QTD */
chan->xfer_dma = qtd->urb->dma +
qtd->urb->actual_length;
chan->xfer_len = qtd->urb->length -
qtd->urb->actual_length;
dev_vdbg(hsotg->dev, "buf=%08lx len=%d\n",
(unsigned long)chan->xfer_dma, chan->xfer_len);
}
qtd->n_desc = 0;
do {
if (n_desc > 1) {
qh->desc_list[n_desc - 1].status |= HOST_DMA_A;
dev_vdbg(hsotg->dev,
"set A bit in desc %d (%p)\n",
n_desc - 1,
&qh->desc_list[n_desc - 1]);
dma_sync_single_for_device(hsotg->dev,
qh->desc_list_dma +
((n_desc - 1) *
sizeof(struct dwc2_dma_desc)),
sizeof(struct dwc2_dma_desc),
DMA_TO_DEVICE);
}
dwc2_fill_host_dma_desc(hsotg, chan, qtd, qh, n_desc);
dev_vdbg(hsotg->dev,
"desc %d (%p) buf=%08x status=%08x\n",
n_desc, &qh->desc_list[n_desc],
qh->desc_list[n_desc].buf,
qh->desc_list[n_desc].status);
qtd->n_desc++;
n_desc++;
} while (chan->xfer_len > 0 &&
n_desc != MAX_DMA_DESC_NUM_GENERIC);
dev_vdbg(hsotg->dev, "n_desc=%d\n", n_desc);
qtd->in_process = 1;
if (qh->ep_type == USB_ENDPOINT_XFER_CONTROL)
break;
if (n_desc == MAX_DMA_DESC_NUM_GENERIC)
break;
}
if (n_desc) {
qh->desc_list[n_desc - 1].status |=
HOST_DMA_IOC | HOST_DMA_EOL | HOST_DMA_A;
dev_vdbg(hsotg->dev, "set IOC/EOL/A bits in desc %d (%p)\n",
n_desc - 1, &qh->desc_list[n_desc - 1]);
dma_sync_single_for_device(hsotg->dev,
qh->desc_list_dma + (n_desc - 1) *
sizeof(struct dwc2_dma_desc),
sizeof(struct dwc2_dma_desc),
DMA_TO_DEVICE);
if (n_desc > 1) {
qh->desc_list[0].status |= HOST_DMA_A;
dev_vdbg(hsotg->dev, "set A bit in desc 0 (%p)\n",
&qh->desc_list[0]);
dma_sync_single_for_device(hsotg->dev,
qh->desc_list_dma,
sizeof(struct dwc2_dma_desc),
DMA_TO_DEVICE);
}
chan->ntd = n_desc;
}
}
/**
* dwc2_hcd_start_xfer_ddma() - Starts a transfer in Descriptor DMA mode
*
* @hsotg: The HCD state structure for the DWC OTG controller
* @qh: The QH to init
*
* Return: 0 if successful, negative error code otherwise
*
* For Control and Bulk endpoints, initializes descriptor list and starts the
* transfer. For Interrupt and Isochronous endpoints, initializes descriptor
* list then updates FrameList, marking appropriate entries as active.
*
* For Isochronous endpoints the starting descriptor index is calculated based
* on the scheduled frame, but only on the first transfer descriptor within a
* session. Then the transfer is started via enabling the channel.
*
* For Isochronous endpoints the channel is not halted on XferComplete
* interrupt so remains assigned to the endpoint(QH) until session is done.
*/
void dwc2_hcd_start_xfer_ddma(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh)
{
/* Channel is already assigned */
struct dwc2_host_chan *chan = qh->channel;
u16 skip_frames = 0;
switch (chan->ep_type) {
case USB_ENDPOINT_XFER_CONTROL:
case USB_ENDPOINT_XFER_BULK:
dwc2_init_non_isoc_dma_desc(hsotg, qh);
dwc2_hc_start_transfer_ddma(hsotg, chan);
break;
case USB_ENDPOINT_XFER_INT:
dwc2_init_non_isoc_dma_desc(hsotg, qh);
dwc2_update_frame_list(hsotg, qh, 1);
dwc2_hc_start_transfer_ddma(hsotg, chan);
break;
case USB_ENDPOINT_XFER_ISOC:
if (!qh->ntd)
skip_frames = dwc2_recalc_initial_desc_idx(hsotg, qh);
dwc2_init_isoc_dma_desc(hsotg, qh, skip_frames);
if (!chan->xfer_started) {
dwc2_update_frame_list(hsotg, qh, 1);
/*
* Always set ntd to the maximum, instead of the actual
* size, otherwise ntd would have to be changed while the
* channel is enabled, which is not recommended.
*/
chan->ntd = dwc2_max_desc_num(qh);
/* Enable channel only once for ISOC */
dwc2_hc_start_transfer_ddma(hsotg, chan);
}
break;
default:
break;
}
}
#define DWC2_CMPL_DONE 1
#define DWC2_CMPL_STOP 2
static int dwc2_cmpl_host_isoc_dma_desc(struct dwc2_hsotg *hsotg,
struct dwc2_host_chan *chan,
struct dwc2_qtd *qtd,
struct dwc2_qh *qh, u16 idx)
{
struct dwc2_dma_desc *dma_desc;
struct dwc2_hcd_iso_packet_desc *frame_desc;
u16 remain = 0;
int rc = 0;
if (!qtd->urb)
return -EINVAL;
dma_sync_single_for_cpu(hsotg->dev, qh->desc_list_dma + (idx *
sizeof(struct dwc2_dma_desc)),
sizeof(struct dwc2_dma_desc),
DMA_FROM_DEVICE);
dma_desc = &qh->desc_list[idx];
frame_desc = &qtd->urb->iso_descs[qtd->isoc_frame_index_last];
dma_desc->buf = (u32)(qtd->urb->dma + frame_desc->offset);
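/*
* For IN transfers the core writes back the number of bytes not
* transferred into the NBYTES field, so the actual length is the
* programmed size minus this remainder.
*/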
if (chan->ep_is_in)
remain = (dma_desc->status & HOST_DMA_ISOC_NBYTES_MASK) >>
HOST_DMA_ISOC_NBYTES_SHIFT;
if ((dma_desc->status & HOST_DMA_STS_MASK) == HOST_DMA_STS_PKTERR) {
/*
* XactError, or unable to complete all the transactions
* in the scheduled micro-frame/frame, both indicated by
* HOST_DMA_STS_PKTERR
*/
qtd->urb->error_count++;
frame_desc->actual_length = qh->n_bytes[idx] - remain;
frame_desc->status = -EPROTO;
} else {
/* Success */
frame_desc->actual_length = qh->n_bytes[idx] - remain;
frame_desc->status = 0;
}
if (++qtd->isoc_frame_index == qtd->urb->packet_count) {
/*
* urb->status is not used for isoc transfers here. The
* individual frame_desc statuses are used instead.
*/
dwc2_host_complete(hsotg, qtd, 0);
dwc2_hcd_qtd_unlink_and_free(hsotg, qtd, qh);
/*
* This check is necessary because urb_dequeue can be called
* from urb complete callback (sound driver for example). All
* pending URBs are dequeued there, so no need for further
* processing.
*/
if (chan->halt_status == DWC2_HC_XFER_URB_DEQUEUE)
return -1;
rc = DWC2_CMPL_DONE;
}
qh->ntd--;
/* Stop if IOC requested descriptor reached */
if (dma_desc->status & HOST_DMA_IOC)
rc = DWC2_CMPL_STOP;
return rc;
}
static void dwc2_complete_isoc_xfer_ddma(struct dwc2_hsotg *hsotg,
struct dwc2_host_chan *chan,
enum dwc2_halt_status halt_status)
{
struct dwc2_hcd_iso_packet_desc *frame_desc;
struct dwc2_qtd *qtd, *qtd_tmp;
struct dwc2_qh *qh;
u16 idx;
int rc;
qh = chan->qh;
idx = qh->td_first;
if (chan->halt_status == DWC2_HC_XFER_URB_DEQUEUE) {
list_for_each_entry(qtd, &qh->qtd_list, qtd_list_entry)
qtd->in_process = 0;
return;
}
if (halt_status == DWC2_HC_XFER_AHB_ERR ||
halt_status == DWC2_HC_XFER_BABBLE_ERR) {
/*
* The channel is halted in these error cases, which are considered
* serious issues.
* Complete all URBs, marking all frames as failed, irrespective of
* whether some of the descriptors (frames) succeeded or not.
* Pass the error code to the completion routine as well, to update
* urb->status; some class drivers might use it to stop queuing
* transfer requests.
*/
int err = halt_status == DWC2_HC_XFER_AHB_ERR ?
-EIO : -EOVERFLOW;
list_for_each_entry_safe(qtd, qtd_tmp, &qh->qtd_list,
qtd_list_entry) {
if (qtd->urb) {
for (idx = 0; idx < qtd->urb->packet_count;
idx++) {
frame_desc = &qtd->urb->iso_descs[idx];
frame_desc->status = err;
}
dwc2_host_complete(hsotg, qtd, err);
}
dwc2_hcd_qtd_unlink_and_free(hsotg, qtd, qh);
}
return;
}
list_for_each_entry_safe(qtd, qtd_tmp, &qh->qtd_list, qtd_list_entry) {
if (!qtd->in_process)
break;
/*
* Ensure idx corresponds to the descriptor where the first URB of
* this QTD was added. During isoc descriptor init, dwc2 may skip
* an index if the current frame number is already past it.
*/
if (idx != qtd->isoc_td_first) {
dev_vdbg(hsotg->dev,
"try to complete %d instead of %d\n",
idx, qtd->isoc_td_first);
idx = qtd->isoc_td_first;
}
do {
struct dwc2_qtd *qtd_next;
u16 cur_idx;
rc = dwc2_cmpl_host_isoc_dma_desc(hsotg, chan, qtd, qh,
idx);
if (rc < 0)
return;
idx = dwc2_desclist_idx_inc(idx, qh->host_interval,
chan->speed);
if (!rc)
continue;
if (rc == DWC2_CMPL_DONE)
break;
/* rc == DWC2_CMPL_STOP */
if (qh->host_interval >= 32)
goto stop_scan;
qh->td_first = idx;
cur_idx = dwc2_frame_list_idx(hsotg->frame_number);
qtd_next = list_first_entry(&qh->qtd_list,
struct dwc2_qtd,
qtd_list_entry);
if (dwc2_frame_idx_num_gt(cur_idx,
qtd_next->isoc_td_last))
break;
goto stop_scan;
} while (idx != qh->td_first);
}
stop_scan:
qh->td_first = idx;
}
static int dwc2_update_non_isoc_urb_state_ddma(struct dwc2_hsotg *hsotg,
struct dwc2_host_chan *chan,
struct dwc2_qtd *qtd,
struct dwc2_dma_desc *dma_desc,
enum dwc2_halt_status halt_status,
u32 n_bytes, int *xfer_done)
{
struct dwc2_hcd_urb *urb = qtd->urb;
u16 remain = 0;
if (chan->ep_is_in)
remain = (dma_desc->status & HOST_DMA_NBYTES_MASK) >>
HOST_DMA_NBYTES_SHIFT;
dev_vdbg(hsotg->dev, "remain=%d dwc2_urb=%p\n", remain, urb);
if (halt_status == DWC2_HC_XFER_AHB_ERR) {
dev_err(hsotg->dev, "EIO\n");
urb->status = -EIO;
return 1;
}
if ((dma_desc->status & HOST_DMA_STS_MASK) == HOST_DMA_STS_PKTERR) {
switch (halt_status) {
case DWC2_HC_XFER_STALL:
dev_vdbg(hsotg->dev, "Stall\n");
urb->status = -EPIPE;
break;
case DWC2_HC_XFER_BABBLE_ERR:
dev_err(hsotg->dev, "Babble\n");
urb->status = -EOVERFLOW;
break;
case DWC2_HC_XFER_XACT_ERR:
dev_err(hsotg->dev, "XactErr\n");
urb->status = -EPROTO;
break;
default:
dev_err(hsotg->dev,
"%s: Unhandled descriptor error status (%d)\n",
__func__, halt_status);
break;
}
return 1;
}
if (dma_desc->status & HOST_DMA_A) {
dev_vdbg(hsotg->dev,
"Active descriptor encountered on channel %d\n",
chan->hc_num);
return 0;
}
if (chan->ep_type == USB_ENDPOINT_XFER_CONTROL) {
if (qtd->control_phase == DWC2_CONTROL_DATA) {
urb->actual_length += n_bytes - remain;
if (remain || urb->actual_length >= urb->length) {
/*
* For Control Data stage do not set urb->status
* to 0, to prevent URB callback. Set it when
* Status phase is done. See below.
*/
*xfer_done = 1;
}
} else if (qtd->control_phase == DWC2_CONTROL_STATUS) {
urb->status = 0;
*xfer_done = 1;
}
/* No handling for SETUP stage */
} else {
/* BULK and INTR */
urb->actual_length += n_bytes - remain;
dev_vdbg(hsotg->dev, "length=%d actual=%d\n", urb->length,
urb->actual_length);
if (remain || urb->actual_length >= urb->length) {
urb->status = 0;
*xfer_done = 1;
}
}
return 0;
}
static int dwc2_process_non_isoc_desc(struct dwc2_hsotg *hsotg,
struct dwc2_host_chan *chan,
int chnum, struct dwc2_qtd *qtd,
int desc_num,
enum dwc2_halt_status halt_status,
int *xfer_done)
{
struct dwc2_qh *qh = chan->qh;
struct dwc2_hcd_urb *urb = qtd->urb;
struct dwc2_dma_desc *dma_desc;
u32 n_bytes;
int failed;
dev_vdbg(hsotg->dev, "%s()\n", __func__);
if (!urb)
return -EINVAL;
dma_sync_single_for_cpu(hsotg->dev,
qh->desc_list_dma + (desc_num *
sizeof(struct dwc2_dma_desc)),
sizeof(struct dwc2_dma_desc),
DMA_FROM_DEVICE);
dma_desc = &qh->desc_list[desc_num];
n_bytes = qh->n_bytes[desc_num];
dev_vdbg(hsotg->dev,
"qtd=%p dwc2_urb=%p desc_num=%d desc=%p n_bytes=%d\n",
qtd, urb, desc_num, dma_desc, n_bytes);
failed = dwc2_update_non_isoc_urb_state_ddma(hsotg, chan, qtd, dma_desc,
halt_status, n_bytes,
xfer_done);
if (failed || (*xfer_done && urb->status != -EINPROGRESS)) {
dwc2_host_complete(hsotg, qtd, urb->status);
dwc2_hcd_qtd_unlink_and_free(hsotg, qtd, qh);
dev_vdbg(hsotg->dev, "failed=%1x xfer_done=%1x\n",
failed, *xfer_done);
return failed;
}
if (qh->ep_type == USB_ENDPOINT_XFER_CONTROL) {
switch (qtd->control_phase) {
case DWC2_CONTROL_SETUP:
if (urb->length > 0)
qtd->control_phase = DWC2_CONTROL_DATA;
else
qtd->control_phase = DWC2_CONTROL_STATUS;
dev_vdbg(hsotg->dev,
" Control setup transaction done\n");
break;
case DWC2_CONTROL_DATA:
if (*xfer_done) {
qtd->control_phase = DWC2_CONTROL_STATUS;
dev_vdbg(hsotg->dev,
" Control data transfer done\n");
} else if (desc_num + 1 == qtd->n_desc) {
/*
* Last descriptor for Control data stage which
* is not completed yet
*/
dwc2_hcd_save_data_toggle(hsotg, chan, chnum,
qtd);
}
break;
default:
break;
}
}
return 0;
}
static void dwc2_complete_non_isoc_xfer_ddma(struct dwc2_hsotg *hsotg,
struct dwc2_host_chan *chan,
int chnum,
enum dwc2_halt_status halt_status)
{
struct list_head *qtd_item, *qtd_tmp;
struct dwc2_qh *qh = chan->qh;
struct dwc2_qtd *qtd = NULL;
int xfer_done;
int desc_num = 0;
if (chan->halt_status == DWC2_HC_XFER_URB_DEQUEUE) {
list_for_each_entry(qtd, &qh->qtd_list, qtd_list_entry)
qtd->in_process = 0;
return;
}
list_for_each_safe(qtd_item, qtd_tmp, &qh->qtd_list) {
int i;
int qtd_desc_count;
qtd = list_entry(qtd_item, struct dwc2_qtd, qtd_list_entry);
xfer_done = 0;
qtd_desc_count = qtd->n_desc;
for (i = 0; i < qtd_desc_count; i++) {
if (dwc2_process_non_isoc_desc(hsotg, chan, chnum, qtd,
desc_num, halt_status,
&xfer_done)) {
qtd = NULL;
goto stop_scan;
}
desc_num++;
}
}
stop_scan:
if (qh->ep_type != USB_ENDPOINT_XFER_CONTROL) {
/*
* Resetting the data toggle for bulk and interrupt endpoints
* in case of stall. See handle_hc_stall_intr().
*/
if (halt_status == DWC2_HC_XFER_STALL)
qh->data_toggle = DWC2_HC_PID_DATA0;
else
dwc2_hcd_save_data_toggle(hsotg, chan, chnum, NULL);
}
if (halt_status == DWC2_HC_XFER_COMPLETE) {
if (chan->hcint & HCINTMSK_NYET) {
/*
* Got a NYET on the last transaction of the transfer.
* It means that the endpoint should be in the PING
* state at the beginning of the next transfer.
*/
qh->ping_state = 1;
}
}
}
/**
* dwc2_hcd_complete_xfer_ddma() - Scans the descriptor list, updates URB's
* status and calls completion routine for the URB if it's done. Called from
* interrupt handlers.
*
* @hsotg: The HCD state structure for the DWC OTG controller
* @chan: Host channel the transfer is completed on
* @chnum: Index of Host channel registers
* @halt_status: Reason the channel is being halted or just XferComplete
* for isochronous transfers
*
* Releases the channel to be used by other transfers.
* In case of Isochronous endpoint the channel is not halted until the end of
* the session, i.e. QTD list is empty.
* If periodic channel released the FrameList is updated accordingly.
* Calls transaction selection routines to activate pending transfers.
*/
void dwc2_hcd_complete_xfer_ddma(struct dwc2_hsotg *hsotg,
struct dwc2_host_chan *chan, int chnum,
enum dwc2_halt_status halt_status)
{
struct dwc2_qh *qh = chan->qh;
int continue_isoc_xfer = 0;
enum dwc2_transaction_type tr_type;
if (chan->ep_type == USB_ENDPOINT_XFER_ISOC) {
dwc2_complete_isoc_xfer_ddma(hsotg, chan, halt_status);
/* Release the channel if halted or session completed */
if (halt_status != DWC2_HC_XFER_COMPLETE ||
list_empty(&qh->qtd_list)) {
struct dwc2_qtd *qtd, *qtd_tmp;
/*
* Kill all remaining QTDs since the channel has been
* halted.
*/
list_for_each_entry_safe(qtd, qtd_tmp,
&qh->qtd_list,
qtd_list_entry) {
dwc2_host_complete(hsotg, qtd,
-ECONNRESET);
dwc2_hcd_qtd_unlink_and_free(hsotg,
qtd, qh);
}
/* Halt the channel if session completed */
if (halt_status == DWC2_HC_XFER_COMPLETE)
dwc2_hc_halt(hsotg, chan, halt_status);
dwc2_release_channel_ddma(hsotg, qh);
dwc2_hcd_qh_unlink(hsotg, qh);
} else {
/* Keep in assigned schedule to continue transfer */
list_move_tail(&qh->qh_list_entry,
&hsotg->periodic_sched_assigned);
/*
* If channel has been halted during giveback of urb
* then prevent any new scheduling.
*/
if (!chan->halt_status)
continue_isoc_xfer = 1;
}
/*
* Todo: Consider the case when period exceeds FrameList size.
* Frame Rollover interrupt should be used.
*/
} else {
/*
* Scan descriptor list to complete the URB(s), then release
* the channel
*/
dwc2_complete_non_isoc_xfer_ddma(hsotg, chan, chnum,
halt_status);
dwc2_release_channel_ddma(hsotg, qh);
dwc2_hcd_qh_unlink(hsotg, qh);
if (!list_empty(&qh->qtd_list)) {
/*
* Add back to inactive non-periodic schedule on normal
* completion
*/
dwc2_hcd_qh_add(hsotg, qh);
}
}
tr_type = dwc2_hcd_select_transactions(hsotg);
if (tr_type != DWC2_TRANSACTION_NONE || continue_isoc_xfer) {
if (continue_isoc_xfer) {
if (tr_type == DWC2_TRANSACTION_NONE)
tr_type = DWC2_TRANSACTION_PERIODIC;
else if (tr_type == DWC2_TRANSACTION_NON_PERIODIC)
tr_type = DWC2_TRANSACTION_ALL;
}
dwc2_hcd_queue_transactions(hsotg, tr_type);
}
}
| linux-master | drivers/usb/dwc2/hcd_ddma.c |
// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
/*
* hcd_intr.c - DesignWare HS OTG Controller host-mode interrupt handling
*
* Copyright (C) 2004-2013 Synopsys, Inc.
*/
/*
* This file contains the interrupt handlers for Host mode
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/dma-mapping.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/usb.h>
#include <linux/usb/hcd.h>
#include <linux/usb/ch11.h>
#include "core.h"
#include "hcd.h"
/*
* If we get this many NAKs on a split transaction we'll slow down
* retransmission. A 1 here means delay after the first NAK.
*/
#define DWC2_NAKS_BEFORE_DELAY 3
/* This function is for debug only */
static void dwc2_track_missed_sofs(struct dwc2_hsotg *hsotg)
{
u16 curr_frame_number = hsotg->frame_number;
u16 expected = dwc2_frame_num_inc(hsotg->last_frame_num, 1);
if (expected != curr_frame_number)
dwc2_sch_vdbg(hsotg, "MISSED SOF %04x != %04x\n",
expected, curr_frame_number);
#ifdef CONFIG_USB_DWC2_TRACK_MISSED_SOFS
if (hsotg->frame_num_idx < FRAME_NUM_ARRAY_SIZE) {
if (expected != curr_frame_number) {
hsotg->frame_num_array[hsotg->frame_num_idx] =
curr_frame_number;
hsotg->last_frame_num_array[hsotg->frame_num_idx] =
hsotg->last_frame_num;
hsotg->frame_num_idx++;
}
} else if (!hsotg->dumped_frame_num_array) {
int i;
dev_info(hsotg->dev, "Frame Last Frame\n");
dev_info(hsotg->dev, "----- ----------\n");
for (i = 0; i < FRAME_NUM_ARRAY_SIZE; i++) {
dev_info(hsotg->dev, "0x%04x 0x%04x\n",
hsotg->frame_num_array[i],
hsotg->last_frame_num_array[i]);
}
hsotg->dumped_frame_num_array = 1;
}
#endif
hsotg->last_frame_num = curr_frame_number;
}
static void dwc2_hc_handle_tt_clear(struct dwc2_hsotg *hsotg,
struct dwc2_host_chan *chan,
struct dwc2_qtd *qtd)
{
struct usb_device *root_hub = dwc2_hsotg_to_hcd(hsotg)->self.root_hub;
struct urb *usb_urb;
if (!chan->qh)
return;
if (chan->qh->dev_speed == USB_SPEED_HIGH)
return;
if (!qtd->urb)
return;
usb_urb = qtd->urb->priv;
if (!usb_urb || !usb_urb->dev || !usb_urb->dev->tt)
return;
/*
* The root hub doesn't really have a TT, but Linux thinks it
* does because how could you have a "high speed hub" that
* talks directly to low speed devices without a TT?
* It's all lies. Lies, I tell you.
*/
if (usb_urb->dev->tt->hub == root_hub)
return;
if (qtd->urb->status != -EPIPE && qtd->urb->status != -EREMOTEIO) {
chan->qh->tt_buffer_dirty = 1;
if (usb_hub_clear_tt_buffer(usb_urb))
/* Clear failed; let's hope things work anyway */
chan->qh->tt_buffer_dirty = 0;
}
}
/*
* Handles the start-of-frame interrupt in host mode. Non-periodic
* transactions may be queued to the DWC_otg controller for the current
* (micro)frame. Periodic transactions may be queued to the controller
* for the next (micro)frame.
*/
static void dwc2_sof_intr(struct dwc2_hsotg *hsotg)
{
struct list_head *qh_entry;
struct dwc2_qh *qh;
enum dwc2_transaction_type tr_type;
/* Clear interrupt */
dwc2_writel(hsotg, GINTSTS_SOF, GINTSTS);
#ifdef DEBUG_SOF
dev_vdbg(hsotg->dev, "--Start of Frame Interrupt--\n");
#endif
hsotg->frame_number = dwc2_hcd_get_frame_number(hsotg);
dwc2_track_missed_sofs(hsotg);
/* Determine whether any periodic QHs should be executed */
qh_entry = hsotg->periodic_sched_inactive.next;
while (qh_entry != &hsotg->periodic_sched_inactive) {
qh = list_entry(qh_entry, struct dwc2_qh, qh_list_entry);
qh_entry = qh_entry->next;
if (dwc2_frame_num_le(qh->next_active_frame,
hsotg->frame_number)) {
dwc2_sch_vdbg(hsotg, "QH=%p ready fn=%04x, nxt=%04x\n",
qh, hsotg->frame_number,
qh->next_active_frame);
/*
* Move QH to the ready list to be executed next
* (micro)frame
*/
list_move_tail(&qh->qh_list_entry,
&hsotg->periodic_sched_ready);
}
}
tr_type = dwc2_hcd_select_transactions(hsotg);
if (tr_type != DWC2_TRANSACTION_NONE)
dwc2_hcd_queue_transactions(hsotg, tr_type);
}
/*
* Handles the Rx FIFO Level Interrupt, which indicates that there is
* at least one packet in the Rx FIFO. The packets are moved from the FIFO to
* memory if the DWC_otg controller is operating in Slave mode.
*/
static void dwc2_rx_fifo_level_intr(struct dwc2_hsotg *hsotg)
{
u32 grxsts, chnum, bcnt, dpid, pktsts;
struct dwc2_host_chan *chan;
if (dbg_perio())
dev_vdbg(hsotg->dev, "--RxFIFO Level Interrupt--\n");
grxsts = dwc2_readl(hsotg, GRXSTSP);
chnum = (grxsts & GRXSTS_HCHNUM_MASK) >> GRXSTS_HCHNUM_SHIFT;
chan = hsotg->hc_ptr_array[chnum];
if (!chan) {
dev_err(hsotg->dev, "Unable to get corresponding channel\n");
return;
}
bcnt = (grxsts & GRXSTS_BYTECNT_MASK) >> GRXSTS_BYTECNT_SHIFT;
dpid = (grxsts & GRXSTS_DPID_MASK) >> GRXSTS_DPID_SHIFT;
pktsts = (grxsts & GRXSTS_PKTSTS_MASK) >> GRXSTS_PKTSTS_SHIFT;
/* Packet Status */
if (dbg_perio()) {
dev_vdbg(hsotg->dev, " Ch num = %d\n", chnum);
dev_vdbg(hsotg->dev, " Count = %d\n", bcnt);
dev_vdbg(hsotg->dev, " DPID = %d, chan.dpid = %d\n", dpid,
chan->data_pid_start);
dev_vdbg(hsotg->dev, " PStatus = %d\n", pktsts);
}
switch (pktsts) {
case GRXSTS_PKTSTS_HCHIN:
/* Read the data into the host buffer */
if (bcnt > 0) {
dwc2_read_packet(hsotg, chan->xfer_buf, bcnt);
/* Update the HC fields for the next packet received */
chan->xfer_count += bcnt;
chan->xfer_buf += bcnt;
}
break;
case GRXSTS_PKTSTS_HCHIN_XFER_COMP:
case GRXSTS_PKTSTS_DATATOGGLEERR:
case GRXSTS_PKTSTS_HCHHALTED:
/* Handled in interrupt, just ignore data */
break;
default:
dev_err(hsotg->dev,
"RxFIFO Level Interrupt: Unknown status %d\n", pktsts);
break;
}
}
/*
* This interrupt occurs when the non-periodic Tx FIFO is half-empty. More
* data packets may be written to the FIFO for OUT transfers. More requests
* may be written to the non-periodic request queue for IN transfers. This
* interrupt is enabled only in Slave mode.
*/
static void dwc2_np_tx_fifo_empty_intr(struct dwc2_hsotg *hsotg)
{
dev_vdbg(hsotg->dev, "--Non-Periodic TxFIFO Empty Interrupt--\n");
dwc2_hcd_queue_transactions(hsotg, DWC2_TRANSACTION_NON_PERIODIC);
}
/*
* This interrupt occurs when the periodic Tx FIFO is half-empty. More data
* packets may be written to the FIFO for OUT transfers. More requests may be
* written to the periodic request queue for IN transfers. This interrupt is
* enabled only in Slave mode.
*/
static void dwc2_perio_tx_fifo_empty_intr(struct dwc2_hsotg *hsotg)
{
if (dbg_perio())
dev_vdbg(hsotg->dev, "--Periodic TxFIFO Empty Interrupt--\n");
dwc2_hcd_queue_transactions(hsotg, DWC2_TRANSACTION_PERIODIC);
}
static void dwc2_hprt0_enable(struct dwc2_hsotg *hsotg, u32 hprt0,
u32 *hprt0_modify)
{
struct dwc2_core_params *params = &hsotg->params;
int do_reset = 0;
u32 usbcfg;
u32 prtspd;
u32 hcfg;
u32 fslspclksel;
u32 hfir;
dev_vdbg(hsotg->dev, "%s(%p)\n", __func__, hsotg);
/* Recalculate HFIR.FrInterval every time the port is enabled */
hfir = dwc2_readl(hsotg, HFIR);
hfir &= ~HFIR_FRINT_MASK;
hfir |= dwc2_calc_frame_interval(hsotg) << HFIR_FRINT_SHIFT &
HFIR_FRINT_MASK;
dwc2_writel(hsotg, hfir, HFIR);
/* Check if we need to adjust the PHY clock speed for low power */
if (!params->host_support_fs_ls_low_power) {
/* Port has been enabled, set the reset change flag */
hsotg->flags.b.port_reset_change = 1;
return;
}
usbcfg = dwc2_readl(hsotg, GUSBCFG);
prtspd = (hprt0 & HPRT0_SPD_MASK) >> HPRT0_SPD_SHIFT;
if (prtspd == HPRT0_SPD_LOW_SPEED || prtspd == HPRT0_SPD_FULL_SPEED) {
/* Low power */
if (!(usbcfg & GUSBCFG_PHY_LP_CLK_SEL)) {
/* Set PHY low power clock select for FS/LS devices */
usbcfg |= GUSBCFG_PHY_LP_CLK_SEL;
dwc2_writel(hsotg, usbcfg, GUSBCFG);
do_reset = 1;
}
hcfg = dwc2_readl(hsotg, HCFG);
fslspclksel = (hcfg & HCFG_FSLSPCLKSEL_MASK) >>
HCFG_FSLSPCLKSEL_SHIFT;
if (prtspd == HPRT0_SPD_LOW_SPEED &&
params->host_ls_low_power_phy_clk) {
/* 6 MHZ */
dev_vdbg(hsotg->dev,
"FS_PHY programming HCFG to 6 MHz\n");
if (fslspclksel != HCFG_FSLSPCLKSEL_6_MHZ) {
fslspclksel = HCFG_FSLSPCLKSEL_6_MHZ;
hcfg &= ~HCFG_FSLSPCLKSEL_MASK;
hcfg |= fslspclksel << HCFG_FSLSPCLKSEL_SHIFT;
dwc2_writel(hsotg, hcfg, HCFG);
do_reset = 1;
}
} else {
/* 48 MHZ */
dev_vdbg(hsotg->dev,
"FS_PHY programming HCFG to 48 MHz\n");
if (fslspclksel != HCFG_FSLSPCLKSEL_48_MHZ) {
fslspclksel = HCFG_FSLSPCLKSEL_48_MHZ;
hcfg &= ~HCFG_FSLSPCLKSEL_MASK;
hcfg |= fslspclksel << HCFG_FSLSPCLKSEL_SHIFT;
dwc2_writel(hsotg, hcfg, HCFG);
do_reset = 1;
}
}
} else {
/* Not low power */
if (usbcfg & GUSBCFG_PHY_LP_CLK_SEL) {
usbcfg &= ~GUSBCFG_PHY_LP_CLK_SEL;
dwc2_writel(hsotg, usbcfg, GUSBCFG);
do_reset = 1;
}
}
if (do_reset) {
*hprt0_modify |= HPRT0_RST;
dwc2_writel(hsotg, *hprt0_modify, HPRT0);
queue_delayed_work(hsotg->wq_otg, &hsotg->reset_work,
msecs_to_jiffies(60));
} else {
/* Port has been enabled, set the reset change flag */
hsotg->flags.b.port_reset_change = 1;
}
}
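/*
 * Summary of the clock selection above: with host_support_fs_ls_low_power
 * set, an FS/LS device makes the driver select the PHY low-power clock
 * (GUSBCFG.PHY_LP_CLK_SEL) and program HCFG.FSLSPCLKSEL to 6 MHz (LS
 * device with host_ls_low_power_phy_clk) or 48 MHz otherwise. Any change
 * to either field forces another port reset, queued on reset_work after
 * roughly 60 ms, presumably so the PHY can settle at the new rate.
 */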
/*
* There are multiple conditions that can cause a port interrupt. This function
* determines which interrupt conditions have occurred and handles them
* appropriately.
*/
static void dwc2_port_intr(struct dwc2_hsotg *hsotg)
{
u32 hprt0;
u32 hprt0_modify;
dev_vdbg(hsotg->dev, "--Port Interrupt--\n");
hprt0 = dwc2_readl(hsotg, HPRT0);
hprt0_modify = hprt0;
/*
* Clear appropriate bits in HPRT0 to clear the interrupt bit in
* GINTSTS
*/
hprt0_modify &= ~(HPRT0_ENA | HPRT0_CONNDET | HPRT0_ENACHG |
HPRT0_OVRCURRCHG);
/*
* Port Connect Detected
* Set flag and clear if detected
*/
if (hprt0 & HPRT0_CONNDET) {
dwc2_writel(hsotg, hprt0_modify | HPRT0_CONNDET, HPRT0);
dev_vdbg(hsotg->dev,
"--Port Interrupt HPRT0=0x%08x Port Connect Detected--\n",
hprt0);
dwc2_hcd_connect(hsotg);
/*
* The Hub driver asserts a reset when it sees port connect
* status change flag
*/
}
/*
* Port Enable Changed
* Clear if detected - Set internal flag if disabled
*/
if (hprt0 & HPRT0_ENACHG) {
dwc2_writel(hsotg, hprt0_modify | HPRT0_ENACHG, HPRT0);
dev_vdbg(hsotg->dev,
" --Port Interrupt HPRT0=0x%08x Port Enable Changed (now %d)--\n",
hprt0, !!(hprt0 & HPRT0_ENA));
if (hprt0 & HPRT0_ENA) {
hsotg->new_connection = true;
dwc2_hprt0_enable(hsotg, hprt0, &hprt0_modify);
} else {
hsotg->flags.b.port_enable_change = 1;
if (hsotg->params.dma_desc_fs_enable) {
u32 hcfg;
hsotg->params.dma_desc_enable = false;
hsotg->new_connection = false;
hcfg = dwc2_readl(hsotg, HCFG);
hcfg &= ~HCFG_DESCDMA;
dwc2_writel(hsotg, hcfg, HCFG);
}
}
}
/* Overcurrent Change Interrupt */
if (hprt0 & HPRT0_OVRCURRCHG) {
dwc2_writel(hsotg, hprt0_modify | HPRT0_OVRCURRCHG,
HPRT0);
dev_vdbg(hsotg->dev,
" --Port Interrupt HPRT0=0x%08x Port Overcurrent Changed--\n",
hprt0);
hsotg->flags.b.port_over_current_change = 1;
}
}
/*
* Gets the actual length of a transfer after the transfer halts. halt_status
* holds the reason for the halt.
*
* For IN transfers where halt_status is DWC2_HC_XFER_COMPLETE, *short_read
* is set to 1 upon return if less than the requested number of bytes were
* transferred. short_read may also be NULL on entry, in which case it remains
* unchanged.
*/
static u32 dwc2_get_actual_xfer_length(struct dwc2_hsotg *hsotg,
struct dwc2_host_chan *chan, int chnum,
struct dwc2_qtd *qtd,
enum dwc2_halt_status halt_status,
int *short_read)
{
u32 hctsiz, count, length;
hctsiz = dwc2_readl(hsotg, HCTSIZ(chnum));
if (halt_status == DWC2_HC_XFER_COMPLETE) {
if (chan->ep_is_in) {
count = (hctsiz & TSIZ_XFERSIZE_MASK) >>
TSIZ_XFERSIZE_SHIFT;
length = chan->xfer_len - count;
if (short_read)
*short_read = (count != 0);
} else if (chan->qh->do_split) {
length = qtd->ssplit_out_xfer_count;
} else {
length = chan->xfer_len;
}
} else {
/*
* Must use the hctsiz.pktcnt field to determine how much data
* has been transferred. This field reflects the number of
* packets that have been transferred via the USB. This is
* always an integral number of packets if the transfer was
* halted before its normal completion. (Can't use the
* hctsiz.xfersize field because that reflects the number of
* bytes transferred via the AHB, not the USB).
*/
count = (hctsiz & TSIZ_PKTCNT_MASK) >> TSIZ_PKTCNT_SHIFT;
length = (chan->start_pkt_count - count) * chan->max_packet;
}
return length;
}
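/*
 * Worked example for the halted (non-complete) case above, with made-up
 * numbers: if a bulk OUT transfer was started with start_pkt_count = 8 and
 * max_packet = 512, and the channel halts with hctsiz.pktcnt = 3 packets
 * still outstanding, the function reports (8 - 3) * 512 = 2560 bytes as
 * actually transferred. For a completed IN transfer it instead uses
 * xfer_len minus the residue left in hctsiz.xfersize, flagging a short
 * read when that residue is non-zero.
 */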
/**
* dwc2_update_urb_state() - Updates the state of the URB after a Transfer
* Complete interrupt on the host channel. Updates the actual_length field
* of the URB based on the number of bytes transferred via the host channel.
* Sets the URB status if the data transfer is finished.
*
* @hsotg: Programming view of the DWC_otg controller
* @chan: Programming view of host channel
* @chnum: Channel number
* @urb: Processing URB
* @qtd: Queue transfer descriptor
*
* Return: 1 if the data transfer specified by the URB is completely finished,
* 0 otherwise
*/
static int dwc2_update_urb_state(struct dwc2_hsotg *hsotg,
struct dwc2_host_chan *chan, int chnum,
struct dwc2_hcd_urb *urb,
struct dwc2_qtd *qtd)
{
u32 hctsiz;
int xfer_done = 0;
int short_read = 0;
int xfer_length = dwc2_get_actual_xfer_length(hsotg, chan, chnum, qtd,
DWC2_HC_XFER_COMPLETE,
&short_read);
if (urb->actual_length + xfer_length > urb->length) {
dev_dbg(hsotg->dev, "%s(): trimming xfer length\n", __func__);
xfer_length = urb->length - urb->actual_length;
}
dev_vdbg(hsotg->dev, "urb->actual_length=%d xfer_length=%d\n",
urb->actual_length, xfer_length);
urb->actual_length += xfer_length;
if (xfer_length && chan->ep_type == USB_ENDPOINT_XFER_BULK &&
(urb->flags & URB_SEND_ZERO_PACKET) &&
urb->actual_length >= urb->length &&
!(urb->length % chan->max_packet)) {
xfer_done = 0;
} else if (short_read || urb->actual_length >= urb->length) {
xfer_done = 1;
urb->status = 0;
}
hctsiz = dwc2_readl(hsotg, HCTSIZ(chnum));
dev_vdbg(hsotg->dev, "DWC_otg: %s: %s, channel %d\n",
__func__, (chan->ep_is_in ? "IN" : "OUT"), chnum);
dev_vdbg(hsotg->dev, " chan->xfer_len %d\n", chan->xfer_len);
dev_vdbg(hsotg->dev, " hctsiz.xfersize %d\n",
(hctsiz & TSIZ_XFERSIZE_MASK) >> TSIZ_XFERSIZE_SHIFT);
dev_vdbg(hsotg->dev, " urb->transfer_buffer_length %d\n", urb->length);
dev_vdbg(hsotg->dev, " urb->actual_length %d\n", urb->actual_length);
dev_vdbg(hsotg->dev, " short_read %d, xfer_done %d\n", short_read,
xfer_done);
return xfer_done;
}
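/*
 * Note on the zero-length-packet branch above: a bulk OUT URB flagged with
 * URB_SEND_ZERO_PACKET whose length is an exact multiple of max_packet is
 * deliberately not marked done when the last full packet completes, so one
 * more (zero-length) transaction can be queued before the URB is given
 * back. Example: length = 1024 with max_packet = 512 needs packets of 512,
 * 512 and then a ZLP.
 */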
/*
* Save the starting data toggle for the next transfer. The data toggle is
* saved in the QH for non-control transfers and it's saved in the QTD for
* control transfers.
*/
void dwc2_hcd_save_data_toggle(struct dwc2_hsotg *hsotg,
struct dwc2_host_chan *chan, int chnum,
struct dwc2_qtd *qtd)
{
u32 hctsiz = dwc2_readl(hsotg, HCTSIZ(chnum));
u32 pid = (hctsiz & TSIZ_SC_MC_PID_MASK) >> TSIZ_SC_MC_PID_SHIFT;
if (chan->ep_type != USB_ENDPOINT_XFER_CONTROL) {
if (WARN(!chan || !chan->qh,
"chan->qh must be specified for non-control eps\n"))
return;
if (pid == TSIZ_SC_MC_PID_DATA0)
chan->qh->data_toggle = DWC2_HC_PID_DATA0;
else
chan->qh->data_toggle = DWC2_HC_PID_DATA1;
} else {
if (WARN(!qtd,
"qtd must be specified for control eps\n"))
return;
if (pid == TSIZ_SC_MC_PID_DATA0)
qtd->data_toggle = DWC2_HC_PID_DATA0;
else
qtd->data_toggle = DWC2_HC_PID_DATA1;
}
}
/**
* dwc2_update_isoc_urb_state() - Updates the state of an Isochronous URB when
* the transfer is stopped for any reason. The fields of the current entry in
* the frame descriptor array are set based on the transfer state and the input
* halt_status. Completes the Isochronous URB if all the URB frames have been
* completed.
*
* @hsotg: Programming view of the DWC_otg controller
* @chan: Programming view of host channel
* @chnum: Channel number
* @halt_status: Reason for halting a host channel
* @qtd: Queue transfer descriptor
*
* Return: DWC2_HC_XFER_COMPLETE if there are more frames remaining to be
* transferred in the URB. Otherwise return DWC2_HC_XFER_URB_COMPLETE.
*/
static enum dwc2_halt_status dwc2_update_isoc_urb_state(
struct dwc2_hsotg *hsotg, struct dwc2_host_chan *chan,
int chnum, struct dwc2_qtd *qtd,
enum dwc2_halt_status halt_status)
{
struct dwc2_hcd_iso_packet_desc *frame_desc;
struct dwc2_hcd_urb *urb = qtd->urb;
if (!urb)
return DWC2_HC_XFER_NO_HALT_STATUS;
frame_desc = &urb->iso_descs[qtd->isoc_frame_index];
switch (halt_status) {
case DWC2_HC_XFER_COMPLETE:
frame_desc->status = 0;
frame_desc->actual_length = dwc2_get_actual_xfer_length(hsotg,
chan, chnum, qtd, halt_status, NULL);
break;
case DWC2_HC_XFER_FRAME_OVERRUN:
urb->error_count++;
if (chan->ep_is_in)
frame_desc->status = -ENOSR;
else
frame_desc->status = -ECOMM;
frame_desc->actual_length = 0;
break;
case DWC2_HC_XFER_BABBLE_ERR:
urb->error_count++;
frame_desc->status = -EOVERFLOW;
/* Don't need to update actual_length in this case */
break;
case DWC2_HC_XFER_XACT_ERR:
urb->error_count++;
frame_desc->status = -EPROTO;
frame_desc->actual_length = dwc2_get_actual_xfer_length(hsotg,
chan, chnum, qtd, halt_status, NULL);
/* Skip whole frame */
if (chan->qh->do_split &&
chan->ep_type == USB_ENDPOINT_XFER_ISOC && chan->ep_is_in &&
hsotg->params.host_dma) {
qtd->complete_split = 0;
qtd->isoc_split_offset = 0;
}
break;
default:
dev_err(hsotg->dev, "Unhandled halt_status (%d)\n",
halt_status);
break;
}
if (++qtd->isoc_frame_index == urb->packet_count) {
/*
* urb->status is not used for isoc transfers. The individual
* frame_desc statuses are used instead.
*/
dwc2_host_complete(hsotg, qtd, 0);
halt_status = DWC2_HC_XFER_URB_COMPLETE;
} else {
halt_status = DWC2_HC_XFER_COMPLETE;
}
return halt_status;
}
/*
* Frees the first QTD in the QH's list if free_qtd is 1. For non-periodic
* QHs, removes the QH from the active non-periodic schedule. If any QTDs are
* still linked to the QH, the QH is added to the end of the inactive
* non-periodic schedule. For periodic QHs, removes the QH from the periodic
* schedule if no more QTDs are linked to the QH.
*/
static void dwc2_deactivate_qh(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh,
int free_qtd)
{
int continue_split = 0;
struct dwc2_qtd *qtd;
if (dbg_qh(qh))
dev_vdbg(hsotg->dev, " %s(%p,%p,%d)\n", __func__,
hsotg, qh, free_qtd);
if (list_empty(&qh->qtd_list)) {
dev_dbg(hsotg->dev, "## QTD list empty ##\n");
goto no_qtd;
}
qtd = list_first_entry(&qh->qtd_list, struct dwc2_qtd, qtd_list_entry);
if (qtd->complete_split)
continue_split = 1;
else if (qtd->isoc_split_pos == DWC2_HCSPLT_XACTPOS_MID ||
qtd->isoc_split_pos == DWC2_HCSPLT_XACTPOS_END)
continue_split = 1;
if (free_qtd) {
dwc2_hcd_qtd_unlink_and_free(hsotg, qtd, qh);
continue_split = 0;
}
no_qtd:
qh->channel = NULL;
dwc2_hcd_qh_deactivate(hsotg, qh, continue_split);
}
/**
* dwc2_release_channel() - Releases a host channel for use by other transfers
*
* @hsotg: The HCD state structure
* @chan: The host channel to release
* @qtd: The QTD associated with the host channel. This QTD may be
* freed if the transfer is complete or an error has occurred.
* @halt_status: Reason the channel is being released. This status
* determines the actions taken by this function.
*
* Also attempts to select and queue more transactions since at least one host
* channel is available.
*/
static void dwc2_release_channel(struct dwc2_hsotg *hsotg,
struct dwc2_host_chan *chan,
struct dwc2_qtd *qtd,
enum dwc2_halt_status halt_status)
{
enum dwc2_transaction_type tr_type;
u32 haintmsk;
int free_qtd = 0;
if (dbg_hc(chan))
dev_vdbg(hsotg->dev, " %s: channel %d, halt_status %d\n",
__func__, chan->hc_num, halt_status);
switch (halt_status) {
case DWC2_HC_XFER_URB_COMPLETE:
free_qtd = 1;
break;
case DWC2_HC_XFER_AHB_ERR:
case DWC2_HC_XFER_STALL:
case DWC2_HC_XFER_BABBLE_ERR:
free_qtd = 1;
break;
case DWC2_HC_XFER_XACT_ERR:
if (qtd && qtd->error_count >= 3) {
dev_vdbg(hsotg->dev,
" Complete URB with transaction error\n");
free_qtd = 1;
dwc2_host_complete(hsotg, qtd, -EPROTO);
}
break;
case DWC2_HC_XFER_URB_DEQUEUE:
/*
* The QTD has already been removed and the QH has been
* deactivated. Don't want to do anything except release the
* host channel and try to queue more transfers.
*/
goto cleanup;
case DWC2_HC_XFER_PERIODIC_INCOMPLETE:
dev_vdbg(hsotg->dev, " Complete URB with I/O error\n");
free_qtd = 1;
dwc2_host_complete(hsotg, qtd, -EIO);
break;
case DWC2_HC_XFER_NO_HALT_STATUS:
default:
break;
}
dwc2_deactivate_qh(hsotg, chan->qh, free_qtd);
cleanup:
/*
* Release the host channel for use by other transfers. The cleanup
* function clears the channel interrupt enables and conditions, so
* there's no need to clear the Channel Halted interrupt separately.
*/
if (!list_empty(&chan->hc_list_entry))
list_del(&chan->hc_list_entry);
dwc2_hc_cleanup(hsotg, chan);
list_add_tail(&chan->hc_list_entry, &hsotg->free_hc_list);
if (hsotg->params.uframe_sched) {
hsotg->available_host_channels++;
} else {
switch (chan->ep_type) {
case USB_ENDPOINT_XFER_CONTROL:
case USB_ENDPOINT_XFER_BULK:
hsotg->non_periodic_channels--;
break;
default:
/*
* Don't release reservations for periodic channels
* here. That's done when a periodic transfer is
* descheduled (i.e. when the QH is removed from the
* periodic schedule).
*/
break;
}
}
haintmsk = dwc2_readl(hsotg, HAINTMSK);
haintmsk &= ~(1 << chan->hc_num);
dwc2_writel(hsotg, haintmsk, HAINTMSK);
/* Try to queue more transfers now that there's a free channel */
tr_type = dwc2_hcd_select_transactions(hsotg);
if (tr_type != DWC2_TRANSACTION_NONE)
dwc2_hcd_queue_transactions(hsotg, tr_type);
}
/*
* Halts a host channel. If the channel cannot be halted immediately because
* the request queue is full, this function ensures that the FIFO empty
* interrupt for the appropriate queue is enabled so that the halt request can
* be queued when there is space in the request queue.
*
* This function may also be called in DMA mode. In that case, the channel is
* simply released since the core always halts the channel automatically in
* DMA mode.
*/
static void dwc2_halt_channel(struct dwc2_hsotg *hsotg,
struct dwc2_host_chan *chan, struct dwc2_qtd *qtd,
enum dwc2_halt_status halt_status)
{
if (dbg_hc(chan))
dev_vdbg(hsotg->dev, "%s()\n", __func__);
if (hsotg->params.host_dma) {
if (dbg_hc(chan))
dev_vdbg(hsotg->dev, "DMA enabled\n");
dwc2_release_channel(hsotg, chan, qtd, halt_status);
return;
}
/* Slave mode processing */
dwc2_hc_halt(hsotg, chan, halt_status);
if (chan->halt_on_queue) {
u32 gintmsk;
dev_vdbg(hsotg->dev, "Halt on queue\n");
if (chan->ep_type == USB_ENDPOINT_XFER_CONTROL ||
chan->ep_type == USB_ENDPOINT_XFER_BULK) {
dev_vdbg(hsotg->dev, "control/bulk\n");
/*
* Make sure the Non-periodic Tx FIFO empty interrupt
* is enabled so that the non-periodic schedule will
* be processed
*/
gintmsk = dwc2_readl(hsotg, GINTMSK);
gintmsk |= GINTSTS_NPTXFEMP;
dwc2_writel(hsotg, gintmsk, GINTMSK);
} else {
dev_vdbg(hsotg->dev, "isoc/intr\n");
/*
* Move the QH from the periodic queued schedule to
* the periodic assigned schedule. This allows the
* halt to be queued when the periodic schedule is
* processed.
*/
list_move_tail(&chan->qh->qh_list_entry,
&hsotg->periodic_sched_assigned);
/*
* Make sure the Periodic Tx FIFO Empty interrupt is
* enabled so that the periodic schedule will be
* processed
*/
gintmsk = dwc2_readl(hsotg, GINTMSK);
gintmsk |= GINTSTS_PTXFEMP;
dwc2_writel(hsotg, gintmsk, GINTMSK);
}
}
}
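/*
 * Rough flow for the Slave-mode "halt on queue" case above: the halt
 * request could not be written because the request queue was full, so the
 * matching Tx FIFO empty interrupt (NPTXFEMP for control/bulk, PTXFEMP for
 * periodic) is unmasked; when it fires, dwc2_hcd_queue_transactions() runs
 * the schedule again and the pending halt gets queued.
 */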
/*
* Performs common cleanup for non-periodic transfers after a Transfer
* Complete interrupt. This function should be called after any endpoint type
* specific handling is finished to release the host channel.
*/
static void dwc2_complete_non_periodic_xfer(struct dwc2_hsotg *hsotg,
struct dwc2_host_chan *chan,
int chnum, struct dwc2_qtd *qtd,
enum dwc2_halt_status halt_status)
{
dev_vdbg(hsotg->dev, "%s()\n", __func__);
qtd->error_count = 0;
if (chan->hcint & HCINTMSK_NYET) {
/*
* Got a NYET on the last transaction of the transfer. This
* means that the endpoint should be in the PING state at the
* beginning of the next transfer.
*/
dev_vdbg(hsotg->dev, "got NYET\n");
chan->qh->ping_state = 1;
}
/*
* Always halt and release the host channel to make it available for
* more transfers. There may still be more phases for a control
* transfer or more data packets for a bulk transfer at this point,
* but the host channel is still halted. A channel will be reassigned
* to the transfer when the non-periodic schedule is processed after
* the channel is released. This allows transactions to be queued
* properly via dwc2_hcd_queue_transactions, which also enables the
* Tx FIFO Empty interrupt if necessary.
*/
if (chan->ep_is_in) {
/*
* IN transfers in Slave mode require an explicit disable to
* halt the channel. (In DMA mode, this call simply releases
* the channel.)
*/
dwc2_halt_channel(hsotg, chan, qtd, halt_status);
} else {
/*
* The channel is automatically disabled by the core for OUT
* transfers in Slave mode
*/
dwc2_release_channel(hsotg, chan, qtd, halt_status);
}
}
/*
* Performs common cleanup for periodic transfers after a Transfer Complete
* interrupt. This function should be called after any endpoint type specific
* handling is finished to release the host channel.
*/
static void dwc2_complete_periodic_xfer(struct dwc2_hsotg *hsotg,
struct dwc2_host_chan *chan, int chnum,
struct dwc2_qtd *qtd,
enum dwc2_halt_status halt_status)
{
u32 hctsiz = dwc2_readl(hsotg, HCTSIZ(chnum));
qtd->error_count = 0;
if (!chan->ep_is_in || (hctsiz & TSIZ_PKTCNT_MASK) == 0)
/* Core halts channel in these cases */
dwc2_release_channel(hsotg, chan, qtd, halt_status);
else
/* Flush any outstanding requests from the Tx queue */
dwc2_halt_channel(hsotg, chan, qtd, halt_status);
}
static int dwc2_xfercomp_isoc_split_in(struct dwc2_hsotg *hsotg,
struct dwc2_host_chan *chan, int chnum,
struct dwc2_qtd *qtd)
{
struct dwc2_hcd_iso_packet_desc *frame_desc;
u32 len;
u32 hctsiz;
u32 pid;
if (!qtd->urb)
return 0;
frame_desc = &qtd->urb->iso_descs[qtd->isoc_frame_index];
len = dwc2_get_actual_xfer_length(hsotg, chan, chnum, qtd,
DWC2_HC_XFER_COMPLETE, NULL);
if (!len && !qtd->isoc_split_offset) {
qtd->complete_split = 0;
return 0;
}
frame_desc->actual_length += len;
if (chan->align_buf) {
dev_vdbg(hsotg->dev, "non-aligned buffer\n");
dma_unmap_single(hsotg->dev, chan->qh->dw_align_buf_dma,
DWC2_KMEM_UNALIGNED_BUF_SIZE, DMA_FROM_DEVICE);
memcpy(qtd->urb->buf + (chan->xfer_dma - qtd->urb->dma),
chan->qh->dw_align_buf, len);
}
qtd->isoc_split_offset += len;
hctsiz = dwc2_readl(hsotg, HCTSIZ(chnum));
pid = (hctsiz & TSIZ_SC_MC_PID_MASK) >> TSIZ_SC_MC_PID_SHIFT;
if (frame_desc->actual_length >= frame_desc->length || pid == 0) {
frame_desc->status = 0;
qtd->isoc_frame_index++;
qtd->complete_split = 0;
qtd->isoc_split_offset = 0;
}
if (qtd->isoc_frame_index == qtd->urb->packet_count) {
dwc2_host_complete(hsotg, qtd, 0);
dwc2_release_channel(hsotg, chan, qtd,
DWC2_HC_XFER_URB_COMPLETE);
} else {
dwc2_release_channel(hsotg, chan, qtd,
DWC2_HC_XFER_NO_HALT_STATUS);
}
return 1; /* Indicates that channel released */
}
/*
* Handles a host channel Transfer Complete interrupt. This handler may be
* called in either DMA mode or Slave mode.
*/
static void dwc2_hc_xfercomp_intr(struct dwc2_hsotg *hsotg,
struct dwc2_host_chan *chan, int chnum,
struct dwc2_qtd *qtd)
{
struct dwc2_hcd_urb *urb = qtd->urb;
enum dwc2_halt_status halt_status = DWC2_HC_XFER_COMPLETE;
int pipe_type;
int urb_xfer_done;
if (dbg_hc(chan))
dev_vdbg(hsotg->dev,
"--Host Channel %d Interrupt: Transfer Complete--\n",
chnum);
if (!urb)
goto handle_xfercomp_done;
pipe_type = dwc2_hcd_get_pipe_type(&urb->pipe_info);
if (hsotg->params.dma_desc_enable) {
dwc2_hcd_complete_xfer_ddma(hsotg, chan, chnum, halt_status);
if (pipe_type == USB_ENDPOINT_XFER_ISOC)
/* Do not disable the interrupt, just clear it */
return;
goto handle_xfercomp_done;
}
/* Handle xfer complete on CSPLIT */
if (chan->qh->do_split) {
if (chan->ep_type == USB_ENDPOINT_XFER_ISOC && chan->ep_is_in &&
hsotg->params.host_dma) {
if (qtd->complete_split &&
dwc2_xfercomp_isoc_split_in(hsotg, chan, chnum,
qtd))
goto handle_xfercomp_done;
} else {
qtd->complete_split = 0;
}
}
/* Update the QTD and URB states */
switch (pipe_type) {
case USB_ENDPOINT_XFER_CONTROL:
switch (qtd->control_phase) {
case DWC2_CONTROL_SETUP:
if (urb->length > 0)
qtd->control_phase = DWC2_CONTROL_DATA;
else
qtd->control_phase = DWC2_CONTROL_STATUS;
dev_vdbg(hsotg->dev,
" Control setup transaction done\n");
halt_status = DWC2_HC_XFER_COMPLETE;
break;
case DWC2_CONTROL_DATA:
urb_xfer_done = dwc2_update_urb_state(hsotg, chan,
chnum, urb, qtd);
if (urb_xfer_done) {
qtd->control_phase = DWC2_CONTROL_STATUS;
dev_vdbg(hsotg->dev,
" Control data transfer done\n");
} else {
dwc2_hcd_save_data_toggle(hsotg, chan, chnum,
qtd);
}
halt_status = DWC2_HC_XFER_COMPLETE;
break;
case DWC2_CONTROL_STATUS:
dev_vdbg(hsotg->dev, " Control transfer complete\n");
if (urb->status == -EINPROGRESS)
urb->status = 0;
dwc2_host_complete(hsotg, qtd, urb->status);
halt_status = DWC2_HC_XFER_URB_COMPLETE;
break;
}
dwc2_complete_non_periodic_xfer(hsotg, chan, chnum, qtd,
halt_status);
break;
case USB_ENDPOINT_XFER_BULK:
dev_vdbg(hsotg->dev, " Bulk transfer complete\n");
urb_xfer_done = dwc2_update_urb_state(hsotg, chan, chnum, urb,
qtd);
if (urb_xfer_done) {
dwc2_host_complete(hsotg, qtd, urb->status);
halt_status = DWC2_HC_XFER_URB_COMPLETE;
} else {
halt_status = DWC2_HC_XFER_COMPLETE;
}
dwc2_hcd_save_data_toggle(hsotg, chan, chnum, qtd);
dwc2_complete_non_periodic_xfer(hsotg, chan, chnum, qtd,
halt_status);
break;
case USB_ENDPOINT_XFER_INT:
dev_vdbg(hsotg->dev, " Interrupt transfer complete\n");
urb_xfer_done = dwc2_update_urb_state(hsotg, chan, chnum, urb,
qtd);
/*
* Interrupt URB is done on the first transfer complete
* interrupt
*/
if (urb_xfer_done) {
dwc2_host_complete(hsotg, qtd, urb->status);
halt_status = DWC2_HC_XFER_URB_COMPLETE;
} else {
halt_status = DWC2_HC_XFER_COMPLETE;
}
dwc2_hcd_save_data_toggle(hsotg, chan, chnum, qtd);
dwc2_complete_periodic_xfer(hsotg, chan, chnum, qtd,
halt_status);
break;
case USB_ENDPOINT_XFER_ISOC:
if (dbg_perio())
dev_vdbg(hsotg->dev, " Isochronous transfer complete\n");
if (qtd->isoc_split_pos == DWC2_HCSPLT_XACTPOS_ALL)
halt_status = dwc2_update_isoc_urb_state(hsotg, chan,
chnum, qtd,
DWC2_HC_XFER_COMPLETE);
dwc2_complete_periodic_xfer(hsotg, chan, chnum, qtd,
halt_status);
break;
}
handle_xfercomp_done:
disable_hc_int(hsotg, chnum, HCINTMSK_XFERCOMPL);
}
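/*
 * Control transfer phase progression handled above, in summary:
 *
 *	DWC2_CONTROL_SETUP  -> DATA   (only when urb->length > 0)
 *	                    -> STATUS (for zero-length requests)
 *	DWC2_CONTROL_DATA   -> STATUS once dwc2_update_urb_state() reports
 *	                       the data stage done
 *	DWC2_CONTROL_STATUS -> URB completed via dwc2_host_complete()
 *
 * Bulk and interrupt URBs complete on the first "transfer done" report,
 * while isochronous URBs complete only after every packet in the frame
 * descriptor array has been accounted for.
 */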
/*
* Handles a host channel STALL interrupt. This handler may be called in
* either DMA mode or Slave mode.
*/
static void dwc2_hc_stall_intr(struct dwc2_hsotg *hsotg,
struct dwc2_host_chan *chan, int chnum,
struct dwc2_qtd *qtd)
{
struct dwc2_hcd_urb *urb = qtd->urb;
int pipe_type;
dev_dbg(hsotg->dev, "--Host Channel %d Interrupt: STALL Received--\n",
chnum);
if (hsotg->params.dma_desc_enable) {
dwc2_hcd_complete_xfer_ddma(hsotg, chan, chnum,
DWC2_HC_XFER_STALL);
goto handle_stall_done;
}
if (!urb)
goto handle_stall_halt;
pipe_type = dwc2_hcd_get_pipe_type(&urb->pipe_info);
if (pipe_type == USB_ENDPOINT_XFER_CONTROL)
dwc2_host_complete(hsotg, qtd, -EPIPE);
if (pipe_type == USB_ENDPOINT_XFER_BULK ||
pipe_type == USB_ENDPOINT_XFER_INT) {
dwc2_host_complete(hsotg, qtd, -EPIPE);
/*
* USB protocol requires resetting the data toggle for bulk
* and interrupt endpoints when a CLEAR_FEATURE(ENDPOINT_HALT)
* setup command is issued to the endpoint. Anticipate the
* CLEAR_FEATURE command since a STALL has occurred and reset
* the data toggle now.
*/
chan->qh->data_toggle = 0;
}
handle_stall_halt:
dwc2_halt_channel(hsotg, chan, qtd, DWC2_HC_XFER_STALL);
handle_stall_done:
disable_hc_int(hsotg, chnum, HCINTMSK_STALL);
}
/*
* Updates the state of the URB when a transfer has been stopped due to an
* abnormal condition before the transfer completes. Modifies the
* actual_length field of the URB to reflect the number of bytes that have
* actually been transferred via the host channel.
*/
static void dwc2_update_urb_state_abn(struct dwc2_hsotg *hsotg,
struct dwc2_host_chan *chan, int chnum,
struct dwc2_hcd_urb *urb,
struct dwc2_qtd *qtd,
enum dwc2_halt_status halt_status)
{
u32 xfer_length = dwc2_get_actual_xfer_length(hsotg, chan, chnum,
qtd, halt_status, NULL);
u32 hctsiz;
if (urb->actual_length + xfer_length > urb->length) {
dev_warn(hsotg->dev, "%s(): trimming xfer length\n", __func__);
xfer_length = urb->length - urb->actual_length;
}
urb->actual_length += xfer_length;
hctsiz = dwc2_readl(hsotg, HCTSIZ(chnum));
dev_vdbg(hsotg->dev, "DWC_otg: %s: %s, channel %d\n",
__func__, (chan->ep_is_in ? "IN" : "OUT"), chnum);
dev_vdbg(hsotg->dev, " chan->start_pkt_count %d\n",
chan->start_pkt_count);
dev_vdbg(hsotg->dev, " hctsiz.pktcnt %d\n",
(hctsiz & TSIZ_PKTCNT_MASK) >> TSIZ_PKTCNT_SHIFT);
dev_vdbg(hsotg->dev, " chan->max_packet %d\n", chan->max_packet);
dev_vdbg(hsotg->dev, " bytes_transferred %d\n",
xfer_length);
dev_vdbg(hsotg->dev, " urb->actual_length %d\n",
urb->actual_length);
dev_vdbg(hsotg->dev, " urb->transfer_buffer_length %d\n",
urb->length);
}
/*
* Handles a host channel NAK interrupt. This handler may be called in either
* DMA mode or Slave mode.
*/
static void dwc2_hc_nak_intr(struct dwc2_hsotg *hsotg,
struct dwc2_host_chan *chan, int chnum,
struct dwc2_qtd *qtd)
{
if (!qtd) {
dev_dbg(hsotg->dev, "%s: qtd is NULL\n", __func__);
return;
}
if (!qtd->urb) {
dev_dbg(hsotg->dev, "%s: qtd->urb is NULL\n", __func__);
return;
}
if (dbg_hc(chan))
dev_vdbg(hsotg->dev, "--Host Channel %d Interrupt: NAK Received--\n",
chnum);
/*
* Handle NAK for IN/OUT SSPLIT/CSPLIT transfers, bulk, control, and
* interrupt. Re-start the SSPLIT transfer.
*
* Normally for non-periodic transfers we'll retry right away, but to
* avoid interrupt storms we'll wait before retrying if we've got
* several NAKs. If we didn't do this we'd retry directly from the
* interrupt handler and could end up quickly getting another
* interrupt (another NAK), which we'd retry. Note that we do not
* delay retries for IN parts of control requests, as those are expected
* to complete fairly quickly, and if we delay them we risk confusing
* the device and causing it to issue a STALL.
*
* Note that in DMA mode software only gets involved to re-send NAKed
* transfers for split transactions, so we only need to apply this
* delaying logic when handling splits. In non-DMA mode presumably we
* might want a similar delay if someone can demonstrate this problem
* affects that code path too.
*/
if (chan->do_split) {
if (chan->complete_split)
qtd->error_count = 0;
qtd->complete_split = 0;
qtd->num_naks++;
qtd->qh->want_wait = qtd->num_naks >= DWC2_NAKS_BEFORE_DELAY &&
!(chan->ep_type == USB_ENDPOINT_XFER_CONTROL &&
chan->ep_is_in);
dwc2_halt_channel(hsotg, chan, qtd, DWC2_HC_XFER_NAK);
goto handle_nak_done;
}
switch (dwc2_hcd_get_pipe_type(&qtd->urb->pipe_info)) {
case USB_ENDPOINT_XFER_CONTROL:
case USB_ENDPOINT_XFER_BULK:
if (hsotg->params.host_dma && chan->ep_is_in) {
/*
* NAK interrupts are enabled on bulk/control IN
* transfers in DMA mode for the sole purpose of
* resetting the error count after a transaction error
* occurs. The core will continue transferring data.
*/
qtd->error_count = 0;
break;
}
/*
* NAK interrupts normally occur during OUT transfers in DMA
* or Slave mode. For IN transfers, more requests will be
* queued as request queue space is available.
*/
qtd->error_count = 0;
if (!chan->qh->ping_state) {
dwc2_update_urb_state_abn(hsotg, chan, chnum, qtd->urb,
qtd, DWC2_HC_XFER_NAK);
dwc2_hcd_save_data_toggle(hsotg, chan, chnum, qtd);
if (chan->speed == USB_SPEED_HIGH)
chan->qh->ping_state = 1;
}
/*
* Halt the channel so the transfer can be re-started from
* the appropriate point or the PING protocol will
* start/continue
*/
dwc2_halt_channel(hsotg, chan, qtd, DWC2_HC_XFER_NAK);
break;
case USB_ENDPOINT_XFER_INT:
qtd->error_count = 0;
dwc2_halt_channel(hsotg, chan, qtd, DWC2_HC_XFER_NAK);
break;
case USB_ENDPOINT_XFER_ISOC:
/* Should never get called for isochronous transfers */
dev_err(hsotg->dev, "NACK interrupt for ISOC transfer\n");
break;
}
handle_nak_done:
disable_hc_int(hsotg, chnum, HCINTMSK_NAK);
}
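/*
 * Example of the NAK hold-off above (the threshold comes from
 * DWC2_NAKS_BEFORE_DELAY): once a split transaction has collected that
 * many NAKs, qh->want_wait is set so the QH is retried later, after a
 * short wait, instead of immediately from this interrupt handler, which
 * keeps a slow device from generating a NAK interrupt storm. Control IN
 * splits are exempted so their data/status stages are not slowed down.
 */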
/*
* Handles a host channel ACK interrupt. This interrupt is enabled when
* performing the PING protocol in Slave mode, when errors occur during
* either Slave mode or DMA mode, and during Start Split transactions.
*/
static void dwc2_hc_ack_intr(struct dwc2_hsotg *hsotg,
struct dwc2_host_chan *chan, int chnum,
struct dwc2_qtd *qtd)
{
struct dwc2_hcd_iso_packet_desc *frame_desc;
if (dbg_hc(chan))
dev_vdbg(hsotg->dev, "--Host Channel %d Interrupt: ACK Received--\n",
chnum);
if (chan->do_split) {
/* Handle ACK on SSPLIT. ACK should not occur in CSPLIT. */
if (!chan->ep_is_in &&
chan->data_pid_start != DWC2_HC_PID_SETUP)
qtd->ssplit_out_xfer_count = chan->xfer_len;
if (chan->ep_type != USB_ENDPOINT_XFER_ISOC || chan->ep_is_in) {
qtd->complete_split = 1;
dwc2_halt_channel(hsotg, chan, qtd, DWC2_HC_XFER_ACK);
} else {
/* ISOC OUT */
switch (chan->xact_pos) {
case DWC2_HCSPLT_XACTPOS_ALL:
break;
case DWC2_HCSPLT_XACTPOS_END:
qtd->isoc_split_pos = DWC2_HCSPLT_XACTPOS_ALL;
qtd->isoc_split_offset = 0;
break;
case DWC2_HCSPLT_XACTPOS_BEGIN:
case DWC2_HCSPLT_XACTPOS_MID:
/*
* For BEGIN or MID, calculate the length for
* the next microframe to determine the correct
* SSPLIT token, either MID or END
*/
frame_desc = &qtd->urb->iso_descs[
qtd->isoc_frame_index];
qtd->isoc_split_offset += 188;
if (frame_desc->length - qtd->isoc_split_offset
<= 188)
qtd->isoc_split_pos =
DWC2_HCSPLT_XACTPOS_END;
else
qtd->isoc_split_pos =
DWC2_HCSPLT_XACTPOS_MID;
break;
}
}
} else {
qtd->error_count = 0;
if (chan->qh->ping_state) {
chan->qh->ping_state = 0;
/*
* Halt the channel so the transfer can be re-started
* from the appropriate point. This only happens in
* Slave mode. In DMA mode, the ping_state is cleared
* when the transfer is started because the core
* automatically executes the PING, then the transfer.
*/
dwc2_halt_channel(hsotg, chan, qtd, DWC2_HC_XFER_ACK);
}
}
/*
* If the ACK occurred when _not_ in the PING state, let the channel
* continue transferring data after clearing the error count
*/
disable_hc_int(hsotg, chnum, HCINTMSK_ACK);
}
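/*
 * Worked example for the ISOC OUT start-split segmentation above
 * (illustrative frame length): a 400-byte isochronous OUT frame is sent in
 * at most 188-byte chunks, one per microframe. Starting from
 * XACTPOS_BEGIN, the first ACK advances isoc_split_offset to 188 and,
 * since 400 - 188 > 188, the next token is MID; the second ACK advances
 * the offset to 376 and, since 400 - 376 <= 188, the final token is END
 * carrying the remaining 24 bytes.
 */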
/*
* Handles a host channel NYET interrupt. This interrupt should only occur on
* Bulk and Control OUT endpoints and for complete split transactions. If a
* NYET occurs at the same time as a Transfer Complete interrupt, it is
* handled in the xfercomp interrupt handler, not here. This handler may be
* called in either DMA mode or Slave mode.
*/
static void dwc2_hc_nyet_intr(struct dwc2_hsotg *hsotg,
struct dwc2_host_chan *chan, int chnum,
struct dwc2_qtd *qtd)
{
if (dbg_hc(chan))
dev_vdbg(hsotg->dev, "--Host Channel %d Interrupt: NYET Received--\n",
chnum);
/*
* NYET on CSPLIT
* re-do the CSPLIT immediately on non-periodic
*/
if (chan->do_split && chan->complete_split) {
if (chan->ep_is_in && chan->ep_type == USB_ENDPOINT_XFER_ISOC &&
hsotg->params.host_dma) {
qtd->complete_split = 0;
qtd->isoc_split_offset = 0;
qtd->isoc_frame_index++;
if (qtd->urb &&
qtd->isoc_frame_index == qtd->urb->packet_count) {
dwc2_host_complete(hsotg, qtd, 0);
dwc2_release_channel(hsotg, chan, qtd,
DWC2_HC_XFER_URB_COMPLETE);
} else {
dwc2_release_channel(hsotg, chan, qtd,
DWC2_HC_XFER_NO_HALT_STATUS);
}
goto handle_nyet_done;
}
if (chan->ep_type == USB_ENDPOINT_XFER_INT ||
chan->ep_type == USB_ENDPOINT_XFER_ISOC) {
struct dwc2_qh *qh = chan->qh;
bool past_end;
if (!hsotg->params.uframe_sched) {
int frnum = dwc2_hcd_get_frame_number(hsotg);
/* Don't have num_hs_transfers; simple logic */
past_end = dwc2_full_frame_num(frnum) !=
dwc2_full_frame_num(qh->next_active_frame);
} else {
int end_frnum;
/*
* Figure out the end frame based on
* schedule.
*
* We don't want to go on trying again
* and again forever. Let's stop when
* we've done all the transfers that
* were scheduled.
*
* We're going to be comparing
* start_active_frame and
* next_active_frame, both of which
* are 1 before the time the packet
* goes on the wire, so that cancels
* out. Basically if we had 1 transfer
* and we saw 1 NYET then we're done.
* We're getting a NYET here so if
* next >= (start + num_transfers)
* we're done. The complexity is that
* for all but ISOC_OUT we skip one
* slot.
*/
end_frnum = dwc2_frame_num_inc(
qh->start_active_frame,
qh->num_hs_transfers);
if (qh->ep_type != USB_ENDPOINT_XFER_ISOC ||
qh->ep_is_in)
end_frnum =
dwc2_frame_num_inc(end_frnum, 1);
past_end = dwc2_frame_num_le(
end_frnum, qh->next_active_frame);
}
if (past_end) {
/* Treat this as a transaction error. */
#if 0
/*
* Todo: Fix system performance so this can
* be treated as an error. Right now complete
* splits cannot be scheduled precisely enough
* due to other system activity, so this error
* occurs regularly in Slave mode.
*/
qtd->error_count++;
#endif
qtd->complete_split = 0;
dwc2_halt_channel(hsotg, chan, qtd,
DWC2_HC_XFER_XACT_ERR);
/* Todo: add support for isoc release */
goto handle_nyet_done;
}
}
dwc2_halt_channel(hsotg, chan, qtd, DWC2_HC_XFER_NYET);
goto handle_nyet_done;
}
chan->qh->ping_state = 1;
qtd->error_count = 0;
dwc2_update_urb_state_abn(hsotg, chan, chnum, qtd->urb, qtd,
DWC2_HC_XFER_NYET);
dwc2_hcd_save_data_toggle(hsotg, chan, chnum, qtd);
/*
* Halt the channel and re-start the transfer so the PING protocol
* will start
*/
dwc2_halt_channel(hsotg, chan, qtd, DWC2_HC_XFER_NYET);
handle_nyet_done:
disable_hc_int(hsotg, chnum, HCINTMSK_NYET);
}
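/*
 * Example of the "past_end" check above with the microframe scheduler
 * (made-up frame numbers): an interrupt IN split with
 * start_active_frame = 0x100 and num_hs_transfers = 3 gives
 * end_frnum = 0x100 + 3 + 1 = 0x104 (the extra slot applies to everything
 * except ISOC OUT). As long as next_active_frame is below that, the CSPLIT
 * is simply retried on NYET; once end_frnum <= next_active_frame the
 * transaction is treated as having missed its window and the channel is
 * halted with DWC2_HC_XFER_XACT_ERR.
 */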
/*
* Handles a host channel babble interrupt. This handler may be called in
* either DMA mode or Slave mode.
*/
static void dwc2_hc_babble_intr(struct dwc2_hsotg *hsotg,
struct dwc2_host_chan *chan, int chnum,
struct dwc2_qtd *qtd)
{
dev_dbg(hsotg->dev, "--Host Channel %d Interrupt: Babble Error--\n",
chnum);
dwc2_hc_handle_tt_clear(hsotg, chan, qtd);
if (hsotg->params.dma_desc_enable) {
dwc2_hcd_complete_xfer_ddma(hsotg, chan, chnum,
DWC2_HC_XFER_BABBLE_ERR);
goto disable_int;
}
if (chan->ep_type != USB_ENDPOINT_XFER_ISOC) {
dwc2_host_complete(hsotg, qtd, -EOVERFLOW);
dwc2_halt_channel(hsotg, chan, qtd, DWC2_HC_XFER_BABBLE_ERR);
} else {
enum dwc2_halt_status halt_status;
halt_status = dwc2_update_isoc_urb_state(hsotg, chan, chnum,
qtd, DWC2_HC_XFER_BABBLE_ERR);
dwc2_halt_channel(hsotg, chan, qtd, halt_status);
}
disable_int:
disable_hc_int(hsotg, chnum, HCINTMSK_BBLERR);
}
/*
* Handles a host channel AHB error interrupt. This handler is only called in
* DMA mode.
*/
static void dwc2_hc_ahberr_intr(struct dwc2_hsotg *hsotg,
struct dwc2_host_chan *chan, int chnum,
struct dwc2_qtd *qtd)
{
struct dwc2_hcd_urb *urb = qtd->urb;
char *pipetype, *speed;
u32 hcchar;
u32 hcsplt;
u32 hctsiz;
u32 hc_dma;
dev_dbg(hsotg->dev, "--Host Channel %d Interrupt: AHB Error--\n",
chnum);
if (!urb)
goto handle_ahberr_halt;
dwc2_hc_handle_tt_clear(hsotg, chan, qtd);
hcchar = dwc2_readl(hsotg, HCCHAR(chnum));
hcsplt = dwc2_readl(hsotg, HCSPLT(chnum));
hctsiz = dwc2_readl(hsotg, HCTSIZ(chnum));
hc_dma = dwc2_readl(hsotg, HCDMA(chnum));
dev_err(hsotg->dev, "AHB ERROR, Channel %d\n", chnum);
dev_err(hsotg->dev, " hcchar 0x%08x, hcsplt 0x%08x\n", hcchar, hcsplt);
dev_err(hsotg->dev, " hctsiz 0x%08x, hc_dma 0x%08x\n", hctsiz, hc_dma);
dev_err(hsotg->dev, " Device address: %d\n",
dwc2_hcd_get_dev_addr(&urb->pipe_info));
dev_err(hsotg->dev, " Endpoint: %d, %s\n",
dwc2_hcd_get_ep_num(&urb->pipe_info),
dwc2_hcd_is_pipe_in(&urb->pipe_info) ? "IN" : "OUT");
switch (dwc2_hcd_get_pipe_type(&urb->pipe_info)) {
case USB_ENDPOINT_XFER_CONTROL:
pipetype = "CONTROL";
break;
case USB_ENDPOINT_XFER_BULK:
pipetype = "BULK";
break;
case USB_ENDPOINT_XFER_INT:
pipetype = "INTERRUPT";
break;
case USB_ENDPOINT_XFER_ISOC:
pipetype = "ISOCHRONOUS";
break;
default:
pipetype = "UNKNOWN";
break;
}
dev_err(hsotg->dev, " Endpoint type: %s\n", pipetype);
switch (chan->speed) {
case USB_SPEED_HIGH:
speed = "HIGH";
break;
case USB_SPEED_FULL:
speed = "FULL";
break;
case USB_SPEED_LOW:
speed = "LOW";
break;
default:
speed = "UNKNOWN";
break;
}
dev_err(hsotg->dev, " Speed: %s\n", speed);
dev_err(hsotg->dev, " Max packet size: %d (mult %d)\n",
dwc2_hcd_get_maxp(&urb->pipe_info),
dwc2_hcd_get_maxp_mult(&urb->pipe_info));
dev_err(hsotg->dev, " Data buffer length: %d\n", urb->length);
dev_err(hsotg->dev, " Transfer buffer: %p, Transfer DMA: %08lx\n",
urb->buf, (unsigned long)urb->dma);
dev_err(hsotg->dev, " Setup buffer: %p, Setup DMA: %08lx\n",
urb->setup_packet, (unsigned long)urb->setup_dma);
dev_err(hsotg->dev, " Interval: %d\n", urb->interval);
/* Core halts the channel for Descriptor DMA mode */
if (hsotg->params.dma_desc_enable) {
dwc2_hcd_complete_xfer_ddma(hsotg, chan, chnum,
DWC2_HC_XFER_AHB_ERR);
goto handle_ahberr_done;
}
dwc2_host_complete(hsotg, qtd, -EIO);
handle_ahberr_halt:
/*
* Force a channel halt. Don't call dwc2_halt_channel because that won't
* write to the HCCHARn register in DMA mode to force the halt.
*/
dwc2_hc_halt(hsotg, chan, DWC2_HC_XFER_AHB_ERR);
handle_ahberr_done:
disable_hc_int(hsotg, chnum, HCINTMSK_AHBERR);
}
/*
* Handles a host channel transaction error interrupt. This handler may be
* called in either DMA mode or Slave mode.
*/
static void dwc2_hc_xacterr_intr(struct dwc2_hsotg *hsotg,
struct dwc2_host_chan *chan, int chnum,
struct dwc2_qtd *qtd)
{
dev_dbg(hsotg->dev,
"--Host Channel %d Interrupt: Transaction Error--\n", chnum);
dwc2_hc_handle_tt_clear(hsotg, chan, qtd);
if (hsotg->params.dma_desc_enable) {
dwc2_hcd_complete_xfer_ddma(hsotg, chan, chnum,
DWC2_HC_XFER_XACT_ERR);
goto handle_xacterr_done;
}
switch (dwc2_hcd_get_pipe_type(&qtd->urb->pipe_info)) {
case USB_ENDPOINT_XFER_CONTROL:
case USB_ENDPOINT_XFER_BULK:
qtd->error_count++;
if (!chan->qh->ping_state) {
dwc2_update_urb_state_abn(hsotg, chan, chnum, qtd->urb,
qtd, DWC2_HC_XFER_XACT_ERR);
dwc2_hcd_save_data_toggle(hsotg, chan, chnum, qtd);
if (!chan->ep_is_in && chan->speed == USB_SPEED_HIGH)
chan->qh->ping_state = 1;
}
/*
* Halt the channel so the transfer can be re-started from
* the appropriate point or the PING protocol will start
*/
dwc2_halt_channel(hsotg, chan, qtd, DWC2_HC_XFER_XACT_ERR);
break;
case USB_ENDPOINT_XFER_INT:
qtd->error_count++;
if (chan->do_split && chan->complete_split)
qtd->complete_split = 0;
dwc2_halt_channel(hsotg, chan, qtd, DWC2_HC_XFER_XACT_ERR);
break;
case USB_ENDPOINT_XFER_ISOC:
{
enum dwc2_halt_status halt_status;
halt_status = dwc2_update_isoc_urb_state(hsotg, chan,
chnum, qtd, DWC2_HC_XFER_XACT_ERR);
dwc2_halt_channel(hsotg, chan, qtd, halt_status);
}
break;
}
handle_xacterr_done:
disable_hc_int(hsotg, chnum, HCINTMSK_XACTERR);
}
/*
* Handles a host channel frame overrun interrupt. This handler may be called
* in either DMA mode or Slave mode.
*/
static void dwc2_hc_frmovrun_intr(struct dwc2_hsotg *hsotg,
struct dwc2_host_chan *chan, int chnum,
struct dwc2_qtd *qtd)
{
enum dwc2_halt_status halt_status;
if (dbg_hc(chan))
dev_dbg(hsotg->dev, "--Host Channel %d Interrupt: Frame Overrun--\n",
chnum);
dwc2_hc_handle_tt_clear(hsotg, chan, qtd);
switch (dwc2_hcd_get_pipe_type(&qtd->urb->pipe_info)) {
case USB_ENDPOINT_XFER_CONTROL:
case USB_ENDPOINT_XFER_BULK:
break;
case USB_ENDPOINT_XFER_INT:
dwc2_halt_channel(hsotg, chan, qtd, DWC2_HC_XFER_FRAME_OVERRUN);
break;
case USB_ENDPOINT_XFER_ISOC:
halt_status = dwc2_update_isoc_urb_state(hsotg, chan, chnum,
qtd, DWC2_HC_XFER_FRAME_OVERRUN);
dwc2_halt_channel(hsotg, chan, qtd, halt_status);
break;
}
disable_hc_int(hsotg, chnum, HCINTMSK_FRMOVRUN);
}
/*
* Handles a host channel data toggle error interrupt. This handler may be
* called in either DMA mode or Slave mode.
*/
static void dwc2_hc_datatglerr_intr(struct dwc2_hsotg *hsotg,
struct dwc2_host_chan *chan, int chnum,
struct dwc2_qtd *qtd)
{
dev_dbg(hsotg->dev,
"--Host Channel %d Interrupt: Data Toggle Error--\n", chnum);
if (chan->ep_is_in)
qtd->error_count = 0;
else
dev_err(hsotg->dev,
"Data Toggle Error on OUT transfer, channel %d\n",
chnum);
dwc2_hc_handle_tt_clear(hsotg, chan, qtd);
disable_hc_int(hsotg, chnum, HCINTMSK_DATATGLERR);
}
/*
* For debug only. It checks that a valid halt status is set and that
* HCCHARn.chdis is clear. If there's a problem, corrective action is
* taken and a warning is issued.
*
* Return: true if halt status is ok, false otherwise
*/
static bool dwc2_halt_status_ok(struct dwc2_hsotg *hsotg,
struct dwc2_host_chan *chan, int chnum,
struct dwc2_qtd *qtd)
{
#ifdef DEBUG
u32 hcchar;
u32 hctsiz;
u32 hcintmsk;
u32 hcsplt;
if (chan->halt_status == DWC2_HC_XFER_NO_HALT_STATUS) {
/*
* This code is here only as a check. This condition should
* never happen. Ignore the halt if it does occur.
*/
hcchar = dwc2_readl(hsotg, HCCHAR(chnum));
hctsiz = dwc2_readl(hsotg, HCTSIZ(chnum));
hcintmsk = dwc2_readl(hsotg, HCINTMSK(chnum));
hcsplt = dwc2_readl(hsotg, HCSPLT(chnum));
dev_dbg(hsotg->dev,
"%s: chan->halt_status DWC2_HC_XFER_NO_HALT_STATUS,\n",
__func__);
dev_dbg(hsotg->dev,
"channel %d, hcchar 0x%08x, hctsiz 0x%08x,\n",
chnum, hcchar, hctsiz);
dev_dbg(hsotg->dev,
"hcint 0x%08x, hcintmsk 0x%08x, hcsplt 0x%08x,\n",
chan->hcint, hcintmsk, hcsplt);
if (qtd)
dev_dbg(hsotg->dev, "qtd->complete_split %d\n",
qtd->complete_split);
dev_warn(hsotg->dev,
"%s: no halt status, channel %d, ignoring interrupt\n",
__func__, chnum);
return false;
}
/*
* This code is here only as a check. hcchar.chdis should never be set
* when the halt interrupt occurs. Halt the channel again if it does
* occur.
*/
hcchar = dwc2_readl(hsotg, HCCHAR(chnum));
if (hcchar & HCCHAR_CHDIS) {
dev_warn(hsotg->dev,
"%s: hcchar.chdis set unexpectedly, hcchar 0x%08x, trying to halt again\n",
__func__, hcchar);
chan->halt_pending = 0;
dwc2_halt_channel(hsotg, chan, qtd, chan->halt_status);
return false;
}
#endif
return true;
}
/*
* Handles a host Channel Halted interrupt in DMA mode. This handler
* determines the reason the channel halted and proceeds accordingly.
*/
static void dwc2_hc_chhltd_intr_dma(struct dwc2_hsotg *hsotg,
struct dwc2_host_chan *chan, int chnum,
struct dwc2_qtd *qtd)
{
u32 hcintmsk;
int out_nak_enh = 0;
if (dbg_hc(chan))
dev_vdbg(hsotg->dev,
"--Host Channel %d Interrupt: DMA Channel Halted--\n",
chnum);
/*
* For core with OUT NAK enhancement, the flow for high-speed
* CONTROL/BULK OUT is handled a little differently
*/
if (hsotg->hw_params.snpsid >= DWC2_CORE_REV_2_71a) {
if (chan->speed == USB_SPEED_HIGH && !chan->ep_is_in &&
(chan->ep_type == USB_ENDPOINT_XFER_CONTROL ||
chan->ep_type == USB_ENDPOINT_XFER_BULK)) {
out_nak_enh = 1;
}
}
if (chan->halt_status == DWC2_HC_XFER_URB_DEQUEUE ||
(chan->halt_status == DWC2_HC_XFER_AHB_ERR &&
!hsotg->params.dma_desc_enable)) {
if (hsotg->params.dma_desc_enable)
dwc2_hcd_complete_xfer_ddma(hsotg, chan, chnum,
chan->halt_status);
else
/*
* Just release the channel. A dequeue can happen on a
* transfer timeout. In the case of an AHB Error, the
* channel was forced to halt because there's no way to
* gracefully recover.
*/
dwc2_release_channel(hsotg, chan, qtd,
chan->halt_status);
return;
}
hcintmsk = dwc2_readl(hsotg, HCINTMSK(chnum));
if (chan->hcint & HCINTMSK_XFERCOMPL) {
/*
* Todo: This is here because of a possible hardware bug. Spec
* says that on SPLIT-ISOC OUT transfers in DMA mode a HALT
* interrupt w/ACK bit set should occur, but I only see the
* XFERCOMP bit, even with it masked out. This is a workaround
* for that behavior. Should fix this when hardware is fixed.
*/
if (chan->ep_type == USB_ENDPOINT_XFER_ISOC && !chan->ep_is_in)
dwc2_hc_ack_intr(hsotg, chan, chnum, qtd);
dwc2_hc_xfercomp_intr(hsotg, chan, chnum, qtd);
} else if (chan->hcint & HCINTMSK_STALL) {
dwc2_hc_stall_intr(hsotg, chan, chnum, qtd);
} else if ((chan->hcint & HCINTMSK_XACTERR) &&
!hsotg->params.dma_desc_enable) {
if (out_nak_enh) {
if (chan->hcint &
(HCINTMSK_NYET | HCINTMSK_NAK | HCINTMSK_ACK)) {
dev_vdbg(hsotg->dev,
"XactErr with NYET/NAK/ACK\n");
qtd->error_count = 0;
} else {
dev_vdbg(hsotg->dev,
"XactErr without NYET/NAK/ACK\n");
}
}
/*
* Must handle xacterr before nak or ack. Could get a xacterr
* at the same time as either of these on a BULK/CONTROL OUT
* that started with a PING. The xacterr takes precedence.
*/
dwc2_hc_xacterr_intr(hsotg, chan, chnum, qtd);
} else if ((chan->hcint & HCINTMSK_XCS_XACT) &&
hsotg->params.dma_desc_enable) {
dwc2_hc_xacterr_intr(hsotg, chan, chnum, qtd);
} else if ((chan->hcint & HCINTMSK_AHBERR) &&
hsotg->params.dma_desc_enable) {
dwc2_hc_ahberr_intr(hsotg, chan, chnum, qtd);
} else if (chan->hcint & HCINTMSK_BBLERR) {
dwc2_hc_babble_intr(hsotg, chan, chnum, qtd);
} else if (chan->hcint & HCINTMSK_FRMOVRUN) {
dwc2_hc_frmovrun_intr(hsotg, chan, chnum, qtd);
} else if (!out_nak_enh) {
if (chan->hcint & HCINTMSK_NYET) {
/*
* Must handle nyet before nak or ack. Could get a nyet
* at the same time as either of those on a BULK/CONTROL
* OUT that started with a PING. The nyet takes
* precedence.
*/
dwc2_hc_nyet_intr(hsotg, chan, chnum, qtd);
} else if ((chan->hcint & HCINTMSK_NAK) &&
!(hcintmsk & HCINTMSK_NAK)) {
/*
* If nak is not masked, it's because a non-split IN
* transfer is in an error state. In that case, the nak
* is handled by the nak interrupt handler, not here.
* Handle nak here for BULK/CONTROL OUT transfers, which
* halt on a NAK to allow rewinding the buffer pointer.
*/
dwc2_hc_nak_intr(hsotg, chan, chnum, qtd);
} else if ((chan->hcint & HCINTMSK_ACK) &&
!(hcintmsk & HCINTMSK_ACK)) {
/*
* If ack is not masked, it's because a non-split IN
* transfer is in an error state. In that case, the ack
* is handled by the ack interrupt handler, not here.
* Handle ack here for split transfers. Start splits
* halt on ACK.
*/
dwc2_hc_ack_intr(hsotg, chan, chnum, qtd);
} else {
if (chan->ep_type == USB_ENDPOINT_XFER_INT ||
chan->ep_type == USB_ENDPOINT_XFER_ISOC) {
/*
* A periodic transfer halted with no other
* channel interrupts set. Assume it was halted
* by the core because it could not be completed
* in its scheduled (micro)frame.
*/
dev_dbg(hsotg->dev,
"%s: Halt channel %d (assume incomplete periodic transfer)\n",
__func__, chnum);
dwc2_halt_channel(hsotg, chan, qtd,
DWC2_HC_XFER_PERIODIC_INCOMPLETE);
} else {
dev_err(hsotg->dev,
"%s: Channel %d - ChHltd set, but reason is unknown\n",
__func__, chnum);
dev_err(hsotg->dev,
"hcint 0x%08x, intsts 0x%08x\n",
chan->hcint,
dwc2_readl(hsotg, GINTSTS));
goto error;
}
}
} else {
dev_info(hsotg->dev,
"NYET/NAK/ACK/other in non-error case, 0x%08x\n",
chan->hcint);
error:
/* Fall through: use 3-strikes rule */
qtd->error_count++;
dwc2_update_urb_state_abn(hsotg, chan, chnum, qtd->urb,
qtd, DWC2_HC_XFER_XACT_ERR);
/*
* We can get here after a completed transaction
* (urb->actual_length >= urb->length) which was not reported
* as completed. If that is the case, and we do not abort
* the transfer, a transfer of size 0 will be enqueued
* subsequently. If urb->actual_length is not DMA-aligned,
* the buffer will then point to an unaligned address, and
* the resulting behavior is undefined. Bail out in that
* situation.
*/
if (qtd->urb->actual_length >= qtd->urb->length)
qtd->error_count = 3;
dwc2_hcd_save_data_toggle(hsotg, chan, chnum, qtd);
dwc2_halt_channel(hsotg, chan, qtd, DWC2_HC_XFER_XACT_ERR);
}
}
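/*
 * Note on the error fall-through above: bumping qtd->error_count here
 * feeds the 3-strikes rule in dwc2_release_channel(), which completes the
 * URB with -EPROTO once the count reaches three. Forcing the count to 3
 * when the URB already looks fully transferred avoids re-queueing a
 * zero-length (and possibly unaligned) continuation of the same transfer.
 */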
/*
* Handles a host channel Channel Halted interrupt
*
* In slave mode, this handler is called only when the driver specifically
* requests a halt. This occurs during handling other host channel interrupts
* (e.g. nak, xacterr, stall, nyet, etc.).
*
* In DMA mode, this is the interrupt that occurs when the core has finished
* processing a transfer on a channel. Other host channel interrupts (except
* ahberr) are disabled in DMA mode.
*/
static void dwc2_hc_chhltd_intr(struct dwc2_hsotg *hsotg,
struct dwc2_host_chan *chan, int chnum,
struct dwc2_qtd *qtd)
{
if (dbg_hc(chan))
dev_vdbg(hsotg->dev, "--Host Channel %d Interrupt: Channel Halted--\n",
chnum);
if (hsotg->params.host_dma) {
dwc2_hc_chhltd_intr_dma(hsotg, chan, chnum, qtd);
} else {
if (!dwc2_halt_status_ok(hsotg, chan, chnum, qtd))
return;
dwc2_release_channel(hsotg, chan, qtd, chan->halt_status);
}
}
/*
* Check if the given qtd is still the top of the list (and thus valid).
*
* If dwc2_hcd_qtd_unlink_and_free() has been called since we grabbed
* the qtd from the top of the list, this will return false (otherwise true).
*/
static bool dwc2_check_qtd_still_ok(struct dwc2_qtd *qtd, struct dwc2_qh *qh)
{
struct dwc2_qtd *cur_head;
if (!qh)
return false;
cur_head = list_first_entry(&qh->qtd_list, struct dwc2_qtd,
qtd_list_entry);
return (cur_head == qtd);
}
/* Handles interrupt for a specific Host Channel */
static void dwc2_hc_n_intr(struct dwc2_hsotg *hsotg, int chnum)
{
struct dwc2_qtd *qtd;
struct dwc2_host_chan *chan;
u32 hcint, hcintmsk;
chan = hsotg->hc_ptr_array[chnum];
hcint = dwc2_readl(hsotg, HCINT(chnum));
hcintmsk = dwc2_readl(hsotg, HCINTMSK(chnum));
if (!chan) {
dev_err(hsotg->dev, "## hc_ptr_array for channel is NULL ##\n");
dwc2_writel(hsotg, hcint, HCINT(chnum));
return;
}
if (dbg_hc(chan)) {
dev_vdbg(hsotg->dev, "--Host Channel Interrupt--, Channel %d\n",
chnum);
dev_vdbg(hsotg->dev,
" hcint 0x%08x, hcintmsk 0x%08x, hcint&hcintmsk 0x%08x\n",
hcint, hcintmsk, hcint & hcintmsk);
}
dwc2_writel(hsotg, hcint, HCINT(chnum));
/*
* If we got an interrupt after someone called
* dwc2_hcd_endpoint_disable() we don't want to crash below
*/
if (!chan->qh) {
dev_warn(hsotg->dev, "Interrupt on disabled channel\n");
return;
}
chan->hcint = hcint;
hcint &= hcintmsk;
/*
* If the channel was halted due to a dequeue, the qtd list might
* be empty or at least the first entry will not be the active qtd.
* In this case, take a shortcut and just release the channel.
*/
if (chan->halt_status == DWC2_HC_XFER_URB_DEQUEUE) {
/*
* If the channel was halted, this should be the only
* interrupt unmasked
*/
WARN_ON(hcint != HCINTMSK_CHHLTD);
if (hsotg->params.dma_desc_enable)
dwc2_hcd_complete_xfer_ddma(hsotg, chan, chnum,
chan->halt_status);
else
dwc2_release_channel(hsotg, chan, NULL,
chan->halt_status);
return;
}
if (list_empty(&chan->qh->qtd_list)) {
/*
* TODO: Will this ever happen with the
* DWC2_HC_XFER_URB_DEQUEUE handling above?
*/
dev_dbg(hsotg->dev, "## no QTD queued for channel %d ##\n",
chnum);
dev_dbg(hsotg->dev,
" hcint 0x%08x, hcintmsk 0x%08x, hcint&hcintmsk 0x%08x\n",
chan->hcint, hcintmsk, hcint);
chan->halt_status = DWC2_HC_XFER_NO_HALT_STATUS;
disable_hc_int(hsotg, chnum, HCINTMSK_CHHLTD);
chan->hcint = 0;
return;
}
qtd = list_first_entry(&chan->qh->qtd_list, struct dwc2_qtd,
qtd_list_entry);
if (!hsotg->params.host_dma) {
if ((hcint & HCINTMSK_CHHLTD) && hcint != HCINTMSK_CHHLTD)
hcint &= ~HCINTMSK_CHHLTD;
}
if (hcint & HCINTMSK_XFERCOMPL) {
dwc2_hc_xfercomp_intr(hsotg, chan, chnum, qtd);
/*
* If NYET occurred at same time as Xfer Complete, the NYET is
* handled by the Xfer Complete interrupt handler. Don't want
* to call the NYET interrupt handler in this case.
*/
hcint &= ~HCINTMSK_NYET;
}
if (hcint & HCINTMSK_CHHLTD) {
dwc2_hc_chhltd_intr(hsotg, chan, chnum, qtd);
if (!dwc2_check_qtd_still_ok(qtd, chan->qh))
goto exit;
}
if (hcint & HCINTMSK_AHBERR) {
dwc2_hc_ahberr_intr(hsotg, chan, chnum, qtd);
if (!dwc2_check_qtd_still_ok(qtd, chan->qh))
goto exit;
}
if (hcint & HCINTMSK_STALL) {
dwc2_hc_stall_intr(hsotg, chan, chnum, qtd);
if (!dwc2_check_qtd_still_ok(qtd, chan->qh))
goto exit;
}
if (hcint & HCINTMSK_NAK) {
dwc2_hc_nak_intr(hsotg, chan, chnum, qtd);
if (!dwc2_check_qtd_still_ok(qtd, chan->qh))
goto exit;
}
if (hcint & HCINTMSK_ACK) {
dwc2_hc_ack_intr(hsotg, chan, chnum, qtd);
if (!dwc2_check_qtd_still_ok(qtd, chan->qh))
goto exit;
}
if (hcint & HCINTMSK_NYET) {
dwc2_hc_nyet_intr(hsotg, chan, chnum, qtd);
if (!dwc2_check_qtd_still_ok(qtd, chan->qh))
goto exit;
}
if (hcint & HCINTMSK_XACTERR) {
dwc2_hc_xacterr_intr(hsotg, chan, chnum, qtd);
if (!dwc2_check_qtd_still_ok(qtd, chan->qh))
goto exit;
}
if (hcint & HCINTMSK_BBLERR) {
dwc2_hc_babble_intr(hsotg, chan, chnum, qtd);
if (!dwc2_check_qtd_still_ok(qtd, chan->qh))
goto exit;
}
if (hcint & HCINTMSK_FRMOVRUN) {
dwc2_hc_frmovrun_intr(hsotg, chan, chnum, qtd);
if (!dwc2_check_qtd_still_ok(qtd, chan->qh))
goto exit;
}
if (hcint & HCINTMSK_DATATGLERR) {
dwc2_hc_datatglerr_intr(hsotg, chan, chnum, qtd);
if (!dwc2_check_qtd_still_ok(qtd, chan->qh))
goto exit;
}
exit:
chan->hcint = 0;
}
/*
* This interrupt indicates that one or more host channels has a pending
* interrupt. There are multiple conditions that can cause each host channel
* interrupt. This function determines which conditions have occurred for each
* host channel interrupt and handles them appropriately.
*/
static void dwc2_hc_intr(struct dwc2_hsotg *hsotg)
{
u32 haint;
int i;
struct dwc2_host_chan *chan, *chan_tmp;
haint = dwc2_readl(hsotg, HAINT);
if (dbg_perio()) {
dev_vdbg(hsotg->dev, "%s()\n", __func__);
dev_vdbg(hsotg->dev, "HAINT=%08x\n", haint);
}
/*
* According to USB 2.0 spec section 11.18.8, a host must
* issue complete-split transactions in a microframe for a
* set of full-/low-speed endpoints in the same relative
* order as the corresponding start-splits were issued.
*/
list_for_each_entry_safe(chan, chan_tmp, &hsotg->split_order,
split_order_list_entry) {
int hc_num = chan->hc_num;
if (haint & (1 << hc_num)) {
dwc2_hc_n_intr(hsotg, hc_num);
haint &= ~(1 << hc_num);
}
}
for (i = 0; i < hsotg->params.host_channels; i++) {
if (haint & (1 << i))
dwc2_hc_n_intr(hsotg, i);
}
}
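/*
 * Ordering note for the loops above: channels with outstanding splits are
 * serviced first, in split_order list order, and their HAINT bits are
 * cleared from the local copy so the second loop only picks up the
 * remaining (non-split) channels by index.
 */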
/* This function handles interrupts for the HCD */
irqreturn_t dwc2_handle_hcd_intr(struct dwc2_hsotg *hsotg)
{
u32 gintsts, dbg_gintsts;
irqreturn_t retval = IRQ_HANDLED;
if (!dwc2_is_controller_alive(hsotg)) {
dev_warn(hsotg->dev, "Controller is dead\n");
return retval;
} else {
retval = IRQ_NONE;
}
spin_lock(&hsotg->lock);
/* Check if HOST Mode */
if (dwc2_is_host_mode(hsotg)) {
gintsts = dwc2_read_core_intr(hsotg);
if (!gintsts) {
spin_unlock(&hsotg->lock);
return retval;
}
retval = IRQ_HANDLED;
dbg_gintsts = gintsts;
#ifndef DEBUG_SOF
dbg_gintsts &= ~GINTSTS_SOF;
#endif
if (!dbg_perio())
dbg_gintsts &= ~(GINTSTS_HCHINT | GINTSTS_RXFLVL |
GINTSTS_PTXFEMP);
/* Only print if there are any non-suppressed interrupts left */
if (dbg_gintsts)
dev_vdbg(hsotg->dev,
"DWC OTG HCD Interrupt Detected gintsts&gintmsk=0x%08x\n",
gintsts);
if (gintsts & GINTSTS_SOF)
dwc2_sof_intr(hsotg);
if (gintsts & GINTSTS_RXFLVL)
dwc2_rx_fifo_level_intr(hsotg);
if (gintsts & GINTSTS_NPTXFEMP)
dwc2_np_tx_fifo_empty_intr(hsotg);
if (gintsts & GINTSTS_PRTINT)
dwc2_port_intr(hsotg);
if (gintsts & GINTSTS_HCHINT)
dwc2_hc_intr(hsotg);
if (gintsts & GINTSTS_PTXFEMP)
dwc2_perio_tx_fifo_empty_intr(hsotg);
if (dbg_gintsts) {
dev_vdbg(hsotg->dev,
"DWC OTG HCD Finished Servicing Interrupts\n");
dev_vdbg(hsotg->dev,
"DWC OTG HCD gintsts=0x%08x gintmsk=0x%08x\n",
dwc2_readl(hsotg, GINTSTS),
dwc2_readl(hsotg, GINTMSK));
}
}
spin_unlock(&hsotg->lock);
return retval;
}
| linux-master | drivers/usb/dwc2/hcd_intr.c |
// SPDX-License-Identifier: GPL-2.0
/*
* drd.c - DesignWare USB2 DRD Controller Dual-role support
*
* Copyright (C) 2020 STMicroelectronics
*
* Author(s): Amelie Delaunay <[email protected]>
*/
#include <linux/clk.h>
#include <linux/iopoll.h>
#include <linux/platform_device.h>
#include <linux/usb/role.h>
#include "core.h"
#define dwc2_ovr_gotgctl(gotgctl) \
((gotgctl) |= GOTGCTL_BVALOEN | GOTGCTL_AVALOEN | GOTGCTL_VBVALOEN | \
GOTGCTL_DBNCE_FLTR_BYPASS)
static void dwc2_ovr_init(struct dwc2_hsotg *hsotg)
{
unsigned long flags;
u32 gotgctl;
spin_lock_irqsave(&hsotg->lock, flags);
gotgctl = dwc2_readl(hsotg, GOTGCTL);
dwc2_ovr_gotgctl(gotgctl);
gotgctl &= ~(GOTGCTL_BVALOVAL | GOTGCTL_AVALOVAL | GOTGCTL_VBVALOVAL);
if (hsotg->role_sw_default_mode == USB_DR_MODE_HOST)
gotgctl |= GOTGCTL_AVALOVAL | GOTGCTL_VBVALOVAL;
else if (hsotg->role_sw_default_mode == USB_DR_MODE_PERIPHERAL)
gotgctl |= GOTGCTL_BVALOVAL | GOTGCTL_VBVALOVAL;
dwc2_writel(hsotg, gotgctl, GOTGCTL);
spin_unlock_irqrestore(&hsotg->lock, flags);
dwc2_force_mode(hsotg, (hsotg->dr_mode == USB_DR_MODE_HOST) ||
(hsotg->role_sw_default_mode == USB_DR_MODE_HOST));
}
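/*
 * How the override scheme above is used: the GOTGCTL *OEN bits are the
 * override enables that hand control of the A/B-session-valid and
 * Vbus-valid inputs to software, and the matching *OVAL bits supply the
 * forced values. Forcing host mode therefore asserts
 * AVALOVAL | VBVALOVAL, forcing device mode asserts BVALOVAL | VBVALOVAL,
 * and dropping a role clears the corresponding override values again in
 * the helpers below.
 */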
static int dwc2_ovr_avalid(struct dwc2_hsotg *hsotg, bool valid)
{
u32 gotgctl = dwc2_readl(hsotg, GOTGCTL);
/* Check if A-Session is already in the right state */
if ((valid && (gotgctl & GOTGCTL_ASESVLD)) ||
(!valid && !(gotgctl & GOTGCTL_ASESVLD)))
return -EALREADY;
/* Always enable overrides to handle the resume case */
dwc2_ovr_gotgctl(gotgctl);
gotgctl &= ~GOTGCTL_BVALOVAL;
if (valid)
gotgctl |= GOTGCTL_AVALOVAL | GOTGCTL_VBVALOVAL;
else
gotgctl &= ~(GOTGCTL_AVALOVAL | GOTGCTL_VBVALOVAL);
dwc2_writel(hsotg, gotgctl, GOTGCTL);
return 0;
}
static int dwc2_ovr_bvalid(struct dwc2_hsotg *hsotg, bool valid)
{
u32 gotgctl = dwc2_readl(hsotg, GOTGCTL);
/* Check if B-Session is already in the right state */
if ((valid && (gotgctl & GOTGCTL_BSESVLD)) ||
(!valid && !(gotgctl & GOTGCTL_BSESVLD)))
return -EALREADY;
/* Always enable overrides to handle the resume case */
dwc2_ovr_gotgctl(gotgctl);
gotgctl &= ~GOTGCTL_AVALOVAL;
if (valid)
gotgctl |= GOTGCTL_BVALOVAL | GOTGCTL_VBVALOVAL;
else
gotgctl &= ~(GOTGCTL_BVALOVAL | GOTGCTL_VBVALOVAL);
dwc2_writel(hsotg, gotgctl, GOTGCTL);
return 0;
}
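/*
 * For reference, the valid-override bits driven by the two helpers above
 * combine as follows (derived from the code; summarized here purely as an
 * illustration):
 *
 *	role		AVALOVAL	BVALOVAL	VBVALOVAL
 *	host (A)	1		0		1
 *	device (B)	0		1		1
 *	none		0		0		0
 */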
static int dwc2_drd_role_sw_set(struct usb_role_switch *sw, enum usb_role role)
{
struct dwc2_hsotg *hsotg = usb_role_switch_get_drvdata(sw);
unsigned long flags;
int already = 0;
/* Skip session not in line with dr_mode */
if ((role == USB_ROLE_DEVICE && hsotg->dr_mode == USB_DR_MODE_HOST) ||
(role == USB_ROLE_HOST && hsotg->dr_mode == USB_DR_MODE_PERIPHERAL))
return -EINVAL;
#if IS_ENABLED(CONFIG_USB_DWC2_PERIPHERAL) || \
IS_ENABLED(CONFIG_USB_DWC2_DUAL_ROLE)
/* Skip session if core is in test mode */
if (role == USB_ROLE_NONE && hsotg->test_mode) {
dev_dbg(hsotg->dev, "Core is in test mode\n");
return -EBUSY;
}
#endif
/*
* In case of USB_DR_MODE_PERIPHERAL, the clock is disabled at the end of
* probe and enabled on udc_start.
* If the role-switch set callback is called before udc_start, we need to
* enable the clock to read/write the GOTGCTL and GUSBCFG registers used to
* override the mode and sessions. This is the case if a cable is plugged
* in at boot.
*/
if (!hsotg->ll_hw_enabled && hsotg->clk) {
int ret = clk_prepare_enable(hsotg->clk);
if (ret)
return ret;
}
spin_lock_irqsave(&hsotg->lock, flags);
if (role == USB_ROLE_NONE) {
/* default operation mode when usb role is USB_ROLE_NONE */
if (hsotg->role_sw_default_mode == USB_DR_MODE_HOST)
role = USB_ROLE_HOST;
else if (hsotg->role_sw_default_mode == USB_DR_MODE_PERIPHERAL)
role = USB_ROLE_DEVICE;
}
if (role == USB_ROLE_HOST) {
already = dwc2_ovr_avalid(hsotg, true);
} else if (role == USB_ROLE_DEVICE) {
already = dwc2_ovr_bvalid(hsotg, true);
if (dwc2_is_device_enabled(hsotg)) {
/* This clears the DCTL.SFTDISCON bit */
dwc2_hsotg_core_connect(hsotg);
}
} else {
if (dwc2_is_device_mode(hsotg)) {
if (!dwc2_ovr_bvalid(hsotg, false))
/* This sets the DCTL.SFTDISCON bit */
dwc2_hsotg_core_disconnect(hsotg);
} else {
dwc2_ovr_avalid(hsotg, false);
}
}
spin_unlock_irqrestore(&hsotg->lock, flags);
if (!already && hsotg->dr_mode == USB_DR_MODE_OTG)
/* This will raise a Connector ID Status Change Interrupt */
dwc2_force_mode(hsotg, role == USB_ROLE_HOST);
if (!hsotg->ll_hw_enabled && hsotg->clk)
clk_disable_unprepare(hsotg->clk);
dev_dbg(hsotg->dev, "%s-session valid\n",
role == USB_ROLE_NONE ? "No" :
role == USB_ROLE_HOST ? "A" : "B");
return 0;
}
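/*
 * Illustrative sketch (not part of this driver): dwc2_drd_role_sw_set() is
 * reached through the USB role-switch framework, e.g. from a Type-C or
 * extcon consumer, roughly like this (the consumer device below is
 * hypothetical):
 *
 *	struct usb_role_switch *sw = usb_role_switch_get(consumer_dev);
 *
 *	usb_role_switch_set_role(sw, USB_ROLE_HOST);	// -> dwc2_ovr_avalid(hsotg, true)
 *	usb_role_switch_set_role(sw, USB_ROLE_DEVICE);	// -> dwc2_ovr_bvalid(hsotg, true)
 *	usb_role_switch_set_role(sw, USB_ROLE_NONE);	// falls back to role_sw_default_mode
 *
 *	usb_role_switch_put(sw);
 */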
int dwc2_drd_init(struct dwc2_hsotg *hsotg)
{
struct usb_role_switch_desc role_sw_desc = {0};
struct usb_role_switch *role_sw;
int ret;
if (!device_property_read_bool(hsotg->dev, "usb-role-switch"))
return 0;
hsotg->role_sw_default_mode = usb_get_role_switch_default_mode(hsotg->dev);
role_sw_desc.driver_data = hsotg;
role_sw_desc.fwnode = dev_fwnode(hsotg->dev);
role_sw_desc.set = dwc2_drd_role_sw_set;
role_sw_desc.allow_userspace_control = true;
role_sw = usb_role_switch_register(hsotg->dev, &role_sw_desc);
if (IS_ERR(role_sw)) {
ret = PTR_ERR(role_sw);
dev_err(hsotg->dev,
"failed to register role switch: %d\n", ret);
return ret;
}
hsotg->role_sw = role_sw;
/* Enable override and initialize values */
dwc2_ovr_init(hsotg);
return 0;
}
void dwc2_drd_suspend(struct dwc2_hsotg *hsotg)
{
u32 gintsts, gintmsk;
if (hsotg->role_sw && !hsotg->params.external_id_pin_ctl) {
gintmsk = dwc2_readl(hsotg, GINTMSK);
gintmsk &= ~GINTSTS_CONIDSTSCHNG;
dwc2_writel(hsotg, gintmsk, GINTMSK);
gintsts = dwc2_readl(hsotg, GINTSTS);
dwc2_writel(hsotg, gintsts | GINTSTS_CONIDSTSCHNG, GINTSTS);
}
}
void dwc2_drd_resume(struct dwc2_hsotg *hsotg)
{
u32 gintsts, gintmsk;
enum usb_role role;
if (hsotg->role_sw) {
/* get last known role (as the get ops isn't implemented by this driver) */
role = usb_role_switch_get_role(hsotg->role_sw);
if (role == USB_ROLE_NONE) {
if (hsotg->role_sw_default_mode == USB_DR_MODE_HOST)
role = USB_ROLE_HOST;
else if (hsotg->role_sw_default_mode == USB_DR_MODE_PERIPHERAL)
role = USB_ROLE_DEVICE;
}
/* restore last role that may have been lost */
if (role == USB_ROLE_HOST)
dwc2_ovr_avalid(hsotg, true);
else if (role == USB_ROLE_DEVICE)
dwc2_ovr_bvalid(hsotg, true);
dwc2_force_mode(hsotg, role == USB_ROLE_HOST);
dev_dbg(hsotg->dev, "resuming %s-session valid\n",
role == USB_ROLE_NONE ? "No" :
role == USB_ROLE_HOST ? "A" : "B");
}
if (hsotg->role_sw && !hsotg->params.external_id_pin_ctl) {
gintsts = dwc2_readl(hsotg, GINTSTS);
dwc2_writel(hsotg, gintsts | GINTSTS_CONIDSTSCHNG, GINTSTS);
gintmsk = dwc2_readl(hsotg, GINTMSK);
gintmsk |= GINTSTS_CONIDSTSCHNG;
dwc2_writel(hsotg, gintmsk, GINTMSK);
}
}
void dwc2_drd_exit(struct dwc2_hsotg *hsotg)
{
if (hsotg->role_sw)
usb_role_switch_unregister(hsotg->role_sw);
}
| linux-master | drivers/usb/dwc2/drd.c |
// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
/*
* Copyright (C) 2004-2016 Synopsys, Inc.
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of_device.h>
#include <linux/usb/of.h>
#include <linux/pci_ids.h>
#include <linux/pci.h>
#include "core.h"
#define PCI_PRODUCT_ID_HAPS_HSOTG 0xabc0
#define PCI_DEVICE_ID_LOONGSON_DWC2 0x7a04
static void dwc2_set_bcm_params(struct dwc2_hsotg *hsotg)
{
struct dwc2_core_params *p = &hsotg->params;
p->host_rx_fifo_size = 774;
p->max_transfer_size = 65535;
p->max_packet_count = 511;
p->ahbcfg = 0x10;
}
static void dwc2_set_his_params(struct dwc2_hsotg *hsotg)
{
struct dwc2_core_params *p = &hsotg->params;
p->otg_caps.hnp_support = false;
p->otg_caps.srp_support = false;
p->speed = DWC2_SPEED_PARAM_HIGH;
p->host_rx_fifo_size = 512;
p->host_nperio_tx_fifo_size = 512;
p->host_perio_tx_fifo_size = 512;
p->max_transfer_size = 65535;
p->max_packet_count = 511;
p->host_channels = 16;
p->phy_type = DWC2_PHY_TYPE_PARAM_UTMI;
p->phy_utmi_width = 8;
p->i2c_enable = false;
p->reload_ctl = false;
p->ahbcfg = GAHBCFG_HBSTLEN_INCR16 <<
GAHBCFG_HBSTLEN_SHIFT;
p->change_speed_quirk = true;
p->power_down = DWC2_POWER_DOWN_PARAM_NONE;
}
static void dwc2_set_jz4775_params(struct dwc2_hsotg *hsotg)
{
struct dwc2_core_params *p = &hsotg->params;
p->otg_caps.hnp_support = false;
p->speed = DWC2_SPEED_PARAM_HIGH;
p->phy_type = DWC2_PHY_TYPE_PARAM_UTMI;
p->phy_utmi_width = 16;
p->activate_ingenic_overcurrent_detection =
!device_property_read_bool(hsotg->dev, "disable-over-current");
}
static void dwc2_set_loongson_params(struct dwc2_hsotg *hsotg)
{
struct dwc2_core_params *p = &hsotg->params;
p->phy_utmi_width = 8;
p->power_down = DWC2_POWER_DOWN_PARAM_PARTIAL;
}
static void dwc2_set_x1600_params(struct dwc2_hsotg *hsotg)
{
struct dwc2_core_params *p = &hsotg->params;
p->otg_caps.hnp_support = false;
p->speed = DWC2_SPEED_PARAM_HIGH;
p->host_channels = 16;
p->phy_type = DWC2_PHY_TYPE_PARAM_UTMI;
p->phy_utmi_width = 16;
p->activate_ingenic_overcurrent_detection =
!device_property_read_bool(hsotg->dev, "disable-over-current");
}
static void dwc2_set_x2000_params(struct dwc2_hsotg *hsotg)
{
struct dwc2_core_params *p = &hsotg->params;
p->otg_caps.hnp_support = false;
p->speed = DWC2_SPEED_PARAM_HIGH;
p->host_rx_fifo_size = 1024;
p->host_nperio_tx_fifo_size = 1024;
p->host_perio_tx_fifo_size = 1024;
p->host_channels = 16;
p->phy_type = DWC2_PHY_TYPE_PARAM_UTMI;
p->phy_utmi_width = 16;
p->activate_ingenic_overcurrent_detection =
!device_property_read_bool(hsotg->dev, "disable-over-current");
}
static void dwc2_set_s3c6400_params(struct dwc2_hsotg *hsotg)
{
struct dwc2_core_params *p = &hsotg->params;
p->power_down = DWC2_POWER_DOWN_PARAM_NONE;
p->no_clock_gating = true;
p->phy_utmi_width = 8;
}
static void dwc2_set_socfpga_agilex_params(struct dwc2_hsotg *hsotg)
{
struct dwc2_core_params *p = &hsotg->params;
p->power_down = DWC2_POWER_DOWN_PARAM_NONE;
p->no_clock_gating = true;
}
static void dwc2_set_rk_params(struct dwc2_hsotg *hsotg)
{
struct dwc2_core_params *p = &hsotg->params;
p->otg_caps.hnp_support = false;
p->otg_caps.srp_support = false;
p->host_rx_fifo_size = 525;
p->host_nperio_tx_fifo_size = 128;
p->host_perio_tx_fifo_size = 256;
p->ahbcfg = GAHBCFG_HBSTLEN_INCR16 <<
GAHBCFG_HBSTLEN_SHIFT;
p->power_down = DWC2_POWER_DOWN_PARAM_NONE;
p->lpm = false;
p->lpm_clock_gating = false;
p->besl = false;
p->hird_threshold_en = false;
}
static void dwc2_set_ltq_params(struct dwc2_hsotg *hsotg)
{
struct dwc2_core_params *p = &hsotg->params;
p->otg_caps.hnp_support = false;
p->otg_caps.srp_support = false;
p->host_rx_fifo_size = 288;
p->host_nperio_tx_fifo_size = 128;
p->host_perio_tx_fifo_size = 96;
p->max_transfer_size = 65535;
p->max_packet_count = 511;
p->ahbcfg = GAHBCFG_HBSTLEN_INCR16 <<
GAHBCFG_HBSTLEN_SHIFT;
}
static void dwc2_set_amlogic_params(struct dwc2_hsotg *hsotg)
{
struct dwc2_core_params *p = &hsotg->params;
p->otg_caps.hnp_support = false;
p->otg_caps.srp_support = false;
p->speed = DWC2_SPEED_PARAM_HIGH;
p->host_rx_fifo_size = 512;
p->host_nperio_tx_fifo_size = 500;
p->host_perio_tx_fifo_size = 500;
p->host_channels = 16;
p->phy_type = DWC2_PHY_TYPE_PARAM_UTMI;
p->ahbcfg = GAHBCFG_HBSTLEN_INCR8 <<
GAHBCFG_HBSTLEN_SHIFT;
p->power_down = DWC2_POWER_DOWN_PARAM_NONE;
}
static void dwc2_set_amlogic_g12a_params(struct dwc2_hsotg *hsotg)
{
struct dwc2_core_params *p = &hsotg->params;
p->lpm = false;
p->lpm_clock_gating = false;
p->besl = false;
p->hird_threshold_en = false;
}
static void dwc2_set_amlogic_a1_params(struct dwc2_hsotg *hsotg)
{
struct dwc2_core_params *p = &hsotg->params;
p->otg_caps.hnp_support = false;
p->otg_caps.srp_support = false;
p->speed = DWC2_SPEED_PARAM_HIGH;
p->host_rx_fifo_size = 192;
p->host_nperio_tx_fifo_size = 128;
p->host_perio_tx_fifo_size = 128;
p->phy_type = DWC2_PHY_TYPE_PARAM_UTMI;
p->phy_utmi_width = 8;
p->ahbcfg = GAHBCFG_HBSTLEN_INCR8 << GAHBCFG_HBSTLEN_SHIFT;
p->lpm = false;
p->lpm_clock_gating = false;
p->besl = false;
p->hird_threshold_en = false;
}
static void dwc2_set_amcc_params(struct dwc2_hsotg *hsotg)
{
struct dwc2_core_params *p = &hsotg->params;
p->ahbcfg = GAHBCFG_HBSTLEN_INCR16 << GAHBCFG_HBSTLEN_SHIFT;
}
static void dwc2_set_stm32f4x9_fsotg_params(struct dwc2_hsotg *hsotg)
{
struct dwc2_core_params *p = &hsotg->params;
p->otg_caps.hnp_support = false;
p->otg_caps.srp_support = false;
p->speed = DWC2_SPEED_PARAM_FULL;
p->host_rx_fifo_size = 128;
p->host_nperio_tx_fifo_size = 96;
p->host_perio_tx_fifo_size = 96;
p->max_packet_count = 256;
p->phy_type = DWC2_PHY_TYPE_PARAM_FS;
p->i2c_enable = false;
p->activate_stm_fs_transceiver = true;
}
static void dwc2_set_stm32f7_hsotg_params(struct dwc2_hsotg *hsotg)
{
struct dwc2_core_params *p = &hsotg->params;
p->host_rx_fifo_size = 622;
p->host_nperio_tx_fifo_size = 128;
p->host_perio_tx_fifo_size = 256;
}
static void dwc2_set_stm32mp15_fsotg_params(struct dwc2_hsotg *hsotg)
{
struct dwc2_core_params *p = &hsotg->params;
p->otg_caps.hnp_support = false;
p->otg_caps.srp_support = false;
p->otg_caps.otg_rev = 0x200;
p->speed = DWC2_SPEED_PARAM_FULL;
p->host_rx_fifo_size = 128;
p->host_nperio_tx_fifo_size = 96;
p->host_perio_tx_fifo_size = 96;
p->max_packet_count = 256;
p->phy_type = DWC2_PHY_TYPE_PARAM_FS;
p->i2c_enable = false;
p->activate_stm_fs_transceiver = true;
p->activate_stm_id_vb_detection = true;
p->ahbcfg = GAHBCFG_HBSTLEN_INCR16 << GAHBCFG_HBSTLEN_SHIFT;
p->power_down = DWC2_POWER_DOWN_PARAM_NONE;
p->host_support_fs_ls_low_power = true;
p->host_ls_low_power_phy_clk = true;
}
static void dwc2_set_stm32mp15_hsotg_params(struct dwc2_hsotg *hsotg)
{
struct dwc2_core_params *p = &hsotg->params;
p->otg_caps.hnp_support = false;
p->otg_caps.srp_support = false;
p->otg_caps.otg_rev = 0x200;
p->activate_stm_id_vb_detection = !device_property_read_bool(hsotg->dev, "usb-role-switch");
p->host_rx_fifo_size = 440;
p->host_nperio_tx_fifo_size = 256;
p->host_perio_tx_fifo_size = 256;
p->ahbcfg = GAHBCFG_HBSTLEN_INCR16 << GAHBCFG_HBSTLEN_SHIFT;
p->power_down = DWC2_POWER_DOWN_PARAM_NONE;
p->lpm = false;
p->lpm_clock_gating = false;
p->besl = false;
p->hird_threshold_en = false;
}
const struct of_device_id dwc2_of_match_table[] = {
{ .compatible = "brcm,bcm2835-usb", .data = dwc2_set_bcm_params },
{ .compatible = "hisilicon,hi6220-usb", .data = dwc2_set_his_params },
{ .compatible = "ingenic,jz4775-otg", .data = dwc2_set_jz4775_params },
{ .compatible = "ingenic,jz4780-otg", .data = dwc2_set_jz4775_params },
{ .compatible = "ingenic,x1000-otg", .data = dwc2_set_jz4775_params },
{ .compatible = "ingenic,x1600-otg", .data = dwc2_set_x1600_params },
{ .compatible = "ingenic,x1700-otg", .data = dwc2_set_x1600_params },
{ .compatible = "ingenic,x1830-otg", .data = dwc2_set_x1600_params },
{ .compatible = "ingenic,x2000-otg", .data = dwc2_set_x2000_params },
{ .compatible = "rockchip,rk3066-usb", .data = dwc2_set_rk_params },
{ .compatible = "lantiq,arx100-usb", .data = dwc2_set_ltq_params },
{ .compatible = "lantiq,xrx200-usb", .data = dwc2_set_ltq_params },
{ .compatible = "snps,dwc2" },
{ .compatible = "samsung,s3c6400-hsotg",
.data = dwc2_set_s3c6400_params },
{ .compatible = "amlogic,meson8-usb",
.data = dwc2_set_amlogic_params },
{ .compatible = "amlogic,meson8b-usb",
.data = dwc2_set_amlogic_params },
{ .compatible = "amlogic,meson-gxbb-usb",
.data = dwc2_set_amlogic_params },
{ .compatible = "amlogic,meson-g12a-usb",
.data = dwc2_set_amlogic_g12a_params },
{ .compatible = "amlogic,meson-a1-usb",
.data = dwc2_set_amlogic_a1_params },
{ .compatible = "amcc,dwc-otg", .data = dwc2_set_amcc_params },
{ .compatible = "apm,apm82181-dwc-otg", .data = dwc2_set_amcc_params },
{ .compatible = "st,stm32f4x9-fsotg",
.data = dwc2_set_stm32f4x9_fsotg_params },
{ .compatible = "st,stm32f4x9-hsotg" },
{ .compatible = "st,stm32f7-hsotg",
.data = dwc2_set_stm32f7_hsotg_params },
{ .compatible = "st,stm32mp15-fsotg",
.data = dwc2_set_stm32mp15_fsotg_params },
{ .compatible = "st,stm32mp15-hsotg",
.data = dwc2_set_stm32mp15_hsotg_params },
{ .compatible = "intel,socfpga-agilex-hsotg",
.data = dwc2_set_socfpga_agilex_params },
{},
};
MODULE_DEVICE_TABLE(of, dwc2_of_match_table);
const struct acpi_device_id dwc2_acpi_match[] = {
{ "BCM2848", (kernel_ulong_t)dwc2_set_bcm_params },
{ },
};
MODULE_DEVICE_TABLE(acpi, dwc2_acpi_match);
const struct pci_device_id dwc2_pci_ids[] = {
{
PCI_DEVICE(PCI_VENDOR_ID_SYNOPSYS, PCI_PRODUCT_ID_HAPS_HSOTG),
},
{
PCI_DEVICE(PCI_VENDOR_ID_STMICRO,
PCI_DEVICE_ID_STMICRO_USB_OTG),
},
{
PCI_DEVICE(PCI_VENDOR_ID_LOONGSON, PCI_DEVICE_ID_LOONGSON_DWC2),
.driver_data = (unsigned long)dwc2_set_loongson_params,
},
{ /* end: all zeroes */ }
};
MODULE_DEVICE_TABLE(pci, dwc2_pci_ids);
EXPORT_SYMBOL_GPL(dwc2_pci_ids);
static void dwc2_set_param_otg_cap(struct dwc2_hsotg *hsotg)
{
switch (hsotg->hw_params.op_mode) {
case GHWCFG2_OP_MODE_HNP_SRP_CAPABLE:
hsotg->params.otg_caps.hnp_support = true;
hsotg->params.otg_caps.srp_support = true;
break;
case GHWCFG2_OP_MODE_SRP_ONLY_CAPABLE:
case GHWCFG2_OP_MODE_SRP_CAPABLE_DEVICE:
case GHWCFG2_OP_MODE_SRP_CAPABLE_HOST:
hsotg->params.otg_caps.hnp_support = false;
hsotg->params.otg_caps.srp_support = true;
break;
default:
hsotg->params.otg_caps.hnp_support = false;
hsotg->params.otg_caps.srp_support = false;
break;
}
}
static void dwc2_set_param_phy_type(struct dwc2_hsotg *hsotg)
{
int val;
u32 hs_phy_type = hsotg->hw_params.hs_phy_type;
val = DWC2_PHY_TYPE_PARAM_FS;
if (hs_phy_type != GHWCFG2_HS_PHY_TYPE_NOT_SUPPORTED) {
if (hs_phy_type == GHWCFG2_HS_PHY_TYPE_UTMI ||
hs_phy_type == GHWCFG2_HS_PHY_TYPE_UTMI_ULPI)
val = DWC2_PHY_TYPE_PARAM_UTMI;
else
val = DWC2_PHY_TYPE_PARAM_ULPI;
}
if (dwc2_is_fs_iot(hsotg))
hsotg->params.phy_type = DWC2_PHY_TYPE_PARAM_FS;
hsotg->params.phy_type = val;
}
static void dwc2_set_param_speed(struct dwc2_hsotg *hsotg)
{
int val;
val = hsotg->params.phy_type == DWC2_PHY_TYPE_PARAM_FS ?
DWC2_SPEED_PARAM_FULL : DWC2_SPEED_PARAM_HIGH;
if (dwc2_is_fs_iot(hsotg))
val = DWC2_SPEED_PARAM_FULL;
if (dwc2_is_hs_iot(hsotg))
val = DWC2_SPEED_PARAM_HIGH;
hsotg->params.speed = val;
}
static void dwc2_set_param_phy_utmi_width(struct dwc2_hsotg *hsotg)
{
int val;
val = (hsotg->hw_params.utmi_phy_data_width ==
GHWCFG4_UTMI_PHY_DATA_WIDTH_8) ? 8 : 16;
if (hsotg->phy) {
/*
* If using the generic PHY framework, check if the PHY bus
* width is 8-bit and set the phyif appropriately.
*/
if (phy_get_bus_width(hsotg->phy) == 8)
val = 8;
}
hsotg->params.phy_utmi_width = val;
}
static void dwc2_set_param_tx_fifo_sizes(struct dwc2_hsotg *hsotg)
{
struct dwc2_core_params *p = &hsotg->params;
int depth_average;
int fifo_count;
int i;
fifo_count = dwc2_hsotg_tx_fifo_count(hsotg);
memset(p->g_tx_fifo_size, 0, sizeof(p->g_tx_fifo_size));
depth_average = dwc2_hsotg_tx_fifo_average_depth(hsotg);
for (i = 1; i <= fifo_count; i++)
p->g_tx_fifo_size[i] = depth_average;
}
static void dwc2_set_param_power_down(struct dwc2_hsotg *hsotg)
{
int val;
if (hsotg->hw_params.hibernation)
val = DWC2_POWER_DOWN_PARAM_HIBERNATION;
else if (hsotg->hw_params.power_optimized)
val = DWC2_POWER_DOWN_PARAM_PARTIAL;
else
val = DWC2_POWER_DOWN_PARAM_NONE;
hsotg->params.power_down = val;
}
static void dwc2_set_param_lpm(struct dwc2_hsotg *hsotg)
{
struct dwc2_core_params *p = &hsotg->params;
p->lpm = hsotg->hw_params.lpm_mode;
if (p->lpm) {
p->lpm_clock_gating = true;
p->besl = true;
p->hird_threshold_en = true;
p->hird_threshold = 4;
} else {
p->lpm_clock_gating = false;
p->besl = false;
p->hird_threshold_en = false;
}
}
/**
* dwc2_set_default_params() - Set all core parameters to their
* auto-detected default values.
*
* @hsotg: Programming view of the DWC_otg controller
*
*/
static void dwc2_set_default_params(struct dwc2_hsotg *hsotg)
{
struct dwc2_hw_params *hw = &hsotg->hw_params;
struct dwc2_core_params *p = &hsotg->params;
bool dma_capable = !(hw->arch == GHWCFG2_SLAVE_ONLY_ARCH);
dwc2_set_param_otg_cap(hsotg);
dwc2_set_param_phy_type(hsotg);
dwc2_set_param_speed(hsotg);
dwc2_set_param_phy_utmi_width(hsotg);
dwc2_set_param_power_down(hsotg);
dwc2_set_param_lpm(hsotg);
p->phy_ulpi_ddr = false;
p->phy_ulpi_ext_vbus = false;
p->enable_dynamic_fifo = hw->enable_dynamic_fifo;
p->en_multiple_tx_fifo = hw->en_multiple_tx_fifo;
p->i2c_enable = hw->i2c_enable;
p->acg_enable = hw->acg_enable;
p->ulpi_fs_ls = false;
p->ts_dline = false;
p->reload_ctl = (hw->snpsid >= DWC2_CORE_REV_2_92a);
p->uframe_sched = true;
p->external_id_pin_ctl = false;
p->ipg_isoc_en = false;
p->service_interval = false;
p->max_packet_count = hw->max_packet_count;
p->max_transfer_size = hw->max_transfer_size;
p->ahbcfg = GAHBCFG_HBSTLEN_INCR << GAHBCFG_HBSTLEN_SHIFT;
p->ref_clk_per = 33333;
p->sof_cnt_wkup_alert = 100;
if ((hsotg->dr_mode == USB_DR_MODE_HOST) ||
(hsotg->dr_mode == USB_DR_MODE_OTG)) {
p->host_dma = dma_capable;
p->dma_desc_enable = false;
p->dma_desc_fs_enable = false;
p->host_support_fs_ls_low_power = false;
p->host_ls_low_power_phy_clk = false;
p->host_channels = hw->host_channels;
p->host_rx_fifo_size = hw->rx_fifo_size;
p->host_nperio_tx_fifo_size = hw->host_nperio_tx_fifo_size;
p->host_perio_tx_fifo_size = hw->host_perio_tx_fifo_size;
}
if ((hsotg->dr_mode == USB_DR_MODE_PERIPHERAL) ||
(hsotg->dr_mode == USB_DR_MODE_OTG)) {
p->g_dma = dma_capable;
p->g_dma_desc = hw->dma_desc_enable;
/*
* The values for g_rx_fifo_size (2048) and
* g_np_tx_fifo_size (1024) come from the legacy s3c
* gadget driver. These defaults have been hard-coded
* for some time so many platforms depend on these
* values. Leave them as defaults for now and only
* auto-detect if the hardware does not support the
* default.
*/
p->g_rx_fifo_size = 2048;
p->g_np_tx_fifo_size = 1024;
dwc2_set_param_tx_fifo_sizes(hsotg);
}
}
/**
* dwc2_get_device_properties() - Read in device properties.
*
* @hsotg: Programming view of the DWC_otg controller
*
* Read in the device properties and adjust core parameters if needed.
*/
static void dwc2_get_device_properties(struct dwc2_hsotg *hsotg)
{
struct dwc2_core_params *p = &hsotg->params;
int num;
if ((hsotg->dr_mode == USB_DR_MODE_PERIPHERAL) ||
(hsotg->dr_mode == USB_DR_MODE_OTG)) {
device_property_read_u32(hsotg->dev, "g-rx-fifo-size",
&p->g_rx_fifo_size);
device_property_read_u32(hsotg->dev, "g-np-tx-fifo-size",
&p->g_np_tx_fifo_size);
num = device_property_count_u32(hsotg->dev, "g-tx-fifo-size");
if (num > 0) {
num = min(num, 15);
memset(p->g_tx_fifo_size, 0,
sizeof(p->g_tx_fifo_size));
device_property_read_u32_array(hsotg->dev,
"g-tx-fifo-size",
&p->g_tx_fifo_size[1],
num);
}
of_usb_update_otg_caps(hsotg->dev->of_node, &p->otg_caps);
}
p->oc_disable = of_property_read_bool(hsotg->dev->of_node, "disable-over-current");
}
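/*
 * Illustrative device tree fragment (a sketch, not taken from any real
 * board file; the node name and sizes are made up) showing the gadget
 * FIFO properties consumed above:
 *
 *	usb@101c0000 {
 *		compatible = "snps,dwc2";
 *		...
 *		g-rx-fifo-size = <512>;
 *		g-np-tx-fifo-size = <256>;
 *		g-tx-fifo-size = <128 128 128 128>;
 *	};
 *
 * "g-tx-fifo-size" fills p->g_tx_fifo_size[1..num]; at most 15 entries are
 * used, matching the min(num, 15) clamp above.
 */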
static void dwc2_check_param_otg_cap(struct dwc2_hsotg *hsotg)
{
int valid = 1;
if (hsotg->params.otg_caps.hnp_support && hsotg->params.otg_caps.srp_support) {
/* check HNP && SRP capable */
if (hsotg->hw_params.op_mode != GHWCFG2_OP_MODE_HNP_SRP_CAPABLE)
valid = 0;
} else if (!hsotg->params.otg_caps.hnp_support) {
/* check SRP only capable */
if (hsotg->params.otg_caps.srp_support) {
switch (hsotg->hw_params.op_mode) {
case GHWCFG2_OP_MODE_HNP_SRP_CAPABLE:
case GHWCFG2_OP_MODE_SRP_ONLY_CAPABLE:
case GHWCFG2_OP_MODE_SRP_CAPABLE_DEVICE:
case GHWCFG2_OP_MODE_SRP_CAPABLE_HOST:
break;
default:
valid = 0;
break;
}
}
/* else: NO HNP && NO SRP capable: always valid */
} else {
valid = 0;
}
if (!valid)
dwc2_set_param_otg_cap(hsotg);
}
static void dwc2_check_param_phy_type(struct dwc2_hsotg *hsotg)
{
int valid = 0;
u32 hs_phy_type;
u32 fs_phy_type;
hs_phy_type = hsotg->hw_params.hs_phy_type;
fs_phy_type = hsotg->hw_params.fs_phy_type;
switch (hsotg->params.phy_type) {
case DWC2_PHY_TYPE_PARAM_FS:
if (fs_phy_type == GHWCFG2_FS_PHY_TYPE_DEDICATED)
valid = 1;
break;
case DWC2_PHY_TYPE_PARAM_UTMI:
if ((hs_phy_type == GHWCFG2_HS_PHY_TYPE_UTMI) ||
(hs_phy_type == GHWCFG2_HS_PHY_TYPE_UTMI_ULPI))
valid = 1;
break;
case DWC2_PHY_TYPE_PARAM_ULPI:
if ((hs_phy_type == GHWCFG2_HS_PHY_TYPE_UTMI) ||
(hs_phy_type == GHWCFG2_HS_PHY_TYPE_UTMI_ULPI))
valid = 1;
break;
default:
break;
}
if (!valid)
dwc2_set_param_phy_type(hsotg);
}
static void dwc2_check_param_speed(struct dwc2_hsotg *hsotg)
{
int valid = 1;
int phy_type = hsotg->params.phy_type;
int speed = hsotg->params.speed;
switch (speed) {
case DWC2_SPEED_PARAM_HIGH:
if ((hsotg->params.speed == DWC2_SPEED_PARAM_HIGH) &&
(phy_type == DWC2_PHY_TYPE_PARAM_FS))
valid = 0;
break;
case DWC2_SPEED_PARAM_FULL:
case DWC2_SPEED_PARAM_LOW:
break;
default:
valid = 0;
break;
}
if (!valid)
dwc2_set_param_speed(hsotg);
}
static void dwc2_check_param_phy_utmi_width(struct dwc2_hsotg *hsotg)
{
int valid = 0;
int param = hsotg->params.phy_utmi_width;
int width = hsotg->hw_params.utmi_phy_data_width;
switch (width) {
case GHWCFG4_UTMI_PHY_DATA_WIDTH_8:
valid = (param == 8);
break;
case GHWCFG4_UTMI_PHY_DATA_WIDTH_16:
valid = (param == 16);
break;
case GHWCFG4_UTMI_PHY_DATA_WIDTH_8_OR_16:
valid = (param == 8 || param == 16);
break;
}
if (!valid)
dwc2_set_param_phy_utmi_width(hsotg);
}
static void dwc2_check_param_power_down(struct dwc2_hsotg *hsotg)
{
int param = hsotg->params.power_down;
switch (param) {
case DWC2_POWER_DOWN_PARAM_NONE:
break;
case DWC2_POWER_DOWN_PARAM_PARTIAL:
if (hsotg->hw_params.power_optimized)
break;
dev_dbg(hsotg->dev,
"Partial power down isn't supported by HW\n");
param = DWC2_POWER_DOWN_PARAM_NONE;
break;
case DWC2_POWER_DOWN_PARAM_HIBERNATION:
if (hsotg->hw_params.hibernation)
break;
dev_dbg(hsotg->dev,
"Hibernation isn't supported by HW\n");
param = DWC2_POWER_DOWN_PARAM_NONE;
break;
default:
dev_err(hsotg->dev,
"%s: Invalid parameter power_down=%d\n",
__func__, param);
param = DWC2_POWER_DOWN_PARAM_NONE;
break;
}
hsotg->params.power_down = param;
}
static void dwc2_check_param_tx_fifo_sizes(struct dwc2_hsotg *hsotg)
{
int fifo_count;
int fifo;
int min;
u32 total = 0;
u32 dptxfszn;
fifo_count = dwc2_hsotg_tx_fifo_count(hsotg);
min = hsotg->hw_params.en_multiple_tx_fifo ? 16 : 4;
for (fifo = 1; fifo <= fifo_count; fifo++)
total += hsotg->params.g_tx_fifo_size[fifo];
if (total > dwc2_hsotg_tx_fifo_total_depth(hsotg) || !total) {
dev_warn(hsotg->dev, "%s: Invalid parameter g-tx-fifo-size, setting to default average\n",
__func__);
dwc2_set_param_tx_fifo_sizes(hsotg);
}
for (fifo = 1; fifo <= fifo_count; fifo++) {
dptxfszn = hsotg->hw_params.g_tx_fifo_size[fifo];
if (hsotg->params.g_tx_fifo_size[fifo] < min ||
hsotg->params.g_tx_fifo_size[fifo] > dptxfszn) {
dev_warn(hsotg->dev, "%s: Invalid parameter g_tx_fifo_size[%d]=%d\n",
__func__, fifo,
hsotg->params.g_tx_fifo_size[fifo]);
hsotg->params.g_tx_fifo_size[fifo] = dptxfszn;
}
}
}
#define CHECK_RANGE(_param, _min, _max, _def) do { \
if ((int)(hsotg->params._param) < (_min) || \
(hsotg->params._param) > (_max)) { \
dev_warn(hsotg->dev, "%s: Invalid parameter %s=%d\n", \
__func__, #_param, hsotg->params._param); \
hsotg->params._param = (_def); \
} \
} while (0)
#define CHECK_BOOL(_param, _check) do { \
if (hsotg->params._param && !(_check)) { \
dev_warn(hsotg->dev, "%s: Invalid parameter %s=%d\n", \
__func__, #_param, hsotg->params._param); \
hsotg->params._param = false; \
} \
} while (0)
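/*
 * For illustration, the CHECK_RANGE() use in dwc2_check_params() below,
 *
 *	CHECK_RANGE(host_channels, 1, hw->host_channels, hw->host_channels);
 *
 * expands to roughly:
 *
 *	if ((int)(hsotg->params.host_channels) < 1 ||
 *	    (hsotg->params.host_channels) > hw->host_channels) {
 *		dev_warn(hsotg->dev, "%s: Invalid parameter host_channels=%d\n",
 *			 __func__, hsotg->params.host_channels);
 *		hsotg->params.host_channels = hw->host_channels;
 *	}
 *
 * i.e. an out-of-range value is clamped back to the hardware default.
 */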
static void dwc2_check_params(struct dwc2_hsotg *hsotg)
{
struct dwc2_hw_params *hw = &hsotg->hw_params;
struct dwc2_core_params *p = &hsotg->params;
bool dma_capable = !(hw->arch == GHWCFG2_SLAVE_ONLY_ARCH);
dwc2_check_param_otg_cap(hsotg);
dwc2_check_param_phy_type(hsotg);
dwc2_check_param_speed(hsotg);
dwc2_check_param_phy_utmi_width(hsotg);
dwc2_check_param_power_down(hsotg);
CHECK_BOOL(enable_dynamic_fifo, hw->enable_dynamic_fifo);
CHECK_BOOL(en_multiple_tx_fifo, hw->en_multiple_tx_fifo);
CHECK_BOOL(i2c_enable, hw->i2c_enable);
CHECK_BOOL(ipg_isoc_en, hw->ipg_isoc_en);
CHECK_BOOL(acg_enable, hw->acg_enable);
CHECK_BOOL(reload_ctl, (hsotg->hw_params.snpsid > DWC2_CORE_REV_2_92a));
CHECK_BOOL(lpm, (hsotg->hw_params.snpsid >= DWC2_CORE_REV_2_80a));
CHECK_BOOL(lpm, hw->lpm_mode);
CHECK_BOOL(lpm_clock_gating, hsotg->params.lpm);
CHECK_BOOL(besl, hsotg->params.lpm);
CHECK_BOOL(besl, (hsotg->hw_params.snpsid >= DWC2_CORE_REV_3_00a));
CHECK_BOOL(hird_threshold_en, hsotg->params.lpm);
CHECK_RANGE(hird_threshold, 0, hsotg->params.besl ? 12 : 7, 0);
CHECK_BOOL(service_interval, hw->service_interval_mode);
CHECK_RANGE(max_packet_count,
15, hw->max_packet_count,
hw->max_packet_count);
CHECK_RANGE(max_transfer_size,
2047, hw->max_transfer_size,
hw->max_transfer_size);
if ((hsotg->dr_mode == USB_DR_MODE_HOST) ||
(hsotg->dr_mode == USB_DR_MODE_OTG)) {
CHECK_BOOL(host_dma, dma_capable);
CHECK_BOOL(dma_desc_enable, p->host_dma);
CHECK_BOOL(dma_desc_fs_enable, p->dma_desc_enable);
CHECK_BOOL(host_ls_low_power_phy_clk,
p->phy_type == DWC2_PHY_TYPE_PARAM_FS);
CHECK_RANGE(host_channels,
1, hw->host_channels,
hw->host_channels);
CHECK_RANGE(host_rx_fifo_size,
16, hw->rx_fifo_size,
hw->rx_fifo_size);
CHECK_RANGE(host_nperio_tx_fifo_size,
16, hw->host_nperio_tx_fifo_size,
hw->host_nperio_tx_fifo_size);
CHECK_RANGE(host_perio_tx_fifo_size,
16, hw->host_perio_tx_fifo_size,
hw->host_perio_tx_fifo_size);
}
if ((hsotg->dr_mode == USB_DR_MODE_PERIPHERAL) ||
(hsotg->dr_mode == USB_DR_MODE_OTG)) {
CHECK_BOOL(g_dma, dma_capable);
CHECK_BOOL(g_dma_desc, (p->g_dma && hw->dma_desc_enable));
CHECK_RANGE(g_rx_fifo_size,
16, hw->rx_fifo_size,
hw->rx_fifo_size);
CHECK_RANGE(g_np_tx_fifo_size,
16, hw->dev_nperio_tx_fifo_size,
hw->dev_nperio_tx_fifo_size);
dwc2_check_param_tx_fifo_sizes(hsotg);
}
}
/*
* Gets host hardware parameters. Forces host mode if not currently in
* host mode. Should be called immediately after a core soft reset in
* order to get the reset values.
*/
static void dwc2_get_host_hwparams(struct dwc2_hsotg *hsotg)
{
struct dwc2_hw_params *hw = &hsotg->hw_params;
u32 gnptxfsiz;
u32 hptxfsiz;
if (hsotg->dr_mode == USB_DR_MODE_PERIPHERAL)
return;
dwc2_force_mode(hsotg, true);
gnptxfsiz = dwc2_readl(hsotg, GNPTXFSIZ);
hptxfsiz = dwc2_readl(hsotg, HPTXFSIZ);
hw->host_nperio_tx_fifo_size = (gnptxfsiz & FIFOSIZE_DEPTH_MASK) >>
FIFOSIZE_DEPTH_SHIFT;
hw->host_perio_tx_fifo_size = (hptxfsiz & FIFOSIZE_DEPTH_MASK) >>
FIFOSIZE_DEPTH_SHIFT;
}
/*
* Gets device hardware parameters. Forces device mode if not
* currently in device mode. Should be called immediately after a core
* soft reset in order to get the reset values.
*/
static void dwc2_get_dev_hwparams(struct dwc2_hsotg *hsotg)
{
struct dwc2_hw_params *hw = &hsotg->hw_params;
u32 gnptxfsiz;
int fifo, fifo_count;
if (hsotg->dr_mode == USB_DR_MODE_HOST)
return;
dwc2_force_mode(hsotg, false);
gnptxfsiz = dwc2_readl(hsotg, GNPTXFSIZ);
fifo_count = dwc2_hsotg_tx_fifo_count(hsotg);
for (fifo = 1; fifo <= fifo_count; fifo++) {
hw->g_tx_fifo_size[fifo] =
(dwc2_readl(hsotg, DPTXFSIZN(fifo)) &
FIFOSIZE_DEPTH_MASK) >> FIFOSIZE_DEPTH_SHIFT;
}
hw->dev_nperio_tx_fifo_size = (gnptxfsiz & FIFOSIZE_DEPTH_MASK) >>
FIFOSIZE_DEPTH_SHIFT;
}
/**
* dwc2_get_hwparams() - During device initialization, read various hardware
* configuration registers and interpret the contents.
*
* @hsotg: Programming view of the DWC_otg controller
*
*/
int dwc2_get_hwparams(struct dwc2_hsotg *hsotg)
{
struct dwc2_hw_params *hw = &hsotg->hw_params;
unsigned int width;
u32 hwcfg1, hwcfg2, hwcfg3, hwcfg4;
u32 grxfsiz;
hwcfg1 = dwc2_readl(hsotg, GHWCFG1);
hwcfg2 = dwc2_readl(hsotg, GHWCFG2);
hwcfg3 = dwc2_readl(hsotg, GHWCFG3);
hwcfg4 = dwc2_readl(hsotg, GHWCFG4);
grxfsiz = dwc2_readl(hsotg, GRXFSIZ);
/* hwcfg1 */
hw->dev_ep_dirs = hwcfg1;
/* hwcfg2 */
hw->op_mode = (hwcfg2 & GHWCFG2_OP_MODE_MASK) >>
GHWCFG2_OP_MODE_SHIFT;
hw->arch = (hwcfg2 & GHWCFG2_ARCHITECTURE_MASK) >>
GHWCFG2_ARCHITECTURE_SHIFT;
hw->enable_dynamic_fifo = !!(hwcfg2 & GHWCFG2_DYNAMIC_FIFO);
hw->host_channels = 1 + ((hwcfg2 & GHWCFG2_NUM_HOST_CHAN_MASK) >>
GHWCFG2_NUM_HOST_CHAN_SHIFT);
hw->hs_phy_type = (hwcfg2 & GHWCFG2_HS_PHY_TYPE_MASK) >>
GHWCFG2_HS_PHY_TYPE_SHIFT;
hw->fs_phy_type = (hwcfg2 & GHWCFG2_FS_PHY_TYPE_MASK) >>
GHWCFG2_FS_PHY_TYPE_SHIFT;
hw->num_dev_ep = (hwcfg2 & GHWCFG2_NUM_DEV_EP_MASK) >>
GHWCFG2_NUM_DEV_EP_SHIFT;
hw->nperio_tx_q_depth =
(hwcfg2 & GHWCFG2_NONPERIO_TX_Q_DEPTH_MASK) >>
GHWCFG2_NONPERIO_TX_Q_DEPTH_SHIFT << 1;
hw->host_perio_tx_q_depth =
(hwcfg2 & GHWCFG2_HOST_PERIO_TX_Q_DEPTH_MASK) >>
GHWCFG2_HOST_PERIO_TX_Q_DEPTH_SHIFT << 1;
hw->dev_token_q_depth =
(hwcfg2 & GHWCFG2_DEV_TOKEN_Q_DEPTH_MASK) >>
GHWCFG2_DEV_TOKEN_Q_DEPTH_SHIFT;
/* hwcfg3 */
width = (hwcfg3 & GHWCFG3_XFER_SIZE_CNTR_WIDTH_MASK) >>
GHWCFG3_XFER_SIZE_CNTR_WIDTH_SHIFT;
hw->max_transfer_size = (1 << (width + 11)) - 1;
width = (hwcfg3 & GHWCFG3_PACKET_SIZE_CNTR_WIDTH_MASK) >>
GHWCFG3_PACKET_SIZE_CNTR_WIDTH_SHIFT;
hw->max_packet_count = (1 << (width + 4)) - 1;
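/*
 * Worked example (the widths depend on the synthesis-time configuration):
 * a transfer size counter width field of 8 gives
 * max_transfer_size = (1 << (8 + 11)) - 1 = 524287 bytes, and a packet
 * size counter width field of 6 gives
 * max_packet_count = (1 << (6 + 4)) - 1 = 1023 packets.
 */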
hw->i2c_enable = !!(hwcfg3 & GHWCFG3_I2C);
hw->total_fifo_size = (hwcfg3 & GHWCFG3_DFIFO_DEPTH_MASK) >>
GHWCFG3_DFIFO_DEPTH_SHIFT;
hw->lpm_mode = !!(hwcfg3 & GHWCFG3_OTG_LPM_EN);
/* hwcfg4 */
hw->en_multiple_tx_fifo = !!(hwcfg4 & GHWCFG4_DED_FIFO_EN);
hw->num_dev_perio_in_ep = (hwcfg4 & GHWCFG4_NUM_DEV_PERIO_IN_EP_MASK) >>
GHWCFG4_NUM_DEV_PERIO_IN_EP_SHIFT;
hw->num_dev_in_eps = (hwcfg4 & GHWCFG4_NUM_IN_EPS_MASK) >>
GHWCFG4_NUM_IN_EPS_SHIFT;
hw->dma_desc_enable = !!(hwcfg4 & GHWCFG4_DESC_DMA);
hw->power_optimized = !!(hwcfg4 & GHWCFG4_POWER_OPTIMIZ);
hw->hibernation = !!(hwcfg4 & GHWCFG4_HIBER);
hw->utmi_phy_data_width = (hwcfg4 & GHWCFG4_UTMI_PHY_DATA_WIDTH_MASK) >>
GHWCFG4_UTMI_PHY_DATA_WIDTH_SHIFT;
hw->acg_enable = !!(hwcfg4 & GHWCFG4_ACG_SUPPORTED);
hw->ipg_isoc_en = !!(hwcfg4 & GHWCFG4_IPG_ISOC_SUPPORTED);
hw->service_interval_mode = !!(hwcfg4 &
GHWCFG4_SERVICE_INTERVAL_SUPPORTED);
/* fifo sizes */
hw->rx_fifo_size = (grxfsiz & GRXFSIZ_DEPTH_MASK) >>
GRXFSIZ_DEPTH_SHIFT;
/*
* Host specific hardware parameters. Reading these parameters
* requires the controller to be in host mode. The mode will
* be forced, if necessary, to read these values.
*/
dwc2_get_host_hwparams(hsotg);
dwc2_get_dev_hwparams(hsotg);
return 0;
}
typedef void (*set_params_cb)(struct dwc2_hsotg *data);
int dwc2_init_params(struct dwc2_hsotg *hsotg)
{
const struct of_device_id *match;
set_params_cb set_params;
dwc2_set_default_params(hsotg);
dwc2_get_device_properties(hsotg);
match = of_match_device(dwc2_of_match_table, hsotg->dev);
if (match && match->data) {
set_params = match->data;
set_params(hsotg);
} else if (!match) {
const struct acpi_device_id *amatch;
const struct pci_device_id *pmatch = NULL;
amatch = acpi_match_device(dwc2_acpi_match, hsotg->dev);
if (amatch && amatch->driver_data) {
set_params = (set_params_cb)amatch->driver_data;
set_params(hsotg);
} else if (!amatch)
pmatch = pci_match_id(dwc2_pci_ids, to_pci_dev(hsotg->dev->parent));
if (pmatch && pmatch->driver_data) {
set_params = (set_params_cb)pmatch->driver_data;
set_params(hsotg);
}
}
dwc2_check_params(hsotg);
return 0;
}
| linux-master | drivers/usb/dwc2/params.c |
// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
/*
* hcd_queue.c - DesignWare HS OTG Controller host queuing routines
*
* Copyright (C) 2004-2013 Synopsys, Inc.
*/
/*
* This file contains the functions to manage Queue Heads and Queue
* Transfer Descriptors for Host mode
*/
#include <linux/gcd.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/dma-mapping.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/usb.h>
#include <linux/usb/hcd.h>
#include <linux/usb/ch11.h>
#include "core.h"
#include "hcd.h"
/* Wait this long before releasing periodic reservation */
#define DWC2_UNRESERVE_DELAY (msecs_to_jiffies(5))
/* If we get a NAK, wait this long before retrying */
#define DWC2_RETRY_WAIT_DELAY (1 * NSEC_PER_MSEC)
/**
* dwc2_periodic_channel_available() - Checks that a channel is available for a
* periodic transfer
*
* @hsotg: The HCD state structure for the DWC OTG controller
*
* Return: 0 if successful, negative error code otherwise
*/
static int dwc2_periodic_channel_available(struct dwc2_hsotg *hsotg)
{
/*
* Currently assuming that there is a dedicated host channel for
* each periodic transaction plus at least one host channel for
* non-periodic transactions
*/
int status;
int num_channels;
num_channels = hsotg->params.host_channels;
if ((hsotg->periodic_channels + hsotg->non_periodic_channels <
num_channels) && (hsotg->periodic_channels < num_channels - 1)) {
status = 0;
} else {
dev_dbg(hsotg->dev,
"%s: Total channels: %d, Periodic: %d, Non-periodic: %d\n",
__func__, num_channels,
hsotg->periodic_channels, hsotg->non_periodic_channels);
status = -ENOSPC;
}
return status;
}
/**
* dwc2_check_periodic_bandwidth() - Checks that there is sufficient bandwidth
* for the specified QH in the periodic schedule
*
* @hsotg: The HCD state structure for the DWC OTG controller
* @qh: QH containing periodic bandwidth required
*
* Return: 0 if successful, negative error code otherwise
*
* For simplicity, this calculation assumes that all the transfers in the
* periodic schedule may occur in the same (micro)frame
*/
static int dwc2_check_periodic_bandwidth(struct dwc2_hsotg *hsotg,
struct dwc2_qh *qh)
{
int status;
s16 max_claimed_usecs;
status = 0;
if (qh->dev_speed == USB_SPEED_HIGH || qh->do_split) {
/*
* High speed mode
* Max periodic usecs is 80% x 125 usec = 100 usec
*/
max_claimed_usecs = 100 - qh->host_us;
} else {
/*
* Full speed mode
* Max periodic usecs is 90% x 1000 usec = 900 usec
*/
max_claimed_usecs = 900 - qh->host_us;
}
if (hsotg->periodic_usecs > max_claimed_usecs) {
dev_err(hsotg->dev,
"%s: already claimed usecs %d, required usecs %d\n",
__func__, hsotg->periodic_usecs, qh->host_us);
status = -ENOSPC;
}
return status;
}
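/*
 * Worked example for the check above (numbers are illustrative only): in
 * high speed mode a QH with host_us = 40 gives max_claimed_usecs =
 * 100 - 40 = 60, so the QH is rejected with -ENOSPC once
 * hsotg->periodic_usecs already exceeds 60.
 */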
/**
* pmap_schedule() - Schedule time in a periodic bitmap (pmap).
*
* @map: The bitmap representing the schedule; will be updated
* upon success.
* @bits_per_period: The schedule represents several periods. This is how many
* bits are in each period. It's assumed that the beginning
* of the schedule will repeat after its end.
* @periods_in_map: The number of periods in the schedule.
* @num_bits: The number of bits we need per period we want to reserve
* in this function call.
* @interval: How often we need to be scheduled for the reservation this
* time. 1 means every period. 2 means every other period.
* ...you get the picture?
* @start: The bit number to start at. Normally 0. Must be within
* the interval or we return failure right away.
* @only_one_period: Normally we'll allow picking a start anywhere within the
* first interval, since we can still make all repetition
* requirements by doing that. However, if you pass true
* here then we'll return failure if we can't fit within
* the period that "start" is in.
*
* The idea here is that we want to schedule time for repeating events that all
* want the same resource. The resource is divided into fixed-sized periods
* and the events want to repeat every "interval" periods. The schedule
* granularity is one bit.
*
* To keep things "simple", we'll represent our schedule with a bitmap that
* contains a fixed number of periods. This gets rid of a lot of complexity
* but does mean that we need to handle things specially (and non-ideally) if
* the number of the periods in the schedule doesn't match well with the
* intervals that we're trying to schedule.
*
* Here's an explanation of the scheme we'll implement, assuming 8 periods.
* - If interval is 1, we need to take up space in each of the 8
* periods we're scheduling. Easy.
* - If interval is 2, we need to take up space in half of the
* periods. Again, easy.
* - If interval is 3, we actually need to fall back to interval 1.
* Why? Because we might need time in any period. AKA for the
* first 8 periods, we'll be in slot 0, 3, 6. Then we'll be
* in slot 1, 4, 7. Then we'll be in 2, 5. Then we'll be back to
* 0, 3, and 6. Since we could be in any frame we need to reserve
* for all of them. Sucks, but that's what you gotta do. Note that
* if we were instead scheduling 8 * 3 = 24 we'd do much better, but
* then we need more memory and time to do scheduling.
* - If interval is 4, easy.
* - If interval is 5, we again need interval 1. The schedule will be
* 0, 5, 2, 7, 4, 1, 6, 3, 0
* - If interval is 6, we need interval 2. 0, 6, 4, 2.
* - If interval is 7, we need interval 1.
* - If interval is 8, we need interval 8.
*
* If you do the math, you'll see that we need to pretend that interval is
* equal to the greatest_common_divisor(interval, periods_in_map).
*
* Note that at the moment this function tends to front-pack the schedule.
* In some cases that's really non-ideal (it's hard to schedule things that
* need to repeat every period). In other cases it's perfect (you can easily
* schedule bigger, less often repeating things).
*
* Here's the algorithm in action (8 periods, 5 bits per period):
* |** | |** | |** | |** | | OK 2 bits, intv 2 at 0
* |*****| ***|*****| ***|*****| ***|*****| ***| OK 3 bits, intv 3 at 2
* |*****|* ***|*****| ***|*****|* ***|*****| ***| OK 1 bits, intv 4 at 5
* |** |* |** | |** |* |** | | Remv 3 bits, intv 3 at 2
* |*** |* |*** | |*** |* |*** | | OK 1 bits, intv 6 at 2
* |**** |* * |**** | * |**** |* * |**** | * | OK 1 bits, intv 1 at 3
* |**** |**** |**** | *** |**** |**** |**** | *** | OK 2 bits, intv 2 at 6
* |*****|*****|*****| ****|*****|*****|*****| ****| OK 1 bits, intv 1 at 4
* |*****|*****|*****| ****|*****|*****|*****| ****| FAIL 1 bits, intv 1
* | ***|*****| ***| ****| ***|*****| ***| ****| Remv 2 bits, intv 2 at 0
* | ***| ****| ***| ****| ***| ****| ***| ****| Remv 1 bits, intv 4 at 5
* | **| ****| **| ****| **| ****| **| ****| Remv 1 bits, intv 6 at 2
* | *| ** *| *| ** *| *| ** *| *| ** *| Remv 1 bits, intv 1 at 3
* | *| *| *| *| *| *| *| *| Remv 2 bits, intv 2 at 6
* | | | | | | | | | Remv 1 bits, intv 1 at 4
* |** | |** | |** | |** | | OK 2 bits, intv 2 at 0
* |*** | |** | |*** | |** | | OK 1 bits, intv 4 at 2
* |*****| |** **| |*****| |** **| | OK 2 bits, intv 2 at 3
* |*****|* |** **| |*****|* |** **| | OK 1 bits, intv 4 at 5
* |*****|*** |** **| ** |*****|*** |** **| ** | OK 2 bits, intv 2 at 6
* |*****|*****|** **| ****|*****|*****|** **| ****| OK 2 bits, intv 2 at 8
* |*****|*****|*****| ****|*****|*****|*****| ****| OK 1 bits, intv 4 at 12
*
* This function is pretty generic and could be easily abstracted if anything
* needed similar scheduling.
*
* Returns either -ENOSPC or a >= 0 start bit which should be passed to the
* unschedule routine. The map bitmap will be updated on a non-error result.
*/
static int pmap_schedule(unsigned long *map, int bits_per_period,
int periods_in_map, int num_bits,
int interval, int start, bool only_one_period)
{
int interval_bits;
int to_reserve;
int first_end;
int i;
if (num_bits > bits_per_period)
return -ENOSPC;
/* Adjust interval as per description */
interval = gcd(interval, periods_in_map);
interval_bits = bits_per_period * interval;
to_reserve = periods_in_map / interval;
/* If start has gotten us past interval then we can't schedule */
if (start >= interval_bits)
return -ENOSPC;
if (only_one_period)
/* Must fit within same period as start; end at begin of next */
first_end = (start / bits_per_period + 1) * bits_per_period;
else
/* Can fit anywhere in the first interval */
first_end = interval_bits;
/*
* We'll try to pick the first repetition, then see if that time
* is free for each of the subsequent repetitions. If it's not
* we'll adjust the start time for the next search of the first
* repetition.
*/
while (start + num_bits <= first_end) {
int end;
/* Need to stay within this period */
end = (start / bits_per_period + 1) * bits_per_period;
/* Look for num_bits free bits in this period, starting at start */
start = bitmap_find_next_zero_area(map, end, start, num_bits,
0);
/*
* We should get start >= end if we fail. We might be
* able to check the next microframe depending on the
* interval, so continue on (start already updated).
*/
if (start >= end) {
start = end;
continue;
}
/* At this point we have a valid point for first one */
for (i = 1; i < to_reserve; i++) {
int ith_start = start + interval_bits * i;
int ith_end = end + interval_bits * i;
int ret;
/* Use this as a dumb "check if bits are 0" */
ret = bitmap_find_next_zero_area(
map, ith_start + num_bits, ith_start, num_bits,
0);
/* We got the right place, continue checking */
if (ret == ith_start)
continue;
/* Move start up for next time and exit for loop */
ith_start = bitmap_find_next_zero_area(
map, ith_end, ith_start, num_bits, 0);
if (ith_start >= ith_end)
/* Need a whole new period next time */
start = end;
else
start = ith_start - interval_bits * i;
break;
}
/* If didn't exit the for loop with a break, we have success */
if (i == to_reserve)
break;
}
if (start + num_bits > first_end)
return -ENOSPC;
for (i = 0; i < to_reserve; i++) {
int ith_start = start + interval_bits * i;
bitmap_set(map, ith_start, num_bits);
}
return start;
}
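/*
 * Worked example (illustrative numbers only): with bits_per_period = 100,
 * periods_in_map = 8, num_bits = 10 and interval = 4, the code above
 * computes interval_bits = 400 and to_reserve = 2, so on success 10 bits
 * are set in two periods spaced four periods apart.  An interval of 6
 * would first be reduced to gcd(6, 8) = 2 and therefore reserve bits in
 * every other period.
 */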
/**
* pmap_unschedule() - Undo work done by pmap_schedule()
*
* @map: See pmap_schedule().
* @bits_per_period: See pmap_schedule().
* @periods_in_map: See pmap_schedule().
* @num_bits: The number of bits that was passed to schedule.
* @interval: The interval that was passed to schedule.
* @start: The return value from pmap_schedule().
*/
static void pmap_unschedule(unsigned long *map, int bits_per_period,
int periods_in_map, int num_bits,
int interval, int start)
{
int interval_bits;
int to_release;
int i;
/* Adjust interval as per description in pmap_schedule() */
interval = gcd(interval, periods_in_map);
interval_bits = bits_per_period * interval;
to_release = periods_in_map / interval;
for (i = 0; i < to_release; i++) {
int ith_start = start + interval_bits * i;
bitmap_clear(map, ith_start, num_bits);
}
}
/**
* dwc2_get_ls_map() - Get the map used for the given qh
*
* @hsotg: The HCD state structure for the DWC OTG controller.
* @qh: QH for the periodic transfer.
*
* We'll always get the periodic map out of our TT. Note that even if we're
* running the host straight in low speed / full speed mode it appears as if
* a TT is allocated for us, so we'll use it. If that ever changes we can
* add logic here to get a map out of "hsotg" if !qh->do_split.
*
* Returns: the map or NULL if a map couldn't be found.
*/
static unsigned long *dwc2_get_ls_map(struct dwc2_hsotg *hsotg,
struct dwc2_qh *qh)
{
unsigned long *map;
/* Don't expect to be missing a TT and be doing low speed scheduling */
if (WARN_ON(!qh->dwc_tt))
return NULL;
/* Get the map and adjust if this is a multi_tt hub */
map = qh->dwc_tt->periodic_bitmaps;
if (qh->dwc_tt->usb_tt->multi)
map += DWC2_ELEMENTS_PER_LS_BITMAP * (qh->ttport - 1);
return map;
}
#ifdef DWC2_PRINT_SCHEDULE
/*
* cat_printf() - A printf() + strcat() helper
*
* This is useful for concatenating a bunch of strings where each string is
* constructed using printf.
*
* @buf: The destination buffer; will be updated to point after the printed
* data.
* @size: The number of bytes in the buffer (includes space for '\0').
* @fmt: The format for printf.
* @...: The args for printf.
*/
static __printf(3, 4)
void cat_printf(char **buf, size_t *size, const char *fmt, ...)
{
va_list args;
int i;
if (*size == 0)
return;
va_start(args, fmt);
i = vsnprintf(*buf, *size, fmt, args);
va_end(args);
if (i >= *size) {
(*buf)[*size - 1] = '\0';
*buf += *size;
*size = 0;
} else {
*buf += i;
*size -= i;
}
}
/*
* pmap_print() - Print the given periodic map
*
* Will attempt to print out the periodic schedule.
*
* @map: See pmap_schedule().
* @bits_per_period: See pmap_schedule().
* @periods_in_map: See pmap_schedule().
* @period_name: The name of 1 period, like "uFrame"
* @units: The name of the units, like "us".
* @print_fn: The function to call for printing.
* @print_data: Opaque data to pass to the print function.
*/
static void pmap_print(unsigned long *map, int bits_per_period,
int periods_in_map, const char *period_name,
const char *units,
void (*print_fn)(const char *str, void *data),
void *print_data)
{
int period;
for (period = 0; period < periods_in_map; period++) {
char tmp[64];
char *buf = tmp;
size_t buf_size = sizeof(tmp);
int period_start = period * bits_per_period;
int period_end = period_start + bits_per_period;
int start = 0;
int count = 0;
bool printed = false;
int i;
for (i = period_start; i < period_end + 1; i++) {
/* Handle case when ith bit is set */
if (i < period_end &&
bitmap_find_next_zero_area(map, i + 1,
i, 1, 0) != i) {
if (count == 0)
start = i - period_start;
count++;
continue;
}
/* ith bit isn't set; don't care if count == 0 */
if (count == 0)
continue;
if (!printed)
cat_printf(&buf, &buf_size, "%s %d: ",
period_name, period);
else
cat_printf(&buf, &buf_size, ", ");
printed = true;
cat_printf(&buf, &buf_size, "%d %s -%3d %s", start,
units, start + count - 1, units);
count = 0;
}
if (printed)
print_fn(tmp, print_data);
}
}
struct dwc2_qh_print_data {
struct dwc2_hsotg *hsotg;
struct dwc2_qh *qh;
};
/**
* dwc2_qh_print() - Helper function for dwc2_qh_schedule_print()
*
* @str: The string to print
* @data: A pointer to a struct dwc2_qh_print_data
*/
static void dwc2_qh_print(const char *str, void *data)
{
struct dwc2_qh_print_data *print_data = data;
dwc2_sch_dbg(print_data->hsotg, "QH=%p ...%s\n", print_data->qh, str);
}
/**
* dwc2_qh_schedule_print() - Print the periodic schedule
*
* @hsotg: The HCD state structure for the DWC OTG controller.
* @qh: QH to print.
*/
static void dwc2_qh_schedule_print(struct dwc2_hsotg *hsotg,
struct dwc2_qh *qh)
{
struct dwc2_qh_print_data print_data = { hsotg, qh };
int i;
/*
* The printing functions are quite slow and inefficient.
* If we don't have tracing turned on, don't run unless the special
* define is turned on.
*/
if (qh->schedule_low_speed) {
unsigned long *map = dwc2_get_ls_map(hsotg, qh);
dwc2_sch_dbg(hsotg, "QH=%p LS/FS trans: %d=>%d us @ %d us",
qh, qh->device_us,
DWC2_ROUND_US_TO_SLICE(qh->device_us),
DWC2_US_PER_SLICE * qh->ls_start_schedule_slice);
if (map) {
dwc2_sch_dbg(hsotg,
"QH=%p Whole low/full speed map %p now:\n",
qh, map);
pmap_print(map, DWC2_LS_PERIODIC_SLICES_PER_FRAME,
DWC2_LS_SCHEDULE_FRAMES, "Frame ", "slices",
dwc2_qh_print, &print_data);
}
}
for (i = 0; i < qh->num_hs_transfers; i++) {
struct dwc2_hs_transfer_time *trans_time = qh->hs_transfers + i;
int uframe = trans_time->start_schedule_us /
DWC2_HS_PERIODIC_US_PER_UFRAME;
int rel_us = trans_time->start_schedule_us %
DWC2_HS_PERIODIC_US_PER_UFRAME;
dwc2_sch_dbg(hsotg,
"QH=%p HS trans #%d: %d us @ uFrame %d + %d us\n",
qh, i, trans_time->duration_us, uframe, rel_us);
}
if (qh->num_hs_transfers) {
dwc2_sch_dbg(hsotg, "QH=%p Whole high speed map now:\n", qh);
pmap_print(hsotg->hs_periodic_bitmap,
DWC2_HS_PERIODIC_US_PER_UFRAME,
DWC2_HS_SCHEDULE_UFRAMES, "uFrame", "us",
dwc2_qh_print, &print_data);
}
}
#else
static inline void dwc2_qh_schedule_print(struct dwc2_hsotg *hsotg,
struct dwc2_qh *qh) {};
#endif
/**
* dwc2_ls_pmap_schedule() - Schedule a low speed QH
*
* @hsotg: The HCD state structure for the DWC OTG controller.
* @qh: QH for the periodic transfer.
* @search_slice: We'll start trying to schedule at the passed slice.
* Remember that slices are the units of the low speed
* schedule (think 25us or so).
*
* Wraps pmap_schedule() with the right parameters for low speed scheduling.
*
* Normally we schedule low speed devices on the map associated with the TT.
*
* Returns: 0 for success or an error code.
*/
static int dwc2_ls_pmap_schedule(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh,
int search_slice)
{
int slices = DIV_ROUND_UP(qh->device_us, DWC2_US_PER_SLICE);
unsigned long *map = dwc2_get_ls_map(hsotg, qh);
int slice;
if (!map)
return -EINVAL;
/*
* Schedule on the proper low speed map with our low speed scheduling
* parameters. Note that we use the "device_interval" here since
* we want the low speed interval and the only way we'd be in this
* function is if the device is low speed.
*
* If we happen to be doing low speed and high speed scheduling for the
* same transaction (AKA we have a split) we always do low speed first.
* That means we can always pass "false" for only_one_period (that
* parameter is only useful when we're trying to get one schedule to
* match what we already planned in the other schedule).
*/
slice = pmap_schedule(map, DWC2_LS_PERIODIC_SLICES_PER_FRAME,
DWC2_LS_SCHEDULE_FRAMES, slices,
qh->device_interval, search_slice, false);
if (slice < 0)
return slice;
qh->ls_start_schedule_slice = slice;
return 0;
}
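/*
 * Worked example (assuming 25 us slices, as the "think 25us or so" note
 * above suggests): a transfer with qh->device_us = 117 needs
 * DIV_ROUND_UP(117, 25) = 5 slices in the low speed map.
 */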
/**
* dwc2_ls_pmap_unschedule() - Undo work done by dwc2_ls_pmap_schedule()
*
* @hsotg: The HCD state structure for the DWC OTG controller.
* @qh: QH for the periodic transfer.
*/
static void dwc2_ls_pmap_unschedule(struct dwc2_hsotg *hsotg,
struct dwc2_qh *qh)
{
int slices = DIV_ROUND_UP(qh->device_us, DWC2_US_PER_SLICE);
unsigned long *map = dwc2_get_ls_map(hsotg, qh);
/* Scheduling must have failed earlier, so there is nothing to undo (and no error code to return) */
if (!map)
return;
pmap_unschedule(map, DWC2_LS_PERIODIC_SLICES_PER_FRAME,
DWC2_LS_SCHEDULE_FRAMES, slices, qh->device_interval,
qh->ls_start_schedule_slice);
}
/**
* dwc2_hs_pmap_schedule - Schedule in the main high speed schedule
*
* This will schedule something on the main dwc2 schedule.
*
* We'll start looking in qh->hs_transfers[index].start_schedule_us. We'll
* update this with the result upon success. We also use the duration from
* the same structure.
*
* @hsotg: The HCD state structure for the DWC OTG controller.
* @qh: QH for the periodic transfer.
* @only_one_period: If true we will limit ourselves to just looking at
* one period (aka one 100us chunk). This is used if we have
* already scheduled something on the low speed schedule and
* need to find something that matches on the high speed one.
* @index: The index into qh->hs_transfers that we're working with.
*
* Returns: 0 for success or an error code. Upon success the
* dwc2_hs_transfer_time specified by "index" will be updated.
*/
static int dwc2_hs_pmap_schedule(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh,
bool only_one_period, int index)
{
struct dwc2_hs_transfer_time *trans_time = qh->hs_transfers + index;
int us;
us = pmap_schedule(hsotg->hs_periodic_bitmap,
DWC2_HS_PERIODIC_US_PER_UFRAME,
DWC2_HS_SCHEDULE_UFRAMES, trans_time->duration_us,
qh->host_interval, trans_time->start_schedule_us,
only_one_period);
if (us < 0)
return us;
trans_time->start_schedule_us = us;
return 0;
}
/**
* dwc2_hs_pmap_unschedule() - Undo work done by dwc2_hs_pmap_schedule()
*
* @hsotg: The HCD state structure for the DWC OTG controller.
* @qh: QH for the periodic transfer.
* @index: Transfer index
*/
static void dwc2_hs_pmap_unschedule(struct dwc2_hsotg *hsotg,
struct dwc2_qh *qh, int index)
{
struct dwc2_hs_transfer_time *trans_time = qh->hs_transfers + index;
pmap_unschedule(hsotg->hs_periodic_bitmap,
DWC2_HS_PERIODIC_US_PER_UFRAME,
DWC2_HS_SCHEDULE_UFRAMES, trans_time->duration_us,
qh->host_interval, trans_time->start_schedule_us);
}
/**
* dwc2_uframe_schedule_split - Schedule a QH for a periodic split xfer.
*
* This is the most complicated thing in USB. We have to find matching time
* in both the global high speed schedule for the port and the low speed
* schedule for the TT associated with the given device.
*
* Being here means that the host must be running in high speed mode and the
* device is in low or full speed mode (and behind a hub).
*
* @hsotg: The HCD state structure for the DWC OTG controller.
* @qh: QH for the periodic transfer.
*/
static int dwc2_uframe_schedule_split(struct dwc2_hsotg *hsotg,
struct dwc2_qh *qh)
{
int bytecount = qh->maxp_mult * qh->maxp;
int ls_search_slice;
int err = 0;
int host_interval_in_sched;
/*
* The interval (how often to repeat) in the actual host schedule.
* See pmap_schedule() for gcd() explanation.
*/
host_interval_in_sched = gcd(qh->host_interval,
DWC2_HS_SCHEDULE_UFRAMES);
/*
* We always try to find space in the low speed schedule first, then
* try to find high speed time that matches. If we don't, we'll bump
* up the place we start searching in the low speed schedule and try
* again. To start we'll look right at the beginning of the low speed
* schedule.
*
* Note that this will tend to front-load the high speed schedule.
* We may eventually want to try to avoid this by either considering
* both schedules together or doing some sort of round robin.
*/
ls_search_slice = 0;
while (ls_search_slice < DWC2_LS_SCHEDULE_SLICES) {
int start_s_uframe;
int ssplit_s_uframe;
int second_s_uframe;
int rel_uframe;
int first_count;
int middle_count;
int end_count;
int first_data_bytes;
int other_data_bytes;
int i;
if (qh->schedule_low_speed) {
err = dwc2_ls_pmap_schedule(hsotg, qh, ls_search_slice);
/*
* If we got an error here there's no other magic we
* can do, so bail. All the looping above is only
* helpful to redo things if we got a low speed slot
* and then couldn't find a matching high speed slot.
*/
if (err)
return err;
} else {
/* Must be missing the tt structure? Why? */
WARN_ON_ONCE(1);
}
/*
* This will give us a number 0 - 7 if
* DWC2_LS_SCHEDULE_FRAMES == 1, or 0 - 15 if == 2, or ...
*/
start_s_uframe = qh->ls_start_schedule_slice /
DWC2_SLICES_PER_UFRAME;
/* Get a number that's always 0 - 7 */
rel_uframe = (start_s_uframe % 8);
/*
* If we were going to start in uframe 7 then we would need to
* issue a start split in uframe 6, which spec says is not OK.
* Move on to the next full frame (assuming there is one).
*
* See 11.18.4 Host Split Transaction Scheduling Requirements
* bullet 1.
*/
if (rel_uframe == 7) {
if (qh->schedule_low_speed)
dwc2_ls_pmap_unschedule(hsotg, qh);
ls_search_slice =
(qh->ls_start_schedule_slice /
DWC2_LS_PERIODIC_SLICES_PER_FRAME + 1) *
DWC2_LS_PERIODIC_SLICES_PER_FRAME;
continue;
}
/*
* For ISOC in:
* - start split (frame -1)
* - complete split w/ data (frame +1)
* - complete split w/ data (frame +2)
* - ...
* - complete split w/ data (frame +num_data_packets)
* - complete split w/ data (frame +num_data_packets+1)
* - complete split w/ data (frame +num_data_packets+2, max 8)
* ...though if frame was "0" then max is 7...
*
* For ISOC out we might need to do:
* - start split w/ data (frame -1)
* - start split w/ data (frame +0)
* - ...
* - start split w/ data (frame +num_data_packets-2)
*
* For INTERRUPT in we might need to do:
* - start split (frame -1)
* - complete split w/ data (frame +1)
* - complete split w/ data (frame +2)
* - complete split w/ data (frame +3, max 8)
*
* For INTERRUPT out we might need to do:
* - start split w/ data (frame -1)
* - complete split (frame +1)
* - complete split (frame +2)
* - complete split (frame +3, max 8)
*
* Start adjusting!
*/
ssplit_s_uframe = (start_s_uframe +
host_interval_in_sched - 1) %
host_interval_in_sched;
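/*
* For illustration (values assumed): with start_s_uframe == 3 and
* host_interval_in_sched == 8 this works out to (3 + 8 - 1) % 8 == 2,
* i.e. the start split is pencilled in one uframe before the slot the
* low speed schedule gave us.
*/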
if (qh->ep_type == USB_ENDPOINT_XFER_ISOC && !qh->ep_is_in)
second_s_uframe = start_s_uframe;
else
second_s_uframe = start_s_uframe + 1;
/* First data transfer might not be all 188 bytes. */
first_data_bytes = 188 -
DIV_ROUND_UP(188 * (qh->ls_start_schedule_slice %
DWC2_SLICES_PER_UFRAME),
DWC2_SLICES_PER_UFRAME);
if (first_data_bytes > bytecount)
first_data_bytes = bytecount;
other_data_bytes = bytecount - first_data_bytes;
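/*
* Worked example (the slice count is an assumed value, for illustration
* only): with DWC2_SLICES_PER_UFRAME == 4 and a start slice giving
* 37 % 4 == 1, first_data_bytes == 188 - DIV_ROUND_UP(188 * 1, 4) ==
* 188 - 47 == 141.  A 64-byte endpoint (bytecount == 64) is clamped to
* 64 with other_data_bytes == 0; a 580-byte ISOC endpoint leaves 439
* bytes for the later transfers.
*/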
/*
* For now, skip OUT xfers where first xfer is partial
*
* Main dwc2 code assumes:
* - INT transfers never get split in two.
* - ISOC transfers can always transfer 188 bytes the first
* time.
*
* Until that code is fixed, try again if the first transfer
* couldn't transfer everything.
*
* This code can be removed if/when the rest of dwc2 handles
* the above cases. Until it's fixed we just won't be able
* to schedule quite as tightly.
*/
if (!qh->ep_is_in &&
(first_data_bytes != min_t(int, 188, bytecount))) {
dwc2_sch_dbg(hsotg,
"QH=%p avoiding broken 1st xfer (%d, %d)\n",
qh, first_data_bytes, bytecount);
if (qh->schedule_low_speed)
dwc2_ls_pmap_unschedule(hsotg, qh);
ls_search_slice = (start_s_uframe + 1) *
DWC2_SLICES_PER_UFRAME;
continue;
}
/* Start by assuming transfers for the bytes */
qh->num_hs_transfers = 1 + DIV_ROUND_UP(other_data_bytes, 188);
/*
* Everything except ISOC OUT has extra transfers. Rules are
* complicated. See 11.18.4 Host Split Transaction Scheduling
* Requirements bullet 3.
*/
if (qh->ep_type == USB_ENDPOINT_XFER_INT) {
if (rel_uframe == 6)
qh->num_hs_transfers += 2;
else
qh->num_hs_transfers += 3;
if (qh->ep_is_in) {
/*
* First is start split, middle/end is data.
* Allocate full data bytes for all data.
*/
first_count = 4;
middle_count = bytecount;
end_count = bytecount;
} else {
/*
* First is data, middle/end is complete.
* First transfer and second can have data.
* Rest should just have complete split.
*/
first_count = first_data_bytes;
middle_count = max_t(int, 4, other_data_bytes);
end_count = 4;
}
} else {
if (qh->ep_is_in) {
int last;
/* Account for the start split */
qh->num_hs_transfers++;
/* Calculate "L" value from spec */
last = rel_uframe + qh->num_hs_transfers + 1;
/* Start with basic case */
if (last <= 6)
qh->num_hs_transfers += 2;
else
qh->num_hs_transfers += 1;
/* Adjust downwards */
if (last >= 6 && rel_uframe == 0)
qh->num_hs_transfers--;
/* 1st = start; rest can contain data */
first_count = 4;
middle_count = min_t(int, 188, bytecount);
end_count = middle_count;
} else {
/* All contain data, last might be smaller */
first_count = first_data_bytes;
middle_count = min_t(int, 188,
other_data_bytes);
end_count = other_data_bytes % 188;
}
}
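/*
* Worked example of the counting above (numbers assumed, for
* illustration only): an ISOC IN endpoint with bytecount == 300 and
* rel_uframe == 1, where the first transfer can carry the full 188
* bytes, starts at 1 + DIV_ROUND_UP(112, 188) == 2 transfers, gets one
* more for the start split, and then last == 1 + 3 + 1 == 5, which is
* <= 6, so two further complete splits are added: 5 transfers total,
* one SSPLIT followed by four CSPLITs.
*/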
/* Assign durations per uFrame */
qh->hs_transfers[0].duration_us = HS_USECS_ISO(first_count);
for (i = 1; i < qh->num_hs_transfers - 1; i++)
qh->hs_transfers[i].duration_us =
HS_USECS_ISO(middle_count);
if (qh->num_hs_transfers > 1)
qh->hs_transfers[qh->num_hs_transfers - 1].duration_us =
HS_USECS_ISO(end_count);
/*
* Assign start us. The call below to dwc2_hs_pmap_schedule()
* will start with these numbers but may adjust within the same
* microframe.
*/
qh->hs_transfers[0].start_schedule_us =
ssplit_s_uframe * DWC2_HS_PERIODIC_US_PER_UFRAME;
for (i = 1; i < qh->num_hs_transfers; i++)
qh->hs_transfers[i].start_schedule_us =
((second_s_uframe + i - 1) %
DWC2_HS_SCHEDULE_UFRAMES) *
DWC2_HS_PERIODIC_US_PER_UFRAME;
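/*
* Worked example (values assumed, for illustration only): taking
* DWC2_HS_PERIODIC_US_PER_UFRAME == 100, ssplit_s_uframe == 2 and
* second_s_uframe == 4, transfer 0 (the start split) is pencilled in at
* 200 us, transfer 1 at 400 us, transfer 2 at 500 us, and so on one
* uframe apart.  dwc2_hs_pmap_schedule() below may still move each
* transfer around within its own uframe.
*/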
/* Try to schedule with filled in hs_transfers above */
for (i = 0; i < qh->num_hs_transfers; i++) {
err = dwc2_hs_pmap_schedule(hsotg, qh, true, i);
if (err)
break;
}
/* If we scheduled all w/out breaking out then we're all good */
if (i == qh->num_hs_transfers)
break;
for (; i >= 0; i--)
dwc2_hs_pmap_unschedule(hsotg, qh, i);
if (qh->schedule_low_speed)
dwc2_ls_pmap_unschedule(hsotg, qh);
/* Try again starting in the next microframe */
ls_search_slice = (start_s_uframe + 1) * DWC2_SLICES_PER_UFRAME;
}
if (ls_search_slice >= DWC2_LS_SCHEDULE_SLICES)
return -ENOSPC;
return 0;
}
/**
* dwc2_uframe_schedule_hs - Schedule a QH for a periodic high speed xfer.
*
* Basically this just wraps dwc2_hs_pmap_schedule() to provide a clean
* interface.
*
* @hsotg: The HCD state structure for the DWC OTG controller.
* @qh: QH for the periodic transfer.
*/
static int dwc2_uframe_schedule_hs(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh)
{
/* In non-split host and device time are the same */
WARN_ON(qh->host_us != qh->device_us);
WARN_ON(qh->host_interval != qh->device_interval);
WARN_ON(qh->num_hs_transfers != 1);
/* We'll have one transfer; init start to 0 before calling scheduler */
qh->hs_transfers[0].start_schedule_us = 0;
qh->hs_transfers[0].duration_us = qh->host_us;
return dwc2_hs_pmap_schedule(hsotg, qh, false, 0);
}
/**
* dwc2_uframe_schedule_ls - Schedule a QH for a periodic low/full speed xfer.
*
* Basically this just wraps dwc2_ls_pmap_schedule() to provide a clean
* interface.
*
* @hsotg: The HCD state structure for the DWC OTG controller.
* @qh: QH for the periodic transfer.
*/
static int dwc2_uframe_schedule_ls(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh)
{
/* In non-split host and device time are the same */
WARN_ON(qh->host_us != qh->device_us);
WARN_ON(qh->host_interval != qh->device_interval);
WARN_ON(!qh->schedule_low_speed);
/* Run on the main low speed schedule (no split = no hub = no TT) */
return dwc2_ls_pmap_schedule(hsotg, qh, 0);
}
/**
* dwc2_uframe_schedule - Schedule a QH for a periodic xfer.
*
* Calls one of the 3 sub-functions depending on what type of transfer this QH
* is for. Also adds some printing.
*
* @hsotg: The HCD state structure for the DWC OTG controller.
* @qh: QH for the periodic transfer.
*/
static int dwc2_uframe_schedule(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh)
{
int ret;
if (qh->dev_speed == USB_SPEED_HIGH)
ret = dwc2_uframe_schedule_hs(hsotg, qh);
else if (!qh->do_split)
ret = dwc2_uframe_schedule_ls(hsotg, qh);
else
ret = dwc2_uframe_schedule_split(hsotg, qh);
if (ret)
dwc2_sch_dbg(hsotg, "QH=%p Failed to schedule %d\n", qh, ret);
else
dwc2_qh_schedule_print(hsotg, qh);
return ret;
}
/**
* dwc2_uframe_unschedule - Undoes dwc2_uframe_schedule().
*
* @hsotg: The HCD state structure for the DWC OTG controller.
* @qh: QH for the periodic transfer.
*/
static void dwc2_uframe_unschedule(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh)
{
int i;
for (i = 0; i < qh->num_hs_transfers; i++)
dwc2_hs_pmap_unschedule(hsotg, qh, i);
if (qh->schedule_low_speed)
dwc2_ls_pmap_unschedule(hsotg, qh);
dwc2_sch_dbg(hsotg, "QH=%p Unscheduled\n", qh);
}
/**
* dwc2_pick_first_frame() - Choose 1st frame for qh that's already scheduled
*
* Takes a qh that has already been scheduled (which means we know we have the
* bandwidth reserved for us) and sets the next_active_frame and the
* start_active_frame.
*
* This is expected to be called on qh's that weren't previously actively
* running. It just picks the next frame that we can fit into without any
* thought about the past.
*
* @hsotg: The HCD state structure for the DWC OTG controller
* @qh: QH for a periodic endpoint
*
*/
static void dwc2_pick_first_frame(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh)
{
u16 frame_number;
u16 earliest_frame;
u16 next_active_frame;
u16 relative_frame;
u16 interval;
/*
* Use the real frame number rather than the cached value as of the
* last SOF to give us a little extra slop.
*/
frame_number = dwc2_hcd_get_frame_number(hsotg);
/*
* We wouldn't want to start any earlier than the next frame just in
* case the frame number ticks as we're doing this calculation.
*
* NOTE: if we could quantify how long till we actually get scheduled
* we might be able to avoid the "+ 1" by looking at the upper part of
* HFNUM (the FRREM field). For now we'll just use the + 1 though.
*/
earliest_frame = dwc2_frame_num_inc(frame_number, 1);
next_active_frame = earliest_frame;
/* Get the "no microframe scheduler" out of the way... */
if (!hsotg->params.uframe_sched) {
if (qh->do_split)
/* Splits are active at microframe 0 minus 1 */
next_active_frame |= 0x7;
goto exit;
}
if (qh->dev_speed == USB_SPEED_HIGH || qh->do_split) {
/*
* We're either at high speed or we're doing a split (which
* means we're talking high speed to a hub). In any case
* the first frame should be based on when the first scheduled
* event is.
*/
WARN_ON(qh->num_hs_transfers < 1);
relative_frame = qh->hs_transfers[0].start_schedule_us /
DWC2_HS_PERIODIC_US_PER_UFRAME;
/* Adjust interval as per high speed schedule */
interval = gcd(qh->host_interval, DWC2_HS_SCHEDULE_UFRAMES);
} else {
/*
* Low or full speed directly on dwc2. Just about the same
* as high speed but on a different schedule and with slightly
* different adjustments. Note that this works because when
* the host and device are both low speed then frames in the
* controller tick at low speed.
*/
relative_frame = qh->ls_start_schedule_slice /
DWC2_LS_PERIODIC_SLICES_PER_FRAME;
interval = gcd(qh->host_interval, DWC2_LS_SCHEDULE_FRAMES);
}
/* Scheduler messed up if frame is past interval */
WARN_ON(relative_frame >= interval);
/*
* We know interval must divide (HFNUM_MAX_FRNUM + 1) now that we've
* done the gcd(), so it's safe to move to the beginning of the current
* interval like this.
*
* After this we might be before earliest_frame, but don't worry,
* we'll fix it...
*/
next_active_frame = (next_active_frame / interval) * interval;
/*
* Actually choose to start at the frame number we've been
* scheduled for.
*/
next_active_frame = dwc2_frame_num_inc(next_active_frame,
relative_frame);
/*
* We actually need 1 frame before since the next_active_frame is
* the frame number we'll be put on the ready list and we won't be on
* the bus until 1 frame later.
*/
next_active_frame = dwc2_frame_num_dec(next_active_frame, 1);
/*
* By now we might actually be before the earliest_frame. Let's move
* up intervals until we're not.
*/
while (dwc2_frame_num_gt(earliest_frame, next_active_frame))
next_active_frame = dwc2_frame_num_inc(next_active_frame,
interval);
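/*
* Worked example of the adjustment above (all numbers assumed, for
* illustration only): with earliest_frame == 0x123, an interval of 8
* out of the gcd() and relative_frame == 3, rounding down to the
* interval gives 0x120, adding relative_frame gives 0x123, and backing
* off one frame gives 0x122.  That is before earliest_frame, so one
* interval is added and we end up at 0x12a.
*/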
exit:
qh->next_active_frame = next_active_frame;
qh->start_active_frame = next_active_frame;
dwc2_sch_vdbg(hsotg, "QH=%p First fn=%04x nxt=%04x\n",
qh, frame_number, qh->next_active_frame);
}
/**
* dwc2_do_reserve() - Make a periodic reservation
*
* Try to allocate space in the periodic schedule. Depending on parameters
* this might use the microframe scheduler or the dumb scheduler.
*
* @hsotg: The HCD state structure for the DWC OTG controller
* @qh: QH for the periodic transfer.
*
* Returns: 0 upon success; error upon failure.
*/
static int dwc2_do_reserve(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh)
{
int status;
if (hsotg->params.uframe_sched) {
status = dwc2_uframe_schedule(hsotg, qh);
} else {
status = dwc2_periodic_channel_available(hsotg);
if (status) {
dev_info(hsotg->dev,
"%s: No host channel available for periodic transfer\n",
__func__);
return status;
}
status = dwc2_check_periodic_bandwidth(hsotg, qh);
}
if (status) {
dev_dbg(hsotg->dev,
"%s: Insufficient periodic bandwidth for periodic transfer\n",
__func__);
return status;
}
if (!hsotg->params.uframe_sched)
/* Reserve periodic channel */
hsotg->periodic_channels++;
/* Update claimed usecs per (micro)frame */
hsotg->periodic_usecs += qh->host_us;
dwc2_pick_first_frame(hsotg, qh);
return 0;
}
/**
* dwc2_do_unreserve() - Actually release the periodic reservation
*
* This function actually releases the periodic bandwidth that was reserved
* by the given qh.
*
* @hsotg: The HCD state structure for the DWC OTG controller
* @qh: QH for the periodic transfer.
*/
static void dwc2_do_unreserve(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh)
{
assert_spin_locked(&hsotg->lock);
WARN_ON(!qh->unreserve_pending);
/* No more unreserve pending--we're doing it */
qh->unreserve_pending = false;
if (WARN_ON(!list_empty(&qh->qh_list_entry)))
list_del_init(&qh->qh_list_entry);
/* Update claimed usecs per (micro)frame */
hsotg->periodic_usecs -= qh->host_us;
if (hsotg->params.uframe_sched) {
dwc2_uframe_unschedule(hsotg, qh);
} else {
/* Release periodic channel reservation */
hsotg->periodic_channels--;
}
}
/**
* dwc2_unreserve_timer_fn() - Timer function to release periodic reservation
*
* According to the kernel doc for usb_submit_urb() (specifically the part about
* "Reserved Bandwidth Transfers"), we need to keep a reservation active as
* long as a device driver keeps submitting. Since we're using HCD_BH to give
* back the URB we need to give the driver a little bit of time before we
* release the reservation. This timer function is called after the
* appropriate delay.
*
* @t: Address to a qh unreserve_work.
*/
static void dwc2_unreserve_timer_fn(struct timer_list *t)
{
struct dwc2_qh *qh = from_timer(qh, t, unreserve_timer);
struct dwc2_hsotg *hsotg = qh->hsotg;
unsigned long flags;
/*
* Wait for the lock, or for us to be scheduled again. We
* could be scheduled again if:
* - We started executing but didn't get the lock yet.
* - A new reservation came in, but cancel didn't take effect
* because we already started executing.
* - The timer has been kicked again.
* In that case cancel and wait for the next call.
*/
while (!spin_trylock_irqsave(&hsotg->lock, flags)) {
if (timer_pending(&qh->unreserve_timer))
return;
}
/*
* Might be no more unreserve pending if:
* - We started executing but didn't get the lock yet.
* - A new reservation came in, but cancel didn't take effect
* because we already started executing.
*
* We can't put this in the loop above because unreserve_pending needs
* to be accessed under lock, so we can only check it once we got the
* lock.
*/
if (qh->unreserve_pending)
dwc2_do_unreserve(hsotg, qh);
spin_unlock_irqrestore(&hsotg->lock, flags);
}
/**
* dwc2_check_max_xfer_size() - Checks that the max transfer size allowed in a
* host channel is large enough to handle the maximum data transfer in a single
* (micro)frame for a periodic transfer
*
* @hsotg: The HCD state structure for the DWC OTG controller
* @qh: QH for a periodic endpoint
*
* Return: 0 if successful, negative error code otherwise
*/
static int dwc2_check_max_xfer_size(struct dwc2_hsotg *hsotg,
struct dwc2_qh *qh)
{
u32 max_xfer_size;
u32 max_channel_xfer_size;
int status = 0;
max_xfer_size = qh->maxp * qh->maxp_mult;
max_channel_xfer_size = hsotg->params.max_transfer_size;
if (max_xfer_size > max_channel_xfer_size) {
dev_err(hsotg->dev,
"%s: Periodic xfer length %d > max xfer length for channel %d\n",
__func__, max_xfer_size, max_channel_xfer_size);
status = -ENOSPC;
}
return status;
}
/**
* dwc2_schedule_periodic() - Schedules an interrupt or isochronous transfer in
* the periodic schedule
*
* @hsotg: The HCD state structure for the DWC OTG controller
* @qh: QH for the periodic transfer. The QH should already contain the
* scheduling information.
*
* Return: 0 if successful, negative error code otherwise
*/
static int dwc2_schedule_periodic(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh)
{
int status;
status = dwc2_check_max_xfer_size(hsotg, qh);
if (status) {
dev_dbg(hsotg->dev,
"%s: Channel max transfer size too small for periodic transfer\n",
__func__);
return status;
}
/* Cancel pending unreserve; if canceled OK, unreserve was pending */
if (del_timer(&qh->unreserve_timer))
WARN_ON(!qh->unreserve_pending);
/*
* Only need to reserve if there's not an unreserve pending, since if an
* unreserve is pending then by definition our old reservation is still
* valid. Unreserve might still be pending even if we didn't cancel if
* dwc2_unreserve_timer_fn() already started. Code in the timer handles
* that case.
*/
if (!qh->unreserve_pending) {
status = dwc2_do_reserve(hsotg, qh);
if (status)
return status;
} else {
/*
* It might have been a while, so make sure that frame_number
* is still good. Note: we could also try to use the similar
* dwc2_next_periodic_start() but that schedules much more
* tightly and we might need to hurry and queue things up.
*/
if (dwc2_frame_num_le(qh->next_active_frame,
hsotg->frame_number))
dwc2_pick_first_frame(hsotg, qh);
}
qh->unreserve_pending = 0;
if (hsotg->params.dma_desc_enable)
/* Don't rely on SOF and start in ready schedule */
list_add_tail(&qh->qh_list_entry, &hsotg->periodic_sched_ready);
else
/* Always start in inactive schedule */
list_add_tail(&qh->qh_list_entry,
&hsotg->periodic_sched_inactive);
return 0;
}
/**
* dwc2_deschedule_periodic() - Removes an interrupt or isochronous transfer
* from the periodic schedule
*
* @hsotg: The HCD state structure for the DWC OTG controller
* @qh: QH for the periodic transfer
*/
static void dwc2_deschedule_periodic(struct dwc2_hsotg *hsotg,
struct dwc2_qh *qh)
{
bool did_modify;
assert_spin_locked(&hsotg->lock);
/*
* Schedule the unreserve to happen in a little bit. Cases here:
* - Unreserve worker might be sitting there waiting to grab the lock.
* In this case it will notice it's been scheduled again and will
* quit.
* - Unreserve worker might not be scheduled.
*
* We should never already be scheduled since dwc2_schedule_periodic()
* should have canceled the scheduled unreserve timer (hence the
* warning on did_modify).
*
* We add + 1 to the timer to guarantee that at least 1 jiffy has
* passed (otherwise the jiffy counter might tick right after we
* read it and we'd get no delay).
*/
did_modify = mod_timer(&qh->unreserve_timer,
jiffies + DWC2_UNRESERVE_DELAY + 1);
WARN_ON(did_modify);
qh->unreserve_pending = 1;
list_del_init(&qh->qh_list_entry);
}
/**
* dwc2_wait_timer_fn() - Timer function to re-queue after waiting
*
* As per the spec, a NAK indicates that "a function is temporarily unable to
* transmit or receive data, but will eventually be able to do so without need
* of host intervention".
*
* That means that when we encounter a NAK we're supposed to retry.
*
* ...but if we retry right away (from the interrupt handler that saw the NAK)
* then we can end up with an interrupt storm (if the other side keeps NAKing
* us) because on slow enough CPUs it could take us longer to get out of the
* interrupt routine than it takes for the device to send another NAK. That
* leads to a constant stream of NAK interrupts and the CPU locks.
*
* ...so instead of retrying right away in the case of a NAK we'll set a timer
* to retry some time later. This function handles that timer and moves the
* qh back to the "inactive" list, then queues transactions.
*
* @t: Pointer to wait_timer in a qh.
*
* Return: HRTIMER_NORESTART to not automatically restart this timer.
*/
static enum hrtimer_restart dwc2_wait_timer_fn(struct hrtimer *t)
{
struct dwc2_qh *qh = container_of(t, struct dwc2_qh, wait_timer);
struct dwc2_hsotg *hsotg = qh->hsotg;
unsigned long flags;
spin_lock_irqsave(&hsotg->lock, flags);
/*
* We'll set wait_timer_cancel to true if we want to cancel this
* operation in dwc2_hcd_qh_unlink().
*/
if (!qh->wait_timer_cancel) {
enum dwc2_transaction_type tr_type;
qh->want_wait = false;
list_move(&qh->qh_list_entry,
&hsotg->non_periodic_sched_inactive);
tr_type = dwc2_hcd_select_transactions(hsotg);
if (tr_type != DWC2_TRANSACTION_NONE)
dwc2_hcd_queue_transactions(hsotg, tr_type);
}
spin_unlock_irqrestore(&hsotg->lock, flags);
return HRTIMER_NORESTART;
}
/**
* dwc2_qh_init() - Initializes a QH structure
*
* @hsotg: The HCD state structure for the DWC OTG controller
* @qh: The QH to init
* @urb: Holds the information about the device/endpoint needed to initialize
* the QH
* @mem_flags: Flags for allocating memory.
*/
static void dwc2_qh_init(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh,
struct dwc2_hcd_urb *urb, gfp_t mem_flags)
{
int dev_speed = dwc2_host_get_speed(hsotg, urb->priv);
u8 ep_type = dwc2_hcd_get_pipe_type(&urb->pipe_info);
bool ep_is_in = !!dwc2_hcd_is_pipe_in(&urb->pipe_info);
bool ep_is_isoc = (ep_type == USB_ENDPOINT_XFER_ISOC);
bool ep_is_int = (ep_type == USB_ENDPOINT_XFER_INT);
u32 hprt = dwc2_readl(hsotg, HPRT0);
u32 prtspd = (hprt & HPRT0_SPD_MASK) >> HPRT0_SPD_SHIFT;
bool do_split = (prtspd == HPRT0_SPD_HIGH_SPEED &&
dev_speed != USB_SPEED_HIGH);
int maxp = dwc2_hcd_get_maxp(&urb->pipe_info);
int maxp_mult = dwc2_hcd_get_maxp_mult(&urb->pipe_info);
int bytecount = maxp_mult * maxp;
char *speed, *type;
/* Initialize QH */
qh->hsotg = hsotg;
timer_setup(&qh->unreserve_timer, dwc2_unreserve_timer_fn, 0);
hrtimer_init(&qh->wait_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
qh->wait_timer.function = &dwc2_wait_timer_fn;
qh->ep_type = ep_type;
qh->ep_is_in = ep_is_in;
qh->data_toggle = DWC2_HC_PID_DATA0;
qh->maxp = maxp;
qh->maxp_mult = maxp_mult;
INIT_LIST_HEAD(&qh->qtd_list);
INIT_LIST_HEAD(&qh->qh_list_entry);
qh->do_split = do_split;
qh->dev_speed = dev_speed;
if (ep_is_int || ep_is_isoc) {
/* Compute scheduling parameters once and save them */
int host_speed = do_split ? USB_SPEED_HIGH : dev_speed;
struct dwc2_tt *dwc_tt = dwc2_host_get_tt_info(hsotg, urb->priv,
mem_flags,
&qh->ttport);
int device_ns;
qh->dwc_tt = dwc_tt;
qh->host_us = NS_TO_US(usb_calc_bus_time(host_speed, ep_is_in,
ep_is_isoc, bytecount));
device_ns = usb_calc_bus_time(dev_speed, ep_is_in,
ep_is_isoc, bytecount);
if (do_split && dwc_tt)
device_ns += dwc_tt->usb_tt->think_time;
qh->device_us = NS_TO_US(device_ns);
qh->device_interval = urb->interval;
qh->host_interval = urb->interval * (do_split ? 8 : 1);
/*
* Schedule low speed if we're running the host in low or
* full speed OR if we've got a "TT" to deal with to access this
* device.
*/
qh->schedule_low_speed = prtspd != HPRT0_SPD_HIGH_SPEED ||
dwc_tt;
if (do_split) {
/* We won't know num transfers until we schedule */
qh->num_hs_transfers = -1;
} else if (dev_speed == USB_SPEED_HIGH) {
qh->num_hs_transfers = 1;
} else {
qh->num_hs_transfers = 0;
}
/* We'll schedule later when we have something to do */
}
switch (dev_speed) {
case USB_SPEED_LOW:
speed = "low";
break;
case USB_SPEED_FULL:
speed = "full";
break;
case USB_SPEED_HIGH:
speed = "high";
break;
default:
speed = "?";
break;
}
switch (qh->ep_type) {
case USB_ENDPOINT_XFER_ISOC:
type = "isochronous";
break;
case USB_ENDPOINT_XFER_INT:
type = "interrupt";
break;
case USB_ENDPOINT_XFER_CONTROL:
type = "control";
break;
case USB_ENDPOINT_XFER_BULK:
type = "bulk";
break;
default:
type = "?";
break;
}
dwc2_sch_dbg(hsotg, "QH=%p Init %s, %s speed, %d bytes:\n", qh, type,
speed, bytecount);
dwc2_sch_dbg(hsotg, "QH=%p ...addr=%d, ep=%d, %s\n", qh,
dwc2_hcd_get_dev_addr(&urb->pipe_info),
dwc2_hcd_get_ep_num(&urb->pipe_info),
ep_is_in ? "IN" : "OUT");
if (ep_is_int || ep_is_isoc) {
dwc2_sch_dbg(hsotg,
"QH=%p ...duration: host=%d us, device=%d us\n",
qh, qh->host_us, qh->device_us);
dwc2_sch_dbg(hsotg, "QH=%p ...interval: host=%d, device=%d\n",
qh, qh->host_interval, qh->device_interval);
if (qh->schedule_low_speed)
dwc2_sch_dbg(hsotg, "QH=%p ...low speed schedule=%p\n",
qh, dwc2_get_ls_map(hsotg, qh));
}
}
/**
* dwc2_hcd_qh_create() - Allocates and initializes a QH
*
* @hsotg: The HCD state structure for the DWC OTG controller
* @urb: Holds the information about the device/endpoint needed
* to initialize the QH
* @mem_flags: Flags for allocating memory.
*
* Return: Pointer to the newly allocated QH, or NULL on error
*/
struct dwc2_qh *dwc2_hcd_qh_create(struct dwc2_hsotg *hsotg,
struct dwc2_hcd_urb *urb,
gfp_t mem_flags)
{
struct dwc2_qh *qh;
if (!urb->priv)
return NULL;
/* Allocate memory */
qh = kzalloc(sizeof(*qh), mem_flags);
if (!qh)
return NULL;
dwc2_qh_init(hsotg, qh, urb, mem_flags);
if (hsotg->params.dma_desc_enable &&
dwc2_hcd_qh_init_ddma(hsotg, qh, mem_flags) < 0) {
dwc2_hcd_qh_free(hsotg, qh);
return NULL;
}
return qh;
}
/**
* dwc2_hcd_qh_free() - Frees the QH
*
* @hsotg: HCD instance
* @qh: The QH to free
*
* QH should already be removed from the list. QTD list should already be empty
* if called from URB Dequeue.
*
* Must NOT be called with interrupt disabled or spinlock held
*/
void dwc2_hcd_qh_free(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh)
{
/* Make sure any unreserve work is finished. */
if (del_timer_sync(&qh->unreserve_timer)) {
unsigned long flags;
spin_lock_irqsave(&hsotg->lock, flags);
dwc2_do_unreserve(hsotg, qh);
spin_unlock_irqrestore(&hsotg->lock, flags);
}
/*
* We don't have the lock so we can safely wait until the wait timer
* finishes. Of course, at this point in time we'd better have set
* wait_timer_active to false so if this timer was still pending it
* won't do anything anyway, but we want it to finish before we free
* memory.
*/
hrtimer_cancel(&qh->wait_timer);
dwc2_host_put_tt_info(hsotg, qh->dwc_tt);
if (qh->desc_list)
dwc2_hcd_qh_free_ddma(hsotg, qh);
else if (hsotg->unaligned_cache && qh->dw_align_buf)
kmem_cache_free(hsotg->unaligned_cache, qh->dw_align_buf);
kfree(qh);
}
/**
* dwc2_hcd_qh_add() - Adds a QH to either the non periodic or periodic
* schedule if it is not already in the schedule. If the QH is already in
* the schedule, no action is taken.
*
* @hsotg: The HCD state structure for the DWC OTG controller
* @qh: The QH to add
*
* Return: 0 if successful, negative error code otherwise
*/
int dwc2_hcd_qh_add(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh)
{
int status;
u32 intr_mask;
ktime_t delay;
if (dbg_qh(qh))
dev_vdbg(hsotg->dev, "%s()\n", __func__);
if (!list_empty(&qh->qh_list_entry))
/* QH already in a schedule */
return 0;
/* Add the new QH to the appropriate schedule */
if (dwc2_qh_is_non_per(qh)) {
/* Schedule right away */
qh->start_active_frame = hsotg->frame_number;
qh->next_active_frame = qh->start_active_frame;
if (qh->want_wait) {
list_add_tail(&qh->qh_list_entry,
&hsotg->non_periodic_sched_waiting);
qh->wait_timer_cancel = false;
delay = ktime_set(0, DWC2_RETRY_WAIT_DELAY);
hrtimer_start(&qh->wait_timer, delay, HRTIMER_MODE_REL);
} else {
list_add_tail(&qh->qh_list_entry,
&hsotg->non_periodic_sched_inactive);
}
return 0;
}
status = dwc2_schedule_periodic(hsotg, qh);
if (status)
return status;
if (!hsotg->periodic_qh_count) {
intr_mask = dwc2_readl(hsotg, GINTMSK);
intr_mask |= GINTSTS_SOF;
dwc2_writel(hsotg, intr_mask, GINTMSK);
}
hsotg->periodic_qh_count++;
return 0;
}
/**
* dwc2_hcd_qh_unlink() - Removes a QH from either the non-periodic or periodic
* schedule. Memory is not freed.
*
* @hsotg: The HCD state structure
* @qh: QH to remove from schedule
*/
void dwc2_hcd_qh_unlink(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh)
{
u32 intr_mask;
dev_vdbg(hsotg->dev, "%s()\n", __func__);
/* If the wait_timer is pending, this will stop it from acting */
qh->wait_timer_cancel = true;
if (list_empty(&qh->qh_list_entry))
/* QH is not in a schedule */
return;
if (dwc2_qh_is_non_per(qh)) {
if (hsotg->non_periodic_qh_ptr == &qh->qh_list_entry)
hsotg->non_periodic_qh_ptr =
hsotg->non_periodic_qh_ptr->next;
list_del_init(&qh->qh_list_entry);
return;
}
dwc2_deschedule_periodic(hsotg, qh);
hsotg->periodic_qh_count--;
if (!hsotg->periodic_qh_count &&
!hsotg->params.dma_desc_enable) {
intr_mask = dwc2_readl(hsotg, GINTMSK);
intr_mask &= ~GINTSTS_SOF;
dwc2_writel(hsotg, intr_mask, GINTMSK);
}
}
/**
* dwc2_next_for_periodic_split() - Set next_active_frame midway thru a split.
*
* This is called for setting next_active_frame for periodic splits for all but
* the first packet of the split. Confusing? I thought so...
*
* Periodic splits are single low/full speed transfers that we end up splitting
* up into several high speed transfers. They always fit into one full (1 ms)
* frame but might be split over several microframes (125 us each). We need
* to put each of the parts on a very specific high speed frame.
*
* This function figures out where the next active uFrame needs to be.
*
* @hsotg: The HCD state structure
* @qh: QH for the periodic transfer.
* @frame_number: The current frame number.
*
* Return: number missed by (or 0 if we didn't miss).
*/
static int dwc2_next_for_periodic_split(struct dwc2_hsotg *hsotg,
struct dwc2_qh *qh, u16 frame_number)
{
u16 old_frame = qh->next_active_frame;
u16 prev_frame_number = dwc2_frame_num_dec(frame_number, 1);
int missed = 0;
u16 incr;
/*
* See dwc2_uframe_schedule_split() for split scheduling.
*
* Basically: increment 1 normally, but 2 right after the start split
* (except for ISOC out).
*/
if (old_frame == qh->start_active_frame &&
!(qh->ep_type == USB_ENDPOINT_XFER_ISOC && !qh->ep_is_in))
incr = 2;
else
incr = 1;
qh->next_active_frame = dwc2_frame_num_inc(old_frame, incr);
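/*
* Worked example (frame numbers assumed, for illustration only): for an
* INT IN split with start_active_frame == 0x100, the first call here
* sees old_frame == 0x100 and bumps next_active_frame by 2 to 0x102
* (skipping the gap between the start split and the first complete
* split); later calls then advance it by 1 each time: 0x103, 0x104, ...
*/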
/*
* Note that it's OK for frame_number to be 1 frame past
* next_active_frame. Remember that next_active_frame is supposed to
* be 1 frame _before_ when we want to be scheduled. If we're 1 frame
* past it just means schedule ASAP.
*
* It's _not_ OK, however, if we're more than one frame past.
*/
if (dwc2_frame_num_gt(prev_frame_number, qh->next_active_frame)) {
/*
* OOPS, we missed. That's actually pretty bad since
* the hub will be unhappy; try ASAP I guess.
*/
missed = dwc2_frame_num_dec(prev_frame_number,
qh->next_active_frame);
qh->next_active_frame = frame_number;
}
return missed;
}
/**
* dwc2_next_periodic_start() - Set next_active_frame for next transfer start
*
* This is called for setting next_active_frame for a periodic transfer for
* all cases other than midway through a periodic split. This will also update
* start_active_frame.
*
* Since we _always_ keep start_active_frame as the start of the previous
* transfer this is normally pretty easy: we just add our interval to
* start_active_frame and we've got our answer.
*
* The tricks come into play if we miss. In that case we'll look for the next
* slot we can fit into.
*
* @hsotg: The HCD state structure
* @qh: QH for the periodic transfer.
* @frame_number: The current frame number.
*
* Return: number missed by (or 0 if we didn't miss).
*/
static int dwc2_next_periodic_start(struct dwc2_hsotg *hsotg,
struct dwc2_qh *qh, u16 frame_number)
{
int missed = 0;
u16 interval = qh->host_interval;
u16 prev_frame_number = dwc2_frame_num_dec(frame_number, 1);
qh->start_active_frame = dwc2_frame_num_inc(qh->start_active_frame,
interval);
/*
* The dwc2_frame_num_gt() function used below won't work terribly well
* if we just incremented by a really large interval since the
* frame counter only goes to 0x3fff. It's terribly unlikely that we
* will have missed in this case anyway. Just go to exit. If we want
* to try to do better we'll need to keep track of a bigger counter
* somewhere in the driver and handle overflows.
*/
if (interval >= 0x1000)
goto exit;
/*
* Test for misses, which is when it's too late to schedule.
*
* A few things to note:
* - We compare against prev_frame_number since start_active_frame
* and next_active_frame are always 1 frame before we want things
* to be active and we assume we can still get scheduled in the
* current frame number.
* - It's possible for start_active_frame (now incremented) to be
* next_active_frame if we got an EO MISS (even_odd miss) which
* basically means that we detected there wasn't enough time for
* the last packet and dwc2_hc_set_even_odd_frame() rescheduled us
* at the last second. We want to make sure we don't schedule
* another transfer for the same frame. My test webcam doesn't seem
* terribly upset by missing a transfer but really doesn't like when
* we do two transfers in the same frame.
* - Some misses are expected. Specifically, in order to work
* perfectly dwc2 really needs quite spectacular interrupt latency
* requirements. It needs to be able to handle its interrupts
* completely within 125 us of them being asserted. That not only
* means that the dwc2 interrupt handler needs to be fast but it
* means that nothing else in the system has to block dwc2 for a long
* time. We can help with the dwc2 parts of this, but it's hard to
* guarantee that a system will have interrupt latency < 125 us, so
* we have to be robust to some misses.
*/
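/*
* Worked example of the miss handling below (numbers assumed, for
* illustration only): say start_active_frame was just advanced to 0x104
* with an effective interval of 4, but frame_number is already 0x109,
* so prev_frame_number == 0x108.  Since 0x108 is past 0x104 we missed;
* the loop below walks start_active_frame forward one interval to
* 0x108, and missed is reported as 0x108 - 0x104 == 4.
*/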
if (qh->start_active_frame == qh->next_active_frame ||
dwc2_frame_num_gt(prev_frame_number, qh->start_active_frame)) {
u16 ideal_start = qh->start_active_frame;
int periods_in_map;
/*
* Adjust interval as per gcd with map size.
* See pmap_schedule() for more details here.
*/
if (qh->do_split || qh->dev_speed == USB_SPEED_HIGH)
periods_in_map = DWC2_HS_SCHEDULE_UFRAMES;
else
periods_in_map = DWC2_LS_SCHEDULE_FRAMES;
interval = gcd(interval, periods_in_map);
do {
qh->start_active_frame = dwc2_frame_num_inc(
qh->start_active_frame, interval);
} while (dwc2_frame_num_gt(prev_frame_number,
qh->start_active_frame));
missed = dwc2_frame_num_dec(qh->start_active_frame,
ideal_start);
}
exit:
qh->next_active_frame = qh->start_active_frame;
return missed;
}
/*
* Deactivates a QH. For non-periodic QHs, removes the QH from the active
* non-periodic schedule. The QH is added to the inactive non-periodic
* schedule if any QTDs are still attached to the QH.
*
* For periodic QHs, the QH is removed from the periodic queued schedule. If
* there are any QTDs still attached to the QH, the QH is added to either the
* periodic inactive schedule or the periodic ready schedule and its next
* scheduled frame is calculated. The QH is placed in the ready schedule if
* the scheduled frame has been reached already. Otherwise it's placed in the
* inactive schedule. If there are no QTDs attached to the QH, the QH is
* completely removed from the periodic schedule.
*/
void dwc2_hcd_qh_deactivate(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh,
int sched_next_periodic_split)
{
u16 old_frame = qh->next_active_frame;
u16 frame_number;
int missed;
if (dbg_qh(qh))
dev_vdbg(hsotg->dev, "%s()\n", __func__);
if (dwc2_qh_is_non_per(qh)) {
dwc2_hcd_qh_unlink(hsotg, qh);
if (!list_empty(&qh->qtd_list))
/* Add back to inactive/waiting non-periodic schedule */
dwc2_hcd_qh_add(hsotg, qh);
return;
}
/*
* Use the real frame number rather than the cached value as of the
* last SOF just to get us a little closer to reality. Note that this
* means we don't actually know if we've already handled the SOF
* interrupt for this frame.
*/
frame_number = dwc2_hcd_get_frame_number(hsotg);
if (sched_next_periodic_split)
missed = dwc2_next_for_periodic_split(hsotg, qh, frame_number);
else
missed = dwc2_next_periodic_start(hsotg, qh, frame_number);
dwc2_sch_vdbg(hsotg,
"QH=%p next(%d) fn=%04x, sch=%04x=>%04x (%+d) miss=%d %s\n",
qh, sched_next_periodic_split, frame_number, old_frame,
qh->next_active_frame,
dwc2_frame_num_dec(qh->next_active_frame, old_frame),
missed, missed ? "MISS" : "");
if (list_empty(&qh->qtd_list)) {
dwc2_hcd_qh_unlink(hsotg, qh);
return;
}
/*
* Remove from periodic_sched_queued and move to
* appropriate queue
*
* Note: we purposely use the frame_number from the "hsotg" structure
* since we know SOF interrupt will handle future frames.
*/
if (dwc2_frame_num_le(qh->next_active_frame, hsotg->frame_number))
list_move_tail(&qh->qh_list_entry,
&hsotg->periodic_sched_ready);
else
list_move_tail(&qh->qh_list_entry,
&hsotg->periodic_sched_inactive);
}
/**
* dwc2_hcd_qtd_init() - Initializes a QTD structure
*
* @qtd: The QTD to initialize
* @urb: The associated URB
*/
void dwc2_hcd_qtd_init(struct dwc2_qtd *qtd, struct dwc2_hcd_urb *urb)
{
qtd->urb = urb;
if (dwc2_hcd_get_pipe_type(&urb->pipe_info) ==
USB_ENDPOINT_XFER_CONTROL) {
/*
* The only time the QTD data toggle is used is on the data
* phase of control transfers. This phase always starts with
* DATA1.
*/
qtd->data_toggle = DWC2_HC_PID_DATA1;
qtd->control_phase = DWC2_CONTROL_SETUP;
}
/* Start split */
qtd->complete_split = 0;
qtd->isoc_split_pos = DWC2_HCSPLT_XACTPOS_ALL;
qtd->isoc_split_offset = 0;
qtd->in_process = 0;
/* Store the qtd ptr in the urb to reference the QTD */
urb->qtd = qtd;
}
/**
* dwc2_hcd_qtd_add() - Adds a QTD to the QTD-list of a QH
* Caller must hold driver lock.
*
* @hsotg: The DWC HCD structure
* @qtd: The QTD to add
* @qh: Queue head to add qtd to
*
* Return: 0 if successful, negative error code otherwise
*
* If the QH to which the QTD is added is not currently scheduled, it is placed
* into the proper schedule based on its EP type.
*/
int dwc2_hcd_qtd_add(struct dwc2_hsotg *hsotg, struct dwc2_qtd *qtd,
struct dwc2_qh *qh)
{
int retval;
if (unlikely(!qh)) {
dev_err(hsotg->dev, "%s: Invalid QH\n", __func__);
retval = -EINVAL;
goto fail;
}
retval = dwc2_hcd_qh_add(hsotg, qh);
if (retval)
goto fail;
qtd->qh = qh;
list_add_tail(&qtd->qtd_list_entry, &qh->qtd_list);
return 0;
fail:
return retval;
}
| linux-master | drivers/usb/dwc2/hcd_queue.c |
// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
/*
* core_intr.c - DesignWare HS OTG Controller common interrupt handling
*
* Copyright (C) 2004-2013 Synopsys, Inc.
*/
/*
* This file contains the common interrupt handlers
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/dma-mapping.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/usb.h>
#include <linux/usb/hcd.h>
#include <linux/usb/ch11.h>
#include "core.h"
#include "hcd.h"
static const char *dwc2_op_state_str(struct dwc2_hsotg *hsotg)
{
switch (hsotg->op_state) {
case OTG_STATE_A_HOST:
return "a_host";
case OTG_STATE_A_SUSPEND:
return "a_suspend";
case OTG_STATE_A_PERIPHERAL:
return "a_peripheral";
case OTG_STATE_B_PERIPHERAL:
return "b_peripheral";
case OTG_STATE_B_HOST:
return "b_host";
default:
return "unknown";
}
}
/**
* dwc2_handle_usb_port_intr - handles OTG PRTINT interrupts.
* When the PRTINT interrupt fires, there are certain status bits in the Host
* Port that need to be cleared.
*
* @hsotg: Programming view of DWC_otg controller
*/
static void dwc2_handle_usb_port_intr(struct dwc2_hsotg *hsotg)
{
u32 hprt0 = dwc2_readl(hsotg, HPRT0);
if (hprt0 & HPRT0_ENACHG) {
hprt0 &= ~HPRT0_ENA;
dwc2_writel(hsotg, hprt0, HPRT0);
}
}
/**
* dwc2_handle_mode_mismatch_intr() - Logs a mode mismatch warning message
*
* @hsotg: Programming view of DWC_otg controller
*/
static void dwc2_handle_mode_mismatch_intr(struct dwc2_hsotg *hsotg)
{
/* Clear interrupt */
dwc2_writel(hsotg, GINTSTS_MODEMIS, GINTSTS);
dev_warn(hsotg->dev, "Mode Mismatch Interrupt: currently in %s mode\n",
dwc2_is_host_mode(hsotg) ? "Host" : "Device");
}
/**
* dwc2_handle_otg_intr() - Handles the OTG Interrupts. It reads the OTG
* Interrupt Register (GOTGINT) to determine what interrupt has occurred.
*
* @hsotg: Programming view of DWC_otg controller
*/
static void dwc2_handle_otg_intr(struct dwc2_hsotg *hsotg)
{
u32 gotgint;
u32 gotgctl;
u32 gintmsk;
gotgint = dwc2_readl(hsotg, GOTGINT);
gotgctl = dwc2_readl(hsotg, GOTGCTL);
dev_dbg(hsotg->dev, "++OTG Interrupt gotgint=%0x [%s]\n", gotgint,
dwc2_op_state_str(hsotg));
if (gotgint & GOTGINT_SES_END_DET) {
dev_dbg(hsotg->dev,
" ++OTG Interrupt: Session End Detected++ (%s)\n",
dwc2_op_state_str(hsotg));
gotgctl = dwc2_readl(hsotg, GOTGCTL);
if (dwc2_is_device_mode(hsotg))
dwc2_hsotg_disconnect(hsotg);
if (hsotg->op_state == OTG_STATE_B_HOST) {
hsotg->op_state = OTG_STATE_B_PERIPHERAL;
} else {
/*
* If not B_HOST and Device HNP still set, HNP did
* not succeed!
*/
if (gotgctl & GOTGCTL_DEVHNPEN) {
dev_dbg(hsotg->dev, "Session End Detected\n");
dev_err(hsotg->dev,
"Device Not Connected/Responding!\n");
}
/*
* If Session End Detected the B-Cable has been
* disconnected
*/
/* Reset to a clean state */
hsotg->lx_state = DWC2_L0;
}
gotgctl = dwc2_readl(hsotg, GOTGCTL);
gotgctl &= ~GOTGCTL_DEVHNPEN;
dwc2_writel(hsotg, gotgctl, GOTGCTL);
}
if (gotgint & GOTGINT_SES_REQ_SUC_STS_CHNG) {
dev_dbg(hsotg->dev,
" ++OTG Interrupt: Session Request Success Status Change++\n");
gotgctl = dwc2_readl(hsotg, GOTGCTL);
if (gotgctl & GOTGCTL_SESREQSCS) {
if (hsotg->params.phy_type == DWC2_PHY_TYPE_PARAM_FS &&
hsotg->params.i2c_enable) {
hsotg->srp_success = 1;
} else {
/* Clear Session Request */
gotgctl = dwc2_readl(hsotg, GOTGCTL);
gotgctl &= ~GOTGCTL_SESREQ;
dwc2_writel(hsotg, gotgctl, GOTGCTL);
}
}
}
if (gotgint & GOTGINT_HST_NEG_SUC_STS_CHNG) {
/*
* Print statements during the HNP interrupt handling
* can cause it to fail
*/
gotgctl = dwc2_readl(hsotg, GOTGCTL);
/*
* Workaround for 3.00a: HW is not setting cur_mode; even this
* sometimes does not help
*/
if (hsotg->hw_params.snpsid >= DWC2_CORE_REV_3_00a)
udelay(100);
if (gotgctl & GOTGCTL_HSTNEGSCS) {
if (dwc2_is_host_mode(hsotg)) {
hsotg->op_state = OTG_STATE_B_HOST;
/*
* Need to disable SOF interrupt immediately.
* When switching from device to host, the PCD
* interrupt handler won't handle the interrupt
* if host mode is already set. The HCD
* interrupt handler won't get called if the
* HCD state is HALT. This means that the
* interrupt does not get handled and Linux
* complains loudly.
*/
gintmsk = dwc2_readl(hsotg, GINTMSK);
gintmsk &= ~GINTSTS_SOF;
dwc2_writel(hsotg, gintmsk, GINTMSK);
/*
* Call callback function with spin lock
* released
*/
spin_unlock(&hsotg->lock);
/* Initialize the Core for Host mode */
dwc2_hcd_start(hsotg);
spin_lock(&hsotg->lock);
hsotg->op_state = OTG_STATE_B_HOST;
}
} else {
gotgctl = dwc2_readl(hsotg, GOTGCTL);
gotgctl &= ~(GOTGCTL_HNPREQ | GOTGCTL_DEVHNPEN);
dwc2_writel(hsotg, gotgctl, GOTGCTL);
dev_dbg(hsotg->dev, "HNP Failed\n");
dev_err(hsotg->dev,
"Device Not Connected/Responding\n");
}
}
if (gotgint & GOTGINT_HST_NEG_DET) {
/*
* The disconnect interrupt is set at the same time as
* Host Negotiation Detected. During the mode switch all
* interrupts are cleared so the disconnect interrupt
* handler will not get executed.
*/
dev_dbg(hsotg->dev,
" ++OTG Interrupt: Host Negotiation Detected++ (%s)\n",
(dwc2_is_host_mode(hsotg) ? "Host" : "Device"));
if (dwc2_is_device_mode(hsotg)) {
dev_dbg(hsotg->dev, "a_suspend->a_peripheral (%d)\n",
hsotg->op_state);
spin_unlock(&hsotg->lock);
dwc2_hcd_disconnect(hsotg, false);
spin_lock(&hsotg->lock);
hsotg->op_state = OTG_STATE_A_PERIPHERAL;
} else {
/* Need to disable SOF interrupt immediately */
gintmsk = dwc2_readl(hsotg, GINTMSK);
gintmsk &= ~GINTSTS_SOF;
dwc2_writel(hsotg, gintmsk, GINTMSK);
spin_unlock(&hsotg->lock);
dwc2_hcd_start(hsotg);
spin_lock(&hsotg->lock);
hsotg->op_state = OTG_STATE_A_HOST;
}
}
if (gotgint & GOTGINT_A_DEV_TOUT_CHG)
dev_dbg(hsotg->dev,
" ++OTG Interrupt: A-Device Timeout Change++\n");
if (gotgint & GOTGINT_DBNCE_DONE)
dev_dbg(hsotg->dev, " ++OTG Interrupt: Debounce Done++\n");
/* Clear GOTGINT */
dwc2_writel(hsotg, gotgint, GOTGINT);
}
/**
* dwc2_handle_conn_id_status_change_intr() - Handles the Connector ID Status
* Change Interrupt
*
* @hsotg: Programming view of DWC_otg controller
*
* Reads the OTG Control register (GOTGCTL) to determine whether this is a
* Device to Host Mode transition or a Host to Device Mode transition. This only
* occurs when the cable is connected/removed from the PHY connector.
*/
static void dwc2_handle_conn_id_status_change_intr(struct dwc2_hsotg *hsotg)
{
u32 gintmsk;
/* Clear interrupt */
dwc2_writel(hsotg, GINTSTS_CONIDSTSCHNG, GINTSTS);
/* Need to disable SOF interrupt immediately */
gintmsk = dwc2_readl(hsotg, GINTMSK);
gintmsk &= ~GINTSTS_SOF;
dwc2_writel(hsotg, gintmsk, GINTMSK);
dev_dbg(hsotg->dev, " ++Connector ID Status Change Interrupt++ (%s)\n",
dwc2_is_host_mode(hsotg) ? "Host" : "Device");
/*
* Need to schedule work, as there are possible delay function calls.
*/
if (hsotg->wq_otg)
queue_work(hsotg->wq_otg, &hsotg->wf_otg);
}
/**
* dwc2_handle_session_req_intr() - This interrupt indicates that a device is
* initiating the Session Request Protocol to request the host to turn on bus
* power so a new session can begin
*
* @hsotg: Programming view of DWC_otg controller
*
* This handler responds by turning on bus power. If the DWC_otg controller is
* in low power mode, this handler brings the controller out of low power mode
* before turning on bus power.
*/
static void dwc2_handle_session_req_intr(struct dwc2_hsotg *hsotg)
{
int ret;
u32 hprt0;
/* Clear interrupt */
dwc2_writel(hsotg, GINTSTS_SESSREQINT, GINTSTS);
dev_dbg(hsotg->dev, "Session request interrupt - lx_state=%d\n",
hsotg->lx_state);
if (dwc2_is_device_mode(hsotg)) {
if (hsotg->lx_state == DWC2_L2) {
if (hsotg->in_ppd) {
ret = dwc2_exit_partial_power_down(hsotg, 0,
true);
if (ret)
dev_err(hsotg->dev,
"exit power_down failed\n");
}
/* Exit gadget mode clock gating. */
if (hsotg->params.power_down ==
DWC2_POWER_DOWN_PARAM_NONE && hsotg->bus_suspended)
dwc2_gadget_exit_clock_gating(hsotg, 0);
}
/*
* Report disconnect if there is any previous session
* established
*/
dwc2_hsotg_disconnect(hsotg);
} else {
/* Turn on the port power bit. */
hprt0 = dwc2_read_hprt0(hsotg);
hprt0 |= HPRT0_PWR;
dwc2_writel(hsotg, hprt0, HPRT0);
/* Connect hcd after port power is set. */
dwc2_hcd_connect(hsotg);
}
}
/**
* dwc2_wakeup_from_lpm_l1 - Exit the device from LPM L1 state
*
* @hsotg: Programming view of DWC_otg controller
*
*/
static void dwc2_wakeup_from_lpm_l1(struct dwc2_hsotg *hsotg)
{
u32 glpmcfg;
u32 i = 0;
if (hsotg->lx_state != DWC2_L1) {
dev_err(hsotg->dev, "Core isn't in DWC2_L1 state\n");
return;
}
glpmcfg = dwc2_readl(hsotg, GLPMCFG);
if (dwc2_is_device_mode(hsotg)) {
dev_dbg(hsotg->dev, "Exit from L1 state\n");
glpmcfg &= ~GLPMCFG_ENBLSLPM;
glpmcfg &= ~GLPMCFG_HIRD_THRES_EN;
dwc2_writel(hsotg, glpmcfg, GLPMCFG);
do {
glpmcfg = dwc2_readl(hsotg, GLPMCFG);
if (!(glpmcfg & (GLPMCFG_COREL1RES_MASK |
GLPMCFG_L1RESUMEOK | GLPMCFG_SLPSTS)))
break;
udelay(1);
} while (++i < 200);
if (i == 200) {
dev_err(hsotg->dev, "Failed to exit L1 sleep state in 200us.\n");
return;
}
dwc2_gadget_init_lpm(hsotg);
} else {
/* TODO */
dev_err(hsotg->dev, "Host side LPM is not supported.\n");
return;
}
/* Change to L0 state */
hsotg->lx_state = DWC2_L0;
/* Inform gadget to exit from L1 */
call_gadget(hsotg, resume);
}
/*
* This interrupt indicates that the DWC_otg controller has detected a
* resume or remote wakeup sequence. If the DWC_otg controller is in
* low power mode, the handler must bring the controller out of low
* power mode. The controller automatically begins resume signaling.
* The handler schedules a time to stop resume signaling.
*/
static void dwc2_handle_wakeup_detected_intr(struct dwc2_hsotg *hsotg)
{
int ret;
/* Clear interrupt */
dwc2_writel(hsotg, GINTSTS_WKUPINT, GINTSTS);
dev_dbg(hsotg->dev, "++Resume or Remote Wakeup Detected Interrupt++\n");
dev_dbg(hsotg->dev, "%s lxstate = %d\n", __func__, hsotg->lx_state);
if (hsotg->lx_state == DWC2_L1) {
dwc2_wakeup_from_lpm_l1(hsotg);
return;
}
if (dwc2_is_device_mode(hsotg)) {
dev_dbg(hsotg->dev, "DSTS=0x%0x\n",
dwc2_readl(hsotg, DSTS));
if (hsotg->lx_state == DWC2_L2) {
if (hsotg->in_ppd) {
u32 dctl = dwc2_readl(hsotg, DCTL);
/* Clear Remote Wakeup Signaling */
dctl &= ~DCTL_RMTWKUPSIG;
dwc2_writel(hsotg, dctl, DCTL);
ret = dwc2_exit_partial_power_down(hsotg, 1,
true);
if (ret)
dev_err(hsotg->dev,
"exit partial_power_down failed\n");
call_gadget(hsotg, resume);
}
/* Exit gadget mode clock gating. */
if (hsotg->params.power_down ==
DWC2_POWER_DOWN_PARAM_NONE && hsotg->bus_suspended)
dwc2_gadget_exit_clock_gating(hsotg, 0);
} else {
/* Change to L0 state */
hsotg->lx_state = DWC2_L0;
}
} else {
if (hsotg->lx_state == DWC2_L2) {
if (hsotg->in_ppd) {
ret = dwc2_exit_partial_power_down(hsotg, 1,
true);
if (ret)
dev_err(hsotg->dev,
"exit partial_power_down failed\n");
}
if (hsotg->params.power_down ==
DWC2_POWER_DOWN_PARAM_NONE && hsotg->bus_suspended)
dwc2_host_exit_clock_gating(hsotg, 1);
/*
* If we've got this quirk then the PHY is stuck upon
* wakeup. Assert reset. This will propagate out and
* eventually we'll re-enumerate the device. Not great
* but the best we can do. We can't call phy_reset()
* at interrupt time but there's no hurry, so we'll
* schedule it for later.
*/
if (hsotg->reset_phy_on_wake)
dwc2_host_schedule_phy_reset(hsotg);
mod_timer(&hsotg->wkp_timer,
jiffies + msecs_to_jiffies(71));
} else {
/* Change to L0 state */
hsotg->lx_state = DWC2_L0;
}
}
}
/*
* This interrupt indicates that a device has been disconnected from the
* root port
*/
static void dwc2_handle_disconnect_intr(struct dwc2_hsotg *hsotg)
{
dwc2_writel(hsotg, GINTSTS_DISCONNINT, GINTSTS);
dev_dbg(hsotg->dev, "++Disconnect Detected Interrupt++ (%s) %s\n",
dwc2_is_host_mode(hsotg) ? "Host" : "Device",
dwc2_op_state_str(hsotg));
if (hsotg->op_state == OTG_STATE_A_HOST)
dwc2_hcd_disconnect(hsotg, false);
}
/*
* This interrupt indicates that SUSPEND state has been detected on the USB.
*
* For HNP the USB Suspend interrupt signals the change from "a_peripheral"
* to "a_host".
*
* When power management is enabled the core will be put in low power mode.
*/
static void dwc2_handle_usb_suspend_intr(struct dwc2_hsotg *hsotg)
{
u32 dsts;
int ret;
/* Clear interrupt */
dwc2_writel(hsotg, GINTSTS_USBSUSP, GINTSTS);
dev_dbg(hsotg->dev, "USB SUSPEND\n");
if (dwc2_is_device_mode(hsotg)) {
/*
* Check the Device status register to determine if the Suspend
* state is active
*/
dsts = dwc2_readl(hsotg, DSTS);
dev_dbg(hsotg->dev, "%s: DSTS=0x%0x\n", __func__, dsts);
dev_dbg(hsotg->dev,
"DSTS.Suspend Status=%d HWCFG4.Power Optimize=%d HWCFG4.Hibernation=%d\n",
!!(dsts & DSTS_SUSPSTS),
hsotg->hw_params.power_optimized,
hsotg->hw_params.hibernation);
/* Ignore suspend request before enumeration */
if (!dwc2_is_device_connected(hsotg)) {
dev_dbg(hsotg->dev,
"ignore suspend request before enumeration\n");
return;
}
if (dsts & DSTS_SUSPSTS) {
switch (hsotg->params.power_down) {
case DWC2_POWER_DOWN_PARAM_PARTIAL:
ret = dwc2_enter_partial_power_down(hsotg);
if (ret)
dev_err(hsotg->dev,
"enter partial_power_down failed\n");
udelay(100);
/* Ask phy to be suspended */
if (!IS_ERR_OR_NULL(hsotg->uphy))
usb_phy_set_suspend(hsotg->uphy, true);
break;
case DWC2_POWER_DOWN_PARAM_HIBERNATION:
ret = dwc2_enter_hibernation(hsotg, 0);
if (ret)
dev_err(hsotg->dev,
"enter hibernation failed\n");
break;
case DWC2_POWER_DOWN_PARAM_NONE:
/*
* If neither hibernation nor partial power down is supported,
* clock gating is used to save power.
*/
if (!hsotg->params.no_clock_gating)
dwc2_gadget_enter_clock_gating(hsotg);
}
/*
* Change to L2 (suspend) state before releasing
* spinlock
*/
hsotg->lx_state = DWC2_L2;
/* Call gadget suspend callback */
call_gadget(hsotg, suspend);
}
} else {
if (hsotg->op_state == OTG_STATE_A_PERIPHERAL) {
dev_dbg(hsotg->dev, "a_peripheral->a_host\n");
/* Change to L2 (suspend) state */
hsotg->lx_state = DWC2_L2;
/* Clear the a_peripheral flag, back to a_host */
spin_unlock(&hsotg->lock);
dwc2_hcd_start(hsotg);
spin_lock(&hsotg->lock);
hsotg->op_state = OTG_STATE_A_HOST;
}
}
}
/**
* dwc2_handle_lpm_intr - GINTSTS_LPMTRANRCVD Interrupt handler
*
* @hsotg: Programming view of DWC_otg controller
*
*/
static void dwc2_handle_lpm_intr(struct dwc2_hsotg *hsotg)
{
u32 glpmcfg;
u32 pcgcctl;
u32 hird;
u32 hird_thres;
u32 hird_thres_en;
u32 enslpm;
/* Clear interrupt */
dwc2_writel(hsotg, GINTSTS_LPMTRANRCVD, GINTSTS);
glpmcfg = dwc2_readl(hsotg, GLPMCFG);
if (!(glpmcfg & GLPMCFG_LPMCAP)) {
dev_err(hsotg->dev, "Unexpected LPM interrupt\n");
return;
}
hird = (glpmcfg & GLPMCFG_HIRD_MASK) >> GLPMCFG_HIRD_SHIFT;
hird_thres = (glpmcfg & GLPMCFG_HIRD_THRES_MASK &
~GLPMCFG_HIRD_THRES_EN) >> GLPMCFG_HIRD_THRES_SHIFT;
hird_thres_en = glpmcfg & GLPMCFG_HIRD_THRES_EN;
enslpm = glpmcfg & GLPMCFG_ENBLSLPM;
if (dwc2_is_device_mode(hsotg)) {
dev_dbg(hsotg->dev, "HIRD_THRES_EN = %d\n", hird_thres_en);
if (hird_thres_en && hird >= hird_thres) {
dev_dbg(hsotg->dev, "L1 with utmi_l1_suspend_n\n");
} else if (enslpm) {
dev_dbg(hsotg->dev, "L1 with utmi_sleep_n\n");
} else {
dev_dbg(hsotg->dev, "Entering Sleep with L1 Gating\n");
pcgcctl = dwc2_readl(hsotg, PCGCTL);
pcgcctl |= PCGCTL_ENBL_SLEEP_GATING;
dwc2_writel(hsotg, pcgcctl, PCGCTL);
}
/**
* Examine prt_sleep_sts after TL1TokenTetry period max (10 us)
*/
udelay(10);
glpmcfg = dwc2_readl(hsotg, GLPMCFG);
if (glpmcfg & GLPMCFG_SLPSTS) {
/* Save the current state */
hsotg->lx_state = DWC2_L1;
dev_dbg(hsotg->dev,
"Core is in L1 sleep glpmcfg=%08x\n", glpmcfg);
/* Inform gadget that we are in L1 state */
call_gadget(hsotg, suspend);
}
}
}
#define GINTMSK_COMMON (GINTSTS_WKUPINT | GINTSTS_SESSREQINT | \
GINTSTS_CONIDSTSCHNG | GINTSTS_OTGINT | \
GINTSTS_MODEMIS | GINTSTS_DISCONNINT | \
GINTSTS_USBSUSP | GINTSTS_PRTINT | \
GINTSTS_LPMTRANRCVD)
/*
* This function returns the common interrupts from GINTSTS that are both
* unmasked in GINTMSK and included in GINTMSK_COMMON, or 0 if the global
* interrupt enable bit in GAHBCFG is clear
*/
static u32 dwc2_read_common_intr(struct dwc2_hsotg *hsotg)
{
u32 gintsts;
u32 gintmsk;
u32 gahbcfg;
u32 gintmsk_common = GINTMSK_COMMON;
gintsts = dwc2_readl(hsotg, GINTSTS);
gintmsk = dwc2_readl(hsotg, GINTMSK);
gahbcfg = dwc2_readl(hsotg, GAHBCFG);
/* If any common interrupts set */
if (gintsts & gintmsk_common)
dev_dbg(hsotg->dev, "gintsts=%08x gintmsk=%08x\n",
gintsts, gintmsk);
if (gahbcfg & GAHBCFG_GLBL_INTR_EN)
return gintsts & gintmsk & gintmsk_common;
else
return 0;
}
/**
* dwc_handle_gpwrdn_disc_det() - Handles the gpwrdn disconnect detect.
* Exits hibernation without restoring registers.
*
* @hsotg: Programming view of DWC_otg controller
* @gpwrdn: GPWRDN register
*/
static inline void dwc_handle_gpwrdn_disc_det(struct dwc2_hsotg *hsotg,
u32 gpwrdn)
{
u32 gpwrdn_tmp;
/* Switch-on voltage to the core */
gpwrdn_tmp = dwc2_readl(hsotg, GPWRDN);
gpwrdn_tmp &= ~GPWRDN_PWRDNSWTCH;
dwc2_writel(hsotg, gpwrdn_tmp, GPWRDN);
udelay(5);
/* Reset core */
gpwrdn_tmp = dwc2_readl(hsotg, GPWRDN);
gpwrdn_tmp &= ~GPWRDN_PWRDNRSTN;
dwc2_writel(hsotg, gpwrdn_tmp, GPWRDN);
udelay(5);
/* Disable Power Down Clamp */
gpwrdn_tmp = dwc2_readl(hsotg, GPWRDN);
gpwrdn_tmp &= ~GPWRDN_PWRDNCLMP;
dwc2_writel(hsotg, gpwrdn_tmp, GPWRDN);
udelay(5);
/* Deassert reset core */
gpwrdn_tmp = dwc2_readl(hsotg, GPWRDN);
gpwrdn_tmp |= GPWRDN_PWRDNRSTN;
dwc2_writel(hsotg, gpwrdn_tmp, GPWRDN);
udelay(5);
/* Disable PMU interrupt */
gpwrdn_tmp = dwc2_readl(hsotg, GPWRDN);
gpwrdn_tmp &= ~GPWRDN_PMUINTSEL;
dwc2_writel(hsotg, gpwrdn_tmp, GPWRDN);
/* De-assert Wakeup Logic */
gpwrdn_tmp = dwc2_readl(hsotg, GPWRDN);
gpwrdn_tmp &= ~GPWRDN_PMUACTV;
dwc2_writel(hsotg, gpwrdn_tmp, GPWRDN);
hsotg->hibernated = 0;
hsotg->bus_suspended = 0;
if (gpwrdn & GPWRDN_IDSTS) {
hsotg->op_state = OTG_STATE_B_PERIPHERAL;
dwc2_core_init(hsotg, false);
dwc2_enable_global_interrupts(hsotg);
dwc2_hsotg_core_init_disconnected(hsotg, false);
dwc2_hsotg_core_connect(hsotg);
} else {
hsotg->op_state = OTG_STATE_A_HOST;
/* Initialize the Core for Host mode */
dwc2_core_init(hsotg, false);
dwc2_enable_global_interrupts(hsotg);
dwc2_hcd_start(hsotg);
}
}
/*
* GPWRDN interrupt handler.
*
* The GPWRDN interrupts are those that occur in both Host and
* Device mode while the core is in the hibernated state.
*/
static int dwc2_handle_gpwrdn_intr(struct dwc2_hsotg *hsotg)
{
u32 gpwrdn;
int linestate;
int ret = 0;
gpwrdn = dwc2_readl(hsotg, GPWRDN);
/* clear all interrupts */
dwc2_writel(hsotg, gpwrdn, GPWRDN);
linestate = (gpwrdn & GPWRDN_LINESTATE_MASK) >> GPWRDN_LINESTATE_SHIFT;
dev_dbg(hsotg->dev,
"%s: dwc2_handle_gpwrdwn_intr called gpwrdn= %08x\n", __func__,
gpwrdn);
if ((gpwrdn & GPWRDN_DISCONN_DET) &&
(gpwrdn & GPWRDN_DISCONN_DET_MSK) && !linestate) {
dev_dbg(hsotg->dev, "%s: GPWRDN_DISCONN_DET\n", __func__);
/*
* Call disconnect detect function to exit from
* hibernation
*/
dwc_handle_gpwrdn_disc_det(hsotg, gpwrdn);
} else if ((gpwrdn & GPWRDN_LNSTSCHG) &&
(gpwrdn & GPWRDN_LNSTSCHG_MSK) && linestate) {
dev_dbg(hsotg->dev, "%s: GPWRDN_LNSTSCHG\n", __func__);
if (hsotg->hw_params.hibernation &&
hsotg->hibernated) {
if (gpwrdn & GPWRDN_IDSTS) {
ret = dwc2_exit_hibernation(hsotg, 0, 0, 0);
if (ret)
dev_err(hsotg->dev,
"exit hibernation failed.\n");
call_gadget(hsotg, resume);
} else {
ret = dwc2_exit_hibernation(hsotg, 1, 0, 1);
if (ret)
dev_err(hsotg->dev,
"exit hibernation failed.\n");
}
}
} else if ((gpwrdn & GPWRDN_RST_DET) &&
(gpwrdn & GPWRDN_RST_DET_MSK)) {
dev_dbg(hsotg->dev, "%s: GPWRDN_RST_DET\n", __func__);
if (!linestate) {
ret = dwc2_exit_hibernation(hsotg, 0, 1, 0);
if (ret)
dev_err(hsotg->dev,
"exit hibernation failed.\n");
}
} else if ((gpwrdn & GPWRDN_STS_CHGINT) &&
(gpwrdn & GPWRDN_STS_CHGINT_MSK)) {
dev_dbg(hsotg->dev, "%s: GPWRDN_STS_CHGINT\n", __func__);
/*
* The GPWRDN_STS_CHGINT exit-from-hibernation flow is
* the same as the GPWRDN_DISCONN_DET flow, so call the
* disconnect detect helper function to exit from
* hibernation.
*/
dwc_handle_gpwrdn_disc_det(hsotg, gpwrdn);
}
return ret;
}
/*
* Common interrupt handler
*
* The common interrupts are those that occur in both Host and Device mode.
* This handler handles the following interrupts:
* - Mode Mismatch Interrupt
* - OTG Interrupt
* - Connector ID Status Change Interrupt
* - Disconnect Interrupt
* - Session Request Interrupt
* - Resume / Remote Wakeup Detected Interrupt
* - Suspend Interrupt
*/
irqreturn_t dwc2_handle_common_intr(int irq, void *dev)
{
struct dwc2_hsotg *hsotg = dev;
u32 gintsts;
irqreturn_t retval = IRQ_NONE;
spin_lock(&hsotg->lock);
if (!dwc2_is_controller_alive(hsotg)) {
dev_warn(hsotg->dev, "Controller is dead\n");
goto out;
}
/* Reading current frame number value in device or host modes. */
if (dwc2_is_device_mode(hsotg))
hsotg->frame_number = (dwc2_readl(hsotg, DSTS)
& DSTS_SOFFN_MASK) >> DSTS_SOFFN_SHIFT;
else
hsotg->frame_number = (dwc2_readl(hsotg, HFNUM)
& HFNUM_FRNUM_MASK) >> HFNUM_FRNUM_SHIFT;
gintsts = dwc2_read_common_intr(hsotg);
if (gintsts & ~GINTSTS_PRTINT)
retval = IRQ_HANDLED;
/* While hibernated GINTSTS is not functional, so handle GPWRDN interrupts instead */
if (hsotg->hibernated) {
dwc2_handle_gpwrdn_intr(hsotg);
retval = IRQ_HANDLED;
goto out;
}
if (gintsts & GINTSTS_MODEMIS)
dwc2_handle_mode_mismatch_intr(hsotg);
if (gintsts & GINTSTS_OTGINT)
dwc2_handle_otg_intr(hsotg);
if (gintsts & GINTSTS_CONIDSTSCHNG)
dwc2_handle_conn_id_status_change_intr(hsotg);
if (gintsts & GINTSTS_DISCONNINT)
dwc2_handle_disconnect_intr(hsotg);
if (gintsts & GINTSTS_SESSREQINT)
dwc2_handle_session_req_intr(hsotg);
if (gintsts & GINTSTS_WKUPINT)
dwc2_handle_wakeup_detected_intr(hsotg);
if (gintsts & GINTSTS_USBSUSP)
dwc2_handle_usb_suspend_intr(hsotg);
if (gintsts & GINTSTS_LPMTRANRCVD)
dwc2_handle_lpm_intr(hsotg);
if (gintsts & GINTSTS_PRTINT) {
/*
* The port interrupt occurs while in device mode with HPRT0
* Port Enable/Disable
*/
if (dwc2_is_device_mode(hsotg)) {
dev_dbg(hsotg->dev,
" --Port interrupt received in Device mode--\n");
dwc2_handle_usb_port_intr(hsotg);
retval = IRQ_HANDLED;
}
}
out:
spin_unlock(&hsotg->lock);
return retval;
}
| linux-master | drivers/usb/dwc2/core_intr.c |
// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
/*
* hcd.c - DesignWare HS OTG Controller host-mode routines
*
* Copyright (C) 2004-2013 Synopsys, Inc.
*/
/*
* This file contains the core HCD code, and implements the Linux hc_driver
* API
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/usb.h>
#include <linux/usb/hcd.h>
#include <linux/usb/ch11.h>
#include <linux/usb/of.h>
#include "core.h"
#include "hcd.h"
/*
* =========================================================================
* Host Core Layer Functions
* =========================================================================
*/
/**
* dwc2_enable_common_interrupts() - Initializes the common interrupts,
* used in both device and host modes
*
* @hsotg: Programming view of the DWC_otg controller
*/
static void dwc2_enable_common_interrupts(struct dwc2_hsotg *hsotg)
{
u32 intmsk;
/* Clear any pending OTG Interrupts */
dwc2_writel(hsotg, 0xffffffff, GOTGINT);
/* Clear any pending interrupts */
dwc2_writel(hsotg, 0xffffffff, GINTSTS);
/* Enable the interrupts in the GINTMSK */
intmsk = GINTSTS_MODEMIS | GINTSTS_OTGINT;
if (!hsotg->params.host_dma)
intmsk |= GINTSTS_RXFLVL;
if (!hsotg->params.external_id_pin_ctl)
intmsk |= GINTSTS_CONIDSTSCHNG;
intmsk |= GINTSTS_WKUPINT | GINTSTS_USBSUSP |
GINTSTS_SESSREQINT;
if (dwc2_is_device_mode(hsotg) && hsotg->params.lpm)
intmsk |= GINTSTS_LPMTRANRCVD;
dwc2_writel(hsotg, intmsk, GINTMSK);
}
static int dwc2_gahbcfg_init(struct dwc2_hsotg *hsotg)
{
u32 ahbcfg = dwc2_readl(hsotg, GAHBCFG);
switch (hsotg->hw_params.arch) {
case GHWCFG2_EXT_DMA_ARCH:
dev_err(hsotg->dev, "External DMA Mode not supported\n");
return -EINVAL;
case GHWCFG2_INT_DMA_ARCH:
dev_dbg(hsotg->dev, "Internal DMA Mode\n");
if (hsotg->params.ahbcfg != -1) {
ahbcfg &= GAHBCFG_CTRL_MASK;
ahbcfg |= hsotg->params.ahbcfg &
~GAHBCFG_CTRL_MASK;
}
break;
case GHWCFG2_SLAVE_ONLY_ARCH:
default:
dev_dbg(hsotg->dev, "Slave Only Mode\n");
break;
}
if (hsotg->params.host_dma)
ahbcfg |= GAHBCFG_DMA_EN;
else
hsotg->params.dma_desc_enable = false;
dwc2_writel(hsotg, ahbcfg, GAHBCFG);
return 0;
}
static void dwc2_gusbcfg_init(struct dwc2_hsotg *hsotg)
{
u32 usbcfg;
usbcfg = dwc2_readl(hsotg, GUSBCFG);
usbcfg &= ~(GUSBCFG_HNPCAP | GUSBCFG_SRPCAP);
switch (hsotg->hw_params.op_mode) {
case GHWCFG2_OP_MODE_HNP_SRP_CAPABLE:
if (hsotg->params.otg_caps.hnp_support &&
hsotg->params.otg_caps.srp_support)
usbcfg |= GUSBCFG_HNPCAP;
fallthrough;
case GHWCFG2_OP_MODE_SRP_ONLY_CAPABLE:
case GHWCFG2_OP_MODE_SRP_CAPABLE_DEVICE:
case GHWCFG2_OP_MODE_SRP_CAPABLE_HOST:
if (hsotg->params.otg_caps.srp_support)
usbcfg |= GUSBCFG_SRPCAP;
break;
case GHWCFG2_OP_MODE_NO_HNP_SRP_CAPABLE:
case GHWCFG2_OP_MODE_NO_SRP_CAPABLE_DEVICE:
case GHWCFG2_OP_MODE_NO_SRP_CAPABLE_HOST:
default:
break;
}
dwc2_writel(hsotg, usbcfg, GUSBCFG);
}
static int dwc2_vbus_supply_init(struct dwc2_hsotg *hsotg)
{
if (hsotg->vbus_supply)
return regulator_enable(hsotg->vbus_supply);
return 0;
}
static int dwc2_vbus_supply_exit(struct dwc2_hsotg *hsotg)
{
if (hsotg->vbus_supply)
return regulator_disable(hsotg->vbus_supply);
return 0;
}
/**
* dwc2_enable_host_interrupts() - Enables the Host mode interrupts
*
* @hsotg: Programming view of DWC_otg controller
*/
static void dwc2_enable_host_interrupts(struct dwc2_hsotg *hsotg)
{
u32 intmsk;
dev_dbg(hsotg->dev, "%s()\n", __func__);
/* Disable all interrupts */
dwc2_writel(hsotg, 0, GINTMSK);
dwc2_writel(hsotg, 0, HAINTMSK);
/* Enable the common interrupts */
dwc2_enable_common_interrupts(hsotg);
/* Enable host mode interrupts without disturbing common interrupts */
intmsk = dwc2_readl(hsotg, GINTMSK);
intmsk |= GINTSTS_DISCONNINT | GINTSTS_PRTINT | GINTSTS_HCHINT;
dwc2_writel(hsotg, intmsk, GINTMSK);
}
/**
* dwc2_disable_host_interrupts() - Disables the Host Mode interrupts
*
* @hsotg: Programming view of DWC_otg controller
*/
static void dwc2_disable_host_interrupts(struct dwc2_hsotg *hsotg)
{
u32 intmsk = dwc2_readl(hsotg, GINTMSK);
/* Disable host mode interrupts without disturbing common interrupts */
intmsk &= ~(GINTSTS_SOF | GINTSTS_PRTINT | GINTSTS_HCHINT |
GINTSTS_PTXFEMP | GINTSTS_NPTXFEMP | GINTSTS_DISCONNINT);
dwc2_writel(hsotg, intmsk, GINTMSK);
}
/*
* dwc2_calculate_dynamic_fifo() - Calculates the default fifo size
* For systems that have a total FIFO depth that is smaller than the default
* RX + TX fifo size.
*
* @hsotg: Programming view of DWC_otg controller
*/
static void dwc2_calculate_dynamic_fifo(struct dwc2_hsotg *hsotg)
{
struct dwc2_core_params *params = &hsotg->params;
struct dwc2_hw_params *hw = &hsotg->hw_params;
u32 rxfsiz, nptxfsiz, ptxfsiz, total_fifo_size;
total_fifo_size = hw->total_fifo_size;
rxfsiz = params->host_rx_fifo_size;
nptxfsiz = params->host_nperio_tx_fifo_size;
ptxfsiz = params->host_perio_tx_fifo_size;
/*
* Use Method 2 defined in the DWC2 spec: minimum FIFO depth
* allocation with support for high bandwidth endpoints. Synopsys
* defines the max packet size (MPS) as 1024 for a periodic EP and
* 512 for a non-periodic EP.
*/
if (total_fifo_size < (rxfsiz + nptxfsiz + ptxfsiz)) {
/*
* For Buffer DMA mode/Scatter Gather DMA mode
* 2 * ((Largest Packet size / 4) + 1 + 1) + n
* with n = number of host channels.
* 2 * ((1024/4) + 2) = 516
*/
rxfsiz = 516 + hw->host_channels;
/*
* min non-periodic tx fifo depth
* 2 * (largest non-periodic USB packet used / 4)
* 2 * (512/4) = 256
*/
nptxfsiz = 256;
/*
* min periodic tx fifo depth
* (largest packet size*MC)/4
* (1024 * 3)/4 = 768
*/
ptxfsiz = 768;
params->host_rx_fifo_size = rxfsiz;
params->host_nperio_tx_fifo_size = nptxfsiz;
params->host_perio_tx_fifo_size = ptxfsiz;
}
/*
* If the summation of RX, NPTX and PTX fifo sizes is still
* bigger than the total_fifo_size, then we have a problem.
*
* We won't be able to allocate as many endpoints. Right now,
* we're just printing an error message, but ideally this FIFO
* allocation algorithm would be improved in the future.
*
* FIXME improve this FIFO allocation algorithm.
*/
if (unlikely(total_fifo_size < (rxfsiz + nptxfsiz + ptxfsiz)))
dev_err(hsotg->dev, "invalid fifo sizes\n");
}
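/*
 * Illustrative sketch, not part of the driver: a worked example of the
 * Method 2 fallback sizing computed above. The helper name and the example
 * channel count in the comment are hypothetical; the constants mirror
 * dwc2_calculate_dynamic_fifo(). All sizes are in 32-bit words.
 */
static inline u32 dwc2_example_method2_fifo_total(u32 host_channels)
{
	u32 rxfsiz = 516 + host_channels;	/* 2 * ((1024 / 4) + 2) + n */
	u32 nptxfsiz = 256;			/* 2 * (512 / 4) */
	u32 ptxfsiz = 768;			/* (1024 * 3) / 4 */

	/* e.g. with 16 host channels: 532 + 256 + 768 = 1556 words */
	return rxfsiz + nptxfsiz + ptxfsiz;
}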
static void dwc2_config_fifos(struct dwc2_hsotg *hsotg)
{
struct dwc2_core_params *params = &hsotg->params;
u32 nptxfsiz, hptxfsiz, dfifocfg, grxfsiz;
if (!params->enable_dynamic_fifo)
return;
dwc2_calculate_dynamic_fifo(hsotg);
/* Rx FIFO */
grxfsiz = dwc2_readl(hsotg, GRXFSIZ);
dev_dbg(hsotg->dev, "initial grxfsiz=%08x\n", grxfsiz);
grxfsiz &= ~GRXFSIZ_DEPTH_MASK;
grxfsiz |= params->host_rx_fifo_size <<
GRXFSIZ_DEPTH_SHIFT & GRXFSIZ_DEPTH_MASK;
dwc2_writel(hsotg, grxfsiz, GRXFSIZ);
dev_dbg(hsotg->dev, "new grxfsiz=%08x\n",
dwc2_readl(hsotg, GRXFSIZ));
/* Non-periodic Tx FIFO */
dev_dbg(hsotg->dev, "initial gnptxfsiz=%08x\n",
dwc2_readl(hsotg, GNPTXFSIZ));
nptxfsiz = params->host_nperio_tx_fifo_size <<
FIFOSIZE_DEPTH_SHIFT & FIFOSIZE_DEPTH_MASK;
nptxfsiz |= params->host_rx_fifo_size <<
FIFOSIZE_STARTADDR_SHIFT & FIFOSIZE_STARTADDR_MASK;
dwc2_writel(hsotg, nptxfsiz, GNPTXFSIZ);
dev_dbg(hsotg->dev, "new gnptxfsiz=%08x\n",
dwc2_readl(hsotg, GNPTXFSIZ));
/* Periodic Tx FIFO */
dev_dbg(hsotg->dev, "initial hptxfsiz=%08x\n",
dwc2_readl(hsotg, HPTXFSIZ));
hptxfsiz = params->host_perio_tx_fifo_size <<
FIFOSIZE_DEPTH_SHIFT & FIFOSIZE_DEPTH_MASK;
hptxfsiz |= (params->host_rx_fifo_size +
params->host_nperio_tx_fifo_size) <<
FIFOSIZE_STARTADDR_SHIFT & FIFOSIZE_STARTADDR_MASK;
dwc2_writel(hsotg, hptxfsiz, HPTXFSIZ);
dev_dbg(hsotg->dev, "new hptxfsiz=%08x\n",
dwc2_readl(hsotg, HPTXFSIZ));
if (hsotg->params.en_multiple_tx_fifo &&
hsotg->hw_params.snpsid >= DWC2_CORE_REV_2_91a) {
/*
* This feature was implemented in 2.91a version
* Global DFIFOCFG calculation for Host mode -
* include RxFIFO, NPTXFIFO and HPTXFIFO
*/
dfifocfg = dwc2_readl(hsotg, GDFIFOCFG);
dfifocfg &= ~GDFIFOCFG_EPINFOBASE_MASK;
dfifocfg |= (params->host_rx_fifo_size +
params->host_nperio_tx_fifo_size +
params->host_perio_tx_fifo_size) <<
GDFIFOCFG_EPINFOBASE_SHIFT &
GDFIFOCFG_EPINFOBASE_MASK;
dwc2_writel(hsotg, dfifocfg, GDFIFOCFG);
}
}
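/*
 * Illustrative sketch, not part of the driver: dwc2_config_fifos() places
 * the three host FIFOs back to back in the shared FIFO RAM, so each start
 * address is the sum of the depths programmed before it. The struct and
 * helper names are hypothetical; sizes and offsets are in 32-bit words.
 */
struct dwc2_example_fifo_layout {
	u32 rx_start;		/* Rx FIFO: GRXFSIZ programs only a depth */
	u32 nptx_start;		/* GNPTXFSIZ start address field */
	u32 ptx_start;		/* HPTXFSIZ start address field */
	u32 epinfo_base;	/* GDFIFOCFG EPInfoBase field */
};

static inline void dwc2_example_fifo_place(u32 rx, u32 nptx, u32 ptx,
					   struct dwc2_example_fifo_layout *l)
{
	l->rx_start = 0;
	l->nptx_start = rx;
	l->ptx_start = rx + nptx;
	l->epinfo_base = rx + nptx + ptx;
}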
/**
* dwc2_calc_frame_interval() - Calculates the correct frame Interval value for
* the HFIR register according to PHY type and speed
*
* @hsotg: Programming view of DWC_otg controller
*
* NOTE: The caller can modify the value of the HFIR register only after the
* Port Enable bit of the Host Port Control and Status register (HPRT.EnaPort)
* has been set
*/
u32 dwc2_calc_frame_interval(struct dwc2_hsotg *hsotg)
{
u32 usbcfg;
u32 hprt0;
int clock = 60; /* default value */
usbcfg = dwc2_readl(hsotg, GUSBCFG);
hprt0 = dwc2_readl(hsotg, HPRT0);
if (!(usbcfg & GUSBCFG_PHYSEL) && (usbcfg & GUSBCFG_ULPI_UTMI_SEL) &&
!(usbcfg & GUSBCFG_PHYIF16))
clock = 60;
if ((usbcfg & GUSBCFG_PHYSEL) && hsotg->hw_params.fs_phy_type ==
GHWCFG2_FS_PHY_TYPE_SHARED_ULPI)
clock = 48;
if (!(usbcfg & GUSBCFG_PHY_LP_CLK_SEL) && !(usbcfg & GUSBCFG_PHYSEL) &&
!(usbcfg & GUSBCFG_ULPI_UTMI_SEL) && (usbcfg & GUSBCFG_PHYIF16))
clock = 30;
if (!(usbcfg & GUSBCFG_PHY_LP_CLK_SEL) && !(usbcfg & GUSBCFG_PHYSEL) &&
!(usbcfg & GUSBCFG_ULPI_UTMI_SEL) && !(usbcfg & GUSBCFG_PHYIF16))
clock = 60;
if ((usbcfg & GUSBCFG_PHY_LP_CLK_SEL) && !(usbcfg & GUSBCFG_PHYSEL) &&
!(usbcfg & GUSBCFG_ULPI_UTMI_SEL) && (usbcfg & GUSBCFG_PHYIF16))
clock = 48;
if ((usbcfg & GUSBCFG_PHYSEL) && !(usbcfg & GUSBCFG_PHYIF16) &&
hsotg->hw_params.fs_phy_type == GHWCFG2_FS_PHY_TYPE_SHARED_UTMI)
clock = 48;
if ((usbcfg & GUSBCFG_PHYSEL) &&
hsotg->hw_params.fs_phy_type == GHWCFG2_FS_PHY_TYPE_DEDICATED)
clock = 48;
if ((hprt0 & HPRT0_SPD_MASK) >> HPRT0_SPD_SHIFT == HPRT0_SPD_HIGH_SPEED)
/* High speed case */
return 125 * clock - 1;
/* FS/LS case */
return 1000 * clock - 1;
}
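/*
 * Illustrative sketch, not part of the driver: HFIR is simply the number of
 * PHY clocks per (micro)frame minus one. With a 60 MHz PHY clock that is
 * 125 * 60 - 1 = 7499 for high speed and 1000 * 60 - 1 = 59999 for FS/LS;
 * a 48 MHz clock gives 5999 and 47999. The helper name is hypothetical.
 */
static inline u32 dwc2_example_hfir(u32 clock_mhz, bool high_speed)
{
	return (high_speed ? 125 : 1000) * clock_mhz - 1;
}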
/**
* dwc2_read_packet() - Reads a packet from the Rx FIFO into the destination
* buffer
*
* @hsotg: Programming view of DWC_otg controller
* @dest: Destination buffer for the packet
* @bytes: Number of bytes to copy to the destination
*/
void dwc2_read_packet(struct dwc2_hsotg *hsotg, u8 *dest, u16 bytes)
{
u32 *data_buf = (u32 *)dest;
int word_count = (bytes + 3) / 4;
int i;
/*
* Todo: Account for the case where dest is not dword aligned. This
* requires reading data from the FIFO into a u32 temp buffer, then
* moving it into the data buffer.
*/
dev_vdbg(hsotg->dev, "%s(%p,%p,%d)\n", __func__, hsotg, dest, bytes);
for (i = 0; i < word_count; i++, data_buf++)
*data_buf = dwc2_readl(hsotg, HCFIFO(0));
}
/**
* dwc2_dump_channel_info() - Prints the state of a host channel
*
* @hsotg: Programming view of DWC_otg controller
* @chan: Pointer to the channel to dump
*
* Must be called with interrupt disabled and spinlock held
*
* NOTE: This function will be removed once the peripheral controller code
* is integrated and the driver is stable
*/
static void dwc2_dump_channel_info(struct dwc2_hsotg *hsotg,
struct dwc2_host_chan *chan)
{
#ifdef VERBOSE_DEBUG
int num_channels = hsotg->params.host_channels;
struct dwc2_qh *qh;
u32 hcchar;
u32 hcsplt;
u32 hctsiz;
u32 hc_dma;
int i;
if (!chan)
return;
hcchar = dwc2_readl(hsotg, HCCHAR(chan->hc_num));
hcsplt = dwc2_readl(hsotg, HCSPLT(chan->hc_num));
hctsiz = dwc2_readl(hsotg, HCTSIZ(chan->hc_num));
hc_dma = dwc2_readl(hsotg, HCDMA(chan->hc_num));
dev_dbg(hsotg->dev, " Assigned to channel %p:\n", chan);
dev_dbg(hsotg->dev, " hcchar 0x%08x, hcsplt 0x%08x\n",
hcchar, hcsplt);
dev_dbg(hsotg->dev, " hctsiz 0x%08x, hc_dma 0x%08x\n",
hctsiz, hc_dma);
dev_dbg(hsotg->dev, " dev_addr: %d, ep_num: %d, ep_is_in: %d\n",
chan->dev_addr, chan->ep_num, chan->ep_is_in);
dev_dbg(hsotg->dev, " ep_type: %d\n", chan->ep_type);
dev_dbg(hsotg->dev, " max_packet: %d\n", chan->max_packet);
dev_dbg(hsotg->dev, " data_pid_start: %d\n", chan->data_pid_start);
dev_dbg(hsotg->dev, " xfer_started: %d\n", chan->xfer_started);
dev_dbg(hsotg->dev, " halt_status: %d\n", chan->halt_status);
dev_dbg(hsotg->dev, " xfer_buf: %p\n", chan->xfer_buf);
dev_dbg(hsotg->dev, " xfer_dma: %08lx\n",
(unsigned long)chan->xfer_dma);
dev_dbg(hsotg->dev, " xfer_len: %d\n", chan->xfer_len);
dev_dbg(hsotg->dev, " qh: %p\n", chan->qh);
dev_dbg(hsotg->dev, " NP inactive sched:\n");
list_for_each_entry(qh, &hsotg->non_periodic_sched_inactive,
qh_list_entry)
dev_dbg(hsotg->dev, " %p\n", qh);
dev_dbg(hsotg->dev, " NP waiting sched:\n");
list_for_each_entry(qh, &hsotg->non_periodic_sched_waiting,
qh_list_entry)
dev_dbg(hsotg->dev, " %p\n", qh);
dev_dbg(hsotg->dev, " NP active sched:\n");
list_for_each_entry(qh, &hsotg->non_periodic_sched_active,
qh_list_entry)
dev_dbg(hsotg->dev, " %p\n", qh);
dev_dbg(hsotg->dev, " Channels:\n");
for (i = 0; i < num_channels; i++) {
struct dwc2_host_chan *chan = hsotg->hc_ptr_array[i];
dev_dbg(hsotg->dev, " %2d: %p\n", i, chan);
}
#endif /* VERBOSE_DEBUG */
}
static int _dwc2_hcd_start(struct usb_hcd *hcd);
static void dwc2_host_start(struct dwc2_hsotg *hsotg)
{
struct usb_hcd *hcd = dwc2_hsotg_to_hcd(hsotg);
hcd->self.is_b_host = dwc2_hcd_is_b_host(hsotg);
_dwc2_hcd_start(hcd);
}
static void dwc2_host_disconnect(struct dwc2_hsotg *hsotg)
{
struct usb_hcd *hcd = dwc2_hsotg_to_hcd(hsotg);
hcd->self.is_b_host = 0;
}
static void dwc2_host_hub_info(struct dwc2_hsotg *hsotg, void *context,
int *hub_addr, int *hub_port)
{
struct urb *urb = context;
if (urb->dev->tt)
*hub_addr = urb->dev->tt->hub->devnum;
else
*hub_addr = 0;
*hub_port = urb->dev->ttport;
}
/*
* =========================================================================
* Low Level Host Channel Access Functions
* =========================================================================
*/
static void dwc2_hc_enable_slave_ints(struct dwc2_hsotg *hsotg,
struct dwc2_host_chan *chan)
{
u32 hcintmsk = HCINTMSK_CHHLTD;
switch (chan->ep_type) {
case USB_ENDPOINT_XFER_CONTROL:
case USB_ENDPOINT_XFER_BULK:
dev_vdbg(hsotg->dev, "control/bulk\n");
hcintmsk |= HCINTMSK_XFERCOMPL;
hcintmsk |= HCINTMSK_STALL;
hcintmsk |= HCINTMSK_XACTERR;
hcintmsk |= HCINTMSK_DATATGLERR;
if (chan->ep_is_in) {
hcintmsk |= HCINTMSK_BBLERR;
} else {
hcintmsk |= HCINTMSK_NAK;
hcintmsk |= HCINTMSK_NYET;
if (chan->do_ping)
hcintmsk |= HCINTMSK_ACK;
}
if (chan->do_split) {
hcintmsk |= HCINTMSK_NAK;
if (chan->complete_split)
hcintmsk |= HCINTMSK_NYET;
else
hcintmsk |= HCINTMSK_ACK;
}
if (chan->error_state)
hcintmsk |= HCINTMSK_ACK;
break;
case USB_ENDPOINT_XFER_INT:
if (dbg_perio())
dev_vdbg(hsotg->dev, "intr\n");
hcintmsk |= HCINTMSK_XFERCOMPL;
hcintmsk |= HCINTMSK_NAK;
hcintmsk |= HCINTMSK_STALL;
hcintmsk |= HCINTMSK_XACTERR;
hcintmsk |= HCINTMSK_DATATGLERR;
hcintmsk |= HCINTMSK_FRMOVRUN;
if (chan->ep_is_in)
hcintmsk |= HCINTMSK_BBLERR;
if (chan->error_state)
hcintmsk |= HCINTMSK_ACK;
if (chan->do_split) {
if (chan->complete_split)
hcintmsk |= HCINTMSK_NYET;
else
hcintmsk |= HCINTMSK_ACK;
}
break;
case USB_ENDPOINT_XFER_ISOC:
if (dbg_perio())
dev_vdbg(hsotg->dev, "isoc\n");
hcintmsk |= HCINTMSK_XFERCOMPL;
hcintmsk |= HCINTMSK_FRMOVRUN;
hcintmsk |= HCINTMSK_ACK;
if (chan->ep_is_in) {
hcintmsk |= HCINTMSK_XACTERR;
hcintmsk |= HCINTMSK_BBLERR;
}
break;
default:
dev_err(hsotg->dev, "## Unknown EP type ##\n");
break;
}
dwc2_writel(hsotg, hcintmsk, HCINTMSK(chan->hc_num));
if (dbg_hc(chan))
dev_vdbg(hsotg->dev, "set HCINTMSK to %08x\n", hcintmsk);
}
static void dwc2_hc_enable_dma_ints(struct dwc2_hsotg *hsotg,
struct dwc2_host_chan *chan)
{
u32 hcintmsk = HCINTMSK_CHHLTD;
/*
* For Descriptor DMA mode core halts the channel on AHB error.
* Interrupt is not required.
*/
if (!hsotg->params.dma_desc_enable) {
if (dbg_hc(chan))
dev_vdbg(hsotg->dev, "desc DMA disabled\n");
hcintmsk |= HCINTMSK_AHBERR;
} else {
if (dbg_hc(chan))
dev_vdbg(hsotg->dev, "desc DMA enabled\n");
if (chan->ep_type == USB_ENDPOINT_XFER_ISOC)
hcintmsk |= HCINTMSK_XFERCOMPL;
}
if (chan->error_state && !chan->do_split &&
chan->ep_type != USB_ENDPOINT_XFER_ISOC) {
if (dbg_hc(chan))
dev_vdbg(hsotg->dev, "setting ACK\n");
hcintmsk |= HCINTMSK_ACK;
if (chan->ep_is_in) {
hcintmsk |= HCINTMSK_DATATGLERR;
if (chan->ep_type != USB_ENDPOINT_XFER_INT)
hcintmsk |= HCINTMSK_NAK;
}
}
dwc2_writel(hsotg, hcintmsk, HCINTMSK(chan->hc_num));
if (dbg_hc(chan))
dev_vdbg(hsotg->dev, "set HCINTMSK to %08x\n", hcintmsk);
}
static void dwc2_hc_enable_ints(struct dwc2_hsotg *hsotg,
struct dwc2_host_chan *chan)
{
u32 intmsk;
if (hsotg->params.host_dma) {
if (dbg_hc(chan))
dev_vdbg(hsotg->dev, "DMA enabled\n");
dwc2_hc_enable_dma_ints(hsotg, chan);
} else {
if (dbg_hc(chan))
dev_vdbg(hsotg->dev, "DMA disabled\n");
dwc2_hc_enable_slave_ints(hsotg, chan);
}
/* Enable the top level host channel interrupt */
intmsk = dwc2_readl(hsotg, HAINTMSK);
intmsk |= 1 << chan->hc_num;
dwc2_writel(hsotg, intmsk, HAINTMSK);
if (dbg_hc(chan))
dev_vdbg(hsotg->dev, "set HAINTMSK to %08x\n", intmsk);
/* Make sure host channel interrupts are enabled */
intmsk = dwc2_readl(hsotg, GINTMSK);
intmsk |= GINTSTS_HCHINT;
dwc2_writel(hsotg, intmsk, GINTMSK);
if (dbg_hc(chan))
dev_vdbg(hsotg->dev, "set GINTMSK to %08x\n", intmsk);
}
/**
* dwc2_hc_init() - Prepares a host channel for transferring packets to/from
* a specific endpoint
*
* @hsotg: Programming view of DWC_otg controller
* @chan: Information needed to initialize the host channel
*
* The HCCHARn register is set up with the characteristics specified in chan.
* Host channel interrupts that may need to be serviced while this transfer is
* in progress are enabled.
*/
static void dwc2_hc_init(struct dwc2_hsotg *hsotg, struct dwc2_host_chan *chan)
{
u8 hc_num = chan->hc_num;
u32 hcintmsk;
u32 hcchar;
u32 hcsplt = 0;
if (dbg_hc(chan))
dev_vdbg(hsotg->dev, "%s()\n", __func__);
/* Clear old interrupt conditions for this host channel */
hcintmsk = 0xffffffff;
hcintmsk &= ~HCINTMSK_RESERVED14_31;
dwc2_writel(hsotg, hcintmsk, HCINT(hc_num));
/* Enable channel interrupts required for this transfer */
dwc2_hc_enable_ints(hsotg, chan);
/*
* Program the HCCHARn register with the endpoint characteristics for
* the current transfer
*/
hcchar = chan->dev_addr << HCCHAR_DEVADDR_SHIFT & HCCHAR_DEVADDR_MASK;
hcchar |= chan->ep_num << HCCHAR_EPNUM_SHIFT & HCCHAR_EPNUM_MASK;
if (chan->ep_is_in)
hcchar |= HCCHAR_EPDIR;
if (chan->speed == USB_SPEED_LOW)
hcchar |= HCCHAR_LSPDDEV;
hcchar |= chan->ep_type << HCCHAR_EPTYPE_SHIFT & HCCHAR_EPTYPE_MASK;
hcchar |= chan->max_packet << HCCHAR_MPS_SHIFT & HCCHAR_MPS_MASK;
dwc2_writel(hsotg, hcchar, HCCHAR(hc_num));
if (dbg_hc(chan)) {
dev_vdbg(hsotg->dev, "set HCCHAR(%d) to %08x\n",
hc_num, hcchar);
dev_vdbg(hsotg->dev, "%s: Channel %d\n",
__func__, hc_num);
dev_vdbg(hsotg->dev, " Dev Addr: %d\n",
chan->dev_addr);
dev_vdbg(hsotg->dev, " Ep Num: %d\n",
chan->ep_num);
dev_vdbg(hsotg->dev, " Is In: %d\n",
chan->ep_is_in);
dev_vdbg(hsotg->dev, " Is Low Speed: %d\n",
chan->speed == USB_SPEED_LOW);
dev_vdbg(hsotg->dev, " Ep Type: %d\n",
chan->ep_type);
dev_vdbg(hsotg->dev, " Max Pkt: %d\n",
chan->max_packet);
}
/* Program the HCSPLT register for SPLITs */
if (chan->do_split) {
if (dbg_hc(chan))
dev_vdbg(hsotg->dev,
"Programming HC %d with split --> %s\n",
hc_num,
chan->complete_split ? "CSPLIT" : "SSPLIT");
if (chan->complete_split)
hcsplt |= HCSPLT_COMPSPLT;
hcsplt |= chan->xact_pos << HCSPLT_XACTPOS_SHIFT &
HCSPLT_XACTPOS_MASK;
hcsplt |= chan->hub_addr << HCSPLT_HUBADDR_SHIFT &
HCSPLT_HUBADDR_MASK;
hcsplt |= chan->hub_port << HCSPLT_PRTADDR_SHIFT &
HCSPLT_PRTADDR_MASK;
if (dbg_hc(chan)) {
dev_vdbg(hsotg->dev, " comp split %d\n",
chan->complete_split);
dev_vdbg(hsotg->dev, " xact pos %d\n",
chan->xact_pos);
dev_vdbg(hsotg->dev, " hub addr %d\n",
chan->hub_addr);
dev_vdbg(hsotg->dev, " hub port %d\n",
chan->hub_port);
dev_vdbg(hsotg->dev, " is_in %d\n",
chan->ep_is_in);
dev_vdbg(hsotg->dev, " Max Pkt %d\n",
chan->max_packet);
dev_vdbg(hsotg->dev, " xferlen %d\n",
chan->xfer_len);
}
}
dwc2_writel(hsotg, hcsplt, HCSPLT(hc_num));
}
/**
* dwc2_hc_halt() - Attempts to halt a host channel
*
* @hsotg: Controller register interface
* @chan: Host channel to halt
* @halt_status: Reason for halting the channel
*
* This function should only be called in Slave mode or to abort a transfer in
* either Slave mode or DMA mode. Under normal circumstances in DMA mode, the
* controller halts the channel when the transfer is complete or a condition
* occurs that requires application intervention.
*
* In slave mode, checks for a free request queue entry, then sets the Channel
* Enable and Channel Disable bits of the Host Channel Characteristics
* register of the specified channel to initiate the halt. If there is no free
* request queue entry, sets only the Channel Disable bit of the HCCHARn
* register to flush requests for this channel. In the latter case, sets a
* flag to indicate that the host channel needs to be halted when a request
* queue slot is open.
*
* In DMA mode, always sets the Channel Enable and Channel Disable bits of the
* HCCHARn register. The controller ensures there is space in the request
* queue before submitting the halt request.
*
* Some time may elapse before the core flushes any posted requests for this
* host channel and halts. The Channel Halted interrupt handler completes the
* deactivation of the host channel.
*/
void dwc2_hc_halt(struct dwc2_hsotg *hsotg, struct dwc2_host_chan *chan,
enum dwc2_halt_status halt_status)
{
u32 nptxsts, hptxsts, hcchar;
if (dbg_hc(chan))
dev_vdbg(hsotg->dev, "%s()\n", __func__);
/*
* In buffer DMA or external DMA mode the channel can't be halted
* for non-split periodic channels. At the end of the next
* uframe/frame (in the worst case), the core generates a channel
* halted and disables the channel automatically.
*/
if ((hsotg->params.g_dma && !hsotg->params.g_dma_desc) ||
hsotg->hw_params.arch == GHWCFG2_EXT_DMA_ARCH) {
if (!chan->do_split &&
(chan->ep_type == USB_ENDPOINT_XFER_ISOC ||
chan->ep_type == USB_ENDPOINT_XFER_INT)) {
dev_err(hsotg->dev, "%s() Channel can't be halted\n",
__func__);
return;
}
}
if (halt_status == DWC2_HC_XFER_NO_HALT_STATUS)
dev_err(hsotg->dev, "!!! halt_status = %d !!!\n", halt_status);
if (halt_status == DWC2_HC_XFER_URB_DEQUEUE ||
halt_status == DWC2_HC_XFER_AHB_ERR) {
/*
* Disable all channel interrupts except Ch Halted. The QTD
* and QH state associated with this transfer has been cleared
* (in the case of URB_DEQUEUE), so the channel needs to be
* shut down carefully to prevent crashes.
*/
u32 hcintmsk = HCINTMSK_CHHLTD;
dev_vdbg(hsotg->dev, "dequeue/error\n");
dwc2_writel(hsotg, hcintmsk, HCINTMSK(chan->hc_num));
/*
* Make sure no other interrupts besides halt are currently
* pending. Handling another interrupt could cause a crash due
* to the QTD and QH state.
*/
dwc2_writel(hsotg, ~hcintmsk, HCINT(chan->hc_num));
/*
* Make sure the halt status is set to URB_DEQUEUE or AHB_ERR
* even if the channel was already halted for some other
* reason
*/
chan->halt_status = halt_status;
hcchar = dwc2_readl(hsotg, HCCHAR(chan->hc_num));
if (!(hcchar & HCCHAR_CHENA)) {
/*
* The channel is either already halted or it hasn't
* started yet. In DMA mode, the transfer may halt if
* it finishes normally or a condition occurs that
* requires driver intervention. Don't want to halt
* the channel again. In either Slave or DMA mode,
* it's possible that the transfer has been assigned
* to a channel, but not started yet when an URB is
* dequeued. Don't want to halt a channel that hasn't
* started yet.
*/
return;
}
}
if (chan->halt_pending) {
/*
* A halt has already been issued for this channel. This might
* happen when a transfer is aborted by a higher level in
* the stack.
*/
dev_vdbg(hsotg->dev,
"*** %s: Channel %d, chan->halt_pending already set ***\n",
__func__, chan->hc_num);
return;
}
hcchar = dwc2_readl(hsotg, HCCHAR(chan->hc_num));
/* No need to set the bit in DDMA for disabling the channel */
/* TODO check it everywhere channel is disabled */
if (!hsotg->params.dma_desc_enable) {
if (dbg_hc(chan))
dev_vdbg(hsotg->dev, "desc DMA disabled\n");
hcchar |= HCCHAR_CHENA;
} else {
if (dbg_hc(chan))
dev_dbg(hsotg->dev, "desc DMA enabled\n");
}
hcchar |= HCCHAR_CHDIS;
if (!hsotg->params.host_dma) {
if (dbg_hc(chan))
dev_vdbg(hsotg->dev, "DMA not enabled\n");
hcchar |= HCCHAR_CHENA;
/* Check for space in the request queue to issue the halt */
if (chan->ep_type == USB_ENDPOINT_XFER_CONTROL ||
chan->ep_type == USB_ENDPOINT_XFER_BULK) {
dev_vdbg(hsotg->dev, "control/bulk\n");
nptxsts = dwc2_readl(hsotg, GNPTXSTS);
if ((nptxsts & TXSTS_QSPCAVAIL_MASK) == 0) {
dev_vdbg(hsotg->dev, "Disabling channel\n");
hcchar &= ~HCCHAR_CHENA;
}
} else {
if (dbg_perio())
dev_vdbg(hsotg->dev, "isoc/intr\n");
hptxsts = dwc2_readl(hsotg, HPTXSTS);
if ((hptxsts & TXSTS_QSPCAVAIL_MASK) == 0 ||
hsotg->queuing_high_bandwidth) {
if (dbg_perio())
dev_vdbg(hsotg->dev, "Disabling channel\n");
hcchar &= ~HCCHAR_CHENA;
}
}
} else {
if (dbg_hc(chan))
dev_vdbg(hsotg->dev, "DMA enabled\n");
}
dwc2_writel(hsotg, hcchar, HCCHAR(chan->hc_num));
chan->halt_status = halt_status;
if (hcchar & HCCHAR_CHENA) {
if (dbg_hc(chan))
dev_vdbg(hsotg->dev, "Channel enabled\n");
chan->halt_pending = 1;
chan->halt_on_queue = 0;
} else {
if (dbg_hc(chan))
dev_vdbg(hsotg->dev, "Channel disabled\n");
chan->halt_on_queue = 1;
}
if (dbg_hc(chan)) {
dev_vdbg(hsotg->dev, "%s: Channel %d\n", __func__,
chan->hc_num);
dev_vdbg(hsotg->dev, " hcchar: 0x%08x\n",
hcchar);
dev_vdbg(hsotg->dev, " halt_pending: %d\n",
chan->halt_pending);
dev_vdbg(hsotg->dev, " halt_on_queue: %d\n",
chan->halt_on_queue);
dev_vdbg(hsotg->dev, " halt_status: %d\n",
chan->halt_status);
}
}
/**
* dwc2_hc_cleanup() - Clears the transfer state for a host channel
*
* @hsotg: Programming view of DWC_otg controller
* @chan: Identifies the host channel to clean up
*
* This function is normally called after a transfer is done and the host
* channel is being released
*/
void dwc2_hc_cleanup(struct dwc2_hsotg *hsotg, struct dwc2_host_chan *chan)
{
u32 hcintmsk;
chan->xfer_started = 0;
list_del_init(&chan->split_order_list_entry);
/*
* Clear channel interrupt enables and any unhandled channel interrupt
* conditions
*/
dwc2_writel(hsotg, 0, HCINTMSK(chan->hc_num));
hcintmsk = 0xffffffff;
hcintmsk &= ~HCINTMSK_RESERVED14_31;
dwc2_writel(hsotg, hcintmsk, HCINT(chan->hc_num));
}
/**
* dwc2_hc_set_even_odd_frame() - Sets the channel property that indicates in
* which frame a periodic transfer should occur
*
* @hsotg: Programming view of DWC_otg controller
* @chan: Identifies the host channel to set up and its properties
* @hcchar: Current value of the HCCHAR register for the specified host channel
*
* This function has no effect on non-periodic transfers
*/
static void dwc2_hc_set_even_odd_frame(struct dwc2_hsotg *hsotg,
struct dwc2_host_chan *chan, u32 *hcchar)
{
if (chan->ep_type == USB_ENDPOINT_XFER_INT ||
chan->ep_type == USB_ENDPOINT_XFER_ISOC) {
int host_speed;
int xfer_ns;
int xfer_us;
int bytes_in_fifo;
u16 fifo_space;
u16 frame_number;
u16 wire_frame;
/*
* Try to figure out if we're an even or odd frame. If we set
* even and the current frame number is even the transfer
* will happen immediately. Similar if both are odd. If one is
* even and the other is odd then the transfer will happen when
* the frame number ticks.
*
* There's a bit of a balancing act to get this right.
* Sometimes we may want to send data in the current frame (AKA
* right away). We might want to do this if the frame number
* _just_ ticked, but we might also want to do this in order
* to continue a split transaction that happened late in a
* microframe (so we didn't know to queue the next transfer
* until the frame number had ticked). The problem is that we
* need a lot of knowledge to know if there's actually still
* time to send things or if it would be better to wait until
* the next frame.
*
* We can look at how much time is left in the current frame
* and make a guess about whether we'll have time to transfer.
* We'll do that.
*/
/* Get speed host is running at */
host_speed = (chan->speed != USB_SPEED_HIGH &&
!chan->do_split) ? chan->speed : USB_SPEED_HIGH;
/* See how many bytes are in the periodic FIFO right now */
fifo_space = (dwc2_readl(hsotg, HPTXSTS) &
TXSTS_FSPCAVAIL_MASK) >> TXSTS_FSPCAVAIL_SHIFT;
bytes_in_fifo = sizeof(u32) *
(hsotg->params.host_perio_tx_fifo_size -
fifo_space);
/*
* Roughly estimate bus time for everything in the periodic
* queue + our new transfer. This is "rough" because we're
* using a function that takes into account IN/OUT
* and INT/ISO and we're just slamming in one value for all
* transfers. This should be an over-estimate and that should
* be OK, but we can probably tighten it.
*/
xfer_ns = usb_calc_bus_time(host_speed, false, false,
chan->xfer_len + bytes_in_fifo);
xfer_us = NS_TO_US(xfer_ns);
/* See what frame number we'll be at by the time we finish */
frame_number = dwc2_hcd_get_future_frame_number(hsotg, xfer_us);
/* This is when we were scheduled to be on the wire */
wire_frame = dwc2_frame_num_inc(chan->qh->next_active_frame, 1);
/*
* If we'd finish _after_ the frame we're scheduled in then
* it's hopeless. Just schedule right away and hope for the
* best. Note that it _might_ be wise to call back into the
* scheduler to pick a better frame, but this is better than
* nothing.
*/
if (dwc2_frame_num_gt(frame_number, wire_frame)) {
dwc2_sch_vdbg(hsotg,
"QH=%p EO MISS fr=%04x=>%04x (%+d)\n",
chan->qh, wire_frame, frame_number,
dwc2_frame_num_dec(frame_number,
wire_frame));
wire_frame = frame_number;
/*
* We picked a different frame number; communicate this
* back to the scheduler so it doesn't try to schedule
* another in the same frame.
*
* Remember that next_active_frame is 1 before the wire
* frame.
*/
chan->qh->next_active_frame =
dwc2_frame_num_dec(frame_number, 1);
}
if (wire_frame & 1)
*hcchar |= HCCHAR_ODDFRM;
else
*hcchar &= ~HCCHAR_ODDFRM;
}
}
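/*
 * Illustrative sketch, not part of the driver: the even/odd decision above
 * reduced to plain arithmetic. The real code uses dwc2_frame_num_gt()/_dec()
 * to handle frame number wrap-around; this sketch assumes no wrap. The
 * helper name is hypothetical.
 */
static inline bool dwc2_example_wire_frame_is_odd(u16 predicted_finish,
						  u16 scheduled_wire)
{
	/* If we would finish after our slot anyway, go out right away */
	u16 wire = (predicted_finish > scheduled_wire) ?
			predicted_finish : scheduled_wire;

	/* Bit 0 of the chosen frame selects HCCHAR_ODDFRM */
	return wire & 1;
}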
static void dwc2_set_pid_isoc(struct dwc2_host_chan *chan)
{
/* Set up the initial PID for the transfer */
if (chan->speed == USB_SPEED_HIGH) {
if (chan->ep_is_in) {
if (chan->multi_count == 1)
chan->data_pid_start = DWC2_HC_PID_DATA0;
else if (chan->multi_count == 2)
chan->data_pid_start = DWC2_HC_PID_DATA1;
else
chan->data_pid_start = DWC2_HC_PID_DATA2;
} else {
if (chan->multi_count == 1)
chan->data_pid_start = DWC2_HC_PID_DATA0;
else
chan->data_pid_start = DWC2_HC_PID_MDATA;
}
} else {
chan->data_pid_start = DWC2_HC_PID_DATA0;
}
}
/**
* dwc2_hc_write_packet() - Writes a packet into the Tx FIFO associated with
* the Host Channel
*
* @hsotg: Programming view of DWC_otg controller
* @chan: Information needed to initialize the host channel
*
* This function should only be called in Slave mode. For a channel associated
* with a non-periodic EP, the non-periodic Tx FIFO is written. For a channel
* associated with a periodic EP, the periodic Tx FIFO is written.
*
* Upon return the xfer_buf and xfer_count fields in chan are incremented by
* the number of bytes written to the Tx FIFO.
*/
static void dwc2_hc_write_packet(struct dwc2_hsotg *hsotg,
struct dwc2_host_chan *chan)
{
u32 i;
u32 remaining_count;
u32 byte_count;
u32 dword_count;
u32 *data_buf = (u32 *)chan->xfer_buf;
if (dbg_hc(chan))
dev_vdbg(hsotg->dev, "%s()\n", __func__);
remaining_count = chan->xfer_len - chan->xfer_count;
if (remaining_count > chan->max_packet)
byte_count = chan->max_packet;
else
byte_count = remaining_count;
dword_count = (byte_count + 3) / 4;
if (((unsigned long)data_buf & 0x3) == 0) {
/* xfer_buf is DWORD aligned */
for (i = 0; i < dword_count; i++, data_buf++)
dwc2_writel(hsotg, *data_buf, HCFIFO(chan->hc_num));
} else {
/* xfer_buf is not DWORD aligned */
for (i = 0; i < dword_count; i++, data_buf++) {
u32 data = data_buf[0] | data_buf[1] << 8 |
data_buf[2] << 16 | data_buf[3] << 24;
dwc2_writel(hsotg, data, HCFIFO(chan->hc_num));
}
}
chan->xfer_count += byte_count;
chan->xfer_buf += byte_count;
}
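/*
 * Illustrative sketch, not part of the driver: assembling one FIFO word from
 * four consecutive bytes, little-endian, as the unaligned path above intends.
 * Note that the upstream loop indexes a u32 pointer; a byte-accurate variant
 * would walk a u8 pointer as sketched here. The helper name is hypothetical.
 */
static inline u32 dwc2_example_pack_fifo_word(const u8 *p)
{
	return p[0] | p[1] << 8 | p[2] << 16 | (u32)p[3] << 24;
}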
/**
* dwc2_hc_do_ping() - Starts a PING transfer
*
* @hsotg: Programming view of DWC_otg controller
* @chan: Information needed to initialize the host channel
*
* This function should only be called in Slave mode. The Do Ping bit is set in
* the HCTSIZ register, then the channel is enabled.
*/
static void dwc2_hc_do_ping(struct dwc2_hsotg *hsotg,
struct dwc2_host_chan *chan)
{
u32 hcchar;
u32 hctsiz;
if (dbg_hc(chan))
dev_vdbg(hsotg->dev, "%s: Channel %d\n", __func__,
chan->hc_num);
hctsiz = TSIZ_DOPNG;
hctsiz |= 1 << TSIZ_PKTCNT_SHIFT;
dwc2_writel(hsotg, hctsiz, HCTSIZ(chan->hc_num));
hcchar = dwc2_readl(hsotg, HCCHAR(chan->hc_num));
hcchar |= HCCHAR_CHENA;
hcchar &= ~HCCHAR_CHDIS;
dwc2_writel(hsotg, hcchar, HCCHAR(chan->hc_num));
}
/**
* dwc2_hc_start_transfer() - Does the setup for a data transfer for a host
* channel and starts the transfer
*
* @hsotg: Programming view of DWC_otg controller
* @chan: Information needed to initialize the host channel. The xfer_len value
* may be reduced to accommodate the max widths of the XferSize and
* PktCnt fields in the HCTSIZn register. The multi_count value may be
* changed to reflect the final xfer_len value.
*
* This function may be called in either Slave mode or DMA mode. In Slave mode,
* the caller must ensure that there is sufficient space in the request queue
* and Tx Data FIFO.
*
* For an OUT transfer in Slave mode, it loads a data packet into the
* appropriate FIFO. If necessary, additional data packets are loaded in the
* Host ISR.
*
* For an IN transfer in Slave mode, a data packet is requested. The data
* packets are unloaded from the Rx FIFO in the Host ISR. If necessary,
* additional data packets are requested in the Host ISR.
*
* For a PING transfer in Slave mode, the Do Ping bit is set in the HCTSIZ
* register along with a packet count of 1 and the channel is enabled. This
* causes a single PING transaction to occur. Other fields in HCTSIZ are
* simply set to 0 since no data transfer occurs in this case.
*
* For a PING transfer in DMA mode, the HCTSIZ register is initialized with
* all the information required to perform the subsequent data transfer. In
* addition, the Do Ping bit is set in the HCTSIZ register. In this case, the
* controller performs the entire PING protocol, then starts the data
* transfer.
*/
static void dwc2_hc_start_transfer(struct dwc2_hsotg *hsotg,
struct dwc2_host_chan *chan)
{
u32 max_hc_xfer_size = hsotg->params.max_transfer_size;
u16 max_hc_pkt_count = hsotg->params.max_packet_count;
u32 hcchar;
u32 hctsiz = 0;
u16 num_packets;
u32 ec_mc;
if (dbg_hc(chan))
dev_vdbg(hsotg->dev, "%s()\n", __func__);
if (chan->do_ping) {
if (!hsotg->params.host_dma) {
if (dbg_hc(chan))
dev_vdbg(hsotg->dev, "ping, no DMA\n");
dwc2_hc_do_ping(hsotg, chan);
chan->xfer_started = 1;
return;
}
if (dbg_hc(chan))
dev_vdbg(hsotg->dev, "ping, DMA\n");
hctsiz |= TSIZ_DOPNG;
}
if (chan->do_split) {
if (dbg_hc(chan))
dev_vdbg(hsotg->dev, "split\n");
num_packets = 1;
if (chan->complete_split && !chan->ep_is_in)
/*
* For CSPLIT OUT Transfer, set the size to 0 so the
* core doesn't expect any data written to the FIFO
*/
chan->xfer_len = 0;
else if (chan->ep_is_in || chan->xfer_len > chan->max_packet)
chan->xfer_len = chan->max_packet;
else if (!chan->ep_is_in && chan->xfer_len > 188)
chan->xfer_len = 188;
hctsiz |= chan->xfer_len << TSIZ_XFERSIZE_SHIFT &
TSIZ_XFERSIZE_MASK;
/* For split set ec_mc for immediate retries */
if (chan->ep_type == USB_ENDPOINT_XFER_INT ||
chan->ep_type == USB_ENDPOINT_XFER_ISOC)
ec_mc = 3;
else
ec_mc = 1;
} else {
if (dbg_hc(chan))
dev_vdbg(hsotg->dev, "no split\n");
/*
* Ensure that the transfer length and packet count will fit
* in the widths allocated for them in the HCTSIZn register
*/
if (chan->ep_type == USB_ENDPOINT_XFER_INT ||
chan->ep_type == USB_ENDPOINT_XFER_ISOC) {
/*
* Make sure the transfer size is no larger than one
* (micro)frame's worth of data. (A check was done
* when the periodic transfer was accepted to ensure
* that a (micro)frame's worth of data can be
* programmed into a channel.)
*/
u32 max_periodic_len =
chan->multi_count * chan->max_packet;
if (chan->xfer_len > max_periodic_len)
chan->xfer_len = max_periodic_len;
} else if (chan->xfer_len > max_hc_xfer_size) {
/*
* Make sure that xfer_len is a multiple of max packet
* size
*/
chan->xfer_len =
max_hc_xfer_size - chan->max_packet + 1;
}
if (chan->xfer_len > 0) {
num_packets = (chan->xfer_len + chan->max_packet - 1) /
chan->max_packet;
if (num_packets > max_hc_pkt_count) {
num_packets = max_hc_pkt_count;
chan->xfer_len = num_packets * chan->max_packet;
} else if (chan->ep_is_in) {
/*
* Always program an integral # of max packets
* for IN transfers.
* Note: This assumes that the input buffer is
* aligned and sized accordingly.
*/
chan->xfer_len = num_packets * chan->max_packet;
}
} else {
/* Need 1 packet for transfer length of 0 */
num_packets = 1;
}
if (chan->ep_type == USB_ENDPOINT_XFER_INT ||
chan->ep_type == USB_ENDPOINT_XFER_ISOC)
/*
* Make sure that the multi_count field matches the
* actual transfer length
*/
chan->multi_count = num_packets;
if (chan->ep_type == USB_ENDPOINT_XFER_ISOC)
dwc2_set_pid_isoc(chan);
hctsiz |= chan->xfer_len << TSIZ_XFERSIZE_SHIFT &
TSIZ_XFERSIZE_MASK;
/* The ec_mc gets the multi_count for non-split */
ec_mc = chan->multi_count;
}
chan->start_pkt_count = num_packets;
hctsiz |= num_packets << TSIZ_PKTCNT_SHIFT & TSIZ_PKTCNT_MASK;
hctsiz |= chan->data_pid_start << TSIZ_SC_MC_PID_SHIFT &
TSIZ_SC_MC_PID_MASK;
dwc2_writel(hsotg, hctsiz, HCTSIZ(chan->hc_num));
if (dbg_hc(chan)) {
dev_vdbg(hsotg->dev, "Wrote %08x to HCTSIZ(%d)\n",
hctsiz, chan->hc_num);
dev_vdbg(hsotg->dev, "%s: Channel %d\n", __func__,
chan->hc_num);
dev_vdbg(hsotg->dev, " Xfer Size: %d\n",
(hctsiz & TSIZ_XFERSIZE_MASK) >>
TSIZ_XFERSIZE_SHIFT);
dev_vdbg(hsotg->dev, " Num Pkts: %d\n",
(hctsiz & TSIZ_PKTCNT_MASK) >>
TSIZ_PKTCNT_SHIFT);
dev_vdbg(hsotg->dev, " Start PID: %d\n",
(hctsiz & TSIZ_SC_MC_PID_MASK) >>
TSIZ_SC_MC_PID_SHIFT);
}
if (hsotg->params.host_dma) {
dma_addr_t dma_addr;
if (chan->align_buf) {
if (dbg_hc(chan))
dev_vdbg(hsotg->dev, "align_buf\n");
dma_addr = chan->align_buf;
} else {
dma_addr = chan->xfer_dma;
}
dwc2_writel(hsotg, (u32)dma_addr, HCDMA(chan->hc_num));
if (dbg_hc(chan))
dev_vdbg(hsotg->dev, "Wrote %08lx to HCDMA(%d)\n",
(unsigned long)dma_addr, chan->hc_num);
}
/* Start the split */
if (chan->do_split) {
u32 hcsplt = dwc2_readl(hsotg, HCSPLT(chan->hc_num));
hcsplt |= HCSPLT_SPLTENA;
dwc2_writel(hsotg, hcsplt, HCSPLT(chan->hc_num));
}
hcchar = dwc2_readl(hsotg, HCCHAR(chan->hc_num));
hcchar &= ~HCCHAR_MULTICNT_MASK;
hcchar |= (ec_mc << HCCHAR_MULTICNT_SHIFT) & HCCHAR_MULTICNT_MASK;
dwc2_hc_set_even_odd_frame(hsotg, chan, &hcchar);
if (hcchar & HCCHAR_CHDIS)
dev_warn(hsotg->dev,
"%s: chdis set, channel %d, hcchar 0x%08x\n",
__func__, chan->hc_num, hcchar);
/* Set host channel enable after all other setup is complete */
hcchar |= HCCHAR_CHENA;
hcchar &= ~HCCHAR_CHDIS;
if (dbg_hc(chan))
dev_vdbg(hsotg->dev, " Multi Cnt: %d\n",
(hcchar & HCCHAR_MULTICNT_MASK) >>
HCCHAR_MULTICNT_SHIFT);
dwc2_writel(hsotg, hcchar, HCCHAR(chan->hc_num));
if (dbg_hc(chan))
dev_vdbg(hsotg->dev, "Wrote %08x to HCCHAR(%d)\n", hcchar,
chan->hc_num);
chan->xfer_started = 1;
chan->requests++;
if (!hsotg->params.host_dma &&
!chan->ep_is_in && chan->xfer_len > 0)
/* Load OUT packet into the appropriate Tx FIFO */
dwc2_hc_write_packet(hsotg, chan);
}
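/*
 * Illustrative sketch, not part of the driver: how dwc2_hc_start_transfer()
 * derives the HCTSIZ packet count and rounds the length up for IN transfers.
 * The helper name is hypothetical and the max_hc_pkt_count clamp is omitted.
 */
static inline u16 dwc2_example_pkt_count(u32 *xfer_len, u16 max_packet,
					 bool ep_is_in)
{
	u16 num_packets;

	if (!*xfer_len)
		return 1;	/* a zero-length transfer still needs one packet */

	num_packets = DIV_ROUND_UP(*xfer_len, max_packet);
	if (ep_is_in)
		/* IN transfers always program an integral number of MPS */
		*xfer_len = num_packets * max_packet;

	return num_packets;
}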
/**
* dwc2_hc_start_transfer_ddma() - Does the setup for a data transfer for a
* host channel and starts the transfer in Descriptor DMA mode
*
* @hsotg: Programming view of DWC_otg controller
* @chan: Information needed to initialize the host channel
*
* Initializes HCTSIZ register. For a PING transfer the Do Ping bit is set.
* Sets PID and NTD values. For periodic transfers initializes SCHED_INFO field
* with micro-frame bitmap.
*
* Initializes HCDMA register with descriptor list address and CTD value then
* starts the transfer via enabling the channel.
*/
void dwc2_hc_start_transfer_ddma(struct dwc2_hsotg *hsotg,
struct dwc2_host_chan *chan)
{
u32 hcchar;
u32 hctsiz = 0;
if (chan->do_ping)
hctsiz |= TSIZ_DOPNG;
if (chan->ep_type == USB_ENDPOINT_XFER_ISOC)
dwc2_set_pid_isoc(chan);
/* Packet Count and Xfer Size are not used in Descriptor DMA mode */
hctsiz |= chan->data_pid_start << TSIZ_SC_MC_PID_SHIFT &
TSIZ_SC_MC_PID_MASK;
/* 0 - 1 descriptor, 1 - 2 descriptors, etc */
hctsiz |= (chan->ntd - 1) << TSIZ_NTD_SHIFT & TSIZ_NTD_MASK;
/* Non-zero only for high-speed interrupt endpoints */
hctsiz |= chan->schinfo << TSIZ_SCHINFO_SHIFT & TSIZ_SCHINFO_MASK;
if (dbg_hc(chan)) {
dev_vdbg(hsotg->dev, "%s: Channel %d\n", __func__,
chan->hc_num);
dev_vdbg(hsotg->dev, " Start PID: %d\n",
chan->data_pid_start);
dev_vdbg(hsotg->dev, " NTD: %d\n", chan->ntd - 1);
}
dwc2_writel(hsotg, hctsiz, HCTSIZ(chan->hc_num));
dma_sync_single_for_device(hsotg->dev, chan->desc_list_addr,
chan->desc_list_sz, DMA_TO_DEVICE);
dwc2_writel(hsotg, chan->desc_list_addr, HCDMA(chan->hc_num));
if (dbg_hc(chan))
dev_vdbg(hsotg->dev, "Wrote %pad to HCDMA(%d)\n",
&chan->desc_list_addr, chan->hc_num);
hcchar = dwc2_readl(hsotg, HCCHAR(chan->hc_num));
hcchar &= ~HCCHAR_MULTICNT_MASK;
hcchar |= chan->multi_count << HCCHAR_MULTICNT_SHIFT &
HCCHAR_MULTICNT_MASK;
if (hcchar & HCCHAR_CHDIS)
dev_warn(hsotg->dev,
"%s: chdis set, channel %d, hcchar 0x%08x\n",
__func__, chan->hc_num, hcchar);
/* Set host channel enable after all other setup is complete */
hcchar |= HCCHAR_CHENA;
hcchar &= ~HCCHAR_CHDIS;
if (dbg_hc(chan))
dev_vdbg(hsotg->dev, " Multi Cnt: %d\n",
(hcchar & HCCHAR_MULTICNT_MASK) >>
HCCHAR_MULTICNT_SHIFT);
dwc2_writel(hsotg, hcchar, HCCHAR(chan->hc_num));
if (dbg_hc(chan))
dev_vdbg(hsotg->dev, "Wrote %08x to HCCHAR(%d)\n", hcchar,
chan->hc_num);
chan->xfer_started = 1;
chan->requests++;
}
/**
* dwc2_hc_continue_transfer() - Continues a data transfer that was started by
* a previous call to dwc2_hc_start_transfer()
*
* @hsotg: Programming view of DWC_otg controller
* @chan: Information needed to initialize the host channel
*
* The caller must ensure there is sufficient space in the request queue and Tx
* Data FIFO. This function should only be called in Slave mode. In DMA mode,
* the controller acts autonomously to complete transfers programmed to a host
* channel.
*
* For an OUT transfer, a new data packet is loaded into the appropriate FIFO
* if there is any data remaining to be queued. For an IN transfer, another
* data packet is always requested. For the SETUP phase of a control transfer,
* this function does nothing.
*
* Return: 1 if a new request is queued, 0 if no more requests are required
* for this transfer
*/
static int dwc2_hc_continue_transfer(struct dwc2_hsotg *hsotg,
struct dwc2_host_chan *chan)
{
if (dbg_hc(chan))
dev_vdbg(hsotg->dev, "%s: Channel %d\n", __func__,
chan->hc_num);
if (chan->do_split)
/* SPLITs always queue just once per channel */
return 0;
if (chan->data_pid_start == DWC2_HC_PID_SETUP)
/* SETUPs are queued only once since they can't be NAK'd */
return 0;
if (chan->ep_is_in) {
/*
* Always queue another request for other IN transfers. If
* back-to-back INs are issued and NAKs are received for both,
* the driver may still be processing the first NAK when the
* second NAK is received. When the interrupt handler clears
* the NAK interrupt for the first NAK, the second NAK will
* not be seen. So we can't depend on the NAK interrupt
* handler to requeue a NAK'd request. Instead, IN requests
* are issued each time this function is called. When the
* transfer completes, the extra requests for the channel will
* be flushed.
*/
u32 hcchar = dwc2_readl(hsotg, HCCHAR(chan->hc_num));
dwc2_hc_set_even_odd_frame(hsotg, chan, &hcchar);
hcchar |= HCCHAR_CHENA;
hcchar &= ~HCCHAR_CHDIS;
if (dbg_hc(chan))
dev_vdbg(hsotg->dev, " IN xfer: hcchar = 0x%08x\n",
hcchar);
dwc2_writel(hsotg, hcchar, HCCHAR(chan->hc_num));
chan->requests++;
return 1;
}
/* OUT transfers */
if (chan->xfer_count < chan->xfer_len) {
if (chan->ep_type == USB_ENDPOINT_XFER_INT ||
chan->ep_type == USB_ENDPOINT_XFER_ISOC) {
u32 hcchar = dwc2_readl(hsotg,
HCCHAR(chan->hc_num));
dwc2_hc_set_even_odd_frame(hsotg, chan,
&hcchar);
}
/* Load OUT packet into the appropriate Tx FIFO */
dwc2_hc_write_packet(hsotg, chan);
chan->requests++;
return 1;
}
return 0;
}
/*
* =========================================================================
* HCD
* =========================================================================
*/
/*
* Processes all the URBs in a single list of QHs. Completes them with
* -ECONNRESET and frees the QTD.
*
* Must be called with interrupt disabled and spinlock held
*/
static void dwc2_kill_urbs_in_qh_list(struct dwc2_hsotg *hsotg,
struct list_head *qh_list)
{
struct dwc2_qh *qh, *qh_tmp;
struct dwc2_qtd *qtd, *qtd_tmp;
list_for_each_entry_safe(qh, qh_tmp, qh_list, qh_list_entry) {
list_for_each_entry_safe(qtd, qtd_tmp, &qh->qtd_list,
qtd_list_entry) {
dwc2_host_complete(hsotg, qtd, -ECONNRESET);
dwc2_hcd_qtd_unlink_and_free(hsotg, qtd, qh);
}
}
}
static void dwc2_qh_list_free(struct dwc2_hsotg *hsotg,
struct list_head *qh_list)
{
struct dwc2_qtd *qtd, *qtd_tmp;
struct dwc2_qh *qh, *qh_tmp;
unsigned long flags;
if (!qh_list->next)
/* The list hasn't been initialized yet */
return;
spin_lock_irqsave(&hsotg->lock, flags);
/* Ensure there are no QTDs or URBs left */
dwc2_kill_urbs_in_qh_list(hsotg, qh_list);
list_for_each_entry_safe(qh, qh_tmp, qh_list, qh_list_entry) {
dwc2_hcd_qh_unlink(hsotg, qh);
/* Free each QTD in the QH's QTD list */
list_for_each_entry_safe(qtd, qtd_tmp, &qh->qtd_list,
qtd_list_entry)
dwc2_hcd_qtd_unlink_and_free(hsotg, qtd, qh);
if (qh->channel && qh->channel->qh == qh)
qh->channel->qh = NULL;
spin_unlock_irqrestore(&hsotg->lock, flags);
dwc2_hcd_qh_free(hsotg, qh);
spin_lock_irqsave(&hsotg->lock, flags);
}
spin_unlock_irqrestore(&hsotg->lock, flags);
}
/*
* Responds with an error status of -ECONNRESET to all URBs in the non-periodic
* and periodic schedules. The QTD associated with each URB is removed from
* the schedule and freed. This function may be called when a disconnect is
* detected or when the HCD is being stopped.
*
* Must be called with interrupt disabled and spinlock held
*/
static void dwc2_kill_all_urbs(struct dwc2_hsotg *hsotg)
{
dwc2_kill_urbs_in_qh_list(hsotg, &hsotg->non_periodic_sched_inactive);
dwc2_kill_urbs_in_qh_list(hsotg, &hsotg->non_periodic_sched_waiting);
dwc2_kill_urbs_in_qh_list(hsotg, &hsotg->non_periodic_sched_active);
dwc2_kill_urbs_in_qh_list(hsotg, &hsotg->periodic_sched_inactive);
dwc2_kill_urbs_in_qh_list(hsotg, &hsotg->periodic_sched_ready);
dwc2_kill_urbs_in_qh_list(hsotg, &hsotg->periodic_sched_assigned);
dwc2_kill_urbs_in_qh_list(hsotg, &hsotg->periodic_sched_queued);
}
/**
* dwc2_hcd_start() - Starts the HCD when switching to Host mode
*
* @hsotg: Pointer to struct dwc2_hsotg
*/
void dwc2_hcd_start(struct dwc2_hsotg *hsotg)
{
u32 hprt0;
if (hsotg->op_state == OTG_STATE_B_HOST) {
/*
* Reset the port. During an HNP mode switch the reset
* needs to occur within 1ms and have a duration of at
* least 50ms.
*/
hprt0 = dwc2_read_hprt0(hsotg);
hprt0 |= HPRT0_RST;
dwc2_writel(hsotg, hprt0, HPRT0);
}
queue_delayed_work(hsotg->wq_otg, &hsotg->start_work,
msecs_to_jiffies(50));
}
/* Must be called with interrupt disabled and spinlock held */
static void dwc2_hcd_cleanup_channels(struct dwc2_hsotg *hsotg)
{
int num_channels = hsotg->params.host_channels;
struct dwc2_host_chan *channel;
u32 hcchar;
int i;
if (!hsotg->params.host_dma) {
/* Flush out any channel requests in slave mode */
for (i = 0; i < num_channels; i++) {
channel = hsotg->hc_ptr_array[i];
if (!list_empty(&channel->hc_list_entry))
continue;
hcchar = dwc2_readl(hsotg, HCCHAR(i));
if (hcchar & HCCHAR_CHENA) {
hcchar &= ~(HCCHAR_CHENA | HCCHAR_EPDIR);
hcchar |= HCCHAR_CHDIS;
dwc2_writel(hsotg, hcchar, HCCHAR(i));
}
}
}
for (i = 0; i < num_channels; i++) {
channel = hsotg->hc_ptr_array[i];
if (!list_empty(&channel->hc_list_entry))
continue;
hcchar = dwc2_readl(hsotg, HCCHAR(i));
if (hcchar & HCCHAR_CHENA) {
/* Halt the channel */
hcchar |= HCCHAR_CHDIS;
dwc2_writel(hsotg, hcchar, HCCHAR(i));
}
dwc2_hc_cleanup(hsotg, channel);
list_add_tail(&channel->hc_list_entry, &hsotg->free_hc_list);
/*
* Added for Descriptor DMA to prevent channel double cleanup in
* release_channel_ddma(), which is called from ep_disable when
* device disconnects
*/
channel->qh = NULL;
}
/* All channels have been freed, mark them available */
if (hsotg->params.uframe_sched) {
hsotg->available_host_channels =
hsotg->params.host_channels;
} else {
hsotg->non_periodic_channels = 0;
hsotg->periodic_channels = 0;
}
}
/**
* dwc2_hcd_connect() - Handles connect of the HCD
*
* @hsotg: Pointer to struct dwc2_hsotg
*
* Must be called with interrupt disabled and spinlock held
*/
void dwc2_hcd_connect(struct dwc2_hsotg *hsotg)
{
if (hsotg->lx_state != DWC2_L0)
usb_hcd_resume_root_hub(hsotg->priv);
hsotg->flags.b.port_connect_status_change = 1;
hsotg->flags.b.port_connect_status = 1;
}
/**
* dwc2_hcd_disconnect() - Handles disconnect of the HCD
*
* @hsotg: Pointer to struct dwc2_hsotg
* @force: If true, we won't try to reconnect even if we see device connected.
*
* Must be called with interrupt disabled and spinlock held
*/
void dwc2_hcd_disconnect(struct dwc2_hsotg *hsotg, bool force)
{
u32 intr;
u32 hprt0;
/* Set status flags for the hub driver */
hsotg->flags.b.port_connect_status_change = 1;
hsotg->flags.b.port_connect_status = 0;
/*
* Shutdown any transfers in process by clearing the Tx FIFO Empty
* interrupt mask and status bits and disabling subsequent host
* channel interrupts.
*/
intr = dwc2_readl(hsotg, GINTMSK);
intr &= ~(GINTSTS_NPTXFEMP | GINTSTS_PTXFEMP | GINTSTS_HCHINT);
dwc2_writel(hsotg, intr, GINTMSK);
intr = GINTSTS_NPTXFEMP | GINTSTS_PTXFEMP | GINTSTS_HCHINT;
dwc2_writel(hsotg, intr, GINTSTS);
/*
* Turn off the vbus power only if the core has transitioned to device
* mode. If still in host mode, need to keep power on to detect a
* reconnection.
*/
if (dwc2_is_device_mode(hsotg)) {
if (hsotg->op_state != OTG_STATE_A_SUSPEND) {
dev_dbg(hsotg->dev, "Disconnect: PortPower off\n");
dwc2_writel(hsotg, 0, HPRT0);
}
dwc2_disable_host_interrupts(hsotg);
}
/* Respond with an error status to all URBs in the schedule */
dwc2_kill_all_urbs(hsotg);
if (dwc2_is_host_mode(hsotg))
/* Clean up any host channels that were in use */
dwc2_hcd_cleanup_channels(hsotg);
dwc2_host_disconnect(hsotg);
/*
* Add an extra check here to see if we're actually connected but
* we don't have a detection interrupt pending. This can happen if:
* 1. hardware sees connect
* 2. hardware sees disconnect
* 3. hardware sees connect
* 4. dwc2_port_intr() - clears connect interrupt
* 5. dwc2_handle_common_intr() - calls here
*
* Without the extra check here we will end up calling disconnect
* and won't get any future interrupts to handle the connect.
*/
if (!force) {
hprt0 = dwc2_readl(hsotg, HPRT0);
if (!(hprt0 & HPRT0_CONNDET) && (hprt0 & HPRT0_CONNSTS))
dwc2_hcd_connect(hsotg);
}
}
/**
* dwc2_hcd_rem_wakeup() - Handles Remote Wakeup
*
* @hsotg: Pointer to struct dwc2_hsotg
*/
static void dwc2_hcd_rem_wakeup(struct dwc2_hsotg *hsotg)
{
if (hsotg->bus_suspended) {
hsotg->flags.b.port_suspend_change = 1;
usb_hcd_resume_root_hub(hsotg->priv);
}
if (hsotg->lx_state == DWC2_L1)
hsotg->flags.b.port_l1_change = 1;
}
/**
* dwc2_hcd_stop() - Halts the DWC_otg host mode operations in a clean manner
*
* @hsotg: Pointer to struct dwc2_hsotg
*
* Must be called with interrupt disabled and spinlock held
*/
void dwc2_hcd_stop(struct dwc2_hsotg *hsotg)
{
dev_dbg(hsotg->dev, "DWC OTG HCD STOP\n");
/*
* The root hub should be disconnected before this function is called.
* The disconnect will clear the QTD lists (via ..._hcd_urb_dequeue)
* and the QH lists (via ..._hcd_endpoint_disable).
*/
/* Turn off all host-specific interrupts */
dwc2_disable_host_interrupts(hsotg);
/* Turn off the vbus power */
dev_dbg(hsotg->dev, "PortPower off\n");
dwc2_writel(hsotg, 0, HPRT0);
}
/* Caller must hold driver lock */
static int dwc2_hcd_urb_enqueue(struct dwc2_hsotg *hsotg,
struct dwc2_hcd_urb *urb, struct dwc2_qh *qh,
struct dwc2_qtd *qtd)
{
u32 intr_mask;
int retval;
int dev_speed;
if (!hsotg->flags.b.port_connect_status) {
/* No longer connected */
dev_err(hsotg->dev, "Not connected\n");
return -ENODEV;
}
dev_speed = dwc2_host_get_speed(hsotg, urb->priv);
/* Some configurations cannot support LS traffic on a FS root port */
if ((dev_speed == USB_SPEED_LOW) &&
(hsotg->hw_params.fs_phy_type == GHWCFG2_FS_PHY_TYPE_DEDICATED) &&
(hsotg->hw_params.hs_phy_type == GHWCFG2_HS_PHY_TYPE_UTMI)) {
u32 hprt0 = dwc2_readl(hsotg, HPRT0);
u32 prtspd = (hprt0 & HPRT0_SPD_MASK) >> HPRT0_SPD_SHIFT;
if (prtspd == HPRT0_SPD_FULL_SPEED)
return -ENODEV;
}
if (!qtd)
return -EINVAL;
dwc2_hcd_qtd_init(qtd, urb);
retval = dwc2_hcd_qtd_add(hsotg, qtd, qh);
if (retval) {
dev_err(hsotg->dev,
"DWC OTG HCD URB Enqueue failed adding QTD. Error status %d\n",
retval);
return retval;
}
intr_mask = dwc2_readl(hsotg, GINTMSK);
if (!(intr_mask & GINTSTS_SOF)) {
enum dwc2_transaction_type tr_type;
if (qtd->qh->ep_type == USB_ENDPOINT_XFER_BULK &&
!(qtd->urb->flags & URB_GIVEBACK_ASAP))
/*
* Do not schedule SG transactions until qtd has
* URB_GIVEBACK_ASAP set
*/
return 0;
tr_type = dwc2_hcd_select_transactions(hsotg);
if (tr_type != DWC2_TRANSACTION_NONE)
dwc2_hcd_queue_transactions(hsotg, tr_type);
}
return 0;
}
/* Must be called with interrupt disabled and spinlock held */
static int dwc2_hcd_urb_dequeue(struct dwc2_hsotg *hsotg,
struct dwc2_hcd_urb *urb)
{
struct dwc2_qh *qh;
struct dwc2_qtd *urb_qtd;
urb_qtd = urb->qtd;
if (!urb_qtd) {
dev_dbg(hsotg->dev, "## Urb QTD is NULL ##\n");
return -EINVAL;
}
qh = urb_qtd->qh;
if (!qh) {
dev_dbg(hsotg->dev, "## Urb QTD QH is NULL ##\n");
return -EINVAL;
}
urb->priv = NULL;
if (urb_qtd->in_process && qh->channel) {
dwc2_dump_channel_info(hsotg, qh->channel);
/* The QTD is in process (it has been assigned to a channel) */
if (hsotg->flags.b.port_connect_status)
/*
* If still connected (i.e. in host mode), halt the
* channel so it can be used for other transfers. If
* no longer connected, the host registers can't be
* written to halt the channel since the core is in
* device mode.
*/
dwc2_hc_halt(hsotg, qh->channel,
DWC2_HC_XFER_URB_DEQUEUE);
}
/*
* Free the QTD and clean up the associated QH. Leave the QH in the
* schedule if it has any remaining QTDs.
*/
if (!hsotg->params.dma_desc_enable) {
u8 in_process = urb_qtd->in_process;
dwc2_hcd_qtd_unlink_and_free(hsotg, urb_qtd, qh);
if (in_process) {
dwc2_hcd_qh_deactivate(hsotg, qh, 0);
qh->channel = NULL;
} else if (list_empty(&qh->qtd_list)) {
dwc2_hcd_qh_unlink(hsotg, qh);
}
} else {
dwc2_hcd_qtd_unlink_and_free(hsotg, urb_qtd, qh);
}
return 0;
}
/* Must NOT be called with interrupt disabled or spinlock held */
static int dwc2_hcd_endpoint_disable(struct dwc2_hsotg *hsotg,
struct usb_host_endpoint *ep, int retry)
{
struct dwc2_qtd *qtd, *qtd_tmp;
struct dwc2_qh *qh;
unsigned long flags;
int rc;
spin_lock_irqsave(&hsotg->lock, flags);
qh = ep->hcpriv;
if (!qh) {
rc = -EINVAL;
goto err;
}
while (!list_empty(&qh->qtd_list) && retry--) {
if (retry == 0) {
dev_err(hsotg->dev,
"## timeout in dwc2_hcd_endpoint_disable() ##\n");
rc = -EBUSY;
goto err;
}
spin_unlock_irqrestore(&hsotg->lock, flags);
msleep(20);
spin_lock_irqsave(&hsotg->lock, flags);
qh = ep->hcpriv;
if (!qh) {
rc = -EINVAL;
goto err;
}
}
dwc2_hcd_qh_unlink(hsotg, qh);
/* Free each QTD in the QH's QTD list */
list_for_each_entry_safe(qtd, qtd_tmp, &qh->qtd_list, qtd_list_entry)
dwc2_hcd_qtd_unlink_and_free(hsotg, qtd, qh);
ep->hcpriv = NULL;
if (qh->channel && qh->channel->qh == qh)
qh->channel->qh = NULL;
spin_unlock_irqrestore(&hsotg->lock, flags);
dwc2_hcd_qh_free(hsotg, qh);
return 0;
err:
ep->hcpriv = NULL;
spin_unlock_irqrestore(&hsotg->lock, flags);
return rc;
}
/* Must be called with interrupt disabled and spinlock held */
static int dwc2_hcd_endpoint_reset(struct dwc2_hsotg *hsotg,
struct usb_host_endpoint *ep)
{
struct dwc2_qh *qh = ep->hcpriv;
if (!qh)
return -EINVAL;
qh->data_toggle = DWC2_HC_PID_DATA0;
return 0;
}
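/*
* Illustrative note (assumes the usual call path): this is typically
* reached through the hc_driver ->endpoint_reset hook, e.g. after
* usb_clear_halt(), so resetting the toggle to DATA0 here matches the
* USB 2.0 rule that the first transaction after a cleared halt starts
* with a DATA0 PID.
*/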
/**
* dwc2_core_init() - Initializes the DWC_otg controller registers and
* prepares the core for device mode or host mode operation
*
* @hsotg: Programming view of the DWC_otg controller
* @initial_setup: If true then this is the first init for this instance.
*/
int dwc2_core_init(struct dwc2_hsotg *hsotg, bool initial_setup)
{
u32 usbcfg, otgctl;
int retval;
dev_dbg(hsotg->dev, "%s(%p)\n", __func__, hsotg);
usbcfg = dwc2_readl(hsotg, GUSBCFG);
/* Set ULPI External VBUS bit if needed */
usbcfg &= ~GUSBCFG_ULPI_EXT_VBUS_DRV;
if (hsotg->params.phy_ulpi_ext_vbus)
usbcfg |= GUSBCFG_ULPI_EXT_VBUS_DRV;
/* Set external TS Dline pulsing bit if needed */
usbcfg &= ~GUSBCFG_TERMSELDLPULSE;
if (hsotg->params.ts_dline)
usbcfg |= GUSBCFG_TERMSELDLPULSE;
dwc2_writel(hsotg, usbcfg, GUSBCFG);
/*
* Reset the Controller
*
* We only need to reset the controller if this is a re-init.
* For the first init we know for sure that earlier code reset us (it
* needed to in order to properly detect various parameters).
*/
if (!initial_setup) {
retval = dwc2_core_reset(hsotg, false);
if (retval) {
dev_err(hsotg->dev, "%s(): Reset failed, aborting\n",
__func__);
return retval;
}
}
/*
* This needs to happen in FS mode before any other programming occurs
*/
retval = dwc2_phy_init(hsotg, initial_setup);
if (retval)
return retval;
/* Program the GAHBCFG Register */
retval = dwc2_gahbcfg_init(hsotg);
if (retval)
return retval;
/* Program the GUSBCFG register */
dwc2_gusbcfg_init(hsotg);
/* Program the GOTGCTL register */
otgctl = dwc2_readl(hsotg, GOTGCTL);
otgctl &= ~GOTGCTL_OTGVER;
dwc2_writel(hsotg, otgctl, GOTGCTL);
/* Clear the SRP success bit for FS-I2c */
hsotg->srp_success = 0;
/* Enable common interrupts */
dwc2_enable_common_interrupts(hsotg);
/*
* Do device or host initialization based on mode during PCD and
* HCD initialization
*/
if (dwc2_is_host_mode(hsotg)) {
dev_dbg(hsotg->dev, "Host Mode\n");
hsotg->op_state = OTG_STATE_A_HOST;
} else {
dev_dbg(hsotg->dev, "Device Mode\n");
hsotg->op_state = OTG_STATE_B_PERIPHERAL;
}
return 0;
}
/**
* dwc2_core_host_init() - Initializes the DWC_otg controller registers for
* Host mode
*
* @hsotg: Programming view of DWC_otg controller
*
* This function flushes the Tx and Rx FIFOs and flushes any entries in the
* request queues. Host channels are reset to ensure that they are ready for
* performing transfers.
*/
static void dwc2_core_host_init(struct dwc2_hsotg *hsotg)
{
u32 hcfg, hfir, otgctl, usbcfg;
dev_dbg(hsotg->dev, "%s(%p)\n", __func__, hsotg);
/* Set HS/FS Timeout Calibration to 7 (max available value).
* The number of PHY clocks that the application programs in
* this field is added to the high/full speed interpacket timeout
* duration in the core to account for any additional delays
* introduced by the PHY. This can be required because the delay
* introduced by the PHY in generating the linestate condition
* can vary from one PHY to another.
*/
usbcfg = dwc2_readl(hsotg, GUSBCFG);
usbcfg |= GUSBCFG_TOUTCAL(7);
dwc2_writel(hsotg, usbcfg, GUSBCFG);
/* Restart the Phy Clock */
dwc2_writel(hsotg, 0, PCGCTL);
/* Initialize Host Configuration Register */
dwc2_init_fs_ls_pclk_sel(hsotg);
if (hsotg->params.speed == DWC2_SPEED_PARAM_FULL ||
hsotg->params.speed == DWC2_SPEED_PARAM_LOW) {
hcfg = dwc2_readl(hsotg, HCFG);
hcfg |= HCFG_FSLSSUPP;
dwc2_writel(hsotg, hcfg, HCFG);
}
/*
* This bit allows dynamic reloading of the HFIR register during
* runtime. This bit needs to be programmed during initial configuration
* and its value must not be changed during runtime.
*/
if (hsotg->params.reload_ctl) {
hfir = dwc2_readl(hsotg, HFIR);
hfir |= HFIR_RLDCTRL;
dwc2_writel(hsotg, hfir, HFIR);
}
if (hsotg->params.dma_desc_enable) {
u32 op_mode = hsotg->hw_params.op_mode;
if (hsotg->hw_params.snpsid < DWC2_CORE_REV_2_90a ||
!hsotg->hw_params.dma_desc_enable ||
op_mode == GHWCFG2_OP_MODE_SRP_CAPABLE_DEVICE ||
op_mode == GHWCFG2_OP_MODE_NO_SRP_CAPABLE_DEVICE ||
op_mode == GHWCFG2_OP_MODE_UNDEFINED) {
dev_err(hsotg->dev,
"Hardware does not support descriptor DMA mode -\n");
dev_err(hsotg->dev,
"falling back to buffer DMA mode.\n");
hsotg->params.dma_desc_enable = false;
} else {
hcfg = dwc2_readl(hsotg, HCFG);
hcfg |= HCFG_DESCDMA;
dwc2_writel(hsotg, hcfg, HCFG);
}
}
/* Configure data FIFO sizes */
dwc2_config_fifos(hsotg);
/* TODO - check this */
/* Clear Host Set HNP Enable in the OTG Control Register */
otgctl = dwc2_readl(hsotg, GOTGCTL);
otgctl &= ~GOTGCTL_HSTSETHNPEN;
dwc2_writel(hsotg, otgctl, GOTGCTL);
/* Make sure the FIFOs are flushed */
dwc2_flush_tx_fifo(hsotg, 0x10 /* all TX FIFOs */);
dwc2_flush_rx_fifo(hsotg);
/* Clear Host Set HNP Enable in the OTG Control Register */
otgctl = dwc2_readl(hsotg, GOTGCTL);
otgctl &= ~GOTGCTL_HSTSETHNPEN;
dwc2_writel(hsotg, otgctl, GOTGCTL);
if (!hsotg->params.dma_desc_enable) {
int num_channels, i;
u32 hcchar;
/* Flush out any leftover queued requests */
num_channels = hsotg->params.host_channels;
for (i = 0; i < num_channels; i++) {
hcchar = dwc2_readl(hsotg, HCCHAR(i));
if (hcchar & HCCHAR_CHENA) {
hcchar &= ~HCCHAR_CHENA;
hcchar |= HCCHAR_CHDIS;
hcchar &= ~HCCHAR_EPDIR;
dwc2_writel(hsotg, hcchar, HCCHAR(i));
}
}
/* Halt all channels to put them into a known state */
for (i = 0; i < num_channels; i++) {
hcchar = dwc2_readl(hsotg, HCCHAR(i));
if (hcchar & HCCHAR_CHENA) {
hcchar |= HCCHAR_CHENA | HCCHAR_CHDIS;
hcchar &= ~HCCHAR_EPDIR;
dwc2_writel(hsotg, hcchar, HCCHAR(i));
dev_dbg(hsotg->dev, "%s: Halt channel %d\n",
__func__, i);
if (dwc2_hsotg_wait_bit_clear(hsotg, HCCHAR(i),
HCCHAR_CHENA,
1000)) {
dev_warn(hsotg->dev,
"Unable to clear enable on channel %d\n",
i);
}
}
}
}
/* Enable ACG feature in host mode, if supported */
dwc2_enable_acg(hsotg);
/* Turn on the vbus power */
dev_dbg(hsotg->dev, "Init: Port Power? op_state=%d\n", hsotg->op_state);
if (hsotg->op_state == OTG_STATE_A_HOST) {
u32 hprt0 = dwc2_read_hprt0(hsotg);
dev_dbg(hsotg->dev, "Init: Power Port (%d)\n",
!!(hprt0 & HPRT0_PWR));
if (!(hprt0 & HPRT0_PWR)) {
hprt0 |= HPRT0_PWR;
dwc2_writel(hsotg, hprt0, HPRT0);
}
}
dwc2_enable_host_interrupts(hsotg);
}
/*
* Initializes dynamic portions of the DWC_otg HCD state
*
* Must be called with interrupt disabled and spinlock held
*/
static void dwc2_hcd_reinit(struct dwc2_hsotg *hsotg)
{
struct dwc2_host_chan *chan, *chan_tmp;
int num_channels;
int i;
hsotg->flags.d32 = 0;
hsotg->non_periodic_qh_ptr = &hsotg->non_periodic_sched_active;
if (hsotg->params.uframe_sched) {
hsotg->available_host_channels =
hsotg->params.host_channels;
} else {
hsotg->non_periodic_channels = 0;
hsotg->periodic_channels = 0;
}
/*
* Put all channels in the free channel list and clean up channel
* states
*/
list_for_each_entry_safe(chan, chan_tmp, &hsotg->free_hc_list,
hc_list_entry)
list_del_init(&chan->hc_list_entry);
num_channels = hsotg->params.host_channels;
for (i = 0; i < num_channels; i++) {
chan = hsotg->hc_ptr_array[i];
list_add_tail(&chan->hc_list_entry, &hsotg->free_hc_list);
dwc2_hc_cleanup(hsotg, chan);
}
/* Initialize the DWC core for host mode operation */
dwc2_core_host_init(hsotg);
}
static void dwc2_hc_init_split(struct dwc2_hsotg *hsotg,
struct dwc2_host_chan *chan,
struct dwc2_qtd *qtd, struct dwc2_hcd_urb *urb)
{
int hub_addr, hub_port;
chan->do_split = 1;
chan->xact_pos = qtd->isoc_split_pos;
chan->complete_split = qtd->complete_split;
dwc2_host_hub_info(hsotg, urb->priv, &hub_addr, &hub_port);
chan->hub_addr = (u8)hub_addr;
chan->hub_port = (u8)hub_port;
}
static void dwc2_hc_init_xfer(struct dwc2_hsotg *hsotg,
struct dwc2_host_chan *chan,
struct dwc2_qtd *qtd)
{
struct dwc2_hcd_urb *urb = qtd->urb;
struct dwc2_hcd_iso_packet_desc *frame_desc;
switch (dwc2_hcd_get_pipe_type(&urb->pipe_info)) {
case USB_ENDPOINT_XFER_CONTROL:
chan->ep_type = USB_ENDPOINT_XFER_CONTROL;
switch (qtd->control_phase) {
case DWC2_CONTROL_SETUP:
dev_vdbg(hsotg->dev, " Control setup transaction\n");
chan->do_ping = 0;
chan->ep_is_in = 0;
chan->data_pid_start = DWC2_HC_PID_SETUP;
if (hsotg->params.host_dma)
chan->xfer_dma = urb->setup_dma;
else
chan->xfer_buf = urb->setup_packet;
chan->xfer_len = 8;
break;
case DWC2_CONTROL_DATA:
dev_vdbg(hsotg->dev, " Control data transaction\n");
chan->data_pid_start = qtd->data_toggle;
break;
case DWC2_CONTROL_STATUS:
/*
* Direction is opposite of data direction or IN if no
* data
*/
dev_vdbg(hsotg->dev, " Control status transaction\n");
if (urb->length == 0)
chan->ep_is_in = 1;
else
chan->ep_is_in =
dwc2_hcd_is_pipe_out(&urb->pipe_info);
if (chan->ep_is_in)
chan->do_ping = 0;
chan->data_pid_start = DWC2_HC_PID_DATA1;
chan->xfer_len = 0;
if (hsotg->params.host_dma)
chan->xfer_dma = hsotg->status_buf_dma;
else
chan->xfer_buf = hsotg->status_buf;
break;
}
break;
case USB_ENDPOINT_XFER_BULK:
chan->ep_type = USB_ENDPOINT_XFER_BULK;
break;
case USB_ENDPOINT_XFER_INT:
chan->ep_type = USB_ENDPOINT_XFER_INT;
break;
case USB_ENDPOINT_XFER_ISOC:
chan->ep_type = USB_ENDPOINT_XFER_ISOC;
if (hsotg->params.dma_desc_enable)
break;
frame_desc = &urb->iso_descs[qtd->isoc_frame_index];
frame_desc->status = 0;
if (hsotg->params.host_dma) {
chan->xfer_dma = urb->dma;
chan->xfer_dma += frame_desc->offset +
qtd->isoc_split_offset;
} else {
chan->xfer_buf = urb->buf;
chan->xfer_buf += frame_desc->offset +
qtd->isoc_split_offset;
}
chan->xfer_len = frame_desc->length - qtd->isoc_split_offset;
if (chan->xact_pos == DWC2_HCSPLT_XACTPOS_ALL) {
if (chan->xfer_len <= 188)
chan->xact_pos = DWC2_HCSPLT_XACTPOS_ALL;
else
chan->xact_pos = DWC2_HCSPLT_XACTPOS_BEGIN;
}
break;
}
}
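/*
* Worked example of the status-stage rule above (illustrative only):
* a control-OUT transfer with a data stage gets an IN status stage, a
* control-IN transfer gets an OUT status stage, and a transfer with no
* data stage (urb->length == 0) always gets an IN status stage, which
* is why ep_is_in is forced to 1 in that case.
*/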
static int dwc2_alloc_split_dma_aligned_buf(struct dwc2_hsotg *hsotg,
struct dwc2_qh *qh,
struct dwc2_host_chan *chan)
{
if (!hsotg->unaligned_cache ||
chan->max_packet > DWC2_KMEM_UNALIGNED_BUF_SIZE)
return -ENOMEM;
if (!qh->dw_align_buf) {
qh->dw_align_buf = kmem_cache_alloc(hsotg->unaligned_cache,
GFP_ATOMIC | GFP_DMA);
if (!qh->dw_align_buf)
return -ENOMEM;
}
qh->dw_align_buf_dma = dma_map_single(hsotg->dev, qh->dw_align_buf,
DWC2_KMEM_UNALIGNED_BUF_SIZE,
DMA_FROM_DEVICE);
if (dma_mapping_error(hsotg->dev, qh->dw_align_buf_dma)) {
dev_err(hsotg->dev, "can't map align_buf\n");
chan->align_buf = 0;
return -EINVAL;
}
chan->align_buf = qh->dw_align_buf_dma;
return 0;
}
#define DWC2_USB_DMA_ALIGN 4
static void dwc2_free_dma_aligned_buffer(struct urb *urb)
{
void *stored_xfer_buffer;
size_t length;
if (!(urb->transfer_flags & URB_ALIGNED_TEMP_BUFFER))
return;
/* Restore urb->transfer_buffer from the end of the allocated area */
memcpy(&stored_xfer_buffer,
PTR_ALIGN(urb->transfer_buffer + urb->transfer_buffer_length,
dma_get_cache_alignment()),
sizeof(urb->transfer_buffer));
if (usb_urb_dir_in(urb)) {
if (usb_pipeisoc(urb->pipe))
length = urb->transfer_buffer_length;
else
length = urb->actual_length;
memcpy(stored_xfer_buffer, urb->transfer_buffer, length);
}
kfree(urb->transfer_buffer);
urb->transfer_buffer = stored_xfer_buffer;
urb->transfer_flags &= ~URB_ALIGNED_TEMP_BUFFER;
}
static int dwc2_alloc_dma_aligned_buffer(struct urb *urb, gfp_t mem_flags)
{
void *kmalloc_ptr;
size_t kmalloc_size;
if (urb->num_sgs || urb->sg ||
urb->transfer_buffer_length == 0 ||
!((uintptr_t)urb->transfer_buffer & (DWC2_USB_DMA_ALIGN - 1)))
return 0;
/*
* Allocate a buffer with enough padding to also hold the original
* transfer_buffer pointer. This allocation is guaranteed to be
* aligned properly for DMA.
*/
kmalloc_size = urb->transfer_buffer_length +
(dma_get_cache_alignment() - 1) +
sizeof(urb->transfer_buffer);
kmalloc_ptr = kmalloc(kmalloc_size, mem_flags);
if (!kmalloc_ptr)
return -ENOMEM;
/*
* Store the original urb->transfer_buffer pointer at the end of the
* allocation so it can be recovered later.
*/
memcpy(PTR_ALIGN(kmalloc_ptr + urb->transfer_buffer_length,
dma_get_cache_alignment()),
&urb->transfer_buffer, sizeof(urb->transfer_buffer));
if (usb_urb_dir_out(urb))
memcpy(kmalloc_ptr, urb->transfer_buffer,
urb->transfer_buffer_length);
urb->transfer_buffer = kmalloc_ptr;
urb->transfer_flags |= URB_ALIGNED_TEMP_BUFFER;
return 0;
}
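#if 0
/*
* Stand-alone sketch (not compiled into the driver) of the
* pointer-stashing scheme used by the two helpers above: the original
* buffer pointer is copied into the cache-aligned padding that follows
* the payload, so the free path can recover it without any side table.
* The demo_* names are hypothetical.
*/
static void *demo_make_aligned_copy(void *orig, size_t len, gfp_t mem_flags)
{
size_t total = len + (dma_get_cache_alignment() - 1) + sizeof(orig);
void *copy = kmalloc(total, mem_flags);
if (!copy)
return NULL;
/* Copy the payload, then stash the original pointer after it */
memcpy(copy, orig, len);
memcpy(PTR_ALIGN(copy + len, dma_get_cache_alignment()),
&orig, sizeof(orig));
return copy;
}
static void *demo_release_aligned_copy(void *copy, size_t len)
{
void *stored;
/* Recover the stashed pointer, then free the aligned copy */
memcpy(&stored, PTR_ALIGN(copy + len, dma_get_cache_alignment()),
sizeof(stored));
kfree(copy);
return stored;
}
#endif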
static int dwc2_map_urb_for_dma(struct usb_hcd *hcd, struct urb *urb,
gfp_t mem_flags)
{
int ret;
/* We assume setup_dma is always aligned; warn if not */
WARN_ON_ONCE(urb->setup_dma &&
(urb->setup_dma & (DWC2_USB_DMA_ALIGN - 1)));
ret = dwc2_alloc_dma_aligned_buffer(urb, mem_flags);
if (ret)
return ret;
ret = usb_hcd_map_urb_for_dma(hcd, urb, mem_flags);
if (ret)
dwc2_free_dma_aligned_buffer(urb);
return ret;
}
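/*
* Example of the mask check used above (illustrative): with
* DWC2_USB_DMA_ALIGN == 4 the test "addr & (DWC2_USB_DMA_ALIGN - 1)"
* evaluates addr & 0x3, so an address ending in 0x2 is flagged as
* misaligned while one ending in 0x4 or 0x8 passes.
*/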
static void dwc2_unmap_urb_for_dma(struct usb_hcd *hcd, struct urb *urb)
{
usb_hcd_unmap_urb_for_dma(hcd, urb);
dwc2_free_dma_aligned_buffer(urb);
}
/**
* dwc2_assign_and_init_hc() - Assigns transactions from a QTD to a free host
* channel and initializes the host channel to perform the transactions. The
* host channel is removed from the free list.
*
* @hsotg: The HCD state structure
* @qh: Transactions from the first QTD for this QH are selected and assigned
* to a free host channel
*/
static int dwc2_assign_and_init_hc(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh)
{
struct dwc2_host_chan *chan;
struct dwc2_hcd_urb *urb;
struct dwc2_qtd *qtd;
if (dbg_qh(qh))
dev_vdbg(hsotg->dev, "%s(%p,%p)\n", __func__, hsotg, qh);
if (list_empty(&qh->qtd_list)) {
dev_dbg(hsotg->dev, "No QTDs in QH list\n");
return -ENOMEM;
}
if (list_empty(&hsotg->free_hc_list)) {
dev_dbg(hsotg->dev, "No free channel to assign\n");
return -ENOMEM;
}
chan = list_first_entry(&hsotg->free_hc_list, struct dwc2_host_chan,
hc_list_entry);
/* Remove host channel from free list */
list_del_init(&chan->hc_list_entry);
qtd = list_first_entry(&qh->qtd_list, struct dwc2_qtd, qtd_list_entry);
urb = qtd->urb;
qh->channel = chan;
qtd->in_process = 1;
/*
* Use usb_pipedevice to determine device address. This address is
* 0 before the SET_ADDRESS command and the correct address afterward.
*/
chan->dev_addr = dwc2_hcd_get_dev_addr(&urb->pipe_info);
chan->ep_num = dwc2_hcd_get_ep_num(&urb->pipe_info);
chan->speed = qh->dev_speed;
chan->max_packet = qh->maxp;
chan->xfer_started = 0;
chan->halt_status = DWC2_HC_XFER_NO_HALT_STATUS;
chan->error_state = (qtd->error_count > 0);
chan->halt_on_queue = 0;
chan->halt_pending = 0;
chan->requests = 0;
/*
* The following values may be modified in the transfer type section
* below. The xfer_len value may be reduced when the transfer is
* started to accommodate the max widths of the XferSize and PktCnt
* fields in the HCTSIZn register.
*/
chan->ep_is_in = (dwc2_hcd_is_pipe_in(&urb->pipe_info) != 0);
if (chan->ep_is_in)
chan->do_ping = 0;
else
chan->do_ping = qh->ping_state;
chan->data_pid_start = qh->data_toggle;
chan->multi_count = 1;
if (urb->actual_length > urb->length &&
!dwc2_hcd_is_pipe_in(&urb->pipe_info))
urb->actual_length = urb->length;
if (hsotg->params.host_dma)
chan->xfer_dma = urb->dma + urb->actual_length;
else
chan->xfer_buf = (u8 *)urb->buf + urb->actual_length;
chan->xfer_len = urb->length - urb->actual_length;
chan->xfer_count = 0;
/* Set the split attributes if required */
if (qh->do_split)
dwc2_hc_init_split(hsotg, chan, qtd, urb);
else
chan->do_split = 0;
/* Set the transfer attributes */
dwc2_hc_init_xfer(hsotg, chan, qtd);
/* For non-dword aligned buffers */
if (hsotg->params.host_dma && qh->do_split &&
chan->ep_is_in && (chan->xfer_dma & 0x3)) {
dev_vdbg(hsotg->dev, "Non-aligned buffer\n");
if (dwc2_alloc_split_dma_aligned_buf(hsotg, qh, chan)) {
dev_err(hsotg->dev,
"Failed to allocate memory to handle non-aligned buffer\n");
/* Add channel back to free list */
chan->align_buf = 0;
chan->multi_count = 0;
list_add_tail(&chan->hc_list_entry,
&hsotg->free_hc_list);
qtd->in_process = 0;
qh->channel = NULL;
return -ENOMEM;
}
} else {
/*
* We assume that DMA is always aligned in non-split
* case or split out case. Warn if not.
*/
WARN_ON_ONCE(hsotg->params.host_dma &&
(chan->xfer_dma & 0x3));
chan->align_buf = 0;
}
if (chan->ep_type == USB_ENDPOINT_XFER_INT ||
chan->ep_type == USB_ENDPOINT_XFER_ISOC)
/*
* This value may be modified when the transfer is started
* to reflect the actual transfer length
*/
chan->multi_count = qh->maxp_mult;
if (hsotg->params.dma_desc_enable) {
chan->desc_list_addr = qh->desc_list_dma;
chan->desc_list_sz = qh->desc_list_sz;
}
dwc2_hc_init(hsotg, chan);
chan->qh = qh;
return 0;
}
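/*
* Worked example of the restart arithmetic above (illustrative,
* hypothetical numbers): a 512-byte bulk OUT URB that has already
* completed 64 bytes is restarted with xfer_dma = urb->dma + 64 and
* xfer_len = 512 - 64 = 448, so the channel resumes exactly where the
* previous attempt stopped.
*/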
/**
* dwc2_hcd_select_transactions() - Selects transactions from the HCD transfer
* schedule and assigns them to available host channels. Called from the HCD
* interrupt handler functions.
*
* @hsotg: The HCD state structure
*
* Return: The types of new transactions that were assigned to host channels
*/
enum dwc2_transaction_type dwc2_hcd_select_transactions(
struct dwc2_hsotg *hsotg)
{
enum dwc2_transaction_type ret_val = DWC2_TRANSACTION_NONE;
struct list_head *qh_ptr;
struct dwc2_qh *qh;
int num_channels;
#ifdef DWC2_DEBUG_SOF
dev_vdbg(hsotg->dev, " Select Transactions\n");
#endif
/* Process entries in the periodic ready list */
qh_ptr = hsotg->periodic_sched_ready.next;
while (qh_ptr != &hsotg->periodic_sched_ready) {
if (list_empty(&hsotg->free_hc_list))
break;
if (hsotg->params.uframe_sched) {
if (hsotg->available_host_channels <= 1)
break;
hsotg->available_host_channels--;
}
qh = list_entry(qh_ptr, struct dwc2_qh, qh_list_entry);
if (dwc2_assign_and_init_hc(hsotg, qh))
break;
/*
* Move the QH from the periodic ready schedule to the
* periodic assigned schedule
*/
qh_ptr = qh_ptr->next;
list_move_tail(&qh->qh_list_entry,
&hsotg->periodic_sched_assigned);
ret_val = DWC2_TRANSACTION_PERIODIC;
}
/*
* Process entries in the inactive portion of the non-periodic
* schedule. Some free host channels may not be used if they are
* reserved for periodic transfers.
*/
num_channels = hsotg->params.host_channels;
qh_ptr = hsotg->non_periodic_sched_inactive.next;
while (qh_ptr != &hsotg->non_periodic_sched_inactive) {
if (!hsotg->params.uframe_sched &&
hsotg->non_periodic_channels >= num_channels -
hsotg->periodic_channels)
break;
if (list_empty(&hsotg->free_hc_list))
break;
qh = list_entry(qh_ptr, struct dwc2_qh, qh_list_entry);
if (hsotg->params.uframe_sched) {
if (hsotg->available_host_channels < 1)
break;
hsotg->available_host_channels--;
}
if (dwc2_assign_and_init_hc(hsotg, qh))
break;
/*
* Move the QH from the non-periodic inactive schedule to the
* non-periodic active schedule
*/
qh_ptr = qh_ptr->next;
list_move_tail(&qh->qh_list_entry,
&hsotg->non_periodic_sched_active);
if (ret_val == DWC2_TRANSACTION_NONE)
ret_val = DWC2_TRANSACTION_NON_PERIODIC;
else
ret_val = DWC2_TRANSACTION_ALL;
if (!hsotg->params.uframe_sched)
hsotg->non_periodic_channels++;
}
return ret_val;
}
/**
* dwc2_queue_transaction() - Attempts to queue a single transaction request for
* a host channel associated with either a periodic or non-periodic transfer
*
* @hsotg: The HCD state structure
* @chan: Host channel descriptor associated with either a periodic or
* non-periodic transfer
* @fifo_dwords_avail: Number of DWORDs available in the periodic Tx FIFO
* for periodic transfers or the non-periodic Tx FIFO
* for non-periodic transfers
*
* Return: 1 if a request is queued and more requests may be needed to
* complete the transfer, 0 if no more requests are required for this
* transfer, -1 if there is insufficient space in the Tx FIFO
*
* This function assumes that there is space available in the appropriate
* request queue. For an OUT transfer or SETUP transaction in Slave mode,
* it checks whether space is available in the appropriate Tx FIFO.
*
* Must be called with interrupt disabled and spinlock held
*/
static int dwc2_queue_transaction(struct dwc2_hsotg *hsotg,
struct dwc2_host_chan *chan,
u16 fifo_dwords_avail)
{
int retval = 0;
if (chan->do_split)
/* Put ourselves on the list to keep order straight */
list_move_tail(&chan->split_order_list_entry,
&hsotg->split_order);
if (hsotg->params.host_dma && chan->qh) {
if (hsotg->params.dma_desc_enable) {
if (!chan->xfer_started ||
chan->ep_type == USB_ENDPOINT_XFER_ISOC) {
dwc2_hcd_start_xfer_ddma(hsotg, chan->qh);
chan->qh->ping_state = 0;
}
} else if (!chan->xfer_started) {
dwc2_hc_start_transfer(hsotg, chan);
chan->qh->ping_state = 0;
}
} else if (chan->halt_pending) {
/* Don't queue a request if the channel has been halted */
} else if (chan->halt_on_queue) {
dwc2_hc_halt(hsotg, chan, chan->halt_status);
} else if (chan->do_ping) {
if (!chan->xfer_started)
dwc2_hc_start_transfer(hsotg, chan);
} else if (!chan->ep_is_in ||
chan->data_pid_start == DWC2_HC_PID_SETUP) {
if ((fifo_dwords_avail * 4) >= chan->max_packet) {
if (!chan->xfer_started) {
dwc2_hc_start_transfer(hsotg, chan);
retval = 1;
} else {
retval = dwc2_hc_continue_transfer(hsotg, chan);
}
} else {
retval = -1;
}
} else {
if (!chan->xfer_started) {
dwc2_hc_start_transfer(hsotg, chan);
retval = 1;
} else {
retval = dwc2_hc_continue_transfer(hsotg, chan);
}
}
return retval;
}
/*
* Processes periodic channels for the next frame and queues transactions for
* these channels to the DWC_otg controller. After queueing transactions, the
* Periodic Tx FIFO Empty interrupt is enabled if there are more transactions
* to queue as Periodic Tx FIFO or request queue space becomes available.
* Otherwise, the Periodic Tx FIFO Empty interrupt is disabled.
*
* Must be called with interrupt disabled and spinlock held
*/
static void dwc2_process_periodic_channels(struct dwc2_hsotg *hsotg)
{
struct list_head *qh_ptr;
struct dwc2_qh *qh;
u32 tx_status;
u32 fspcavail;
u32 gintmsk;
int status;
bool no_queue_space = false;
bool no_fifo_space = false;
u32 qspcavail;
/* If empty list then just adjust interrupt enables */
if (list_empty(&hsotg->periodic_sched_assigned))
goto exit;
if (dbg_perio())
dev_vdbg(hsotg->dev, "Queue periodic transactions\n");
tx_status = dwc2_readl(hsotg, HPTXSTS);
qspcavail = (tx_status & TXSTS_QSPCAVAIL_MASK) >>
TXSTS_QSPCAVAIL_SHIFT;
fspcavail = (tx_status & TXSTS_FSPCAVAIL_MASK) >>
TXSTS_FSPCAVAIL_SHIFT;
if (dbg_perio()) {
dev_vdbg(hsotg->dev, " P Tx Req Queue Space Avail (before queue): %d\n",
qspcavail);
dev_vdbg(hsotg->dev, " P Tx FIFO Space Avail (before queue): %d\n",
fspcavail);
}
qh_ptr = hsotg->periodic_sched_assigned.next;
while (qh_ptr != &hsotg->periodic_sched_assigned) {
tx_status = dwc2_readl(hsotg, HPTXSTS);
qspcavail = (tx_status & TXSTS_QSPCAVAIL_MASK) >>
TXSTS_QSPCAVAIL_SHIFT;
if (qspcavail == 0) {
no_queue_space = true;
break;
}
qh = list_entry(qh_ptr, struct dwc2_qh, qh_list_entry);
if (!qh->channel) {
qh_ptr = qh_ptr->next;
continue;
}
/* Make sure EP's TT buffer is clean before queueing qtds */
if (qh->tt_buffer_dirty) {
qh_ptr = qh_ptr->next;
continue;
}
/*
* Set a flag if we're queuing high-bandwidth in slave mode.
* The flag prevents any halts from getting into the request queue
* in the middle of multiple high-bandwidth packets getting queued.
*/
if (!hsotg->params.host_dma &&
qh->channel->multi_count > 1)
hsotg->queuing_high_bandwidth = 1;
fspcavail = (tx_status & TXSTS_FSPCAVAIL_MASK) >>
TXSTS_FSPCAVAIL_SHIFT;
status = dwc2_queue_transaction(hsotg, qh->channel, fspcavail);
if (status < 0) {
no_fifo_space = true;
break;
}
/*
* In Slave mode, stay on the current transfer until there is
* nothing more to do or the high-bandwidth request count is
* reached. In DMA mode, only need to queue one request. The
* controller automatically handles multiple packets for
* high-bandwidth transfers.
*/
if (hsotg->params.host_dma || status == 0 ||
qh->channel->requests == qh->channel->multi_count) {
qh_ptr = qh_ptr->next;
/*
* Move the QH from the periodic assigned schedule to
* the periodic queued schedule
*/
list_move_tail(&qh->qh_list_entry,
&hsotg->periodic_sched_queued);
/* done queuing high bandwidth */
hsotg->queuing_high_bandwidth = 0;
}
}
exit:
if (no_queue_space || no_fifo_space ||
(!hsotg->params.host_dma &&
!list_empty(&hsotg->periodic_sched_assigned))) {
/*
* May need to queue more transactions as the request
* queue or Tx FIFO empties. Enable the periodic Tx
* FIFO empty interrupt. (Always use the half-empty
* level to ensure that new requests are loaded as
* soon as possible.)
*/
gintmsk = dwc2_readl(hsotg, GINTMSK);
if (!(gintmsk & GINTSTS_PTXFEMP)) {
gintmsk |= GINTSTS_PTXFEMP;
dwc2_writel(hsotg, gintmsk, GINTMSK);
}
} else {
/*
* Disable the Tx FIFO empty interrupt since there are
* no more transactions that need to be queued right
* now. This function is called from interrupt
* handlers to queue more transactions as transfer
* states change.
*/
gintmsk = dwc2_readl(hsotg, GINTMSK);
if (gintmsk & GINTSTS_PTXFEMP) {
gintmsk &= ~GINTSTS_PTXFEMP;
dwc2_writel(hsotg, gintmsk, GINTMSK);
}
}
}
/*
* Processes active non-periodic channels and queues transactions for these
* channels to the DWC_otg controller. After queueing transactions, the NP Tx
* FIFO Empty interrupt is enabled if there are more transactions to queue as
* NP Tx FIFO or request queue space becomes available. Otherwise, the NP Tx
* FIFO Empty interrupt is disabled.
*
* Must be called with interrupt disabled and spinlock held
*/
static void dwc2_process_non_periodic_channels(struct dwc2_hsotg *hsotg)
{
struct list_head *orig_qh_ptr;
struct dwc2_qh *qh;
u32 tx_status;
u32 qspcavail;
u32 fspcavail;
u32 gintmsk;
int status;
int no_queue_space = 0;
int no_fifo_space = 0;
int more_to_do = 0;
dev_vdbg(hsotg->dev, "Queue non-periodic transactions\n");
tx_status = dwc2_readl(hsotg, GNPTXSTS);
qspcavail = (tx_status & TXSTS_QSPCAVAIL_MASK) >>
TXSTS_QSPCAVAIL_SHIFT;
fspcavail = (tx_status & TXSTS_FSPCAVAIL_MASK) >>
TXSTS_FSPCAVAIL_SHIFT;
dev_vdbg(hsotg->dev, " NP Tx Req Queue Space Avail (before queue): %d\n",
qspcavail);
dev_vdbg(hsotg->dev, " NP Tx FIFO Space Avail (before queue): %d\n",
fspcavail);
/*
* Keep track of the starting point. Skip over the start-of-list
* entry.
*/
if (hsotg->non_periodic_qh_ptr == &hsotg->non_periodic_sched_active)
hsotg->non_periodic_qh_ptr = hsotg->non_periodic_qh_ptr->next;
orig_qh_ptr = hsotg->non_periodic_qh_ptr;
/*
* Process once through the active list or until no more space is
* available in the request queue or the Tx FIFO
*/
do {
tx_status = dwc2_readl(hsotg, GNPTXSTS);
qspcavail = (tx_status & TXSTS_QSPCAVAIL_MASK) >>
TXSTS_QSPCAVAIL_SHIFT;
if (!hsotg->params.host_dma && qspcavail == 0) {
no_queue_space = 1;
break;
}
qh = list_entry(hsotg->non_periodic_qh_ptr, struct dwc2_qh,
qh_list_entry);
if (!qh->channel)
goto next;
/* Make sure EP's TT buffer is clean before queueing qtds */
if (qh->tt_buffer_dirty)
goto next;
fspcavail = (tx_status & TXSTS_FSPCAVAIL_MASK) >>
TXSTS_FSPCAVAIL_SHIFT;
status = dwc2_queue_transaction(hsotg, qh->channel, fspcavail);
if (status > 0) {
more_to_do = 1;
} else if (status < 0) {
no_fifo_space = 1;
break;
}
next:
/* Advance to next QH, skipping start-of-list entry */
hsotg->non_periodic_qh_ptr = hsotg->non_periodic_qh_ptr->next;
if (hsotg->non_periodic_qh_ptr ==
&hsotg->non_periodic_sched_active)
hsotg->non_periodic_qh_ptr =
hsotg->non_periodic_qh_ptr->next;
} while (hsotg->non_periodic_qh_ptr != orig_qh_ptr);
if (!hsotg->params.host_dma) {
tx_status = dwc2_readl(hsotg, GNPTXSTS);
qspcavail = (tx_status & TXSTS_QSPCAVAIL_MASK) >>
TXSTS_QSPCAVAIL_SHIFT;
fspcavail = (tx_status & TXSTS_FSPCAVAIL_MASK) >>
TXSTS_FSPCAVAIL_SHIFT;
dev_vdbg(hsotg->dev,
" NP Tx Req Queue Space Avail (after queue): %d\n",
qspcavail);
dev_vdbg(hsotg->dev,
" NP Tx FIFO Space Avail (after queue): %d\n",
fspcavail);
if (more_to_do || no_queue_space || no_fifo_space) {
/*
* May need to queue more transactions as the request
* queue or Tx FIFO empties. Enable the non-periodic
* Tx FIFO empty interrupt. (Always use the half-empty
* level to ensure that new requests are loaded as
* soon as possible.)
*/
gintmsk = dwc2_readl(hsotg, GINTMSK);
gintmsk |= GINTSTS_NPTXFEMP;
dwc2_writel(hsotg, gintmsk, GINTMSK);
} else {
/*
* Disable the Tx FIFO empty interrupt since there are
* no more transactions that need to be queued right
* now. This function is called from interrupt
* handlers to queue more transactions as transfer
* states change.
*/
gintmsk = dwc2_readl(hsotg, GINTMSK);
gintmsk &= ~GINTSTS_NPTXFEMP;
dwc2_writel(hsotg, gintmsk, GINTMSK);
}
}
}
/**
* dwc2_hcd_queue_transactions() - Processes the currently active host channels
* and queues transactions for these channels to the DWC_otg controller. Called
* from the HCD interrupt handler functions.
*
* @hsotg: The HCD state structure
* @tr_type: The type(s) of transactions to queue (non-periodic, periodic,
* or both)
*
* Must be called with interrupt disabled and spinlock held
*/
void dwc2_hcd_queue_transactions(struct dwc2_hsotg *hsotg,
enum dwc2_transaction_type tr_type)
{
#ifdef DWC2_DEBUG_SOF
dev_vdbg(hsotg->dev, "Queue Transactions\n");
#endif
/* Process host channels associated with periodic transfers */
if (tr_type == DWC2_TRANSACTION_PERIODIC ||
tr_type == DWC2_TRANSACTION_ALL)
dwc2_process_periodic_channels(hsotg);
/* Process host channels associated with non-periodic transfers */
if (tr_type == DWC2_TRANSACTION_NON_PERIODIC ||
tr_type == DWC2_TRANSACTION_ALL) {
if (!list_empty(&hsotg->non_periodic_sched_active)) {
dwc2_process_non_periodic_channels(hsotg);
} else {
/*
* Ensure NP Tx FIFO empty interrupt is disabled when
* there are no non-periodic transfers to process
*/
u32 gintmsk = dwc2_readl(hsotg, GINTMSK);
gintmsk &= ~GINTSTS_NPTXFEMP;
dwc2_writel(hsotg, gintmsk, GINTMSK);
}
}
}
static void dwc2_conn_id_status_change(struct work_struct *work)
{
struct dwc2_hsotg *hsotg = container_of(work, struct dwc2_hsotg,
wf_otg);
u32 count = 0;
u32 gotgctl;
unsigned long flags;
dev_dbg(hsotg->dev, "%s()\n", __func__);
gotgctl = dwc2_readl(hsotg, GOTGCTL);
dev_dbg(hsotg->dev, "gotgctl=%0x\n", gotgctl);
dev_dbg(hsotg->dev, "gotgctl.b.conidsts=%d\n",
!!(gotgctl & GOTGCTL_CONID_B));
/* B-Device connector (Device Mode) */
if (gotgctl & GOTGCTL_CONID_B) {
dwc2_vbus_supply_exit(hsotg);
/* Wait for switch to device mode */
dev_dbg(hsotg->dev, "connId B\n");
if (hsotg->bus_suspended) {
dev_info(hsotg->dev,
"Do port resume before switching to device mode\n");
dwc2_port_resume(hsotg);
}
while (!dwc2_is_device_mode(hsotg)) {
dev_info(hsotg->dev,
"Waiting for Peripheral Mode, Mode=%s\n",
dwc2_is_host_mode(hsotg) ? "Host" :
"Peripheral");
msleep(20);
/*
* Sometimes the initial GOTGCTL read is wrong, so
* check it again and jump to host mode if that was
* the case.
*/
gotgctl = dwc2_readl(hsotg, GOTGCTL);
if (!(gotgctl & GOTGCTL_CONID_B))
goto host;
if (++count > 250)
break;
}
if (count > 250)
dev_err(hsotg->dev,
"Connection id status change timed out\n");
/*
* Exit Partial Power Down without restoring registers.
* No need to check the return value as registers
* are not being restored.
*/
if (hsotg->in_ppd && hsotg->lx_state == DWC2_L2)
dwc2_exit_partial_power_down(hsotg, 0, false);
hsotg->op_state = OTG_STATE_B_PERIPHERAL;
dwc2_core_init(hsotg, false);
dwc2_enable_global_interrupts(hsotg);
spin_lock_irqsave(&hsotg->lock, flags);
dwc2_hsotg_core_init_disconnected(hsotg, false);
spin_unlock_irqrestore(&hsotg->lock, flags);
/* Enable ACG feature in device mode, if supported */
dwc2_enable_acg(hsotg);
dwc2_hsotg_core_connect(hsotg);
} else {
host:
/* A-Device connector (Host Mode) */
dev_dbg(hsotg->dev, "connId A\n");
while (!dwc2_is_host_mode(hsotg)) {
dev_info(hsotg->dev, "Waiting for Host Mode, Mode=%s\n",
dwc2_is_host_mode(hsotg) ?
"Host" : "Peripheral");
msleep(20);
if (++count > 250)
break;
}
if (count > 250)
dev_err(hsotg->dev,
"Connection id status change timed out\n");
spin_lock_irqsave(&hsotg->lock, flags);
dwc2_hsotg_disconnect(hsotg);
spin_unlock_irqrestore(&hsotg->lock, flags);
hsotg->op_state = OTG_STATE_A_HOST;
/* Initialize the Core for Host mode */
dwc2_core_init(hsotg, false);
dwc2_enable_global_interrupts(hsotg);
dwc2_hcd_start(hsotg);
}
}
static void dwc2_wakeup_detected(struct timer_list *t)
{
struct dwc2_hsotg *hsotg = from_timer(hsotg, t, wkp_timer);
u32 hprt0;
dev_dbg(hsotg->dev, "%s()\n", __func__);
/*
* Clear the Resume after 70ms. (Need 20 ms minimum. Use 70 ms
* so that OPT tests pass with all PHYs.)
*/
hprt0 = dwc2_read_hprt0(hsotg);
dev_dbg(hsotg->dev, "Resume: HPRT0=%0x\n", hprt0);
hprt0 &= ~HPRT0_RES;
dwc2_writel(hsotg, hprt0, HPRT0);
dev_dbg(hsotg->dev, "Clear Resume: HPRT0=%0x\n",
dwc2_readl(hsotg, HPRT0));
dwc2_hcd_rem_wakeup(hsotg);
hsotg->bus_suspended = false;
/* Change to L0 state */
hsotg->lx_state = DWC2_L0;
}
static int dwc2_host_is_b_hnp_enabled(struct dwc2_hsotg *hsotg)
{
struct usb_hcd *hcd = dwc2_hsotg_to_hcd(hsotg);
return hcd->self.b_hnp_enable;
}
/**
* dwc2_port_suspend() - Put controller in suspend mode for host.
*
* @hsotg: Programming view of the DWC_otg controller
* @windex: The control request wIndex field
*
* Return: non-zero if failed to enter suspend mode for host.
*
* This function is for entering Host mode suspend.
* Must NOT be called with interrupt disabled or spinlock held.
*/
int dwc2_port_suspend(struct dwc2_hsotg *hsotg, u16 windex)
{
unsigned long flags;
u32 pcgctl;
u32 gotgctl;
int ret = 0;
dev_dbg(hsotg->dev, "%s()\n", __func__);
spin_lock_irqsave(&hsotg->lock, flags);
if (windex == hsotg->otg_port && dwc2_host_is_b_hnp_enabled(hsotg)) {
gotgctl = dwc2_readl(hsotg, GOTGCTL);
gotgctl |= GOTGCTL_HSTSETHNPEN;
dwc2_writel(hsotg, gotgctl, GOTGCTL);
hsotg->op_state = OTG_STATE_A_SUSPEND;
}
switch (hsotg->params.power_down) {
case DWC2_POWER_DOWN_PARAM_PARTIAL:
ret = dwc2_enter_partial_power_down(hsotg);
if (ret)
dev_err(hsotg->dev,
"enter partial_power_down failed.\n");
break;
case DWC2_POWER_DOWN_PARAM_HIBERNATION:
/*
* Release and re-acquire the spinlock because the
* "dwc2_host_enter_hibernation()" path contains spinlock
* logic that prevents any IRQ from being serviced while
* hibernation is being entered.
*/
spin_unlock_irqrestore(&hsotg->lock, flags);
ret = dwc2_enter_hibernation(hsotg, 1);
if (ret)
dev_err(hsotg->dev, "enter hibernation failed.\n");
spin_lock_irqsave(&hsotg->lock, flags);
break;
case DWC2_POWER_DOWN_PARAM_NONE:
/*
* If neither hibernation nor partial power down is supported,
* clock gating is used to save power.
*/
if (!hsotg->params.no_clock_gating)
dwc2_host_enter_clock_gating(hsotg);
break;
}
/* For HNP the bus must be suspended for at least 200ms */
if (dwc2_host_is_b_hnp_enabled(hsotg)) {
pcgctl = dwc2_readl(hsotg, PCGCTL);
pcgctl &= ~PCGCTL_STOPPCLK;
dwc2_writel(hsotg, pcgctl, PCGCTL);
spin_unlock_irqrestore(&hsotg->lock, flags);
msleep(200);
} else {
spin_unlock_irqrestore(&hsotg->lock, flags);
}
return ret;
}
/**
* dwc2_port_resume() - Exit controller from suspend mode for host.
*
* @hsotg: Programming view of the DWC_otg controller
*
* Return: non-zero if failed to exit suspend mode for host.
*
* This function is for exiting Host mode suspend.
* Must NOT be called with interrupt disabled or spinlock held.
*/
int dwc2_port_resume(struct dwc2_hsotg *hsotg)
{
unsigned long flags;
int ret = 0;
spin_lock_irqsave(&hsotg->lock, flags);
switch (hsotg->params.power_down) {
case DWC2_POWER_DOWN_PARAM_PARTIAL:
ret = dwc2_exit_partial_power_down(hsotg, 0, true);
if (ret)
dev_err(hsotg->dev,
"exit partial_power_down failed.\n");
break;
case DWC2_POWER_DOWN_PARAM_HIBERNATION:
/* Exit host hibernation. */
ret = dwc2_exit_hibernation(hsotg, 0, 0, 1);
if (ret)
dev_err(hsotg->dev, "exit hibernation failed.\n");
break;
case DWC2_POWER_DOWN_PARAM_NONE:
/*
* If neither hibernation nor partial power down is supported,
* port resume is done using the clock gating programming flow.
*/
spin_unlock_irqrestore(&hsotg->lock, flags);
dwc2_host_exit_clock_gating(hsotg, 0);
spin_lock_irqsave(&hsotg->lock, flags);
break;
}
spin_unlock_irqrestore(&hsotg->lock, flags);
return ret;
}
/* Handles hub class-specific requests */
static int dwc2_hcd_hub_control(struct dwc2_hsotg *hsotg, u16 typereq,
u16 wvalue, u16 windex, char *buf, u16 wlength)
{
struct usb_hub_descriptor *hub_desc;
int retval = 0;
u32 hprt0;
u32 port_status;
u32 speed;
u32 pcgctl;
u32 pwr;
switch (typereq) {
case ClearHubFeature:
dev_dbg(hsotg->dev, "ClearHubFeature %1xh\n", wvalue);
switch (wvalue) {
case C_HUB_LOCAL_POWER:
case C_HUB_OVER_CURRENT:
/* Nothing required here */
break;
default:
retval = -EINVAL;
dev_err(hsotg->dev,
"ClearHubFeature request %1xh unknown\n",
wvalue);
}
break;
case ClearPortFeature:
if (wvalue != USB_PORT_FEAT_L1)
if (!windex || windex > 1)
goto error;
switch (wvalue) {
case USB_PORT_FEAT_ENABLE:
dev_dbg(hsotg->dev,
"ClearPortFeature USB_PORT_FEAT_ENABLE\n");
hprt0 = dwc2_read_hprt0(hsotg);
hprt0 |= HPRT0_ENA;
dwc2_writel(hsotg, hprt0, HPRT0);
break;
case USB_PORT_FEAT_SUSPEND:
dev_dbg(hsotg->dev,
"ClearPortFeature USB_PORT_FEAT_SUSPEND\n");
if (hsotg->bus_suspended)
retval = dwc2_port_resume(hsotg);
break;
case USB_PORT_FEAT_POWER:
dev_dbg(hsotg->dev,
"ClearPortFeature USB_PORT_FEAT_POWER\n");
hprt0 = dwc2_read_hprt0(hsotg);
pwr = hprt0 & HPRT0_PWR;
hprt0 &= ~HPRT0_PWR;
dwc2_writel(hsotg, hprt0, HPRT0);
if (pwr)
dwc2_vbus_supply_exit(hsotg);
break;
case USB_PORT_FEAT_INDICATOR:
dev_dbg(hsotg->dev,
"ClearPortFeature USB_PORT_FEAT_INDICATOR\n");
/* Port indicator not supported */
break;
case USB_PORT_FEAT_C_CONNECTION:
/*
* Clears driver's internal Connect Status Change flag
*/
dev_dbg(hsotg->dev,
"ClearPortFeature USB_PORT_FEAT_C_CONNECTION\n");
hsotg->flags.b.port_connect_status_change = 0;
break;
case USB_PORT_FEAT_C_RESET:
/* Clears driver's internal Port Reset Change flag */
dev_dbg(hsotg->dev,
"ClearPortFeature USB_PORT_FEAT_C_RESET\n");
hsotg->flags.b.port_reset_change = 0;
break;
case USB_PORT_FEAT_C_ENABLE:
/*
* Clears the driver's internal Port Enable/Disable
* Change flag
*/
dev_dbg(hsotg->dev,
"ClearPortFeature USB_PORT_FEAT_C_ENABLE\n");
hsotg->flags.b.port_enable_change = 0;
break;
case USB_PORT_FEAT_C_SUSPEND:
/*
* Clears the driver's internal Port Suspend Change
* flag, which is set when resume signaling on the host
* port is complete
*/
dev_dbg(hsotg->dev,
"ClearPortFeature USB_PORT_FEAT_C_SUSPEND\n");
hsotg->flags.b.port_suspend_change = 0;
break;
case USB_PORT_FEAT_C_PORT_L1:
dev_dbg(hsotg->dev,
"ClearPortFeature USB_PORT_FEAT_C_PORT_L1\n");
hsotg->flags.b.port_l1_change = 0;
break;
case USB_PORT_FEAT_C_OVER_CURRENT:
dev_dbg(hsotg->dev,
"ClearPortFeature USB_PORT_FEAT_C_OVER_CURRENT\n");
hsotg->flags.b.port_over_current_change = 0;
break;
default:
retval = -EINVAL;
dev_err(hsotg->dev,
"ClearPortFeature request %1xh unknown or unsupported\n",
wvalue);
}
break;
case GetHubDescriptor:
dev_dbg(hsotg->dev, "GetHubDescriptor\n");
hub_desc = (struct usb_hub_descriptor *)buf;
hub_desc->bDescLength = 9;
hub_desc->bDescriptorType = USB_DT_HUB;
hub_desc->bNbrPorts = 1;
hub_desc->wHubCharacteristics =
cpu_to_le16(HUB_CHAR_COMMON_LPSM |
HUB_CHAR_INDV_PORT_OCPM);
hub_desc->bPwrOn2PwrGood = 1;
hub_desc->bHubContrCurrent = 0;
hub_desc->u.hs.DeviceRemovable[0] = 0;
hub_desc->u.hs.DeviceRemovable[1] = 0xff;
break;
case GetHubStatus:
dev_dbg(hsotg->dev, "GetHubStatus\n");
memset(buf, 0, 4);
break;
case GetPortStatus:
dev_vdbg(hsotg->dev,
"GetPortStatus wIndex=0x%04x flags=0x%08x\n", windex,
hsotg->flags.d32);
if (!windex || windex > 1)
goto error;
port_status = 0;
if (hsotg->flags.b.port_connect_status_change)
port_status |= USB_PORT_STAT_C_CONNECTION << 16;
if (hsotg->flags.b.port_enable_change)
port_status |= USB_PORT_STAT_C_ENABLE << 16;
if (hsotg->flags.b.port_suspend_change)
port_status |= USB_PORT_STAT_C_SUSPEND << 16;
if (hsotg->flags.b.port_l1_change)
port_status |= USB_PORT_STAT_C_L1 << 16;
if (hsotg->flags.b.port_reset_change)
port_status |= USB_PORT_STAT_C_RESET << 16;
if (hsotg->flags.b.port_over_current_change) {
dev_warn(hsotg->dev, "Overcurrent change detected\n");
port_status |= USB_PORT_STAT_C_OVERCURRENT << 16;
}
if (!hsotg->flags.b.port_connect_status) {
/*
* The port is disconnected, which means the core is
* either in device mode or it soon will be. Just
* return 0's for the remainder of the port status
* since the port register can't be read if the core
* is in device mode.
*/
*(__le32 *)buf = cpu_to_le32(port_status);
break;
}
hprt0 = dwc2_readl(hsotg, HPRT0);
dev_vdbg(hsotg->dev, " HPRT0: 0x%08x\n", hprt0);
if (hprt0 & HPRT0_CONNSTS)
port_status |= USB_PORT_STAT_CONNECTION;
if (hprt0 & HPRT0_ENA)
port_status |= USB_PORT_STAT_ENABLE;
if (hprt0 & HPRT0_SUSP)
port_status |= USB_PORT_STAT_SUSPEND;
if (hprt0 & HPRT0_OVRCURRACT)
port_status |= USB_PORT_STAT_OVERCURRENT;
if (hprt0 & HPRT0_RST)
port_status |= USB_PORT_STAT_RESET;
if (hprt0 & HPRT0_PWR)
port_status |= USB_PORT_STAT_POWER;
speed = (hprt0 & HPRT0_SPD_MASK) >> HPRT0_SPD_SHIFT;
if (speed == HPRT0_SPD_HIGH_SPEED)
port_status |= USB_PORT_STAT_HIGH_SPEED;
else if (speed == HPRT0_SPD_LOW_SPEED)
port_status |= USB_PORT_STAT_LOW_SPEED;
if (hprt0 & HPRT0_TSTCTL_MASK)
port_status |= USB_PORT_STAT_TEST;
/* USB_PORT_FEAT_INDICATOR unsupported; always reports 0 */
if (hsotg->params.dma_desc_fs_enable) {
/*
* Enable descriptor DMA only if a full speed
* device is connected.
*/
if (hsotg->new_connection &&
((port_status &
(USB_PORT_STAT_CONNECTION |
USB_PORT_STAT_HIGH_SPEED |
USB_PORT_STAT_LOW_SPEED)) ==
USB_PORT_STAT_CONNECTION)) {
u32 hcfg;
dev_info(hsotg->dev, "Enabling descriptor DMA mode\n");
hsotg->params.dma_desc_enable = true;
hcfg = dwc2_readl(hsotg, HCFG);
hcfg |= HCFG_DESCDMA;
dwc2_writel(hsotg, hcfg, HCFG);
hsotg->new_connection = false;
}
}
dev_vdbg(hsotg->dev, "port_status=%08x\n", port_status);
*(__le32 *)buf = cpu_to_le32(port_status);
break;
case SetHubFeature:
dev_dbg(hsotg->dev, "SetHubFeature\n");
/* No HUB features supported */
break;
case SetPortFeature:
dev_dbg(hsotg->dev, "SetPortFeature\n");
if (wvalue != USB_PORT_FEAT_TEST && (!windex || windex > 1))
goto error;
if (!hsotg->flags.b.port_connect_status) {
/*
* The port is disconnected, which means the core is
* either in device mode or it soon will be. Just
* return without doing anything since the port
* register can't be written if the core is in device
* mode.
*/
break;
}
switch (wvalue) {
case USB_PORT_FEAT_SUSPEND:
dev_dbg(hsotg->dev,
"SetPortFeature - USB_PORT_FEAT_SUSPEND\n");
if (windex != hsotg->otg_port)
goto error;
if (!hsotg->bus_suspended)
retval = dwc2_port_suspend(hsotg, windex);
break;
case USB_PORT_FEAT_POWER:
dev_dbg(hsotg->dev,
"SetPortFeature - USB_PORT_FEAT_POWER\n");
hprt0 = dwc2_read_hprt0(hsotg);
pwr = hprt0 & HPRT0_PWR;
hprt0 |= HPRT0_PWR;
dwc2_writel(hsotg, hprt0, HPRT0);
if (!pwr)
dwc2_vbus_supply_init(hsotg);
break;
case USB_PORT_FEAT_RESET:
dev_dbg(hsotg->dev,
"SetPortFeature - USB_PORT_FEAT_RESET\n");
hprt0 = dwc2_read_hprt0(hsotg);
if (hsotg->hibernated) {
retval = dwc2_exit_hibernation(hsotg, 0, 1, 1);
if (retval)
dev_err(hsotg->dev,
"exit hibernation failed\n");
}
if (hsotg->in_ppd) {
retval = dwc2_exit_partial_power_down(hsotg, 1,
true);
if (retval)
dev_err(hsotg->dev,
"exit partial_power_down failed\n");
}
if (hsotg->params.power_down ==
DWC2_POWER_DOWN_PARAM_NONE && hsotg->bus_suspended)
dwc2_host_exit_clock_gating(hsotg, 0);
pcgctl = dwc2_readl(hsotg, PCGCTL);
pcgctl &= ~(PCGCTL_ENBL_SLEEP_GATING | PCGCTL_STOPPCLK);
dwc2_writel(hsotg, pcgctl, PCGCTL);
/* ??? Original driver does this */
dwc2_writel(hsotg, 0, PCGCTL);
hprt0 = dwc2_read_hprt0(hsotg);
pwr = hprt0 & HPRT0_PWR;
/* Clear suspend bit if resetting from suspend state */
hprt0 &= ~HPRT0_SUSP;
/*
* When acting as B-Host, the port reset bit is set in the
* Start HCD callback function, so that the reset is started
* within 1 ms of the HNP success interrupt.
*/
if (!dwc2_hcd_is_b_host(hsotg)) {
hprt0 |= HPRT0_PWR | HPRT0_RST;
dev_dbg(hsotg->dev,
"In host mode, hprt0=%08x\n", hprt0);
dwc2_writel(hsotg, hprt0, HPRT0);
if (!pwr)
dwc2_vbus_supply_init(hsotg);
}
/* Clear reset bit in 10ms (FS/LS) or 50ms (HS) */
msleep(50);
hprt0 &= ~HPRT0_RST;
dwc2_writel(hsotg, hprt0, HPRT0);
hsotg->lx_state = DWC2_L0; /* Now back to On state */
break;
case USB_PORT_FEAT_INDICATOR:
dev_dbg(hsotg->dev,
"SetPortFeature - USB_PORT_FEAT_INDICATOR\n");
/* Not supported */
break;
case USB_PORT_FEAT_TEST:
hprt0 = dwc2_read_hprt0(hsotg);
dev_dbg(hsotg->dev,
"SetPortFeature - USB_PORT_FEAT_TEST\n");
hprt0 &= ~HPRT0_TSTCTL_MASK;
hprt0 |= (windex >> 8) << HPRT0_TSTCTL_SHIFT;
dwc2_writel(hsotg, hprt0, HPRT0);
break;
default:
retval = -EINVAL;
dev_err(hsotg->dev,
"SetPortFeature %1xh unknown or unsupported\n",
wvalue);
break;
}
break;
default:
error:
retval = -EINVAL;
dev_dbg(hsotg->dev,
"Unknown hub control request: %1xh wIndex: %1xh wValue: %1xh\n",
typereq, windex, wvalue);
break;
}
return retval;
}
static int dwc2_hcd_is_status_changed(struct dwc2_hsotg *hsotg, int port)
{
int retval;
if (port != 1)
return -EINVAL;
retval = (hsotg->flags.b.port_connect_status_change ||
hsotg->flags.b.port_reset_change ||
hsotg->flags.b.port_enable_change ||
hsotg->flags.b.port_suspend_change ||
hsotg->flags.b.port_over_current_change);
if (retval) {
dev_dbg(hsotg->dev,
"DWC OTG HCD HUB STATUS DATA: Root port status changed\n");
dev_dbg(hsotg->dev, " port_connect_status_change: %d\n",
hsotg->flags.b.port_connect_status_change);
dev_dbg(hsotg->dev, " port_reset_change: %d\n",
hsotg->flags.b.port_reset_change);
dev_dbg(hsotg->dev, " port_enable_change: %d\n",
hsotg->flags.b.port_enable_change);
dev_dbg(hsotg->dev, " port_suspend_change: %d\n",
hsotg->flags.b.port_suspend_change);
dev_dbg(hsotg->dev, " port_over_current_change: %d\n",
hsotg->flags.b.port_over_current_change);
}
return retval;
}
int dwc2_hcd_get_frame_number(struct dwc2_hsotg *hsotg)
{
u32 hfnum = dwc2_readl(hsotg, HFNUM);
#ifdef DWC2_DEBUG_SOF
dev_vdbg(hsotg->dev, "DWC OTG HCD GET FRAME NUMBER %d\n",
(hfnum & HFNUM_FRNUM_MASK) >> HFNUM_FRNUM_SHIFT);
#endif
return (hfnum & HFNUM_FRNUM_MASK) >> HFNUM_FRNUM_SHIFT;
}
int dwc2_hcd_get_future_frame_number(struct dwc2_hsotg *hsotg, int us)
{
u32 hprt = dwc2_readl(hsotg, HPRT0);
u32 hfir = dwc2_readl(hsotg, HFIR);
u32 hfnum = dwc2_readl(hsotg, HFNUM);
unsigned int us_per_frame;
unsigned int frame_number;
unsigned int remaining;
unsigned int interval;
unsigned int phy_clks;
/* High speed has 125 us per (micro) frame; others are 1 ms per */
us_per_frame = (hprt & HPRT0_SPD_MASK) ? 1000 : 125;
/* Extract fields */
frame_number = (hfnum & HFNUM_FRNUM_MASK) >> HFNUM_FRNUM_SHIFT;
remaining = (hfnum & HFNUM_FRREM_MASK) >> HFNUM_FRREM_SHIFT;
interval = (hfir & HFIR_FRINT_MASK) >> HFIR_FRINT_SHIFT;
/*
* Number of phy clocks since the last tick of the frame number after
* "us" has passed.
*/
phy_clks = (interval - remaining) +
DIV_ROUND_UP(interval * us, us_per_frame);
return dwc2_frame_num_inc(frame_number, phy_clks / interval);
}
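/*
* Worked example (illustrative, hypothetical register values): on a
* high-speed port us_per_frame = 125. With interval = 7500 PHY clocks,
* remaining = 2500 and us = 250, phy_clks = (7500 - 2500) +
* DIV_ROUND_UP(7500 * 250, 125) = 5000 + 15000 = 20000, so the frame
* number returned is the current one advanced by 20000 / 7500 = 2
* (micro)frames.
*/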
int dwc2_hcd_is_b_host(struct dwc2_hsotg *hsotg)
{
return hsotg->op_state == OTG_STATE_B_HOST;
}
static struct dwc2_hcd_urb *dwc2_hcd_urb_alloc(struct dwc2_hsotg *hsotg,
int iso_desc_count,
gfp_t mem_flags)
{
struct dwc2_hcd_urb *urb;
urb = kzalloc(struct_size(urb, iso_descs, iso_desc_count), mem_flags);
if (urb)
urb->packet_count = iso_desc_count;
return urb;
}
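/*
* Note on the allocation above (general kernel API, not dwc2-specific):
* struct_size(urb, iso_descs, iso_desc_count) computes
* sizeof(*urb) + iso_desc_count * sizeof(urb->iso_descs[0]) with
* overflow checking, so the flexible iso_descs[] array is allocated in
* the same block as the urb itself.
*/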
static void dwc2_hcd_urb_set_pipeinfo(struct dwc2_hsotg *hsotg,
struct dwc2_hcd_urb *urb, u8 dev_addr,
u8 ep_num, u8 ep_type, u8 ep_dir,
u16 maxp, u16 maxp_mult)
{
if (dbg_perio() ||
ep_type == USB_ENDPOINT_XFER_BULK ||
ep_type == USB_ENDPOINT_XFER_CONTROL)
dev_vdbg(hsotg->dev,
"addr=%d, ep_num=%d, ep_dir=%1x, ep_type=%1x, maxp=%d (%d mult)\n",
dev_addr, ep_num, ep_dir, ep_type, maxp, maxp_mult);
urb->pipe_info.dev_addr = dev_addr;
urb->pipe_info.ep_num = ep_num;
urb->pipe_info.pipe_type = ep_type;
urb->pipe_info.pipe_dir = ep_dir;
urb->pipe_info.maxp = maxp;
urb->pipe_info.maxp_mult = maxp_mult;
}
/*
* NOTE: This function will be removed once the peripheral controller code
* is integrated and the driver is stable
*/
void dwc2_hcd_dump_state(struct dwc2_hsotg *hsotg)
{
#ifdef DEBUG
struct dwc2_host_chan *chan;
struct dwc2_hcd_urb *urb;
struct dwc2_qtd *qtd;
int num_channels;
u32 np_tx_status;
u32 p_tx_status;
int i;
num_channels = hsotg->params.host_channels;
dev_dbg(hsotg->dev, "\n");
dev_dbg(hsotg->dev,
"************************************************************\n");
dev_dbg(hsotg->dev, "HCD State:\n");
dev_dbg(hsotg->dev, " Num channels: %d\n", num_channels);
for (i = 0; i < num_channels; i++) {
chan = hsotg->hc_ptr_array[i];
dev_dbg(hsotg->dev, " Channel %d:\n", i);
dev_dbg(hsotg->dev,
" dev_addr: %d, ep_num: %d, ep_is_in: %d\n",
chan->dev_addr, chan->ep_num, chan->ep_is_in);
dev_dbg(hsotg->dev, " speed: %d\n", chan->speed);
dev_dbg(hsotg->dev, " ep_type: %d\n", chan->ep_type);
dev_dbg(hsotg->dev, " max_packet: %d\n", chan->max_packet);
dev_dbg(hsotg->dev, " data_pid_start: %d\n",
chan->data_pid_start);
dev_dbg(hsotg->dev, " multi_count: %d\n", chan->multi_count);
dev_dbg(hsotg->dev, " xfer_started: %d\n",
chan->xfer_started);
dev_dbg(hsotg->dev, " xfer_buf: %p\n", chan->xfer_buf);
dev_dbg(hsotg->dev, " xfer_dma: %08lx\n",
(unsigned long)chan->xfer_dma);
dev_dbg(hsotg->dev, " xfer_len: %d\n", chan->xfer_len);
dev_dbg(hsotg->dev, " xfer_count: %d\n", chan->xfer_count);
dev_dbg(hsotg->dev, " halt_on_queue: %d\n",
chan->halt_on_queue);
dev_dbg(hsotg->dev, " halt_pending: %d\n",
chan->halt_pending);
dev_dbg(hsotg->dev, " halt_status: %d\n", chan->halt_status);
dev_dbg(hsotg->dev, " do_split: %d\n", chan->do_split);
dev_dbg(hsotg->dev, " complete_split: %d\n",
chan->complete_split);
dev_dbg(hsotg->dev, " hub_addr: %d\n", chan->hub_addr);
dev_dbg(hsotg->dev, " hub_port: %d\n", chan->hub_port);
dev_dbg(hsotg->dev, " xact_pos: %d\n", chan->xact_pos);
dev_dbg(hsotg->dev, " requests: %d\n", chan->requests);
dev_dbg(hsotg->dev, " qh: %p\n", chan->qh);
if (chan->xfer_started) {
u32 hfnum, hcchar, hctsiz, hcint, hcintmsk;
hfnum = dwc2_readl(hsotg, HFNUM);
hcchar = dwc2_readl(hsotg, HCCHAR(i));
hctsiz = dwc2_readl(hsotg, HCTSIZ(i));
hcint = dwc2_readl(hsotg, HCINT(i));
hcintmsk = dwc2_readl(hsotg, HCINTMSK(i));
dev_dbg(hsotg->dev, " hfnum: 0x%08x\n", hfnum);
dev_dbg(hsotg->dev, " hcchar: 0x%08x\n", hcchar);
dev_dbg(hsotg->dev, " hctsiz: 0x%08x\n", hctsiz);
dev_dbg(hsotg->dev, " hcint: 0x%08x\n", hcint);
dev_dbg(hsotg->dev, " hcintmsk: 0x%08x\n", hcintmsk);
}
if (!(chan->xfer_started && chan->qh))
continue;
list_for_each_entry(qtd, &chan->qh->qtd_list, qtd_list_entry) {
if (!qtd->in_process)
break;
urb = qtd->urb;
dev_dbg(hsotg->dev, " URB Info:\n");
dev_dbg(hsotg->dev, " qtd: %p, urb: %p\n",
qtd, urb);
if (urb) {
dev_dbg(hsotg->dev,
" Dev: %d, EP: %d %s\n",
dwc2_hcd_get_dev_addr(&urb->pipe_info),
dwc2_hcd_get_ep_num(&urb->pipe_info),
dwc2_hcd_is_pipe_in(&urb->pipe_info) ?
"IN" : "OUT");
dev_dbg(hsotg->dev,
" Max packet size: %d (%d mult)\n",
dwc2_hcd_get_maxp(&urb->pipe_info),
dwc2_hcd_get_maxp_mult(&urb->pipe_info));
dev_dbg(hsotg->dev,
" transfer_buffer: %p\n",
urb->buf);
dev_dbg(hsotg->dev,
" transfer_dma: %08lx\n",
(unsigned long)urb->dma);
dev_dbg(hsotg->dev,
" transfer_buffer_length: %d\n",
urb->length);
dev_dbg(hsotg->dev, " actual_length: %d\n",
urb->actual_length);
}
}
}
dev_dbg(hsotg->dev, " non_periodic_channels: %d\n",
hsotg->non_periodic_channels);
dev_dbg(hsotg->dev, " periodic_channels: %d\n",
hsotg->periodic_channels);
dev_dbg(hsotg->dev, " periodic_usecs: %d\n", hsotg->periodic_usecs);
np_tx_status = dwc2_readl(hsotg, GNPTXSTS);
dev_dbg(hsotg->dev, " NP Tx Req Queue Space Avail: %d\n",
(np_tx_status & TXSTS_QSPCAVAIL_MASK) >> TXSTS_QSPCAVAIL_SHIFT);
dev_dbg(hsotg->dev, " NP Tx FIFO Space Avail: %d\n",
(np_tx_status & TXSTS_FSPCAVAIL_MASK) >> TXSTS_FSPCAVAIL_SHIFT);
p_tx_status = dwc2_readl(hsotg, HPTXSTS);
dev_dbg(hsotg->dev, " P Tx Req Queue Space Avail: %d\n",
(p_tx_status & TXSTS_QSPCAVAIL_MASK) >> TXSTS_QSPCAVAIL_SHIFT);
dev_dbg(hsotg->dev, " P Tx FIFO Space Avail: %d\n",
(p_tx_status & TXSTS_FSPCAVAIL_MASK) >> TXSTS_FSPCAVAIL_SHIFT);
dwc2_dump_global_registers(hsotg);
dwc2_dump_host_registers(hsotg);
dev_dbg(hsotg->dev,
"************************************************************\n");
dev_dbg(hsotg->dev, "\n");
#endif
}
struct wrapper_priv_data {
struct dwc2_hsotg *hsotg;
};
/* Gets the dwc2_hsotg from a usb_hcd */
static struct dwc2_hsotg *dwc2_hcd_to_hsotg(struct usb_hcd *hcd)
{
struct wrapper_priv_data *p;
p = (struct wrapper_priv_data *)&hcd->hcd_priv;
return p->hsotg;
}
/**
* dwc2_host_get_tt_info() - Get the dwc2_tt associated with context
*
* This will get the dwc2_tt structure (and ttport) associated with the given
* context (which is really just a struct urb pointer).
*
* The first time this is called for a given TT we allocate memory for our
* structure. When everyone is done and has called dwc2_host_put_tt_info()
* then the refcount for the structure will go to 0 and we'll free it.
*
* @hsotg: The HCD state structure for the DWC OTG controller.
* @context: The priv pointer from a struct dwc2_hcd_urb.
* @mem_flags: Flags for allocating memory.
* @ttport: We'll return this device's port number here. That's used to
* reference into the bitmap if we're on a multi_tt hub.
*
* Return: a pointer to a struct dwc2_tt. Don't forget to call
* dwc2_host_put_tt_info()! Returns NULL upon memory alloc failure.
*/
struct dwc2_tt *dwc2_host_get_tt_info(struct dwc2_hsotg *hsotg, void *context,
gfp_t mem_flags, int *ttport)
{
struct urb *urb = context;
struct dwc2_tt *dwc_tt = NULL;
if (urb->dev->tt) {
*ttport = urb->dev->ttport;
dwc_tt = urb->dev->tt->hcpriv;
if (!dwc_tt) {
size_t bitmap_size;
/*
* For single_tt we need one schedule. For multi_tt
* we need one per port.
*/
bitmap_size = DWC2_ELEMENTS_PER_LS_BITMAP *
sizeof(dwc_tt->periodic_bitmaps[0]);
if (urb->dev->tt->multi)
bitmap_size *= urb->dev->tt->hub->maxchild;
dwc_tt = kzalloc(sizeof(*dwc_tt) + bitmap_size,
mem_flags);
if (!dwc_tt)
return NULL;
dwc_tt->usb_tt = urb->dev->tt;
dwc_tt->usb_tt->hcpriv = dwc_tt;
}
dwc_tt->refcount++;
}
return dwc_tt;
}
/**
* dwc2_host_put_tt_info() - Put the dwc2_tt from dwc2_host_get_tt_info()
*
* Frees resources allocated by dwc2_host_get_tt_info() if all current holders
* of the structure are done.
*
* It's OK to call this with NULL.
*
* @hsotg: The HCD state structure for the DWC OTG controller.
* @dwc_tt: The pointer returned by dwc2_host_get_tt_info.
*/
void dwc2_host_put_tt_info(struct dwc2_hsotg *hsotg, struct dwc2_tt *dwc_tt)
{
/* Model kfree and make put of NULL a no-op */
if (!dwc_tt)
return;
WARN_ON(dwc_tt->refcount < 1);
dwc_tt->refcount--;
if (!dwc_tt->refcount) {
dwc_tt->usb_tt->hcpriv = NULL;
kfree(dwc_tt);
}
}
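#if 0
/*
* Usage sketch (hypothetical caller, not compiled): every non-NULL
* pointer returned by dwc2_host_get_tt_info() must eventually be
* balanced by a dwc2_host_put_tt_info(), which frees the structure
* once the final reference is dropped. Passing NULL to the put side
* is safe.
*/
int ttport;
struct dwc2_tt *dwc_tt;
dwc_tt = dwc2_host_get_tt_info(hsotg, urb, GFP_ATOMIC, &ttport);
/* ... schedule against dwc_tt->periodic_bitmaps ... */
dwc2_host_put_tt_info(hsotg, dwc_tt);
#endif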
int dwc2_host_get_speed(struct dwc2_hsotg *hsotg, void *context)
{
struct urb *urb = context;
return urb->dev->speed;
}
static void dwc2_allocate_bus_bandwidth(struct usb_hcd *hcd, u16 bw,
struct urb *urb)
{
struct usb_bus *bus = hcd_to_bus(hcd);
if (urb->interval)
bus->bandwidth_allocated += bw / urb->interval;
if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS)
bus->bandwidth_isoc_reqs++;
else
bus->bandwidth_int_reqs++;
}
static void dwc2_free_bus_bandwidth(struct usb_hcd *hcd, u16 bw,
struct urb *urb)
{
struct usb_bus *bus = hcd_to_bus(hcd);
if (urb->interval)
bus->bandwidth_allocated -= bw / urb->interval;
if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS)
bus->bandwidth_isoc_reqs--;
else
bus->bandwidth_int_reqs--;
}
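/*
* Worked example (illustrative, hypothetical values): an interrupt URB
* with bw = 300 and urb->interval = 8 adds 300 / 8 = 37 (integer
* division) to bus->bandwidth_allocated on allocation and removes the
* same amount on free, keeping the two paths symmetric.
*/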
/*
* Sets the final status of an URB and returns it to the upper layer. Any
* required cleanup of the URB is performed.
*
* Must be called with interrupt disabled and spinlock held
*/
void dwc2_host_complete(struct dwc2_hsotg *hsotg, struct dwc2_qtd *qtd,
int status)
{
struct urb *urb;
int i;
if (!qtd) {
dev_dbg(hsotg->dev, "## %s: qtd is NULL ##\n", __func__);
return;
}
if (!qtd->urb) {
dev_dbg(hsotg->dev, "## %s: qtd->urb is NULL ##\n", __func__);
return;
}
urb = qtd->urb->priv;
if (!urb) {
dev_dbg(hsotg->dev, "## %s: urb->priv is NULL ##\n", __func__);
return;
}
urb->actual_length = dwc2_hcd_urb_get_actual_length(qtd->urb);
if (dbg_urb(urb))
dev_vdbg(hsotg->dev,
"%s: urb %p device %d ep %d-%s status %d actual %d\n",
__func__, urb, usb_pipedevice(urb->pipe),
usb_pipeendpoint(urb->pipe),
usb_pipein(urb->pipe) ? "IN" : "OUT", status,
urb->actual_length);
if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS) {
urb->error_count = dwc2_hcd_urb_get_error_count(qtd->urb);
for (i = 0; i < urb->number_of_packets; ++i) {
urb->iso_frame_desc[i].actual_length =
dwc2_hcd_urb_get_iso_desc_actual_length(
qtd->urb, i);
urb->iso_frame_desc[i].status =
dwc2_hcd_urb_get_iso_desc_status(qtd->urb, i);
}
}
if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS && dbg_perio()) {
for (i = 0; i < urb->number_of_packets; i++)
dev_vdbg(hsotg->dev, " ISO Desc %d status %d\n",
i, urb->iso_frame_desc[i].status);
}
urb->status = status;
if (!status) {
if ((urb->transfer_flags & URB_SHORT_NOT_OK) &&
urb->actual_length < urb->transfer_buffer_length)
urb->status = -EREMOTEIO;
}
if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS ||
usb_pipetype(urb->pipe) == PIPE_INTERRUPT) {
struct usb_host_endpoint *ep = urb->ep;
if (ep)
dwc2_free_bus_bandwidth(dwc2_hsotg_to_hcd(hsotg),
dwc2_hcd_get_ep_bandwidth(hsotg, ep),
urb);
}
usb_hcd_unlink_urb_from_ep(dwc2_hsotg_to_hcd(hsotg), urb);
urb->hcpriv = NULL;
kfree(qtd->urb);
qtd->urb = NULL;
usb_hcd_giveback_urb(dwc2_hsotg_to_hcd(hsotg), urb, status);
}
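/*
* Example of the short-packet handling above (illustrative): a 512-byte
* IN URB that completes successfully with only 256 bytes transferred is
* given back with status 0 unless URB_SHORT_NOT_OK was set, in which
* case the status is overridden to -EREMOTEIO before the giveback.
*/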
/*
* Work queue function for starting the HCD when A-Cable is connected
*/
static void dwc2_hcd_start_func(struct work_struct *work)
{
struct dwc2_hsotg *hsotg = container_of(work, struct dwc2_hsotg,
start_work.work);
dev_dbg(hsotg->dev, "%s() %p\n", __func__, hsotg);
dwc2_host_start(hsotg);
}
/*
* Reset work queue function
*/
static void dwc2_hcd_reset_func(struct work_struct *work)
{
struct dwc2_hsotg *hsotg = container_of(work, struct dwc2_hsotg,
reset_work.work);
unsigned long flags;
u32 hprt0;
dev_dbg(hsotg->dev, "USB RESET function called\n");
spin_lock_irqsave(&hsotg->lock, flags);
hprt0 = dwc2_read_hprt0(hsotg);
hprt0 &= ~HPRT0_RST;
dwc2_writel(hsotg, hprt0, HPRT0);
hsotg->flags.b.port_reset_change = 1;
spin_unlock_irqrestore(&hsotg->lock, flags);
}
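/*
 * PHY reset work queue function
 */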
static void dwc2_hcd_phy_reset_func(struct work_struct *work)
{
struct dwc2_hsotg *hsotg = container_of(work, struct dwc2_hsotg,
phy_reset_work);
int ret;
ret = phy_reset(hsotg->phy);
if (ret)
dev_warn(hsotg->dev, "PHY reset failed\n");
}
/*
* =========================================================================
* Linux HC Driver Functions
* =========================================================================
*/
/*
* Initializes the DWC_otg controller and its root hub and prepares it for host
* mode operation. Activates the root port. Returns 0 on success and a negative
* error code on failure.
*/
static int _dwc2_hcd_start(struct usb_hcd *hcd)
{
struct dwc2_hsotg *hsotg = dwc2_hcd_to_hsotg(hcd);
struct usb_bus *bus = hcd_to_bus(hcd);
unsigned long flags;
u32 hprt0;
int ret;
dev_dbg(hsotg->dev, "DWC OTG HCD START\n");
spin_lock_irqsave(&hsotg->lock, flags);
hsotg->lx_state = DWC2_L0;
hcd->state = HC_STATE_RUNNING;
set_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);
if (dwc2_is_device_mode(hsotg)) {
spin_unlock_irqrestore(&hsotg->lock, flags);
return 0; /* why 0 ?? */
}
dwc2_hcd_reinit(hsotg);
hprt0 = dwc2_read_hprt0(hsotg);
/* Has vbus power been turned on in dwc2_core_host_init ? */
if (hprt0 & HPRT0_PWR) {
/* Enable external vbus supply before resuming root hub */
spin_unlock_irqrestore(&hsotg->lock, flags);
ret = dwc2_vbus_supply_init(hsotg);
if (ret)
return ret;
spin_lock_irqsave(&hsotg->lock, flags);
}
/* Initialize and connect root hub if one is not already attached */
if (bus->root_hub) {
dev_dbg(hsotg->dev, "DWC OTG HCD Has Root Hub\n");
/* Inform the HUB driver to resume */
usb_hcd_resume_root_hub(hcd);
}
spin_unlock_irqrestore(&hsotg->lock, flags);
return 0;
}
/*
* Halts the DWC_otg host mode operations in a clean manner. USB transfers are
* stopped.
*/
static void _dwc2_hcd_stop(struct usb_hcd *hcd)
{
struct dwc2_hsotg *hsotg = dwc2_hcd_to_hsotg(hcd);
unsigned long flags;
u32 hprt0;
/* Turn off all host-specific interrupts */
dwc2_disable_host_interrupts(hsotg);
/* Wait for interrupt processing to finish */
synchronize_irq(hcd->irq);
spin_lock_irqsave(&hsotg->lock, flags);
hprt0 = dwc2_read_hprt0(hsotg);
/* Ensure hcd is disconnected */
dwc2_hcd_disconnect(hsotg, true);
dwc2_hcd_stop(hsotg);
hsotg->lx_state = DWC2_L3;
hcd->state = HC_STATE_HALT;
clear_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);
spin_unlock_irqrestore(&hsotg->lock, flags);
/* keep balanced supply init/exit by checking HPRT0_PWR */
if (hprt0 & HPRT0_PWR)
dwc2_vbus_supply_exit(hsotg);
usleep_range(1000, 3000);
}
static int _dwc2_hcd_suspend(struct usb_hcd *hcd)
{
struct dwc2_hsotg *hsotg = dwc2_hcd_to_hsotg(hcd);
unsigned long flags;
int ret = 0;
spin_lock_irqsave(&hsotg->lock, flags);
if (dwc2_is_device_mode(hsotg))
goto unlock;
if (hsotg->lx_state != DWC2_L0)
goto unlock;
if (!HCD_HW_ACCESSIBLE(hcd))
goto unlock;
if (hsotg->op_state == OTG_STATE_B_PERIPHERAL)
goto unlock;
if (hsotg->bus_suspended)
goto skip_power_saving;
if (hsotg->flags.b.port_connect_status == 0)
goto skip_power_saving;
switch (hsotg->params.power_down) {
case DWC2_POWER_DOWN_PARAM_PARTIAL:
/* Enter partial_power_down */
ret = dwc2_enter_partial_power_down(hsotg);
if (ret)
dev_err(hsotg->dev,
"enter partial_power_down failed\n");
/* After entering suspend, hardware is not accessible */
clear_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);
break;
case DWC2_POWER_DOWN_PARAM_HIBERNATION:
/* Enter hibernation */
spin_unlock_irqrestore(&hsotg->lock, flags);
ret = dwc2_enter_hibernation(hsotg, 1);
if (ret)
dev_err(hsotg->dev, "enter hibernation failed\n");
spin_lock_irqsave(&hsotg->lock, flags);
/* After entering suspend, hardware is not accessible */
clear_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);
break;
case DWC2_POWER_DOWN_PARAM_NONE:
/*
		 * If neither hibernation nor partial power down is supported,
		 * clock gating is used to save power.
*/
if (!hsotg->params.no_clock_gating) {
dwc2_host_enter_clock_gating(hsotg);
/* After entering suspend, hardware is not accessible */
clear_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);
}
break;
default:
goto skip_power_saving;
}
spin_unlock_irqrestore(&hsotg->lock, flags);
dwc2_vbus_supply_exit(hsotg);
spin_lock_irqsave(&hsotg->lock, flags);
/* Ask phy to be suspended */
if (!IS_ERR_OR_NULL(hsotg->uphy)) {
spin_unlock_irqrestore(&hsotg->lock, flags);
usb_phy_set_suspend(hsotg->uphy, true);
spin_lock_irqsave(&hsotg->lock, flags);
}
skip_power_saving:
hsotg->lx_state = DWC2_L2;
unlock:
spin_unlock_irqrestore(&hsotg->lock, flags);
return ret;
}
static int _dwc2_hcd_resume(struct usb_hcd *hcd)
{
struct dwc2_hsotg *hsotg = dwc2_hcd_to_hsotg(hcd);
unsigned long flags;
u32 hprt0;
int ret = 0;
spin_lock_irqsave(&hsotg->lock, flags);
if (dwc2_is_device_mode(hsotg))
goto unlock;
if (hsotg->lx_state != DWC2_L2)
goto unlock;
hprt0 = dwc2_read_hprt0(hsotg);
/*
	 * Check the port connection status to avoid exiting Partial Power Down
	 * mode from _dwc2_hcd_resume() when the controller is not actually in
	 * Partial Power Down mode.
*/
if (hprt0 & HPRT0_CONNSTS) {
hsotg->lx_state = DWC2_L0;
goto unlock;
}
switch (hsotg->params.power_down) {
case DWC2_POWER_DOWN_PARAM_PARTIAL:
ret = dwc2_exit_partial_power_down(hsotg, 0, true);
if (ret)
dev_err(hsotg->dev,
"exit partial_power_down failed\n");
/*
* Set HW accessible bit before powering on the controller
		 * since an interrupt may be raised.
*/
set_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);
break;
case DWC2_POWER_DOWN_PARAM_HIBERNATION:
ret = dwc2_exit_hibernation(hsotg, 0, 0, 1);
if (ret)
dev_err(hsotg->dev, "exit hibernation failed.\n");
/*
* Set HW accessible bit before powering on the controller
		 * since an interrupt may be raised.
*/
set_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);
break;
case DWC2_POWER_DOWN_PARAM_NONE:
/*
		 * If neither hibernation nor partial power down is supported,
* port resume is done using the clock gating programming flow.
*/
spin_unlock_irqrestore(&hsotg->lock, flags);
dwc2_host_exit_clock_gating(hsotg, 0);
/*
* Initialize the Core for Host mode, as after system resume
* the global interrupts are disabled.
*/
dwc2_core_init(hsotg, false);
dwc2_enable_global_interrupts(hsotg);
dwc2_hcd_reinit(hsotg);
spin_lock_irqsave(&hsotg->lock, flags);
/*
* Set HW accessible bit before powering on the controller
		 * since an interrupt may be raised.
*/
set_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);
break;
default:
hsotg->lx_state = DWC2_L0;
goto unlock;
}
	/* Change root port status, as a port status change occurred after resume. */
hsotg->flags.b.port_suspend_change = 1;
/*
* Enable power if not already done.
* This must not be spinlocked since duration
* of this call is unknown.
*/
if (!IS_ERR_OR_NULL(hsotg->uphy)) {
spin_unlock_irqrestore(&hsotg->lock, flags);
usb_phy_set_suspend(hsotg->uphy, false);
spin_lock_irqsave(&hsotg->lock, flags);
}
/* Enable external vbus supply after resuming the port. */
spin_unlock_irqrestore(&hsotg->lock, flags);
dwc2_vbus_supply_init(hsotg);
/* Wait for controller to correctly update D+/D- level */
usleep_range(3000, 5000);
spin_lock_irqsave(&hsotg->lock, flags);
/*
* Clear Port Enable and Port Status changes.
* Enable Port Power.
*/
dwc2_writel(hsotg, HPRT0_PWR | HPRT0_CONNDET |
HPRT0_ENACHG, HPRT0);
/* Wait for controller to detect Port Connect */
spin_unlock_irqrestore(&hsotg->lock, flags);
usleep_range(5000, 7000);
spin_lock_irqsave(&hsotg->lock, flags);
unlock:
spin_unlock_irqrestore(&hsotg->lock, flags);
return ret;
}
/* Returns the current frame number */
static int _dwc2_hcd_get_frame_number(struct usb_hcd *hcd)
{
struct dwc2_hsotg *hsotg = dwc2_hcd_to_hsotg(hcd);
return dwc2_hcd_get_frame_number(hsotg);
}
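/*
 * Dumps URB information to the verbose debug log. Compiled to a no-op unless
 * VERBOSE_DEBUG is defined.
 */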
static void dwc2_dump_urb_info(struct usb_hcd *hcd, struct urb *urb,
char *fn_name)
{
#ifdef VERBOSE_DEBUG
struct dwc2_hsotg *hsotg = dwc2_hcd_to_hsotg(hcd);
char *pipetype = NULL;
char *speed = NULL;
dev_vdbg(hsotg->dev, "%s, urb %p\n", fn_name, urb);
dev_vdbg(hsotg->dev, " Device address: %d\n",
usb_pipedevice(urb->pipe));
dev_vdbg(hsotg->dev, " Endpoint: %d, %s\n",
usb_pipeendpoint(urb->pipe),
usb_pipein(urb->pipe) ? "IN" : "OUT");
switch (usb_pipetype(urb->pipe)) {
case PIPE_CONTROL:
pipetype = "CONTROL";
break;
case PIPE_BULK:
pipetype = "BULK";
break;
case PIPE_INTERRUPT:
pipetype = "INTERRUPT";
break;
case PIPE_ISOCHRONOUS:
pipetype = "ISOCHRONOUS";
break;
}
dev_vdbg(hsotg->dev, " Endpoint type: %s %s (%s)\n", pipetype,
usb_urb_dir_in(urb) ? "IN" : "OUT", usb_pipein(urb->pipe) ?
"IN" : "OUT");
switch (urb->dev->speed) {
case USB_SPEED_HIGH:
speed = "HIGH";
break;
case USB_SPEED_FULL:
speed = "FULL";
break;
case USB_SPEED_LOW:
speed = "LOW";
break;
default:
speed = "UNKNOWN";
break;
}
dev_vdbg(hsotg->dev, " Speed: %s\n", speed);
dev_vdbg(hsotg->dev, " Max packet size: %d (%d mult)\n",
usb_endpoint_maxp(&urb->ep->desc),
usb_endpoint_maxp_mult(&urb->ep->desc));
dev_vdbg(hsotg->dev, " Data buffer length: %d\n",
urb->transfer_buffer_length);
dev_vdbg(hsotg->dev, " Transfer buffer: %p, Transfer DMA: %08lx\n",
urb->transfer_buffer, (unsigned long)urb->transfer_dma);
dev_vdbg(hsotg->dev, " Setup buffer: %p, Setup DMA: %08lx\n",
urb->setup_packet, (unsigned long)urb->setup_dma);
dev_vdbg(hsotg->dev, " Interval: %d\n", urb->interval);
if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS) {
int i;
for (i = 0; i < urb->number_of_packets; i++) {
dev_vdbg(hsotg->dev, " ISO Desc %d:\n", i);
dev_vdbg(hsotg->dev, " offset: %d, length %d\n",
urb->iso_frame_desc[i].offset,
urb->iso_frame_desc[i].length);
}
}
#endif
}
/*
* Starts processing a USB transfer request specified by a USB Request Block
* (URB). mem_flags indicates the type of memory allocation to use while
* processing this URB.
*/
static int _dwc2_hcd_urb_enqueue(struct usb_hcd *hcd, struct urb *urb,
gfp_t mem_flags)
{
struct dwc2_hsotg *hsotg = dwc2_hcd_to_hsotg(hcd);
struct usb_host_endpoint *ep = urb->ep;
struct dwc2_hcd_urb *dwc2_urb;
int i;
int retval;
int alloc_bandwidth = 0;
u8 ep_type = 0;
u32 tflags = 0;
void *buf;
unsigned long flags;
struct dwc2_qh *qh;
bool qh_allocated = false;
struct dwc2_qtd *qtd;
struct dwc2_gregs_backup *gr;
gr = &hsotg->gr_backup;
if (dbg_urb(urb)) {
dev_vdbg(hsotg->dev, "DWC OTG HCD URB Enqueue\n");
dwc2_dump_urb_info(hcd, urb, "urb_enqueue");
}
if (hsotg->hibernated) {
if (gr->gotgctl & GOTGCTL_CURMODE_HOST)
retval = dwc2_exit_hibernation(hsotg, 0, 0, 1);
else
retval = dwc2_exit_hibernation(hsotg, 0, 0, 0);
if (retval)
dev_err(hsotg->dev,
"exit hibernation failed.\n");
}
if (hsotg->in_ppd) {
retval = dwc2_exit_partial_power_down(hsotg, 0, true);
if (retval)
dev_err(hsotg->dev,
"exit partial_power_down failed\n");
}
if (hsotg->params.power_down == DWC2_POWER_DOWN_PARAM_NONE &&
hsotg->bus_suspended) {
if (dwc2_is_device_mode(hsotg))
dwc2_gadget_exit_clock_gating(hsotg, 0);
else
dwc2_host_exit_clock_gating(hsotg, 0);
}
if (!ep)
return -EINVAL;
if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS ||
usb_pipetype(urb->pipe) == PIPE_INTERRUPT) {
spin_lock_irqsave(&hsotg->lock, flags);
if (!dwc2_hcd_is_bandwidth_allocated(hsotg, ep))
alloc_bandwidth = 1;
spin_unlock_irqrestore(&hsotg->lock, flags);
}
switch (usb_pipetype(urb->pipe)) {
case PIPE_CONTROL:
ep_type = USB_ENDPOINT_XFER_CONTROL;
break;
case PIPE_ISOCHRONOUS:
ep_type = USB_ENDPOINT_XFER_ISOC;
break;
case PIPE_BULK:
ep_type = USB_ENDPOINT_XFER_BULK;
break;
case PIPE_INTERRUPT:
ep_type = USB_ENDPOINT_XFER_INT;
break;
}
dwc2_urb = dwc2_hcd_urb_alloc(hsotg, urb->number_of_packets,
mem_flags);
if (!dwc2_urb)
return -ENOMEM;
dwc2_hcd_urb_set_pipeinfo(hsotg, dwc2_urb, usb_pipedevice(urb->pipe),
usb_pipeendpoint(urb->pipe), ep_type,
usb_pipein(urb->pipe),
usb_endpoint_maxp(&ep->desc),
usb_endpoint_maxp_mult(&ep->desc));
buf = urb->transfer_buffer;
if (hcd_uses_dma(hcd)) {
if (!buf && (urb->transfer_dma & 3)) {
dev_err(hsotg->dev,
"%s: unaligned transfer with no transfer_buffer",
__func__);
retval = -EINVAL;
goto fail0;
}
}
if (!(urb->transfer_flags & URB_NO_INTERRUPT))
tflags |= URB_GIVEBACK_ASAP;
if (urb->transfer_flags & URB_ZERO_PACKET)
tflags |= URB_SEND_ZERO_PACKET;
dwc2_urb->priv = urb;
dwc2_urb->buf = buf;
dwc2_urb->dma = urb->transfer_dma;
dwc2_urb->length = urb->transfer_buffer_length;
dwc2_urb->setup_packet = urb->setup_packet;
dwc2_urb->setup_dma = urb->setup_dma;
dwc2_urb->flags = tflags;
dwc2_urb->interval = urb->interval;
dwc2_urb->status = -EINPROGRESS;
for (i = 0; i < urb->number_of_packets; ++i)
dwc2_hcd_urb_set_iso_desc_params(dwc2_urb, i,
urb->iso_frame_desc[i].offset,
urb->iso_frame_desc[i].length);
urb->hcpriv = dwc2_urb;
qh = (struct dwc2_qh *)ep->hcpriv;
/* Create QH for the endpoint if it doesn't exist */
if (!qh) {
qh = dwc2_hcd_qh_create(hsotg, dwc2_urb, mem_flags);
if (!qh) {
retval = -ENOMEM;
goto fail0;
}
ep->hcpriv = qh;
qh_allocated = true;
}
qtd = kzalloc(sizeof(*qtd), mem_flags);
if (!qtd) {
retval = -ENOMEM;
goto fail1;
}
spin_lock_irqsave(&hsotg->lock, flags);
retval = usb_hcd_link_urb_to_ep(hcd, urb);
if (retval)
goto fail2;
retval = dwc2_hcd_urb_enqueue(hsotg, dwc2_urb, qh, qtd);
if (retval)
goto fail3;
if (alloc_bandwidth) {
dwc2_allocate_bus_bandwidth(hcd,
dwc2_hcd_get_ep_bandwidth(hsotg, ep),
urb);
}
spin_unlock_irqrestore(&hsotg->lock, flags);
return 0;
fail3:
dwc2_urb->priv = NULL;
usb_hcd_unlink_urb_from_ep(hcd, urb);
if (qh_allocated && qh->channel && qh->channel->qh == qh)
qh->channel->qh = NULL;
fail2:
spin_unlock_irqrestore(&hsotg->lock, flags);
urb->hcpriv = NULL;
kfree(qtd);
fail1:
if (qh_allocated) {
struct dwc2_qtd *qtd2, *qtd2_tmp;
ep->hcpriv = NULL;
dwc2_hcd_qh_unlink(hsotg, qh);
/* Free each QTD in the QH's QTD list */
list_for_each_entry_safe(qtd2, qtd2_tmp, &qh->qtd_list,
qtd_list_entry)
dwc2_hcd_qtd_unlink_and_free(hsotg, qtd2, qh);
dwc2_hcd_qh_free(hsotg, qh);
}
fail0:
kfree(dwc2_urb);
return retval;
}
/*
 * Aborts/cancels a USB transfer request. Returns 0 on success or a negative
 * error code from usb_hcd_check_unlink_urb().
*/
static int _dwc2_hcd_urb_dequeue(struct usb_hcd *hcd, struct urb *urb,
int status)
{
struct dwc2_hsotg *hsotg = dwc2_hcd_to_hsotg(hcd);
int rc;
unsigned long flags;
dev_dbg(hsotg->dev, "DWC OTG HCD URB Dequeue\n");
dwc2_dump_urb_info(hcd, urb, "urb_dequeue");
spin_lock_irqsave(&hsotg->lock, flags);
rc = usb_hcd_check_unlink_urb(hcd, urb, status);
if (rc)
goto out;
if (!urb->hcpriv) {
dev_dbg(hsotg->dev, "## urb->hcpriv is NULL ##\n");
goto out;
}
rc = dwc2_hcd_urb_dequeue(hsotg, urb->hcpriv);
usb_hcd_unlink_urb_from_ep(hcd, urb);
kfree(urb->hcpriv);
urb->hcpriv = NULL;
/* Higher layer software sets URB status */
spin_unlock(&hsotg->lock);
usb_hcd_giveback_urb(hcd, urb, status);
spin_lock(&hsotg->lock);
dev_dbg(hsotg->dev, "Called usb_hcd_giveback_urb()\n");
dev_dbg(hsotg->dev, " urb->status = %d\n", urb->status);
out:
spin_unlock_irqrestore(&hsotg->lock, flags);
return rc;
}
/*
* Frees resources in the DWC_otg controller related to a given endpoint. Also
* clears state in the HCD related to the endpoint. Any URBs for the endpoint
* must already be dequeued.
*/
static void _dwc2_hcd_endpoint_disable(struct usb_hcd *hcd,
struct usb_host_endpoint *ep)
{
struct dwc2_hsotg *hsotg = dwc2_hcd_to_hsotg(hcd);
dev_dbg(hsotg->dev,
"DWC OTG HCD EP DISABLE: bEndpointAddress=0x%02x, ep->hcpriv=%p\n",
ep->desc.bEndpointAddress, ep->hcpriv);
dwc2_hcd_endpoint_disable(hsotg, ep, 250);
}
/*
 * Resets endpoint-specific parameter values; in the current version it is used
 * to reset the data toggle (as a workaround). This function can be called from
 * the usb_clear_halt() routine.
*/
static void _dwc2_hcd_endpoint_reset(struct usb_hcd *hcd,
struct usb_host_endpoint *ep)
{
struct dwc2_hsotg *hsotg = dwc2_hcd_to_hsotg(hcd);
unsigned long flags;
dev_dbg(hsotg->dev,
"DWC OTG HCD EP RESET: bEndpointAddress=0x%02x\n",
ep->desc.bEndpointAddress);
spin_lock_irqsave(&hsotg->lock, flags);
dwc2_hcd_endpoint_reset(hsotg, ep);
spin_unlock_irqrestore(&hsotg->lock, flags);
}
/*
* Handles host mode interrupts for the DWC_otg controller. Returns IRQ_NONE if
* there was no interrupt to handle. Returns IRQ_HANDLED if there was a valid
* interrupt.
*
* This function is called by the USB core when an interrupt occurs
*/
static irqreturn_t _dwc2_hcd_irq(struct usb_hcd *hcd)
{
struct dwc2_hsotg *hsotg = dwc2_hcd_to_hsotg(hcd);
return dwc2_handle_hcd_intr(hsotg);
}
/*
* Creates Status Change bitmap for the root hub and root port. The bitmap is
* returned in buf. Bit 0 is the status change indicator for the root hub. Bit 1
* is the status change indicator for the single root port. Returns 1 if either
* change indicator is 1, otherwise returns 0.
*/
static int _dwc2_hcd_hub_status_data(struct usb_hcd *hcd, char *buf)
{
struct dwc2_hsotg *hsotg = dwc2_hcd_to_hsotg(hcd);
buf[0] = dwc2_hcd_is_status_changed(hsotg, 1) << 1;
return buf[0] != 0;
}
/* Handles hub class-specific requests */
static int _dwc2_hcd_hub_control(struct usb_hcd *hcd, u16 typereq, u16 wvalue,
u16 windex, char *buf, u16 wlength)
{
int retval = dwc2_hcd_hub_control(dwc2_hcd_to_hsotg(hcd), typereq,
wvalue, windex, buf, wlength);
return retval;
}
/* Handles hub TT buffer clear completions */
static void _dwc2_hcd_clear_tt_buffer_complete(struct usb_hcd *hcd,
struct usb_host_endpoint *ep)
{
struct dwc2_hsotg *hsotg = dwc2_hcd_to_hsotg(hcd);
struct dwc2_qh *qh;
unsigned long flags;
qh = ep->hcpriv;
if (!qh)
return;
spin_lock_irqsave(&hsotg->lock, flags);
qh->tt_buffer_dirty = 0;
if (hsotg->flags.b.port_connect_status)
dwc2_hcd_queue_transactions(hsotg, DWC2_TRANSACTION_ALL);
spin_unlock_irqrestore(&hsotg->lock, flags);
}
/*
* HPRT0_SPD_HIGH_SPEED: high speed
* HPRT0_SPD_FULL_SPEED: full speed
*/
static void dwc2_change_bus_speed(struct usb_hcd *hcd, int speed)
{
struct dwc2_hsotg *hsotg = dwc2_hcd_to_hsotg(hcd);
if (hsotg->params.speed == speed)
return;
hsotg->params.speed = speed;
queue_work(hsotg->wq_otg, &hsotg->wf_otg);
}
static void dwc2_free_dev(struct usb_hcd *hcd, struct usb_device *udev)
{
struct dwc2_hsotg *hsotg = dwc2_hcd_to_hsotg(hcd);
if (!hsotg->params.change_speed_quirk)
return;
/*
* On removal, set speed to default high-speed.
*/
if (udev->parent && udev->parent->speed > USB_SPEED_UNKNOWN &&
udev->parent->speed < USB_SPEED_HIGH) {
dev_info(hsotg->dev, "Set speed to default high-speed\n");
dwc2_change_bus_speed(hcd, HPRT0_SPD_HIGH_SPEED);
}
}
static int dwc2_reset_device(struct usb_hcd *hcd, struct usb_device *udev)
{
struct dwc2_hsotg *hsotg = dwc2_hcd_to_hsotg(hcd);
if (!hsotg->params.change_speed_quirk)
return 0;
if (udev->speed == USB_SPEED_HIGH) {
dev_info(hsotg->dev, "Set speed to high-speed\n");
dwc2_change_bus_speed(hcd, HPRT0_SPD_HIGH_SPEED);
} else if ((udev->speed == USB_SPEED_FULL ||
udev->speed == USB_SPEED_LOW)) {
/*
* Change speed setting to full-speed if there's
* a full-speed or low-speed device plugged in.
*/
dev_info(hsotg->dev, "Set speed to full-speed\n");
dwc2_change_bus_speed(hcd, HPRT0_SPD_FULL_SPEED);
}
return 0;
}
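/* Host controller driver callbacks for the dwc2 HCD */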
static struct hc_driver dwc2_hc_driver = {
.description = "dwc2_hsotg",
.product_desc = "DWC OTG Controller",
.hcd_priv_size = sizeof(struct wrapper_priv_data),
.irq = _dwc2_hcd_irq,
.flags = HCD_MEMORY | HCD_USB2 | HCD_BH,
.start = _dwc2_hcd_start,
.stop = _dwc2_hcd_stop,
.urb_enqueue = _dwc2_hcd_urb_enqueue,
.urb_dequeue = _dwc2_hcd_urb_dequeue,
.endpoint_disable = _dwc2_hcd_endpoint_disable,
.endpoint_reset = _dwc2_hcd_endpoint_reset,
.get_frame_number = _dwc2_hcd_get_frame_number,
.hub_status_data = _dwc2_hcd_hub_status_data,
.hub_control = _dwc2_hcd_hub_control,
.clear_tt_buffer_complete = _dwc2_hcd_clear_tt_buffer_complete,
.bus_suspend = _dwc2_hcd_suspend,
.bus_resume = _dwc2_hcd_resume,
.map_urb_for_dma = dwc2_map_urb_for_dma,
.unmap_urb_for_dma = dwc2_unmap_urb_for_dma,
};
/*
* Frees secondary storage associated with the dwc2_hsotg structure contained
* in the struct usb_hcd field
*/
static void dwc2_hcd_free(struct dwc2_hsotg *hsotg)
{
u32 ahbcfg;
u32 dctl;
int i;
dev_dbg(hsotg->dev, "DWC OTG HCD FREE\n");
/* Free memory for QH/QTD lists */
dwc2_qh_list_free(hsotg, &hsotg->non_periodic_sched_inactive);
dwc2_qh_list_free(hsotg, &hsotg->non_periodic_sched_waiting);
dwc2_qh_list_free(hsotg, &hsotg->non_periodic_sched_active);
dwc2_qh_list_free(hsotg, &hsotg->periodic_sched_inactive);
dwc2_qh_list_free(hsotg, &hsotg->periodic_sched_ready);
dwc2_qh_list_free(hsotg, &hsotg->periodic_sched_assigned);
dwc2_qh_list_free(hsotg, &hsotg->periodic_sched_queued);
/* Free memory for the host channels */
for (i = 0; i < MAX_EPS_CHANNELS; i++) {
struct dwc2_host_chan *chan = hsotg->hc_ptr_array[i];
if (chan) {
dev_dbg(hsotg->dev, "HCD Free channel #%i, chan=%p\n",
i, chan);
hsotg->hc_ptr_array[i] = NULL;
kfree(chan);
}
}
if (hsotg->params.host_dma) {
if (hsotg->status_buf) {
dma_free_coherent(hsotg->dev, DWC2_HCD_STATUS_BUF_SIZE,
hsotg->status_buf,
hsotg->status_buf_dma);
hsotg->status_buf = NULL;
}
} else {
kfree(hsotg->status_buf);
hsotg->status_buf = NULL;
}
ahbcfg = dwc2_readl(hsotg, GAHBCFG);
/* Disable all interrupts */
ahbcfg &= ~GAHBCFG_GLBL_INTR_EN;
dwc2_writel(hsotg, ahbcfg, GAHBCFG);
dwc2_writel(hsotg, 0, GINTMSK);
if (hsotg->hw_params.snpsid >= DWC2_CORE_REV_3_00a) {
dctl = dwc2_readl(hsotg, DCTL);
dctl |= DCTL_SFTDISCON;
dwc2_writel(hsotg, dctl, DCTL);
}
if (hsotg->wq_otg) {
if (!cancel_work_sync(&hsotg->wf_otg))
flush_workqueue(hsotg->wq_otg);
destroy_workqueue(hsotg->wq_otg);
}
cancel_work_sync(&hsotg->phy_reset_work);
del_timer(&hsotg->wkp_timer);
}
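/*
 * Releases the HCD: turns off host-specific interrupts and frees all HCD
 * resources.
 */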
static void dwc2_hcd_release(struct dwc2_hsotg *hsotg)
{
/* Turn off all host-specific interrupts */
dwc2_disable_host_interrupts(hsotg);
dwc2_hcd_free(hsotg);
}
/*
* Initializes the HCD. This function allocates memory for and initializes the
* static parts of the usb_hcd and dwc2_hsotg structures. It also registers the
* USB bus with the core and calls the hc_driver->start() function. It returns
* a negative error on failure.
*/
int dwc2_hcd_init(struct dwc2_hsotg *hsotg)
{
struct platform_device *pdev = to_platform_device(hsotg->dev);
struct resource *res;
struct usb_hcd *hcd;
struct dwc2_host_chan *channel;
u32 hcfg;
int i, num_channels;
int retval;
if (usb_disabled())
return -ENODEV;
dev_dbg(hsotg->dev, "DWC OTG HCD INIT\n");
retval = -ENOMEM;
hcfg = dwc2_readl(hsotg, HCFG);
dev_dbg(hsotg->dev, "hcfg=%08x\n", hcfg);
#ifdef CONFIG_USB_DWC2_TRACK_MISSED_SOFS
hsotg->frame_num_array = kcalloc(FRAME_NUM_ARRAY_SIZE,
sizeof(*hsotg->frame_num_array),
GFP_KERNEL);
if (!hsotg->frame_num_array)
goto error1;
hsotg->last_frame_num_array =
kcalloc(FRAME_NUM_ARRAY_SIZE,
sizeof(*hsotg->last_frame_num_array), GFP_KERNEL);
if (!hsotg->last_frame_num_array)
goto error1;
#endif
hsotg->last_frame_num = HFNUM_MAX_FRNUM;
/* Check if the bus driver or platform code has setup a dma_mask */
if (hsotg->params.host_dma &&
!hsotg->dev->dma_mask) {
dev_warn(hsotg->dev,
"dma_mask not set, disabling DMA\n");
hsotg->params.host_dma = false;
hsotg->params.dma_desc_enable = false;
}
/* Set device flags indicating whether the HCD supports DMA */
if (hsotg->params.host_dma) {
if (dma_set_mask(hsotg->dev, DMA_BIT_MASK(32)) < 0)
dev_warn(hsotg->dev, "can't set DMA mask\n");
if (dma_set_coherent_mask(hsotg->dev, DMA_BIT_MASK(32)) < 0)
dev_warn(hsotg->dev, "can't set coherent DMA mask\n");
}
if (hsotg->params.change_speed_quirk) {
dwc2_hc_driver.free_dev = dwc2_free_dev;
dwc2_hc_driver.reset_device = dwc2_reset_device;
}
if (hsotg->params.host_dma)
dwc2_hc_driver.flags |= HCD_DMA;
hcd = usb_create_hcd(&dwc2_hc_driver, hsotg->dev, dev_name(hsotg->dev));
if (!hcd)
goto error1;
hcd->has_tt = 1;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!res) {
retval = -EINVAL;
goto error2;
}
hcd->rsrc_start = res->start;
hcd->rsrc_len = resource_size(res);
((struct wrapper_priv_data *)&hcd->hcd_priv)->hsotg = hsotg;
hsotg->priv = hcd;
/*
* Disable the global interrupt until all the interrupt handlers are
* installed
*/
dwc2_disable_global_interrupts(hsotg);
/* Initialize the DWC_otg core, and select the Phy type */
retval = dwc2_core_init(hsotg, true);
if (retval)
goto error2;
/* Create new workqueue and init work */
retval = -ENOMEM;
hsotg->wq_otg = alloc_ordered_workqueue("dwc2", 0);
if (!hsotg->wq_otg) {
dev_err(hsotg->dev, "Failed to create workqueue\n");
goto error2;
}
INIT_WORK(&hsotg->wf_otg, dwc2_conn_id_status_change);
timer_setup(&hsotg->wkp_timer, dwc2_wakeup_detected, 0);
/* Initialize the non-periodic schedule */
INIT_LIST_HEAD(&hsotg->non_periodic_sched_inactive);
INIT_LIST_HEAD(&hsotg->non_periodic_sched_waiting);
INIT_LIST_HEAD(&hsotg->non_periodic_sched_active);
/* Initialize the periodic schedule */
INIT_LIST_HEAD(&hsotg->periodic_sched_inactive);
INIT_LIST_HEAD(&hsotg->periodic_sched_ready);
INIT_LIST_HEAD(&hsotg->periodic_sched_assigned);
INIT_LIST_HEAD(&hsotg->periodic_sched_queued);
INIT_LIST_HEAD(&hsotg->split_order);
/*
* Create a host channel descriptor for each host channel implemented
* in the controller. Initialize the channel descriptor array.
*/
INIT_LIST_HEAD(&hsotg->free_hc_list);
num_channels = hsotg->params.host_channels;
memset(&hsotg->hc_ptr_array[0], 0, sizeof(hsotg->hc_ptr_array));
for (i = 0; i < num_channels; i++) {
channel = kzalloc(sizeof(*channel), GFP_KERNEL);
if (!channel)
goto error3;
channel->hc_num = i;
INIT_LIST_HEAD(&channel->split_order_list_entry);
hsotg->hc_ptr_array[i] = channel;
}
/* Initialize work */
INIT_DELAYED_WORK(&hsotg->start_work, dwc2_hcd_start_func);
INIT_DELAYED_WORK(&hsotg->reset_work, dwc2_hcd_reset_func);
INIT_WORK(&hsotg->phy_reset_work, dwc2_hcd_phy_reset_func);
/*
* Allocate space for storing data on status transactions. Normally no
* data is sent, but this space acts as a bit bucket. This must be
* done after usb_add_hcd since that function allocates the DMA buffer
* pool.
*/
if (hsotg->params.host_dma)
hsotg->status_buf = dma_alloc_coherent(hsotg->dev,
DWC2_HCD_STATUS_BUF_SIZE,
&hsotg->status_buf_dma, GFP_KERNEL);
else
hsotg->status_buf = kzalloc(DWC2_HCD_STATUS_BUF_SIZE,
GFP_KERNEL);
if (!hsotg->status_buf)
goto error3;
/*
* Create kmem caches to handle descriptor buffers in descriptor
* DMA mode.
* Alignment must be set to 512 bytes.
*/
if (hsotg->params.dma_desc_enable ||
hsotg->params.dma_desc_fs_enable) {
hsotg->desc_gen_cache = kmem_cache_create("dwc2-gen-desc",
sizeof(struct dwc2_dma_desc) *
MAX_DMA_DESC_NUM_GENERIC, 512, SLAB_CACHE_DMA,
NULL);
if (!hsotg->desc_gen_cache) {
dev_err(hsotg->dev,
"unable to create dwc2 generic desc cache\n");
/*
* Disable descriptor dma mode since it will not be
* usable.
*/
hsotg->params.dma_desc_enable = false;
hsotg->params.dma_desc_fs_enable = false;
}
hsotg->desc_hsisoc_cache = kmem_cache_create("dwc2-hsisoc-desc",
sizeof(struct dwc2_dma_desc) *
MAX_DMA_DESC_NUM_HS_ISOC, 512, 0, NULL);
if (!hsotg->desc_hsisoc_cache) {
dev_err(hsotg->dev,
"unable to create dwc2 hs isoc desc cache\n");
kmem_cache_destroy(hsotg->desc_gen_cache);
/*
* Disable descriptor dma mode since it will not be
* usable.
*/
hsotg->params.dma_desc_enable = false;
hsotg->params.dma_desc_fs_enable = false;
}
}
if (hsotg->params.host_dma) {
/*
* Create kmem caches to handle non-aligned buffer
* in Buffer DMA mode.
*/
hsotg->unaligned_cache = kmem_cache_create("dwc2-unaligned-dma",
DWC2_KMEM_UNALIGNED_BUF_SIZE, 4,
SLAB_CACHE_DMA, NULL);
if (!hsotg->unaligned_cache)
dev_err(hsotg->dev,
"unable to create dwc2 unaligned cache\n");
}
hsotg->otg_port = 1;
hsotg->frame_list = NULL;
hsotg->frame_list_dma = 0;
hsotg->periodic_qh_count = 0;
/* Initiate lx_state to L3 disconnected state */
hsotg->lx_state = DWC2_L3;
hcd->self.otg_port = hsotg->otg_port;
/* Don't support SG list at this point */
hcd->self.sg_tablesize = 0;
hcd->tpl_support = of_usb_host_tpl_support(hsotg->dev->of_node);
if (!IS_ERR_OR_NULL(hsotg->uphy))
otg_set_host(hsotg->uphy->otg, &hcd->self);
/*
* Finish generic HCD initialization and start the HCD. This function
* allocates the DMA buffer pool, registers the USB bus, requests the
* IRQ line, and calls hcd_start method.
*/
retval = usb_add_hcd(hcd, hsotg->irq, IRQF_SHARED);
if (retval < 0)
goto error4;
device_wakeup_enable(hcd->self.controller);
dwc2_hcd_dump_state(hsotg);
dwc2_enable_global_interrupts(hsotg);
return 0;
error4:
kmem_cache_destroy(hsotg->unaligned_cache);
kmem_cache_destroy(hsotg->desc_hsisoc_cache);
kmem_cache_destroy(hsotg->desc_gen_cache);
error3:
dwc2_hcd_release(hsotg);
error2:
usb_put_hcd(hcd);
error1:
#ifdef CONFIG_USB_DWC2_TRACK_MISSED_SOFS
kfree(hsotg->last_frame_num_array);
kfree(hsotg->frame_num_array);
#endif
dev_err(hsotg->dev, "%s() FAILED, returning %d\n", __func__, retval);
return retval;
}
/*
* Removes the HCD.
* Frees memory and resources associated with the HCD and deregisters the bus.
*/
void dwc2_hcd_remove(struct dwc2_hsotg *hsotg)
{
struct usb_hcd *hcd;
dev_dbg(hsotg->dev, "DWC OTG HCD REMOVE\n");
hcd = dwc2_hsotg_to_hcd(hsotg);
dev_dbg(hsotg->dev, "hsotg->hcd = %p\n", hcd);
if (!hcd) {
dev_dbg(hsotg->dev, "%s: dwc2_hsotg_to_hcd(hsotg) NULL!\n",
__func__);
return;
}
if (!IS_ERR_OR_NULL(hsotg->uphy))
otg_set_host(hsotg->uphy->otg, NULL);
usb_remove_hcd(hcd);
hsotg->priv = NULL;
kmem_cache_destroy(hsotg->unaligned_cache);
kmem_cache_destroy(hsotg->desc_hsisoc_cache);
kmem_cache_destroy(hsotg->desc_gen_cache);
dwc2_hcd_release(hsotg);
usb_put_hcd(hcd);
#ifdef CONFIG_USB_DWC2_TRACK_MISSED_SOFS
kfree(hsotg->last_frame_num_array);
kfree(hsotg->frame_num_array);
#endif
}
/**
* dwc2_backup_host_registers() - Backup controller host registers.
 * When suspending the USB bus, the registers need to be backed up
 * if controller power is disabled once suspended.
*
* @hsotg: Programming view of the DWC_otg controller
*/
int dwc2_backup_host_registers(struct dwc2_hsotg *hsotg)
{
struct dwc2_hregs_backup *hr;
int i;
dev_dbg(hsotg->dev, "%s\n", __func__);
/* Backup Host regs */
hr = &hsotg->hr_backup;
hr->hcfg = dwc2_readl(hsotg, HCFG);
hr->haintmsk = dwc2_readl(hsotg, HAINTMSK);
for (i = 0; i < hsotg->params.host_channels; ++i)
hr->hcintmsk[i] = dwc2_readl(hsotg, HCINTMSK(i));
hr->hprt0 = dwc2_read_hprt0(hsotg);
hr->hfir = dwc2_readl(hsotg, HFIR);
hr->hptxfsiz = dwc2_readl(hsotg, HPTXFSIZ);
hr->valid = true;
return 0;
}
/**
* dwc2_restore_host_registers() - Restore controller host registers.
 * When resuming the USB bus, the host registers need to be restored
 * if controller power was disabled.
*
* @hsotg: Programming view of the DWC_otg controller
*/
int dwc2_restore_host_registers(struct dwc2_hsotg *hsotg)
{
struct dwc2_hregs_backup *hr;
int i;
dev_dbg(hsotg->dev, "%s\n", __func__);
/* Restore host regs */
hr = &hsotg->hr_backup;
if (!hr->valid) {
dev_err(hsotg->dev, "%s: no host registers to restore\n",
__func__);
return -EINVAL;
}
hr->valid = false;
dwc2_writel(hsotg, hr->hcfg, HCFG);
dwc2_writel(hsotg, hr->haintmsk, HAINTMSK);
for (i = 0; i < hsotg->params.host_channels; ++i)
dwc2_writel(hsotg, hr->hcintmsk[i], HCINTMSK(i));
dwc2_writel(hsotg, hr->hprt0, HPRT0);
dwc2_writel(hsotg, hr->hfir, HFIR);
dwc2_writel(hsotg, hr->hptxfsiz, HPTXFSIZ);
hsotg->frame_number = 0;
return 0;
}
/**
* dwc2_host_enter_hibernation() - Put controller in Hibernation.
*
* @hsotg: Programming view of the DWC_otg controller
*/
int dwc2_host_enter_hibernation(struct dwc2_hsotg *hsotg)
{
unsigned long flags;
int ret = 0;
u32 hprt0;
u32 pcgcctl;
u32 gusbcfg;
u32 gpwrdn;
dev_dbg(hsotg->dev, "Preparing host for hibernation\n");
ret = dwc2_backup_global_registers(hsotg);
if (ret) {
dev_err(hsotg->dev, "%s: failed to backup global registers\n",
__func__);
return ret;
}
ret = dwc2_backup_host_registers(hsotg);
if (ret) {
dev_err(hsotg->dev, "%s: failed to backup host registers\n",
__func__);
return ret;
}
/* Enter USB Suspend Mode */
hprt0 = dwc2_readl(hsotg, HPRT0);
hprt0 |= HPRT0_SUSP;
hprt0 &= ~HPRT0_ENA;
dwc2_writel(hsotg, hprt0, HPRT0);
/* Wait for the HPRT0.PrtSusp register field to be set */
if (dwc2_hsotg_wait_bit_set(hsotg, HPRT0, HPRT0_SUSP, 5000))
dev_warn(hsotg->dev, "Suspend wasn't generated\n");
/*
* We need to disable interrupts to prevent servicing of any IRQ
* during going to hibernation
*/
spin_lock_irqsave(&hsotg->lock, flags);
hsotg->lx_state = DWC2_L2;
gusbcfg = dwc2_readl(hsotg, GUSBCFG);
if (gusbcfg & GUSBCFG_ULPI_UTMI_SEL) {
/* ULPI interface */
/* Suspend the Phy Clock */
pcgcctl = dwc2_readl(hsotg, PCGCTL);
pcgcctl |= PCGCTL_STOPPCLK;
dwc2_writel(hsotg, pcgcctl, PCGCTL);
udelay(10);
gpwrdn = dwc2_readl(hsotg, GPWRDN);
gpwrdn |= GPWRDN_PMUACTV;
dwc2_writel(hsotg, gpwrdn, GPWRDN);
udelay(10);
} else {
/* UTMI+ Interface */
gpwrdn = dwc2_readl(hsotg, GPWRDN);
gpwrdn |= GPWRDN_PMUACTV;
dwc2_writel(hsotg, gpwrdn, GPWRDN);
udelay(10);
pcgcctl = dwc2_readl(hsotg, PCGCTL);
pcgcctl |= PCGCTL_STOPPCLK;
dwc2_writel(hsotg, pcgcctl, PCGCTL);
udelay(10);
}
/* Enable interrupts from wake up logic */
gpwrdn = dwc2_readl(hsotg, GPWRDN);
gpwrdn |= GPWRDN_PMUINTSEL;
dwc2_writel(hsotg, gpwrdn, GPWRDN);
udelay(10);
/* Unmask host mode interrupts in GPWRDN */
gpwrdn = dwc2_readl(hsotg, GPWRDN);
gpwrdn |= GPWRDN_DISCONN_DET_MSK;
gpwrdn |= GPWRDN_LNSTSCHG_MSK;
gpwrdn |= GPWRDN_STS_CHGINT_MSK;
dwc2_writel(hsotg, gpwrdn, GPWRDN);
udelay(10);
/* Enable Power Down Clamp */
gpwrdn = dwc2_readl(hsotg, GPWRDN);
gpwrdn |= GPWRDN_PWRDNCLMP;
dwc2_writel(hsotg, gpwrdn, GPWRDN);
udelay(10);
/* Switch off VDD */
gpwrdn = dwc2_readl(hsotg, GPWRDN);
gpwrdn |= GPWRDN_PWRDNSWTCH;
dwc2_writel(hsotg, gpwrdn, GPWRDN);
hsotg->hibernated = 1;
hsotg->bus_suspended = 1;
dev_dbg(hsotg->dev, "Host hibernation completed\n");
spin_unlock_irqrestore(&hsotg->lock, flags);
return ret;
}
/*
* dwc2_host_exit_hibernation()
*
* @hsotg: Programming view of the DWC_otg controller
* @rem_wakeup: indicates whether resume is initiated by Device or Host.
 * @reset: indicates whether resume is initiated by Reset.
 *
 * Return: non-zero if failed to exit hibernation.
*
* This function is for exiting from Host mode hibernation by
* Host Initiated Resume/Reset and Device Initiated Remote-Wakeup.
*/
int dwc2_host_exit_hibernation(struct dwc2_hsotg *hsotg, int rem_wakeup,
int reset)
{
u32 gpwrdn;
u32 hprt0;
int ret = 0;
struct dwc2_gregs_backup *gr;
struct dwc2_hregs_backup *hr;
gr = &hsotg->gr_backup;
hr = &hsotg->hr_backup;
dev_dbg(hsotg->dev,
"%s: called with rem_wakeup = %d reset = %d\n",
__func__, rem_wakeup, reset);
dwc2_hib_restore_common(hsotg, rem_wakeup, 1);
hsotg->hibernated = 0;
/*
	 * This step is not described in the functional spec, but without this
	 * delay mismatch interrupts occur because, just after restore, the
	 * core is still in Device mode (gintsts.curmode == 0).
*/
mdelay(100);
	/* Clear all pending interrupts */
dwc2_writel(hsotg, 0xffffffff, GINTSTS);
/* De-assert Restore */
gpwrdn = dwc2_readl(hsotg, GPWRDN);
gpwrdn &= ~GPWRDN_RESTORE;
dwc2_writel(hsotg, gpwrdn, GPWRDN);
udelay(10);
/* Restore GUSBCFG, HCFG */
dwc2_writel(hsotg, gr->gusbcfg, GUSBCFG);
dwc2_writel(hsotg, hr->hcfg, HCFG);
/* De-assert Wakeup Logic */
gpwrdn = dwc2_readl(hsotg, GPWRDN);
gpwrdn &= ~GPWRDN_PMUACTV;
dwc2_writel(hsotg, gpwrdn, GPWRDN);
udelay(10);
hprt0 = hr->hprt0;
hprt0 |= HPRT0_PWR;
hprt0 &= ~HPRT0_ENA;
hprt0 &= ~HPRT0_SUSP;
dwc2_writel(hsotg, hprt0, HPRT0);
hprt0 = hr->hprt0;
hprt0 |= HPRT0_PWR;
hprt0 &= ~HPRT0_ENA;
hprt0 &= ~HPRT0_SUSP;
if (reset) {
hprt0 |= HPRT0_RST;
dwc2_writel(hsotg, hprt0, HPRT0);
/* Wait for Resume time and then program HPRT again */
mdelay(60);
hprt0 &= ~HPRT0_RST;
dwc2_writel(hsotg, hprt0, HPRT0);
} else {
hprt0 |= HPRT0_RES;
dwc2_writel(hsotg, hprt0, HPRT0);
/* Wait for Resume time and then program HPRT again */
mdelay(100);
hprt0 &= ~HPRT0_RES;
dwc2_writel(hsotg, hprt0, HPRT0);
}
/* Clear all interrupt status */
hprt0 = dwc2_readl(hsotg, HPRT0);
hprt0 |= HPRT0_CONNDET;
hprt0 |= HPRT0_ENACHG;
hprt0 &= ~HPRT0_ENA;
dwc2_writel(hsotg, hprt0, HPRT0);
hprt0 = dwc2_readl(hsotg, HPRT0);
	/* Clear all pending interrupts */
dwc2_writel(hsotg, 0xffffffff, GINTSTS);
/* Restore global registers */
ret = dwc2_restore_global_registers(hsotg);
if (ret) {
dev_err(hsotg->dev, "%s: failed to restore registers\n",
__func__);
return ret;
}
/* Restore host registers */
ret = dwc2_restore_host_registers(hsotg);
if (ret) {
dev_err(hsotg->dev, "%s: failed to restore host registers\n",
__func__);
return ret;
}
if (rem_wakeup) {
dwc2_hcd_rem_wakeup(hsotg);
/*
		 * Set the "port_connect_status_change" flag to force re-enumeration,
		 * because the port connection status is not detected after exiting
		 * hibernation.
*/
hsotg->flags.b.port_connect_status_change = 1;
}
hsotg->hibernated = 0;
hsotg->bus_suspended = 0;
hsotg->lx_state = DWC2_L0;
dev_dbg(hsotg->dev, "Host hibernation restore complete\n");
return ret;
}
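/*
 * Returns true if the PHY can be powered off while the bus is suspended,
 * i.e. the controller is not allowed to wake up or no device below the
 * root hub has wakeup enabled.
 */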
bool dwc2_host_can_poweroff_phy(struct dwc2_hsotg *dwc2)
{
struct usb_device *root_hub = dwc2_hsotg_to_hcd(dwc2)->self.root_hub;
/* If the controller isn't allowed to wakeup then we can power off. */
if (!device_may_wakeup(dwc2->dev))
return true;
/*
* We don't want to power off the PHY if something under the
* root hub has wakeup enabled.
*/
if (usb_wakeup_enabled_descendants(root_hub))
return false;
/* No reason to keep the PHY powered, so allow poweroff */
return true;
}
/**
* dwc2_host_enter_partial_power_down() - Put controller in partial
* power down.
*
* @hsotg: Programming view of the DWC_otg controller
*
* Return: non-zero if failed to enter host partial power down.
*
* This function is for entering Host mode partial power down.
*/
int dwc2_host_enter_partial_power_down(struct dwc2_hsotg *hsotg)
{
u32 pcgcctl;
u32 hprt0;
int ret = 0;
dev_dbg(hsotg->dev, "Entering host partial power down started.\n");
/* Put this port in suspend mode. */
hprt0 = dwc2_read_hprt0(hsotg);
hprt0 |= HPRT0_SUSP;
dwc2_writel(hsotg, hprt0, HPRT0);
udelay(5);
/* Wait for the HPRT0.PrtSusp register field to be set */
if (dwc2_hsotg_wait_bit_set(hsotg, HPRT0, HPRT0_SUSP, 3000))
dev_warn(hsotg->dev, "Suspend wasn't generated\n");
/* Backup all registers */
ret = dwc2_backup_global_registers(hsotg);
if (ret) {
dev_err(hsotg->dev, "%s: failed to backup global registers\n",
__func__);
return ret;
}
ret = dwc2_backup_host_registers(hsotg);
if (ret) {
dev_err(hsotg->dev, "%s: failed to backup host registers\n",
__func__);
return ret;
}
/*
* Clear any pending interrupts since dwc2 will not be able to
* clear them after entering partial_power_down.
*/
dwc2_writel(hsotg, 0xffffffff, GINTSTS);
/* Put the controller in low power state */
pcgcctl = dwc2_readl(hsotg, PCGCTL);
pcgcctl |= PCGCTL_PWRCLMP;
dwc2_writel(hsotg, pcgcctl, PCGCTL);
udelay(5);
pcgcctl |= PCGCTL_RSTPDWNMODULE;
dwc2_writel(hsotg, pcgcctl, PCGCTL);
udelay(5);
pcgcctl |= PCGCTL_STOPPCLK;
dwc2_writel(hsotg, pcgcctl, PCGCTL);
	/* Set the in_ppd flag to 1 as the core enters suspend here. */
hsotg->in_ppd = 1;
hsotg->lx_state = DWC2_L2;
hsotg->bus_suspended = true;
dev_dbg(hsotg->dev, "Entering host partial power down completed.\n");
return ret;
}
/*
* dwc2_host_exit_partial_power_down() - Exit controller from host partial
* power down.
*
* @hsotg: Programming view of the DWC_otg controller
 * @rem_wakeup: indicates whether resume is initiated by remote wakeup.
* @restore: indicates whether need to restore the registers or not.
*
* Return: non-zero if failed to exit host partial power down.
*
* This function is for exiting from Host mode partial power down.
*/
int dwc2_host_exit_partial_power_down(struct dwc2_hsotg *hsotg,
int rem_wakeup, bool restore)
{
u32 pcgcctl;
int ret = 0;
u32 hprt0;
dev_dbg(hsotg->dev, "Exiting host partial power down started.\n");
pcgcctl = dwc2_readl(hsotg, PCGCTL);
pcgcctl &= ~PCGCTL_STOPPCLK;
dwc2_writel(hsotg, pcgcctl, PCGCTL);
udelay(5);
pcgcctl = dwc2_readl(hsotg, PCGCTL);
pcgcctl &= ~PCGCTL_PWRCLMP;
dwc2_writel(hsotg, pcgcctl, PCGCTL);
udelay(5);
pcgcctl = dwc2_readl(hsotg, PCGCTL);
pcgcctl &= ~PCGCTL_RSTPDWNMODULE;
dwc2_writel(hsotg, pcgcctl, PCGCTL);
udelay(100);
if (restore) {
ret = dwc2_restore_global_registers(hsotg);
if (ret) {
dev_err(hsotg->dev, "%s: failed to restore registers\n",
__func__);
return ret;
}
ret = dwc2_restore_host_registers(hsotg);
if (ret) {
dev_err(hsotg->dev, "%s: failed to restore host registers\n",
__func__);
return ret;
}
}
/* Drive resume signaling and exit suspend mode on the port. */
hprt0 = dwc2_read_hprt0(hsotg);
hprt0 |= HPRT0_RES;
hprt0 &= ~HPRT0_SUSP;
dwc2_writel(hsotg, hprt0, HPRT0);
udelay(5);
if (!rem_wakeup) {
		/* Stop driving resume signaling on the port. */
hprt0 = dwc2_read_hprt0(hsotg);
hprt0 &= ~HPRT0_RES;
dwc2_writel(hsotg, hprt0, HPRT0);
hsotg->bus_suspended = false;
} else {
/* Turn on the port power bit. */
hprt0 = dwc2_read_hprt0(hsotg);
hprt0 |= HPRT0_PWR;
dwc2_writel(hsotg, hprt0, HPRT0);
/* Connect hcd. */
dwc2_hcd_connect(hsotg);
mod_timer(&hsotg->wkp_timer,
jiffies + msecs_to_jiffies(71));
}
	/* Set in_ppd to 0 and lx_state to DWC2_L0 as the core exits from suspend here. */
hsotg->in_ppd = 0;
hsotg->lx_state = DWC2_L0;
dev_dbg(hsotg->dev, "Exiting host partial power down completed.\n");
return ret;
}
/**
* dwc2_host_enter_clock_gating() - Put controller in clock gating.
*
* @hsotg: Programming view of the DWC_otg controller
*
* This function is for entering Host mode clock gating.
*/
void dwc2_host_enter_clock_gating(struct dwc2_hsotg *hsotg)
{
u32 hprt0;
u32 pcgctl;
dev_dbg(hsotg->dev, "Entering host clock gating.\n");
/* Put this port in suspend mode. */
hprt0 = dwc2_read_hprt0(hsotg);
hprt0 |= HPRT0_SUSP;
dwc2_writel(hsotg, hprt0, HPRT0);
	/* Set the Stop PHY Clock bit as suspend is received. */
pcgctl = dwc2_readl(hsotg, PCGCTL);
pcgctl |= PCGCTL_STOPPCLK;
dwc2_writel(hsotg, pcgctl, PCGCTL);
udelay(5);
/* Set the Gate hclk as suspend is received. */
pcgctl = dwc2_readl(hsotg, PCGCTL);
pcgctl |= PCGCTL_GATEHCLK;
dwc2_writel(hsotg, pcgctl, PCGCTL);
udelay(5);
hsotg->bus_suspended = true;
hsotg->lx_state = DWC2_L2;
}
/**
* dwc2_host_exit_clock_gating() - Exit controller from clock gating.
*
* @hsotg: Programming view of the DWC_otg controller
* @rem_wakeup: indicates whether resume is initiated by remote wakeup
*
* This function is for exiting Host mode clock gating.
*/
void dwc2_host_exit_clock_gating(struct dwc2_hsotg *hsotg, int rem_wakeup)
{
u32 hprt0;
u32 pcgctl;
dev_dbg(hsotg->dev, "Exiting host clock gating.\n");
/* Clear the Gate hclk. */
pcgctl = dwc2_readl(hsotg, PCGCTL);
pcgctl &= ~PCGCTL_GATEHCLK;
dwc2_writel(hsotg, pcgctl, PCGCTL);
udelay(5);
	/* Clear the Stop PHY Clock bit. */
pcgctl = dwc2_readl(hsotg, PCGCTL);
pcgctl &= ~PCGCTL_STOPPCLK;
dwc2_writel(hsotg, pcgctl, PCGCTL);
udelay(5);
/* Drive resume signaling and exit suspend mode on the port. */
hprt0 = dwc2_read_hprt0(hsotg);
hprt0 |= HPRT0_RES;
hprt0 &= ~HPRT0_SUSP;
dwc2_writel(hsotg, hprt0, HPRT0);
udelay(5);
if (!rem_wakeup) {
		/* In case of port resume we need to wait for 40 ms */
msleep(USB_RESUME_TIMEOUT);
		/* Stop driving resume signaling on the port. */
hprt0 = dwc2_read_hprt0(hsotg);
hprt0 &= ~HPRT0_RES;
dwc2_writel(hsotg, hprt0, HPRT0);
hsotg->bus_suspended = false;
hsotg->lx_state = DWC2_L0;
} else {
mod_timer(&hsotg->wkp_timer,
jiffies + msecs_to_jiffies(71));
}
}
| linux-master | drivers/usb/dwc2/hcd.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* c67x00-hcd.c: Cypress C67X00 USB Host Controller Driver
*
* Copyright (C) 2006-2008 Barco N.V.
* Derived from the Cypress cy7c67200/300 ezusb linux driver and
* based on multiple host controller drivers inside the linux kernel.
*/
#include <linux/device.h>
#include <linux/platform_device.h>
#include <linux/usb.h>
#include "c67x00.h"
#include "c67x00-hcd.h"
/* --------------------------------------------------------------------------
* Root Hub Support
*/
static __u8 c67x00_hub_des[] = {
0x09, /* __u8 bLength; */
USB_DT_HUB, /* __u8 bDescriptorType; Hub-descriptor */
0x02, /* __u8 bNbrPorts; */
0x00, /* __u16 wHubCharacteristics; */
0x00, /* (per-port OC, no power switching) */
0x32, /* __u8 bPwrOn2pwrGood; 2ms */
0x00, /* __u8 bHubContrCurrent; 0 mA */
0x00, /* __u8 DeviceRemovable; ** 7 Ports max ** */
0xff, /* __u8 PortPwrCtrlMask; ** 7 ports max ** */
};
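/*
 * Resets the given host port and restores the default end-of-transfer (EOT)
 * setting.
 */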
static void c67x00_hub_reset_host_port(struct c67x00_sie *sie, int port)
{
struct c67x00_hcd *c67x00 = sie->private_data;
unsigned long flags;
c67x00_ll_husb_reset(sie, port);
spin_lock_irqsave(&c67x00->lock, flags);
c67x00_ll_husb_reset_port(sie, port);
spin_unlock_irqrestore(&c67x00->lock, flags);
c67x00_ll_set_husb_eot(sie->dev, DEFAULT_EOT);
}
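/*
 * Builds the hub status change bitmap: bit 0 is the hub itself, bits 1..n the
 * ports. Returns non-zero if any port reported a connect change.
 */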
static int c67x00_hub_status_data(struct usb_hcd *hcd, char *buf)
{
struct c67x00_hcd *c67x00 = hcd_to_c67x00_hcd(hcd);
struct c67x00_sie *sie = c67x00->sie;
u16 status;
int i;
*buf = 0;
status = c67x00_ll_usb_get_status(sie);
for (i = 0; i < C67X00_PORTS; i++)
if (status & PORT_CONNECT_CHANGE(i))
*buf |= (1 << i);
/* bit 0 denotes hub change, b1..n port change */
*buf <<= 1;
return !!*buf;
}
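/* Handles hub class-specific requests for the c67x00 root hub */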
static int c67x00_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
u16 wIndex, char *buf, u16 wLength)
{
struct c67x00_hcd *c67x00 = hcd_to_c67x00_hcd(hcd);
struct c67x00_sie *sie = c67x00->sie;
u16 status, usb_status;
int len = 0;
unsigned int port = wIndex-1;
u16 wPortChange, wPortStatus;
switch (typeReq) {
case GetHubStatus:
*(__le32 *) buf = cpu_to_le32(0);
len = 4; /* hub power */
break;
case GetPortStatus:
if (wIndex > C67X00_PORTS)
return -EPIPE;
status = c67x00_ll_usb_get_status(sie);
usb_status = c67x00_ll_get_usb_ctl(sie);
wPortChange = 0;
if (status & PORT_CONNECT_CHANGE(port))
wPortChange |= USB_PORT_STAT_C_CONNECTION;
wPortStatus = USB_PORT_STAT_POWER;
if (!(status & PORT_SE0_STATUS(port)))
wPortStatus |= USB_PORT_STAT_CONNECTION;
if (usb_status & LOW_SPEED_PORT(port)) {
wPortStatus |= USB_PORT_STAT_LOW_SPEED;
c67x00->low_speed_ports |= (1 << port);
} else
c67x00->low_speed_ports &= ~(1 << port);
if (usb_status & SOF_EOP_EN(port))
wPortStatus |= USB_PORT_STAT_ENABLE;
*(__le16 *) buf = cpu_to_le16(wPortStatus);
*(__le16 *) (buf + 2) = cpu_to_le16(wPortChange);
len = 4;
break;
case SetHubFeature: /* We don't implement these */
case ClearHubFeature:
switch (wValue) {
case C_HUB_OVER_CURRENT:
case C_HUB_LOCAL_POWER:
len = 0;
break;
default:
return -EPIPE;
}
break;
case SetPortFeature:
if (wIndex > C67X00_PORTS)
return -EPIPE;
switch (wValue) {
case USB_PORT_FEAT_SUSPEND:
dev_dbg(c67x00_hcd_dev(c67x00),
"SetPortFeature %d (SUSPEND)\n", port);
len = 0;
break;
case USB_PORT_FEAT_RESET:
c67x00_hub_reset_host_port(sie, port);
len = 0;
break;
case USB_PORT_FEAT_POWER:
/* Power always enabled */
len = 0;
break;
default:
dev_dbg(c67x00_hcd_dev(c67x00),
"%s: SetPortFeature %d (0x%04x) Error!\n",
__func__, port, wValue);
return -EPIPE;
}
break;
case ClearPortFeature:
if (wIndex > C67X00_PORTS)
return -EPIPE;
switch (wValue) {
case USB_PORT_FEAT_ENABLE:
/* Reset the port so that the c67x00 also notices the
* disconnect */
c67x00_hub_reset_host_port(sie, port);
len = 0;
break;
case USB_PORT_FEAT_C_ENABLE:
dev_dbg(c67x00_hcd_dev(c67x00),
"ClearPortFeature (%d): C_ENABLE\n", port);
len = 0;
break;
case USB_PORT_FEAT_SUSPEND:
dev_dbg(c67x00_hcd_dev(c67x00),
"ClearPortFeature (%d): SUSPEND\n", port);
len = 0;
break;
case USB_PORT_FEAT_C_SUSPEND:
dev_dbg(c67x00_hcd_dev(c67x00),
"ClearPortFeature (%d): C_SUSPEND\n", port);
len = 0;
break;
case USB_PORT_FEAT_POWER:
dev_dbg(c67x00_hcd_dev(c67x00),
"ClearPortFeature (%d): POWER\n", port);
return -EPIPE;
case USB_PORT_FEAT_C_CONNECTION:
c67x00_ll_usb_clear_status(sie,
PORT_CONNECT_CHANGE(port));
len = 0;
break;
case USB_PORT_FEAT_C_OVER_CURRENT:
dev_dbg(c67x00_hcd_dev(c67x00),
"ClearPortFeature (%d): OVER_CURRENT\n", port);
len = 0;
break;
case USB_PORT_FEAT_C_RESET:
dev_dbg(c67x00_hcd_dev(c67x00),
"ClearPortFeature (%d): C_RESET\n", port);
len = 0;
break;
default:
dev_dbg(c67x00_hcd_dev(c67x00),
"%s: ClearPortFeature %d (0x%04x) Error!\n",
__func__, port, wValue);
return -EPIPE;
}
break;
case GetHubDescriptor:
len = min_t(unsigned int, sizeof(c67x00_hub_des), wLength);
memcpy(buf, c67x00_hub_des, len);
break;
default:
dev_dbg(c67x00_hcd_dev(c67x00), "%s: unknown\n", __func__);
return -EPIPE;
}
return 0;
}
/* ---------------------------------------------------------------------
* Main part of host controller driver
*/
/*
* c67x00_hcd_irq
*
* This function is called from the interrupt handler in c67x00-drv.c
*/
static void c67x00_hcd_irq(struct c67x00_sie *sie, u16 int_status, u16 msg)
{
struct c67x00_hcd *c67x00 = sie->private_data;
struct usb_hcd *hcd = c67x00_hcd_to_hcd(c67x00);
/* Handle sie message flags */
if (msg) {
if (msg & HUSB_TDListDone)
c67x00_sched_kick(c67x00);
else
dev_warn(c67x00_hcd_dev(c67x00),
"Unknown SIE msg flag(s): 0x%04x\n", msg);
}
if (unlikely(hcd->state == HC_STATE_HALT))
return;
if (!HCD_HW_ACCESSIBLE(hcd))
return;
/* Handle Start of frame events */
if (int_status & SOFEOP_FLG(sie->sie_num)) {
c67x00_ll_usb_clear_status(sie, SOF_EOP_IRQ_FLG);
c67x00_sched_kick(c67x00);
}
}
/*
* c67x00_hcd_start: Host controller start hook
*/
static int c67x00_hcd_start(struct usb_hcd *hcd)
{
hcd->uses_new_polling = 1;
hcd->state = HC_STATE_RUNNING;
set_bit(HCD_FLAG_POLL_RH, &hcd->flags);
return 0;
}
/*
* c67x00_hcd_stop: Host controller stop hook
*/
static void c67x00_hcd_stop(struct usb_hcd *hcd)
{
/* Nothing to do */
}
static int c67x00_hcd_get_frame(struct usb_hcd *hcd)
{
struct c67x00_hcd *c67x00 = hcd_to_c67x00_hcd(hcd);
u16 temp_val;
dev_dbg(c67x00_hcd_dev(c67x00), "%s\n", __func__);
temp_val = c67x00_ll_husb_get_frame(c67x00->sie);
temp_val &= HOST_FRAME_MASK;
return temp_val ? (temp_val - 1) : HOST_FRAME_MASK;
}
static const struct hc_driver c67x00_hc_driver = {
.description = "c67x00-hcd",
.product_desc = "Cypress C67X00 Host Controller",
.hcd_priv_size = sizeof(struct c67x00_hcd),
.flags = HCD_USB11 | HCD_MEMORY,
/*
* basic lifecycle operations
*/
.start = c67x00_hcd_start,
.stop = c67x00_hcd_stop,
/*
* managing i/o requests and associated device resources
*/
.urb_enqueue = c67x00_urb_enqueue,
.urb_dequeue = c67x00_urb_dequeue,
.endpoint_disable = c67x00_endpoint_disable,
/*
* scheduling support
*/
.get_frame_number = c67x00_hcd_get_frame,
/*
* root hub support
*/
.hub_status_data = c67x00_hub_status_data,
.hub_control = c67x00_hub_control,
};
/* ---------------------------------------------------------------------
* Setup/Teardown routines
*/
int c67x00_hcd_probe(struct c67x00_sie *sie)
{
struct c67x00_hcd *c67x00;
struct usb_hcd *hcd;
unsigned long flags;
int retval;
if (usb_disabled())
return -ENODEV;
hcd = usb_create_hcd(&c67x00_hc_driver, sie_dev(sie), "c67x00_sie");
if (!hcd) {
retval = -ENOMEM;
goto err0;
}
c67x00 = hcd_to_c67x00_hcd(hcd);
spin_lock_init(&c67x00->lock);
c67x00->sie = sie;
INIT_LIST_HEAD(&c67x00->list[PIPE_ISOCHRONOUS]);
INIT_LIST_HEAD(&c67x00->list[PIPE_INTERRUPT]);
INIT_LIST_HEAD(&c67x00->list[PIPE_CONTROL]);
INIT_LIST_HEAD(&c67x00->list[PIPE_BULK]);
c67x00->urb_count = 0;
INIT_LIST_HEAD(&c67x00->td_list);
c67x00->td_base_addr = CY_HCD_BUF_ADDR + SIE_TD_OFFSET(sie->sie_num);
c67x00->buf_base_addr = CY_HCD_BUF_ADDR + SIE_BUF_OFFSET(sie->sie_num);
c67x00->max_frame_bw = MAX_FRAME_BW_STD;
c67x00_ll_husb_init_host_port(sie);
init_completion(&c67x00->endpoint_disable);
retval = c67x00_sched_start_scheduler(c67x00);
if (retval)
goto err1;
retval = usb_add_hcd(hcd, 0, 0);
if (retval) {
dev_dbg(sie_dev(sie), "%s: usb_add_hcd returned %d\n",
__func__, retval);
goto err2;
}
device_wakeup_enable(hcd->self.controller);
spin_lock_irqsave(&sie->lock, flags);
sie->private_data = c67x00;
sie->irq = c67x00_hcd_irq;
spin_unlock_irqrestore(&sie->lock, flags);
return retval;
err2:
c67x00_sched_stop_scheduler(c67x00);
err1:
usb_put_hcd(hcd);
err0:
return retval;
}
/* may be called with controller, bus, and devices active */
void c67x00_hcd_remove(struct c67x00_sie *sie)
{
struct c67x00_hcd *c67x00 = sie->private_data;
struct usb_hcd *hcd = c67x00_hcd_to_hcd(c67x00);
c67x00_sched_stop_scheduler(c67x00);
usb_remove_hcd(hcd);
usb_put_hcd(hcd);
}
| linux-master | drivers/usb/c67x00/c67x00-hcd.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* c67x00-drv.c: Cypress C67X00 USB Common infrastructure
*
* Copyright (C) 2006-2008 Barco N.V.
* Derived from the Cypress cy7c67200/300 ezusb linux driver and
* based on multiple host controller drivers inside the linux kernel.
*/
/*
* This file implements the common infrastructure for using the c67x00.
* It is both the link between the platform configuration and subdrivers and
* the link between the common hardware parts and the subdrivers (e.g.
* interrupt handling).
*
 * The c67x00 has two SIEs (serial interface engines) which can be configured
 * to be host, device or OTG (with some limitations, e.g. only SIE1 can be OTG).
*
 * Depending on the platform configuration, the SIEs are created and
* the corresponding subdriver is initialized (c67x00_probe_sie).
*/
#include <linux/device.h>
#include <linux/io.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/usb.h>
#include <linux/usb/c67x00.h>
#include "c67x00.h"
#include "c67x00-hcd.h"
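/*
 * Initializes a SIE according to the platform configuration and, when it is
 * configured as a host, registers the corresponding HCD.
 */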
static void c67x00_probe_sie(struct c67x00_sie *sie,
struct c67x00_device *dev, int sie_num)
{
spin_lock_init(&sie->lock);
sie->dev = dev;
sie->sie_num = sie_num;
sie->mode = c67x00_sie_config(dev->pdata->sie_config, sie_num);
switch (sie->mode) {
case C67X00_SIE_HOST:
c67x00_hcd_probe(sie);
break;
case C67X00_SIE_UNUSED:
dev_info(sie_dev(sie),
"Not using SIE %d as requested\n", sie->sie_num);
break;
default:
dev_err(sie_dev(sie),
"Unsupported configuration: 0x%x for SIE %d\n",
sie->mode, sie->sie_num);
break;
}
}
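/* Tears down the subdriver attached to a SIE, if any */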
static void c67x00_remove_sie(struct c67x00_sie *sie)
{
switch (sie->mode) {
case C67X00_SIE_HOST:
c67x00_hcd_remove(sie);
break;
default:
break;
}
}
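/*
 * Main interrupt handler: reads the HPI interrupt status and dispatches it,
 * together with any pending SIE message, to the per-SIE handlers. Bails out
 * after a bounded number of iterations to avoid an interrupt storm.
 */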
static irqreturn_t c67x00_irq(int irq, void *__dev)
{
struct c67x00_device *c67x00 = __dev;
struct c67x00_sie *sie;
u16 msg, int_status;
int i, count = 8;
int_status = c67x00_ll_hpi_status(c67x00);
if (!int_status)
return IRQ_NONE;
while (int_status != 0 && (count-- >= 0)) {
c67x00_ll_irq(c67x00, int_status);
for (i = 0; i < C67X00_SIES; i++) {
sie = &c67x00->sie[i];
msg = 0;
if (int_status & SIEMSG_FLG(i))
msg = c67x00_ll_fetch_siemsg(c67x00, i);
if (sie->irq)
sie->irq(sie, int_status, msg);
}
int_status = c67x00_ll_hpi_status(c67x00);
}
if (int_status)
dev_warn(&c67x00->pdev->dev, "Not all interrupts handled! "
"status = 0x%04x\n", int_status);
return IRQ_HANDLED;
}
/* ------------------------------------------------------------------------- */
static int c67x00_drv_probe(struct platform_device *pdev)
{
struct c67x00_device *c67x00;
struct c67x00_platform_data *pdata;
struct resource *res, *res2;
int ret, i;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!res)
return -ENODEV;
res2 = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
if (!res2)
return -ENODEV;
pdata = dev_get_platdata(&pdev->dev);
if (!pdata)
return -ENODEV;
c67x00 = kzalloc(sizeof(*c67x00), GFP_KERNEL);
if (!c67x00)
return -ENOMEM;
if (!request_mem_region(res->start, resource_size(res),
pdev->name)) {
dev_err(&pdev->dev, "Memory region busy\n");
ret = -EBUSY;
goto request_mem_failed;
}
c67x00->hpi.base = ioremap(res->start, resource_size(res));
if (!c67x00->hpi.base) {
dev_err(&pdev->dev, "Unable to map HPI registers\n");
ret = -EIO;
goto map_failed;
}
spin_lock_init(&c67x00->hpi.lock);
c67x00->hpi.regstep = pdata->hpi_regstep;
c67x00->pdata = dev_get_platdata(&pdev->dev);
c67x00->pdev = pdev;
c67x00_ll_init(c67x00);
c67x00_ll_hpi_reg_init(c67x00);
ret = request_irq(res2->start, c67x00_irq, 0, pdev->name, c67x00);
if (ret) {
dev_err(&pdev->dev, "Cannot claim IRQ\n");
goto request_irq_failed;
}
ret = c67x00_ll_reset(c67x00);
if (ret) {
dev_err(&pdev->dev, "Device reset failed\n");
goto reset_failed;
}
for (i = 0; i < C67X00_SIES; i++)
c67x00_probe_sie(&c67x00->sie[i], c67x00, i);
platform_set_drvdata(pdev, c67x00);
return 0;
reset_failed:
free_irq(res2->start, c67x00);
request_irq_failed:
iounmap(c67x00->hpi.base);
map_failed:
release_mem_region(res->start, resource_size(res));
request_mem_failed:
kfree(c67x00);
return ret;
}
static void c67x00_drv_remove(struct platform_device *pdev)
{
struct c67x00_device *c67x00 = platform_get_drvdata(pdev);
struct resource *res;
int i;
for (i = 0; i < C67X00_SIES; i++)
c67x00_remove_sie(&c67x00->sie[i]);
c67x00_ll_release(c67x00);
res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
free_irq(res->start, c67x00);
iounmap(c67x00->hpi.base);
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
release_mem_region(res->start, resource_size(res));
kfree(c67x00);
}
static struct platform_driver c67x00_driver = {
.probe = c67x00_drv_probe,
.remove_new = c67x00_drv_remove,
.driver = {
.name = "c67x00",
},
};
module_platform_driver(c67x00_driver);
MODULE_AUTHOR("Peter Korsgaard, Jan Veldeman, Grant Likely");
MODULE_DESCRIPTION("Cypress C67X00 USB Controller Driver");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:c67x00");
| linux-master | drivers/usb/c67x00/c67x00-drv.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* c67x00-ll-hpi.c: Cypress C67X00 USB Low level interface using HPI
*
* Copyright (C) 2006-2008 Barco N.V.
* Derived from the Cypress cy7c67200/300 ezusb linux driver and
* based on multiple host controller drivers inside the linux kernel.
*/
#include <asm/byteorder.h>
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/jiffies.h>
#include <linux/usb/c67x00.h>
#include "c67x00.h"
#define COMM_REGS 14
struct c67x00_lcp_int_data {
u16 regs[COMM_REGS];
};
/* -------------------------------------------------------------------------- */
/* Interface definitions */
#define COMM_ACK 0x0FED
#define COMM_NAK 0xDEAD
#define COMM_RESET 0xFA50
#define COMM_EXEC_INT 0xCE01
#define COMM_INT_NUM 0x01C2
/* Registers 0 to COMM_REGS-1 */
#define COMM_R(x) (0x01C4 + 2 * (x))
#define HUSB_SIE_pCurrentTDPtr(x) ((x) ? 0x01B2 : 0x01B0)
#define HUSB_SIE_pTDListDone_Sem(x) ((x) ? 0x01B8 : 0x01B6)
#define HUSB_pEOT 0x01B4
/* Software interrupts */
/* 114, 115: */
#define HUSB_SIE_INIT_INT(x) ((x) ? 0x0073 : 0x0072)
#define HUSB_RESET_INT 0x0074
#define SUSB_INIT_INT 0x0071
#define SUSB_INIT_INT_LOC (SUSB_INIT_INT * 2)
/* -----------------------------------------------------------------------
* HPI implementation
*
 * The c67x00 chip also supports control via SPI or HSS serial
* interfaces. However, this driver assumes that register access can
* be performed from IRQ context. While this is a safe assumption with
* the HPI interface, it is not true for the serial interfaces.
*/
/* HPI registers */
#define HPI_DATA 0
#define HPI_MAILBOX 1
#define HPI_ADDR 2
#define HPI_STATUS 3
/*
* According to CY7C67300 specification (tables 140 and 141) HPI read and
* write cycle duration Tcyc must be at least 6T long, where T is 1/48MHz,
* which is 125ns.
*/
#define HPI_T_CYC_NS 125
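/*
 * Illustrative sketch (not part of the driver): the 125 ns figure follows
 * directly from six cycles of the 48 MHz HPI clock.
 */
#define HPI_CLK_HZ_EXAMPLE	48000000ULL
#define HPI_T_CYC_NS_EXAMPLE	(6ULL * 1000000000ULL / HPI_CLK_HZ_EXAMPLE)	/* = 125 */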
static inline u16 hpi_read_reg(struct c67x00_device *dev, int reg)
{
ndelay(HPI_T_CYC_NS);
return __raw_readw(dev->hpi.base + reg * dev->hpi.regstep);
}
static inline void hpi_write_reg(struct c67x00_device *dev, int reg, u16 value)
{
ndelay(HPI_T_CYC_NS);
__raw_writew(value, dev->hpi.base + reg * dev->hpi.regstep);
}
static inline u16 hpi_read_word_nolock(struct c67x00_device *dev, u16 reg)
{
hpi_write_reg(dev, HPI_ADDR, reg);
return hpi_read_reg(dev, HPI_DATA);
}
static u16 hpi_read_word(struct c67x00_device *dev, u16 reg)
{
u16 value;
unsigned long flags;
spin_lock_irqsave(&dev->hpi.lock, flags);
value = hpi_read_word_nolock(dev, reg);
spin_unlock_irqrestore(&dev->hpi.lock, flags);
return value;
}
static void hpi_write_word_nolock(struct c67x00_device *dev, u16 reg, u16 value)
{
hpi_write_reg(dev, HPI_ADDR, reg);
hpi_write_reg(dev, HPI_DATA, value);
}
static void hpi_write_word(struct c67x00_device *dev, u16 reg, u16 value)
{
unsigned long flags;
spin_lock_irqsave(&dev->hpi.lock, flags);
hpi_write_word_nolock(dev, reg, value);
spin_unlock_irqrestore(&dev->hpi.lock, flags);
}
/*
 * Only data is little endian, addr has cpu endianness
*/
static void hpi_write_words_le16(struct c67x00_device *dev, u16 addr,
__le16 *data, u16 count)
{
unsigned long flags;
int i;
spin_lock_irqsave(&dev->hpi.lock, flags);
hpi_write_reg(dev, HPI_ADDR, addr);
for (i = 0; i < count; i++)
hpi_write_reg(dev, HPI_DATA, le16_to_cpu(*data++));
spin_unlock_irqrestore(&dev->hpi.lock, flags);
}
/*
 * Only data is little endian, addr has cpu endianness
*/
static void hpi_read_words_le16(struct c67x00_device *dev, u16 addr,
__le16 *data, u16 count)
{
unsigned long flags;
int i;
spin_lock_irqsave(&dev->hpi.lock, flags);
hpi_write_reg(dev, HPI_ADDR, addr);
for (i = 0; i < count; i++)
*data++ = cpu_to_le16(hpi_read_reg(dev, HPI_DATA));
spin_unlock_irqrestore(&dev->hpi.lock, flags);
}
static void hpi_set_bits(struct c67x00_device *dev, u16 reg, u16 mask)
{
u16 value;
unsigned long flags;
spin_lock_irqsave(&dev->hpi.lock, flags);
value = hpi_read_word_nolock(dev, reg);
hpi_write_word_nolock(dev, reg, value | mask);
spin_unlock_irqrestore(&dev->hpi.lock, flags);
}
static void hpi_clear_bits(struct c67x00_device *dev, u16 reg, u16 mask)
{
u16 value;
unsigned long flags;
spin_lock_irqsave(&dev->hpi.lock, flags);
value = hpi_read_word_nolock(dev, reg);
hpi_write_word_nolock(dev, reg, value & ~mask);
spin_unlock_irqrestore(&dev->hpi.lock, flags);
}
static u16 hpi_recv_mbox(struct c67x00_device *dev)
{
u16 value;
unsigned long flags;
spin_lock_irqsave(&dev->hpi.lock, flags);
value = hpi_read_reg(dev, HPI_MAILBOX);
spin_unlock_irqrestore(&dev->hpi.lock, flags);
return value;
}
static u16 hpi_send_mbox(struct c67x00_device *dev, u16 value)
{
unsigned long flags;
spin_lock_irqsave(&dev->hpi.lock, flags);
hpi_write_reg(dev, HPI_MAILBOX, value);
spin_unlock_irqrestore(&dev->hpi.lock, flags);
return value;
}
u16 c67x00_ll_hpi_status(struct c67x00_device *dev)
{
u16 value;
unsigned long flags;
spin_lock_irqsave(&dev->hpi.lock, flags);
value = hpi_read_reg(dev, HPI_STATUS);
spin_unlock_irqrestore(&dev->hpi.lock, flags);
return value;
}
void c67x00_ll_hpi_reg_init(struct c67x00_device *dev)
{
int i;
hpi_recv_mbox(dev);
c67x00_ll_hpi_status(dev);
hpi_write_word(dev, HPI_IRQ_ROUTING_REG, 0);
for (i = 0; i < C67X00_SIES; i++) {
hpi_write_word(dev, SIEMSG_REG(i), 0);
hpi_read_word(dev, SIEMSG_REG(i));
}
}
void c67x00_ll_hpi_enable_sofeop(struct c67x00_sie *sie)
{
hpi_set_bits(sie->dev, HPI_IRQ_ROUTING_REG,
SOFEOP_TO_HPI_EN(sie->sie_num));
}
void c67x00_ll_hpi_disable_sofeop(struct c67x00_sie *sie)
{
hpi_clear_bits(sie->dev, HPI_IRQ_ROUTING_REG,
SOFEOP_TO_HPI_EN(sie->sie_num));
}
/* -------------------------------------------------------------------------- */
/* Transactions */
static inline int ll_recv_msg(struct c67x00_device *dev)
{
u16 res;
res = wait_for_completion_timeout(&dev->hpi.lcp.msg_received, 5 * HZ);
WARN_ON(!res);
return (res == 0) ? -EIO : 0;
}
/* -------------------------------------------------------------------------- */
/* General functions */
u16 c67x00_ll_fetch_siemsg(struct c67x00_device *dev, int sie_num)
{
u16 val;
val = hpi_read_word(dev, SIEMSG_REG(sie_num));
/* clear register to allow next message */
hpi_write_word(dev, SIEMSG_REG(sie_num), 0);
return val;
}
u16 c67x00_ll_get_usb_ctl(struct c67x00_sie *sie)
{
return hpi_read_word(sie->dev, USB_CTL_REG(sie->sie_num));
}
/*
* c67x00_ll_usb_clear_status - clear the USB status bits
*/
void c67x00_ll_usb_clear_status(struct c67x00_sie *sie, u16 bits)
{
hpi_write_word(sie->dev, USB_STAT_REG(sie->sie_num), bits);
}
u16 c67x00_ll_usb_get_status(struct c67x00_sie *sie)
{
return hpi_read_word(sie->dev, USB_STAT_REG(sie->sie_num));
}
/* -------------------------------------------------------------------------- */
static int c67x00_comm_exec_int(struct c67x00_device *dev, u16 nr,
struct c67x00_lcp_int_data *data)
{
int i, rc;
mutex_lock(&dev->hpi.lcp.mutex);
hpi_write_word(dev, COMM_INT_NUM, nr);
for (i = 0; i < COMM_REGS; i++)
hpi_write_word(dev, COMM_R(i), data->regs[i]);
hpi_send_mbox(dev, COMM_EXEC_INT);
rc = ll_recv_msg(dev);
mutex_unlock(&dev->hpi.lcp.mutex);
return rc;
}
/* -------------------------------------------------------------------------- */
/* Host specific functions */
void c67x00_ll_set_husb_eot(struct c67x00_device *dev, u16 value)
{
mutex_lock(&dev->hpi.lcp.mutex);
hpi_write_word(dev, HUSB_pEOT, value);
mutex_unlock(&dev->hpi.lcp.mutex);
}
static inline void c67x00_ll_husb_sie_init(struct c67x00_sie *sie)
{
struct c67x00_device *dev = sie->dev;
struct c67x00_lcp_int_data data;
int rc;
rc = c67x00_comm_exec_int(dev, HUSB_SIE_INIT_INT(sie->sie_num), &data);
BUG_ON(rc); /* No return path for error code; crash spectacularly */
}
void c67x00_ll_husb_reset(struct c67x00_sie *sie, int port)
{
struct c67x00_device *dev = sie->dev;
struct c67x00_lcp_int_data data;
int rc;
data.regs[0] = 50; /* Reset USB port for 50ms */
data.regs[1] = port | (sie->sie_num << 1);
rc = c67x00_comm_exec_int(dev, HUSB_RESET_INT, &data);
BUG_ON(rc); /* No return path for error code; crash spectacularly */
}
void c67x00_ll_husb_set_current_td(struct c67x00_sie *sie, u16 addr)
{
hpi_write_word(sie->dev, HUSB_SIE_pCurrentTDPtr(sie->sie_num), addr);
}
u16 c67x00_ll_husb_get_current_td(struct c67x00_sie *sie)
{
return hpi_read_word(sie->dev, HUSB_SIE_pCurrentTDPtr(sie->sie_num));
}
u16 c67x00_ll_husb_get_frame(struct c67x00_sie *sie)
{
return hpi_read_word(sie->dev, HOST_FRAME_REG(sie->sie_num));
}
void c67x00_ll_husb_init_host_port(struct c67x00_sie *sie)
{
/* Set port into host mode */
hpi_set_bits(sie->dev, USB_CTL_REG(sie->sie_num), HOST_MODE);
c67x00_ll_husb_sie_init(sie);
/* Clear interrupts */
c67x00_ll_usb_clear_status(sie, HOST_STAT_MASK);
/* Check */
if (!(hpi_read_word(sie->dev, USB_CTL_REG(sie->sie_num)) & HOST_MODE))
dev_warn(sie_dev(sie),
"SIE %d not set to host mode\n", sie->sie_num);
}
void c67x00_ll_husb_reset_port(struct c67x00_sie *sie, int port)
{
/* Clear connect change */
c67x00_ll_usb_clear_status(sie, PORT_CONNECT_CHANGE(port));
/* Enable interrupts */
hpi_set_bits(sie->dev, HPI_IRQ_ROUTING_REG,
SOFEOP_TO_CPU_EN(sie->sie_num));
hpi_set_bits(sie->dev, HOST_IRQ_EN_REG(sie->sie_num),
SOF_EOP_IRQ_EN | DONE_IRQ_EN);
/* Enable pull down transistors */
hpi_set_bits(sie->dev, USB_CTL_REG(sie->sie_num), PORT_RES_EN(port));
}
/* -------------------------------------------------------------------------- */
void c67x00_ll_irq(struct c67x00_device *dev, u16 int_status)
{
if ((int_status & MBX_OUT_FLG) == 0)
return;
dev->hpi.lcp.last_msg = hpi_recv_mbox(dev);
complete(&dev->hpi.lcp.msg_received);
}
/* -------------------------------------------------------------------------- */
int c67x00_ll_reset(struct c67x00_device *dev)
{
int rc;
mutex_lock(&dev->hpi.lcp.mutex);
hpi_send_mbox(dev, COMM_RESET);
rc = ll_recv_msg(dev);
mutex_unlock(&dev->hpi.lcp.mutex);
return rc;
}
/* -------------------------------------------------------------------------- */
/*
* c67x00_ll_write_mem_le16 - write into c67x00 memory
 * Only data is little endian, addr has cpu endianness.
*/
void c67x00_ll_write_mem_le16(struct c67x00_device *dev, u16 addr,
void *data, int len)
{
u8 *buf = data;
/* Sanity check */
if (addr + len > 0xffff) {
dev_err(&dev->pdev->dev,
"Trying to write beyond writable region!\n");
return;
}
if (addr & 0x01) {
/* unaligned access */
u16 tmp;
tmp = hpi_read_word(dev, addr - 1);
tmp = (tmp & 0x00ff) | (*buf++ << 8);
hpi_write_word(dev, addr - 1, tmp);
addr++;
len--;
}
hpi_write_words_le16(dev, addr, (__le16 *)buf, len / 2);
buf += len & ~0x01;
addr += len & ~0x01;
len &= 0x01;
if (len) {
u16 tmp;
tmp = hpi_read_word(dev, addr);
tmp = (tmp & 0xff00) | *buf;
hpi_write_word(dev, addr, tmp);
}
}
/*
* c67x00_ll_read_mem_le16 - read from c67x00 memory
 * Only data is little endian, addr has cpu endianness.
*/
void c67x00_ll_read_mem_le16(struct c67x00_device *dev, u16 addr,
void *data, int len)
{
u8 *buf = data;
if (addr & 0x01) {
/* unaligned access */
u16 tmp;
tmp = hpi_read_word(dev, addr - 1);
*buf++ = (tmp >> 8) & 0x00ff;
addr++;
len--;
}
hpi_read_words_le16(dev, addr, (__le16 *)buf, len / 2);
buf += len & ~0x01;
addr += len & ~0x01;
len &= 0x01;
if (len) {
u16 tmp;
tmp = hpi_read_word(dev, addr);
*buf = tmp & 0x00ff;
}
}
/* -------------------------------------------------------------------------- */
void c67x00_ll_init(struct c67x00_device *dev)
{
mutex_init(&dev->hpi.lcp.mutex);
init_completion(&dev->hpi.lcp.msg_received);
}
void c67x00_ll_release(struct c67x00_device *dev)
{
}
| linux-master | drivers/usb/c67x00/c67x00-ll-hpi.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* c67x00-sched.c: Cypress C67X00 USB Host Controller Driver - TD scheduling
*
* Copyright (C) 2006-2008 Barco N.V.
* Derived from the Cypress cy7c67200/300 ezusb linux driver and
* based on multiple host controller drivers inside the linux kernel.
*/
#include <linux/kthread.h>
#include <linux/slab.h>
#include "c67x00.h"
#include "c67x00-hcd.h"
/*
 * These are the stages for a control urb; they are kept
* in both urb->interval and td->privdata.
*/
#define SETUP_STAGE 0
#define DATA_STAGE 1
#define STATUS_STAGE 2
/* -------------------------------------------------------------------------- */
/*
* struct c67x00_ep_data: Host endpoint data structure
*/
struct c67x00_ep_data {
struct list_head queue;
struct list_head node;
struct usb_host_endpoint *hep;
struct usb_device *dev;
u16 next_frame; /* For int/isoc transactions */
};
/*
* struct c67x00_td
*
 * Hardware parts are little endian, SW is in CPU endianness.
*/
struct c67x00_td {
/* HW specific part */
__le16 ly_base_addr; /* Bytes 0-1 */
__le16 port_length; /* Bytes 2-3 */
u8 pid_ep; /* Byte 4 */
u8 dev_addr; /* Byte 5 */
u8 ctrl_reg; /* Byte 6 */
u8 status; /* Byte 7 */
u8 retry_cnt; /* Byte 8 */
#define TT_OFFSET 2
#define TT_CONTROL 0
#define TT_ISOCHRONOUS 1
#define TT_BULK 2
#define TT_INTERRUPT 3
u8 residue; /* Byte 9 */
__le16 next_td_addr; /* Bytes 10-11 */
/* SW part */
struct list_head td_list;
u16 td_addr;
void *data;
struct urb *urb;
unsigned long privdata;
/* These are needed for handling the toggle bits:
	 * an urb can be dequeued while a td is in progress;
* after checking the td, the toggle bit might need to
* be fixed */
struct c67x00_ep_data *ep_data;
unsigned int pipe;
};
struct c67x00_urb_priv {
struct list_head hep_node;
struct urb *urb;
int port;
int cnt; /* packet number for isoc */
int status;
struct c67x00_ep_data *ep_data;
};
#define td_udev(td) ((td)->ep_data->dev)
#define CY_TD_SIZE 12
#define TD_PIDEP_OFFSET 0x04
#define TD_PIDEPMASK_PID 0xF0
#define TD_PIDEPMASK_EP 0x0F
#define TD_PORTLENMASK_DL 0x03FF
#define TD_PORTLENMASK_PN 0xC000
#define TD_STATUS_OFFSET 0x07
#define TD_STATUSMASK_ACK 0x01
#define TD_STATUSMASK_ERR 0x02
#define TD_STATUSMASK_TMOUT 0x04
#define TD_STATUSMASK_SEQ 0x08
#define TD_STATUSMASK_SETUP 0x10
#define TD_STATUSMASK_OVF 0x20
#define TD_STATUSMASK_NAK 0x40
#define TD_STATUSMASK_STALL 0x80
#define TD_ERROR_MASK (TD_STATUSMASK_ERR | TD_STATUSMASK_TMOUT | \
TD_STATUSMASK_STALL)
#define TD_RETRYCNT_OFFSET 0x08
#define TD_RETRYCNTMASK_ACT_FLG 0x10
#define TD_RETRYCNTMASK_TX_TYPE 0x0C
#define TD_RETRYCNTMASK_RTY_CNT 0x03
#define TD_RESIDUE_OVERFLOW 0x80
#define TD_PID_IN 0x90
/* Residue: signed 8bits, neg -> OVERFLOW, pos -> UNDERFLOW */
#define td_residue(td) ((__s8)(td->residue))
#define td_ly_base_addr(td) (__le16_to_cpu((td)->ly_base_addr))
#define td_port_length(td) (__le16_to_cpu((td)->port_length))
#define td_next_td_addr(td) (__le16_to_cpu((td)->next_td_addr))
#define td_active(td) ((td)->retry_cnt & TD_RETRYCNTMASK_ACT_FLG)
#define td_length(td) (td_port_length(td) & TD_PORTLENMASK_DL)
#define td_sequence_ok(td) (!td->status || \
(!(td->status & TD_STATUSMASK_SEQ) == \
!(td->ctrl_reg & SEQ_SEL)))
#define td_acked(td) (!td->status || \
(td->status & TD_STATUSMASK_ACK))
#define td_actual_bytes(td) (td_length(td) - td_residue(td))
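/*
 * Illustrative sketch (not part of the driver): for a TD that requested 64
 * bytes but came back with a residue of 4 (a short packet), the macros
 * above give td_length() == 64 and td_actual_bytes() == 60.
 */
static inline int c67x00_short_packet_example(void)
{
	struct c67x00_td td = {
		.port_length	= __cpu_to_le16(64),	/* requested length */
		.residue	= 4,			/* 4 bytes short */
	};

	return td_actual_bytes(&td);			/* evaluates to 60 */
}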
/* -------------------------------------------------------------------------- */
/*
* dbg_td - Dump the contents of the TD
*/
static void dbg_td(struct c67x00_hcd *c67x00, struct c67x00_td *td, char *msg)
{
struct device *dev = c67x00_hcd_dev(c67x00);
dev_dbg(dev, "### %s at 0x%04x\n", msg, td->td_addr);
dev_dbg(dev, "urb: 0x%p\n", td->urb);
dev_dbg(dev, "endpoint: %4d\n", usb_pipeendpoint(td->pipe));
dev_dbg(dev, "pipeout: %4d\n", usb_pipeout(td->pipe));
dev_dbg(dev, "ly_base_addr: 0x%04x\n", td_ly_base_addr(td));
dev_dbg(dev, "port_length: 0x%04x\n", td_port_length(td));
dev_dbg(dev, "pid_ep: 0x%02x\n", td->pid_ep);
dev_dbg(dev, "dev_addr: 0x%02x\n", td->dev_addr);
dev_dbg(dev, "ctrl_reg: 0x%02x\n", td->ctrl_reg);
dev_dbg(dev, "status: 0x%02x\n", td->status);
dev_dbg(dev, "retry_cnt: 0x%02x\n", td->retry_cnt);
dev_dbg(dev, "residue: 0x%02x\n", td->residue);
dev_dbg(dev, "next_td_addr: 0x%04x\n", td_next_td_addr(td));
dev_dbg(dev, "data: %*ph\n", td_length(td), td->data);
}
/* -------------------------------------------------------------------------- */
/* Helper functions */
static inline u16 c67x00_get_current_frame_number(struct c67x00_hcd *c67x00)
{
return c67x00_ll_husb_get_frame(c67x00->sie) & HOST_FRAME_MASK;
}
/*
* frame_add
* Software wraparound for framenumbers.
*/
static inline u16 frame_add(u16 a, u16 b)
{
return (a + b) & HOST_FRAME_MASK;
}
/*
* frame_after - is frame a after frame b
*/
static inline int frame_after(u16 a, u16 b)
{
return ((HOST_FRAME_MASK + a - b) & HOST_FRAME_MASK) <
(HOST_FRAME_MASK / 2);
}
/*
* frame_after_eq - is frame a after or equal to frame b
*/
static inline int frame_after_eq(u16 a, u16 b)
{
return ((HOST_FRAME_MASK + 1 + a - b) & HOST_FRAME_MASK) <
(HOST_FRAME_MASK / 2);
}
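/*
 * Illustrative sketch (not part of the driver), assuming the 11-bit USB
 * frame counter behind HOST_FRAME_MASK: the helpers above make frame
 * arithmetic wrap correctly, e.g. frame 0x07fe plus 3 becomes 0x0001,
 * which still compares as being after 0x07fe.
 */
static inline bool c67x00_frame_wrap_example(void)
{
	u16 next = frame_add(0x07fe, 3);	/* wraps around to 0x0001 */

	return frame_after(next, 0x07fe);	/* true despite the wrap */
}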
/* -------------------------------------------------------------------------- */
/*
* c67x00_release_urb - remove link from all tds to this urb
 * Disconnects the urb from its tds, so that it can be given back.
* pre: urb->hcpriv != NULL
*/
static void c67x00_release_urb(struct c67x00_hcd *c67x00, struct urb *urb)
{
struct c67x00_td *td;
struct c67x00_urb_priv *urbp;
BUG_ON(!urb);
c67x00->urb_count--;
if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS) {
c67x00->urb_iso_count--;
if (c67x00->urb_iso_count == 0)
c67x00->max_frame_bw = MAX_FRAME_BW_STD;
}
	/* TODO this might not be so efficient when we've got many urbs!
* Alternatives:
* * only clear when needed
* * keep a list of tds with each urbp
*/
list_for_each_entry(td, &c67x00->td_list, td_list)
if (urb == td->urb)
td->urb = NULL;
urbp = urb->hcpriv;
urb->hcpriv = NULL;
list_del(&urbp->hep_node);
kfree(urbp);
}
/* -------------------------------------------------------------------------- */
static struct c67x00_ep_data *
c67x00_ep_data_alloc(struct c67x00_hcd *c67x00, struct urb *urb)
{
struct usb_host_endpoint *hep = urb->ep;
struct c67x00_ep_data *ep_data;
int type;
c67x00->current_frame = c67x00_get_current_frame_number(c67x00);
/* Check if endpoint already has a c67x00_ep_data struct allocated */
if (hep->hcpriv) {
ep_data = hep->hcpriv;
if (frame_after(c67x00->current_frame, ep_data->next_frame))
ep_data->next_frame =
frame_add(c67x00->current_frame, 1);
return hep->hcpriv;
}
/* Allocate and initialize a new c67x00 endpoint data structure */
ep_data = kzalloc(sizeof(*ep_data), GFP_ATOMIC);
if (!ep_data)
return NULL;
INIT_LIST_HEAD(&ep_data->queue);
INIT_LIST_HEAD(&ep_data->node);
ep_data->hep = hep;
	/* hold a reference to udev as long as this endpoint lives;
* this is needed to possibly fix the data toggle */
ep_data->dev = usb_get_dev(urb->dev);
hep->hcpriv = ep_data;
/* For ISOC and INT endpoints, start ASAP: */
ep_data->next_frame = frame_add(c67x00->current_frame, 1);
/* Add the endpoint data to one of the pipe lists; must be added
in order of endpoint address */
type = usb_pipetype(urb->pipe);
if (list_empty(&ep_data->node)) {
list_add(&ep_data->node, &c67x00->list[type]);
} else {
struct c67x00_ep_data *prev;
list_for_each_entry(prev, &c67x00->list[type], node) {
if (prev->hep->desc.bEndpointAddress >
hep->desc.bEndpointAddress) {
list_add(&ep_data->node, prev->node.prev);
break;
}
}
}
return ep_data;
}
static int c67x00_ep_data_free(struct usb_host_endpoint *hep)
{
struct c67x00_ep_data *ep_data = hep->hcpriv;
if (!ep_data)
return 0;
if (!list_empty(&ep_data->queue))
return -EBUSY;
usb_put_dev(ep_data->dev);
list_del(&ep_data->queue);
list_del(&ep_data->node);
kfree(ep_data);
hep->hcpriv = NULL;
return 0;
}
void c67x00_endpoint_disable(struct usb_hcd *hcd, struct usb_host_endpoint *ep)
{
struct c67x00_hcd *c67x00 = hcd_to_c67x00_hcd(hcd);
unsigned long flags;
if (!list_empty(&ep->urb_list))
dev_warn(c67x00_hcd_dev(c67x00), "error: urb list not empty\n");
spin_lock_irqsave(&c67x00->lock, flags);
/* loop waiting for all transfers in the endpoint queue to complete */
while (c67x00_ep_data_free(ep)) {
/* Drop the lock so we can sleep waiting for the hardware */
spin_unlock_irqrestore(&c67x00->lock, flags);
/* it could happen that we reinitialize this completion, while
* somebody was waiting for that completion. The timeout and
* while loop handle such cases, but this might be improved */
reinit_completion(&c67x00->endpoint_disable);
c67x00_sched_kick(c67x00);
wait_for_completion_timeout(&c67x00->endpoint_disable, 1 * HZ);
spin_lock_irqsave(&c67x00->lock, flags);
}
spin_unlock_irqrestore(&c67x00->lock, flags);
}
/* -------------------------------------------------------------------------- */
static inline int get_root_port(struct usb_device *dev)
{
while (dev->parent->parent)
dev = dev->parent;
return dev->portnum;
}
int c67x00_urb_enqueue(struct usb_hcd *hcd,
struct urb *urb, gfp_t mem_flags)
{
int ret;
unsigned long flags;
struct c67x00_urb_priv *urbp;
struct c67x00_hcd *c67x00 = hcd_to_c67x00_hcd(hcd);
int port = get_root_port(urb->dev)-1;
/* Allocate and initialize urb private data */
urbp = kzalloc(sizeof(*urbp), mem_flags);
if (!urbp) {
ret = -ENOMEM;
goto err_urbp;
}
spin_lock_irqsave(&c67x00->lock, flags);
/* Make sure host controller is running */
if (!HC_IS_RUNNING(hcd->state)) {
ret = -ENODEV;
goto err_not_linked;
}
ret = usb_hcd_link_urb_to_ep(hcd, urb);
if (ret)
goto err_not_linked;
INIT_LIST_HEAD(&urbp->hep_node);
urbp->urb = urb;
urbp->port = port;
urbp->ep_data = c67x00_ep_data_alloc(c67x00, urb);
if (!urbp->ep_data) {
ret = -ENOMEM;
goto err_epdata;
}
/* TODO claim bandwidth with usb_claim_bandwidth?
* also release it somewhere! */
urb->hcpriv = urbp;
urb->actual_length = 0; /* Nothing received/transmitted yet */
switch (usb_pipetype(urb->pipe)) {
case PIPE_CONTROL:
urb->interval = SETUP_STAGE;
break;
case PIPE_INTERRUPT:
break;
case PIPE_BULK:
break;
case PIPE_ISOCHRONOUS:
if (c67x00->urb_iso_count == 0)
c67x00->max_frame_bw = MAX_FRAME_BW_ISO;
c67x00->urb_iso_count++;
/* Assume always URB_ISO_ASAP, FIXME */
if (list_empty(&urbp->ep_data->queue))
urb->start_frame = urbp->ep_data->next_frame;
else {
/* Go right after the last one */
struct urb *last_urb;
last_urb = list_entry(urbp->ep_data->queue.prev,
struct c67x00_urb_priv,
hep_node)->urb;
urb->start_frame =
frame_add(last_urb->start_frame,
last_urb->number_of_packets *
last_urb->interval);
}
urbp->cnt = 0;
break;
}
/* Add the URB to the endpoint queue */
list_add_tail(&urbp->hep_node, &urbp->ep_data->queue);
/* If this is the only URB, kick start the controller */
if (!c67x00->urb_count++)
c67x00_ll_hpi_enable_sofeop(c67x00->sie);
c67x00_sched_kick(c67x00);
spin_unlock_irqrestore(&c67x00->lock, flags);
return 0;
err_epdata:
usb_hcd_unlink_urb_from_ep(hcd, urb);
err_not_linked:
spin_unlock_irqrestore(&c67x00->lock, flags);
kfree(urbp);
err_urbp:
return ret;
}
int c67x00_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
{
struct c67x00_hcd *c67x00 = hcd_to_c67x00_hcd(hcd);
unsigned long flags;
int rc;
spin_lock_irqsave(&c67x00->lock, flags);
rc = usb_hcd_check_unlink_urb(hcd, urb, status);
if (rc)
goto done;
c67x00_release_urb(c67x00, urb);
usb_hcd_unlink_urb_from_ep(hcd, urb);
spin_unlock(&c67x00->lock);
usb_hcd_giveback_urb(hcd, urb, status);
spin_lock(&c67x00->lock);
spin_unlock_irqrestore(&c67x00->lock, flags);
return 0;
done:
spin_unlock_irqrestore(&c67x00->lock, flags);
return rc;
}
/* -------------------------------------------------------------------------- */
/*
* pre: c67x00 locked, urb unlocked
*/
static void
c67x00_giveback_urb(struct c67x00_hcd *c67x00, struct urb *urb, int status)
{
struct c67x00_urb_priv *urbp;
if (!urb)
return;
urbp = urb->hcpriv;
urbp->status = status;
list_del_init(&urbp->hep_node);
c67x00_release_urb(c67x00, urb);
usb_hcd_unlink_urb_from_ep(c67x00_hcd_to_hcd(c67x00), urb);
spin_unlock(&c67x00->lock);
usb_hcd_giveback_urb(c67x00_hcd_to_hcd(c67x00), urb, status);
spin_lock(&c67x00->lock);
}
/* -------------------------------------------------------------------------- */
static int c67x00_claim_frame_bw(struct c67x00_hcd *c67x00, struct urb *urb,
int len, int periodic)
{
struct c67x00_urb_priv *urbp = urb->hcpriv;
int bit_time;
/* According to the C67x00 BIOS user manual, page 3-18,19, the
* following calculations provide the full speed bit times for
* a transaction.
*
* FS(in) = 112.5 + 9.36*BC + HOST_DELAY
* FS(in,iso) = 90.5 + 9.36*BC + HOST_DELAY
* FS(out) = 112.5 + 9.36*BC + HOST_DELAY
* FS(out,iso) = 78.4 + 9.36*BC + HOST_DELAY
* LS(in) = 802.4 + 75.78*BC + HOST_DELAY
* LS(out) = 802.6 + 74.67*BC + HOST_DELAY
*
* HOST_DELAY == 106 for the c67200 and c67300.
*/
/* make calculations in 1/100 bit times to maintain resolution */
if (urbp->ep_data->dev->speed == USB_SPEED_LOW) {
/* Low speed pipe */
if (usb_pipein(urb->pipe))
bit_time = 80240 + 7578*len;
else
bit_time = 80260 + 7467*len;
} else {
/* FS pipes */
if (usb_pipeisoc(urb->pipe))
bit_time = usb_pipein(urb->pipe) ? 9050 : 7840;
else
bit_time = 11250;
bit_time += 936*len;
}
/* Scale back down to integer bit times. Use a host delay of 106.
* (this is the only place it is used) */
bit_time = ((bit_time+50) / 100) + 106;
if (unlikely(bit_time + c67x00->bandwidth_allocated >=
c67x00->max_frame_bw))
return -EMSGSIZE;
if (unlikely(c67x00->next_td_addr + CY_TD_SIZE >=
c67x00->td_base_addr + SIE_TD_SIZE))
return -EMSGSIZE;
if (unlikely(c67x00->next_buf_addr + len >=
c67x00->buf_base_addr + SIE_TD_BUF_SIZE))
return -EMSGSIZE;
if (periodic) {
if (unlikely(bit_time + c67x00->periodic_bw_allocated >=
MAX_PERIODIC_BW(c67x00->max_frame_bw)))
return -EMSGSIZE;
c67x00->periodic_bw_allocated += bit_time;
}
c67x00->bandwidth_allocated += bit_time;
return 0;
}
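/*
 * Illustrative sketch (not part of the driver): applying the full-speed,
 * non-isochronous formula above, a 64-byte bulk packet costs
 * (11250 + 936 * 64 + 50) / 100 + 106 = 818 bit times of frame bandwidth.
 */
static inline int c67x00_fs_bulk_bit_time_example(int len)
{
	/* 1/100 bit time resolution, rounded, plus the host delay of 106 */
	return (11250 + 936 * len + 50) / 100 + 106;
}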
/* -------------------------------------------------------------------------- */
/*
* td_addr and buf_addr must be word aligned
*/
static int c67x00_create_td(struct c67x00_hcd *c67x00, struct urb *urb,
void *data, int len, int pid, int toggle,
unsigned long privdata)
{
struct c67x00_td *td;
struct c67x00_urb_priv *urbp = urb->hcpriv;
const __u8 active_flag = 1, retry_cnt = 3;
__u8 cmd = 0;
int tt = 0;
if (c67x00_claim_frame_bw(c67x00, urb, len, usb_pipeisoc(urb->pipe)
|| usb_pipeint(urb->pipe)))
return -EMSGSIZE; /* Not really an error, but expected */
td = kzalloc(sizeof(*td), GFP_ATOMIC);
if (!td)
return -ENOMEM;
td->pipe = urb->pipe;
td->ep_data = urbp->ep_data;
if ((td_udev(td)->speed == USB_SPEED_LOW) &&
!(c67x00->low_speed_ports & (1 << urbp->port)))
cmd |= PREAMBLE_EN;
switch (usb_pipetype(td->pipe)) {
case PIPE_ISOCHRONOUS:
tt = TT_ISOCHRONOUS;
cmd |= ISO_EN;
break;
case PIPE_CONTROL:
tt = TT_CONTROL;
break;
case PIPE_BULK:
tt = TT_BULK;
break;
case PIPE_INTERRUPT:
tt = TT_INTERRUPT;
break;
}
if (toggle)
cmd |= SEQ_SEL;
cmd |= ARM_EN;
/* SW part */
td->td_addr = c67x00->next_td_addr;
c67x00->next_td_addr = c67x00->next_td_addr + CY_TD_SIZE;
/* HW part */
td->ly_base_addr = __cpu_to_le16(c67x00->next_buf_addr);
td->port_length = __cpu_to_le16((c67x00->sie->sie_num << 15) |
(urbp->port << 14) | (len & 0x3FF));
td->pid_ep = ((pid & 0xF) << TD_PIDEP_OFFSET) |
(usb_pipeendpoint(td->pipe) & 0xF);
td->dev_addr = usb_pipedevice(td->pipe) & 0x7F;
td->ctrl_reg = cmd;
td->status = 0;
td->retry_cnt = (tt << TT_OFFSET) | (active_flag << 4) | retry_cnt;
td->residue = 0;
td->next_td_addr = __cpu_to_le16(c67x00->next_td_addr);
/* SW part */
td->data = data;
td->urb = urb;
td->privdata = privdata;
c67x00->next_buf_addr += (len + 1) & ~0x01; /* properly align */
list_add_tail(&td->td_list, &c67x00->td_list);
return 0;
}
static inline void c67x00_release_td(struct c67x00_td *td)
{
list_del_init(&td->td_list);
kfree(td);
}
/* -------------------------------------------------------------------------- */
static int c67x00_add_data_urb(struct c67x00_hcd *c67x00, struct urb *urb)
{
int remaining;
int toggle;
int pid;
int ret = 0;
int maxps;
int need_empty;
toggle = usb_gettoggle(urb->dev, usb_pipeendpoint(urb->pipe),
usb_pipeout(urb->pipe));
remaining = urb->transfer_buffer_length - urb->actual_length;
maxps = usb_maxpacket(urb->dev, urb->pipe);
need_empty = (urb->transfer_flags & URB_ZERO_PACKET) &&
usb_pipeout(urb->pipe) && !(remaining % maxps);
while (remaining || need_empty) {
int len;
char *td_buf;
len = (remaining > maxps) ? maxps : remaining;
if (!len)
need_empty = 0;
pid = usb_pipeout(urb->pipe) ? USB_PID_OUT : USB_PID_IN;
td_buf = urb->transfer_buffer + urb->transfer_buffer_length -
remaining;
ret = c67x00_create_td(c67x00, urb, td_buf, len, pid, toggle,
DATA_STAGE);
if (ret)
return ret; /* td wasn't created */
toggle ^= 1;
remaining -= len;
if (usb_pipecontrol(urb->pipe))
break;
}
return 0;
}
/*
* return 0 in case more bandwidth is available, else errorcode
*/
static int c67x00_add_ctrl_urb(struct c67x00_hcd *c67x00, struct urb *urb)
{
int ret;
int pid;
switch (urb->interval) {
default:
case SETUP_STAGE:
ret = c67x00_create_td(c67x00, urb, urb->setup_packet,
8, USB_PID_SETUP, 0, SETUP_STAGE);
if (ret)
return ret;
urb->interval = SETUP_STAGE;
usb_settoggle(urb->dev, usb_pipeendpoint(urb->pipe),
usb_pipeout(urb->pipe), 1);
break;
case DATA_STAGE:
if (urb->transfer_buffer_length) {
ret = c67x00_add_data_urb(c67x00, urb);
if (ret)
return ret;
break;
}
fallthrough;
case STATUS_STAGE:
pid = !usb_pipeout(urb->pipe) ? USB_PID_OUT : USB_PID_IN;
ret = c67x00_create_td(c67x00, urb, NULL, 0, pid, 1,
STATUS_STAGE);
if (ret)
return ret;
break;
}
return 0;
}
/*
* return 0 in case more bandwidth is available, else errorcode
*/
static int c67x00_add_int_urb(struct c67x00_hcd *c67x00, struct urb *urb)
{
struct c67x00_urb_priv *urbp = urb->hcpriv;
if (frame_after_eq(c67x00->current_frame, urbp->ep_data->next_frame)) {
urbp->ep_data->next_frame =
frame_add(urbp->ep_data->next_frame, urb->interval);
return c67x00_add_data_urb(c67x00, urb);
}
return 0;
}
static int c67x00_add_iso_urb(struct c67x00_hcd *c67x00, struct urb *urb)
{
struct c67x00_urb_priv *urbp = urb->hcpriv;
if (frame_after_eq(c67x00->current_frame, urbp->ep_data->next_frame)) {
char *td_buf;
int len, pid, ret;
BUG_ON(urbp->cnt >= urb->number_of_packets);
td_buf = urb->transfer_buffer +
urb->iso_frame_desc[urbp->cnt].offset;
len = urb->iso_frame_desc[urbp->cnt].length;
pid = usb_pipeout(urb->pipe) ? USB_PID_OUT : USB_PID_IN;
ret = c67x00_create_td(c67x00, urb, td_buf, len, pid, 0,
urbp->cnt);
if (ret) {
dev_dbg(c67x00_hcd_dev(c67x00), "create failed: %d\n",
ret);
urb->iso_frame_desc[urbp->cnt].actual_length = 0;
urb->iso_frame_desc[urbp->cnt].status = ret;
if (urbp->cnt + 1 == urb->number_of_packets)
c67x00_giveback_urb(c67x00, urb, 0);
}
urbp->ep_data->next_frame =
frame_add(urbp->ep_data->next_frame, urb->interval);
urbp->cnt++;
}
return 0;
}
/* -------------------------------------------------------------------------- */
static void c67x00_fill_from_list(struct c67x00_hcd *c67x00, int type,
int (*add)(struct c67x00_hcd *, struct urb *))
{
struct c67x00_ep_data *ep_data;
struct urb *urb;
/* traverse every endpoint on the list */
list_for_each_entry(ep_data, &c67x00->list[type], node) {
if (!list_empty(&ep_data->queue)) {
/* and add the first urb */
			/* isochronous transfers rely on this */
urb = list_entry(ep_data->queue.next,
struct c67x00_urb_priv,
hep_node)->urb;
add(c67x00, urb);
}
}
}
static void c67x00_fill_frame(struct c67x00_hcd *c67x00)
{
struct c67x00_td *td, *ttd;
/* Check if we can proceed */
if (!list_empty(&c67x00->td_list)) {
dev_warn(c67x00_hcd_dev(c67x00),
"TD list not empty! This should not happen!\n");
list_for_each_entry_safe(td, ttd, &c67x00->td_list, td_list) {
dbg_td(c67x00, td, "Unprocessed td");
c67x00_release_td(td);
}
}
/* Reinitialize variables */
c67x00->bandwidth_allocated = 0;
c67x00->periodic_bw_allocated = 0;
c67x00->next_td_addr = c67x00->td_base_addr;
c67x00->next_buf_addr = c67x00->buf_base_addr;
/* Fill the list */
c67x00_fill_from_list(c67x00, PIPE_ISOCHRONOUS, c67x00_add_iso_urb);
c67x00_fill_from_list(c67x00, PIPE_INTERRUPT, c67x00_add_int_urb);
c67x00_fill_from_list(c67x00, PIPE_CONTROL, c67x00_add_ctrl_urb);
c67x00_fill_from_list(c67x00, PIPE_BULK, c67x00_add_data_urb);
}
/* -------------------------------------------------------------------------- */
/*
* Get TD from C67X00
*/
static inline void
c67x00_parse_td(struct c67x00_hcd *c67x00, struct c67x00_td *td)
{
c67x00_ll_read_mem_le16(c67x00->sie->dev,
td->td_addr, td, CY_TD_SIZE);
if (usb_pipein(td->pipe) && td_actual_bytes(td))
c67x00_ll_read_mem_le16(c67x00->sie->dev, td_ly_base_addr(td),
td->data, td_actual_bytes(td));
}
static int c67x00_td_to_error(struct c67x00_hcd *c67x00, struct c67x00_td *td)
{
if (td->status & TD_STATUSMASK_ERR) {
dbg_td(c67x00, td, "ERROR_FLAG");
return -EILSEQ;
}
if (td->status & TD_STATUSMASK_STALL) {
/* dbg_td(c67x00, td, "STALL"); */
return -EPIPE;
}
if (td->status & TD_STATUSMASK_TMOUT) {
dbg_td(c67x00, td, "TIMEOUT");
return -ETIMEDOUT;
}
return 0;
}
static inline int c67x00_end_of_data(struct c67x00_td *td)
{
int maxps, need_empty, remaining;
struct urb *urb = td->urb;
int act_bytes;
act_bytes = td_actual_bytes(td);
if (unlikely(!act_bytes))
return 1; /* This was an empty packet */
maxps = usb_maxpacket(td_udev(td), td->pipe);
if (unlikely(act_bytes < maxps))
		return 1;	/* Smaller than a full packet */
remaining = urb->transfer_buffer_length - urb->actual_length;
need_empty = (urb->transfer_flags & URB_ZERO_PACKET) &&
usb_pipeout(urb->pipe) && !(remaining % maxps);
if (unlikely(!remaining && !need_empty))
return 1;
return 0;
}
/* -------------------------------------------------------------------------- */
/* Remove all td's from the list which come
* after last_td and are meant for the same pipe.
* This is used when a short packet has occurred */
static inline void c67x00_clear_pipe(struct c67x00_hcd *c67x00,
struct c67x00_td *last_td)
{
struct c67x00_td *td, *tmp;
td = last_td;
tmp = last_td;
while (td->td_list.next != &c67x00->td_list) {
td = list_entry(td->td_list.next, struct c67x00_td, td_list);
if (td->pipe == last_td->pipe) {
c67x00_release_td(td);
td = tmp;
}
tmp = td;
}
}
/* -------------------------------------------------------------------------- */
static void c67x00_handle_successful_td(struct c67x00_hcd *c67x00,
struct c67x00_td *td)
{
struct urb *urb = td->urb;
if (!urb)
return;
urb->actual_length += td_actual_bytes(td);
switch (usb_pipetype(td->pipe)) {
/* isochronous tds are handled separately */
case PIPE_CONTROL:
switch (td->privdata) {
case SETUP_STAGE:
urb->interval =
urb->transfer_buffer_length ?
DATA_STAGE : STATUS_STAGE;
/* Don't count setup_packet with normal data: */
urb->actual_length = 0;
break;
case DATA_STAGE:
if (c67x00_end_of_data(td)) {
urb->interval = STATUS_STAGE;
c67x00_clear_pipe(c67x00, td);
}
break;
case STATUS_STAGE:
urb->interval = 0;
c67x00_giveback_urb(c67x00, urb, 0);
break;
}
break;
case PIPE_INTERRUPT:
case PIPE_BULK:
if (unlikely(c67x00_end_of_data(td))) {
c67x00_clear_pipe(c67x00, td);
c67x00_giveback_urb(c67x00, urb, 0);
}
break;
}
}
static void c67x00_handle_isoc(struct c67x00_hcd *c67x00, struct c67x00_td *td)
{
struct urb *urb = td->urb;
int cnt;
if (!urb)
return;
cnt = td->privdata;
if (td->status & TD_ERROR_MASK)
urb->error_count++;
urb->iso_frame_desc[cnt].actual_length = td_actual_bytes(td);
urb->iso_frame_desc[cnt].status = c67x00_td_to_error(c67x00, td);
if (cnt + 1 == urb->number_of_packets) /* Last packet */
c67x00_giveback_urb(c67x00, urb, 0);
}
/* -------------------------------------------------------------------------- */
/*
* c67x00_check_td_list - handle tds which have been processed by the c67x00
* pre: current_td == 0
*/
static inline void c67x00_check_td_list(struct c67x00_hcd *c67x00)
{
struct c67x00_td *td, *tmp;
struct urb *urb;
int ack_ok;
int clear_endpoint;
list_for_each_entry_safe(td, tmp, &c67x00->td_list, td_list) {
/* get the TD */
c67x00_parse_td(c67x00, td);
urb = td->urb; /* urb can be NULL! */
ack_ok = 0;
clear_endpoint = 1;
/* Handle isochronous transfers separately */
if (usb_pipeisoc(td->pipe)) {
clear_endpoint = 0;
c67x00_handle_isoc(c67x00, td);
goto cont;
}
/* When an error occurs, all td's for that pipe go into an
* inactive state. This state matches successful transfers so
* we must make sure not to service them. */
if (td->status & TD_ERROR_MASK) {
c67x00_giveback_urb(c67x00, urb,
c67x00_td_to_error(c67x00, td));
goto cont;
}
if ((td->status & TD_STATUSMASK_NAK) || !td_sequence_ok(td) ||
!td_acked(td))
goto cont;
/* Sequence ok and acked, don't need to fix toggle */
ack_ok = 1;
if (unlikely(td->status & TD_STATUSMASK_OVF)) {
if (td_residue(td) & TD_RESIDUE_OVERFLOW) {
/* Overflow */
c67x00_giveback_urb(c67x00, urb, -EOVERFLOW);
goto cont;
}
}
clear_endpoint = 0;
c67x00_handle_successful_td(c67x00, td);
cont:
if (clear_endpoint)
c67x00_clear_pipe(c67x00, td);
if (ack_ok)
usb_settoggle(td_udev(td), usb_pipeendpoint(td->pipe),
usb_pipeout(td->pipe),
!(td->ctrl_reg & SEQ_SEL));
/* next in list could have been removed, due to clear_pipe! */
tmp = list_entry(td->td_list.next, typeof(*td), td_list);
c67x00_release_td(td);
}
}
/* -------------------------------------------------------------------------- */
static inline int c67x00_all_tds_processed(struct c67x00_hcd *c67x00)
{
/* If all tds are processed, we can check the previous frame (if
* there was any) and start our next frame.
*/
return !c67x00_ll_husb_get_current_td(c67x00->sie);
}
/*
* Send td to C67X00
*/
static void c67x00_send_td(struct c67x00_hcd *c67x00, struct c67x00_td *td)
{
int len = td_length(td);
if (len && ((td->pid_ep & TD_PIDEPMASK_PID) != TD_PID_IN))
c67x00_ll_write_mem_le16(c67x00->sie->dev, td_ly_base_addr(td),
td->data, len);
c67x00_ll_write_mem_le16(c67x00->sie->dev,
td->td_addr, td, CY_TD_SIZE);
}
static void c67x00_send_frame(struct c67x00_hcd *c67x00)
{
struct c67x00_td *td;
if (list_empty(&c67x00->td_list))
dev_warn(c67x00_hcd_dev(c67x00),
"%s: td list should not be empty here!\n",
__func__);
list_for_each_entry(td, &c67x00->td_list, td_list) {
if (td->td_list.next == &c67x00->td_list)
td->next_td_addr = 0; /* Last td in list */
c67x00_send_td(c67x00, td);
}
c67x00_ll_husb_set_current_td(c67x00->sie, c67x00->td_base_addr);
}
/* -------------------------------------------------------------------------- */
/*
 * c67x00_do_work - Scheduler's state machine
*/
static void c67x00_do_work(struct c67x00_hcd *c67x00)
{
spin_lock(&c67x00->lock);
/* Make sure all tds are processed */
if (!c67x00_all_tds_processed(c67x00))
goto out;
c67x00_check_td_list(c67x00);
/* no td's are being processed (current == 0)
* and all have been "checked" */
complete(&c67x00->endpoint_disable);
if (!list_empty(&c67x00->td_list))
goto out;
c67x00->current_frame = c67x00_get_current_frame_number(c67x00);
if (c67x00->current_frame == c67x00->last_frame)
goto out; /* Don't send tds in same frame */
c67x00->last_frame = c67x00->current_frame;
/* If no urbs are scheduled, our work is done */
if (!c67x00->urb_count) {
c67x00_ll_hpi_disable_sofeop(c67x00->sie);
goto out;
}
c67x00_fill_frame(c67x00);
if (!list_empty(&c67x00->td_list))
/* TD's have been added to the frame */
c67x00_send_frame(c67x00);
out:
spin_unlock(&c67x00->lock);
}
/* -------------------------------------------------------------------------- */
static void c67x00_sched_work(struct work_struct *work)
{
struct c67x00_hcd *c67x00;
c67x00 = container_of(work, struct c67x00_hcd, work);
c67x00_do_work(c67x00);
}
void c67x00_sched_kick(struct c67x00_hcd *c67x00)
{
queue_work(system_highpri_wq, &c67x00->work);
}
int c67x00_sched_start_scheduler(struct c67x00_hcd *c67x00)
{
INIT_WORK(&c67x00->work, c67x00_sched_work);
return 0;
}
void c67x00_sched_stop_scheduler(struct c67x00_hcd *c67x00)
{
cancel_work_sync(&c67x00->work);
}
| linux-master | drivers/usb/c67x00/c67x00-sched.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* Safe Encapsulated USB Serial Driver
*
* Copyright (C) 2010 Johan Hovold <[email protected]>
* Copyright (C) 2001 Lineo
* Copyright (C) 2001 Hewlett-Packard
*
* By:
* Stuart Lynne <[email protected]>, Tom Rushworth <[email protected]>
*/
/*
 * The encapsulation is designed to overcome difficulties with some USB
* hardware.
*
* While the USB protocol has a CRC over the data while in transit, i.e. while
* being carried over the bus, there is no end to end protection. If the
* hardware has any problems getting the data into or out of the USB transmit
 * and receive FIFOs, then data can be lost.
*
* This protocol adds a two byte trailer to each USB packet to specify the
* number of bytes of valid data and a 10 bit CRC that will allow the receiver
* to verify that the entire USB packet was received without error.
*
* Because in this case the sender and receiver are the class and function
 * drivers, there is now end-to-end protection.
*
* There is an additional option that can be used to force all transmitted
* packets to be padded to the maximum packet size. This provides a work
* around for some devices which have problems with small USB packets.
*
* Assuming a packetsize of N:
*
 *	0..N-3	data and optional padding
*
* N-2 bits 7-2 - number of bytes of valid data
* bits 1-0 top two bits of 10 bit CRC
* N-1 bottom 8 bits of 10 bit CRC
*
*
* | Data Length | 10 bit CRC |
* + 7 . 6 . 5 . 4 . 3 . 2 . 1 . 0 | 7 . 6 . 5 . 4 . 3 . 2 . 1 . 0 +
*
* The 10 bit CRC is computed across the sent data, followed by the trailer
* with the length set and the CRC set to zero. The CRC is then OR'd into
* the trailer.
*
* When received a 10 bit CRC is computed over the entire frame including
* the trailer and should be equal to zero.
*
 * Two module parameters are used to control the encapsulation; if both are
 * turned off, the module works as a simple serial device with NO
 * encapsulation.
*
* See linux/drivers/usbd/serial_fd for a device function driver
* implementation of this.
*
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/tty.h>
#include <linux/tty_driver.h>
#include <linux/tty_flip.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/uaccess.h>
#include <linux/usb.h>
#include <linux/usb/serial.h>
static bool safe = true;
static bool padded = IS_ENABLED(CONFIG_USB_SERIAL_SAFE_PADDED);
#define DRIVER_AUTHOR "[email protected], [email protected], Johan Hovold <[email protected]>"
#define DRIVER_DESC "USB Safe Encapsulated Serial"
MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL");
module_param(safe, bool, 0);
MODULE_PARM_DESC(safe, "Turn Safe Encapsulation On/Off");
module_param(padded, bool, 0);
MODULE_PARM_DESC(padded, "Pad to full wMaxPacketSize On/Off");
#define CDC_DEVICE_CLASS 0x02
#define CDC_INTERFACE_CLASS 0x02
#define CDC_INTERFACE_SUBCLASS 0x06
#define LINEO_INTERFACE_CLASS 0xff
#define LINEO_INTERFACE_SUBCLASS_SAFENET 0x01
#define LINEO_SAFENET_CRC 0x01
#define LINEO_SAFENET_CRC_PADDED 0x02
#define LINEO_INTERFACE_SUBCLASS_SAFESERIAL 0x02
#define LINEO_SAFESERIAL_CRC 0x01
#define LINEO_SAFESERIAL_CRC_PADDED 0x02
#define MY_USB_DEVICE(vend, prod, dc, ic, isc) \
.match_flags = USB_DEVICE_ID_MATCH_DEVICE | \
USB_DEVICE_ID_MATCH_DEV_CLASS | \
USB_DEVICE_ID_MATCH_INT_CLASS | \
USB_DEVICE_ID_MATCH_INT_SUBCLASS, \
.idVendor = (vend), \
.idProduct = (prod),\
.bDeviceClass = (dc),\
.bInterfaceClass = (ic), \
.bInterfaceSubClass = (isc),
static const struct usb_device_id id_table[] = {
{MY_USB_DEVICE(0x49f, 0xffff, CDC_DEVICE_CLASS, LINEO_INTERFACE_CLASS, LINEO_INTERFACE_SUBCLASS_SAFESERIAL)}, /* Itsy */
{MY_USB_DEVICE(0x3f0, 0x2101, CDC_DEVICE_CLASS, LINEO_INTERFACE_CLASS, LINEO_INTERFACE_SUBCLASS_SAFESERIAL)}, /* Calypso */
{MY_USB_DEVICE(0x4dd, 0x8001, CDC_DEVICE_CLASS, LINEO_INTERFACE_CLASS, LINEO_INTERFACE_SUBCLASS_SAFESERIAL)}, /* Iris */
{MY_USB_DEVICE(0x4dd, 0x8002, CDC_DEVICE_CLASS, LINEO_INTERFACE_CLASS, LINEO_INTERFACE_SUBCLASS_SAFESERIAL)}, /* Collie */
{MY_USB_DEVICE(0x4dd, 0x8003, CDC_DEVICE_CLASS, LINEO_INTERFACE_CLASS, LINEO_INTERFACE_SUBCLASS_SAFESERIAL)}, /* Collie */
{MY_USB_DEVICE(0x4dd, 0x8004, CDC_DEVICE_CLASS, LINEO_INTERFACE_CLASS, LINEO_INTERFACE_SUBCLASS_SAFESERIAL)}, /* Collie */
{MY_USB_DEVICE(0x5f9, 0xffff, CDC_DEVICE_CLASS, LINEO_INTERFACE_CLASS, LINEO_INTERFACE_SUBCLASS_SAFESERIAL)}, /* Sharp tmp */
{} /* terminating entry */
};
MODULE_DEVICE_TABLE(usb, id_table);
static const __u16 crc10_table[256] = {
0x000, 0x233, 0x255, 0x066, 0x299, 0x0aa, 0x0cc, 0x2ff,
0x301, 0x132, 0x154, 0x367, 0x198, 0x3ab, 0x3cd, 0x1fe,
0x031, 0x202, 0x264, 0x057, 0x2a8, 0x09b, 0x0fd, 0x2ce,
0x330, 0x103, 0x165, 0x356, 0x1a9, 0x39a, 0x3fc, 0x1cf,
0x062, 0x251, 0x237, 0x004, 0x2fb, 0x0c8, 0x0ae, 0x29d,
0x363, 0x150, 0x136, 0x305, 0x1fa, 0x3c9, 0x3af, 0x19c,
0x053, 0x260, 0x206, 0x035, 0x2ca, 0x0f9, 0x09f, 0x2ac,
0x352, 0x161, 0x107, 0x334, 0x1cb, 0x3f8, 0x39e, 0x1ad,
0x0c4, 0x2f7, 0x291, 0x0a2, 0x25d, 0x06e, 0x008, 0x23b,
0x3c5, 0x1f6, 0x190, 0x3a3, 0x15c, 0x36f, 0x309, 0x13a,
0x0f5, 0x2c6, 0x2a0, 0x093, 0x26c, 0x05f, 0x039, 0x20a,
0x3f4, 0x1c7, 0x1a1, 0x392, 0x16d, 0x35e, 0x338, 0x10b,
0x0a6, 0x295, 0x2f3, 0x0c0, 0x23f, 0x00c, 0x06a, 0x259,
0x3a7, 0x194, 0x1f2, 0x3c1, 0x13e, 0x30d, 0x36b, 0x158,
0x097, 0x2a4, 0x2c2, 0x0f1, 0x20e, 0x03d, 0x05b, 0x268,
0x396, 0x1a5, 0x1c3, 0x3f0, 0x10f, 0x33c, 0x35a, 0x169,
0x188, 0x3bb, 0x3dd, 0x1ee, 0x311, 0x122, 0x144, 0x377,
0x289, 0x0ba, 0x0dc, 0x2ef, 0x010, 0x223, 0x245, 0x076,
0x1b9, 0x38a, 0x3ec, 0x1df, 0x320, 0x113, 0x175, 0x346,
0x2b8, 0x08b, 0x0ed, 0x2de, 0x021, 0x212, 0x274, 0x047,
0x1ea, 0x3d9, 0x3bf, 0x18c, 0x373, 0x140, 0x126, 0x315,
0x2eb, 0x0d8, 0x0be, 0x28d, 0x072, 0x241, 0x227, 0x014,
0x1db, 0x3e8, 0x38e, 0x1bd, 0x342, 0x171, 0x117, 0x324,
0x2da, 0x0e9, 0x08f, 0x2bc, 0x043, 0x270, 0x216, 0x025,
0x14c, 0x37f, 0x319, 0x12a, 0x3d5, 0x1e6, 0x180, 0x3b3,
0x24d, 0x07e, 0x018, 0x22b, 0x0d4, 0x2e7, 0x281, 0x0b2,
0x17d, 0x34e, 0x328, 0x11b, 0x3e4, 0x1d7, 0x1b1, 0x382,
0x27c, 0x04f, 0x029, 0x21a, 0x0e5, 0x2d6, 0x2b0, 0x083,
0x12e, 0x31d, 0x37b, 0x148, 0x3b7, 0x184, 0x1e2, 0x3d1,
0x22f, 0x01c, 0x07a, 0x249, 0x0b6, 0x285, 0x2e3, 0x0d0,
0x11f, 0x32c, 0x34a, 0x179, 0x386, 0x1b5, 0x1d3, 0x3e0,
0x21e, 0x02d, 0x04b, 0x278, 0x087, 0x2b4, 0x2d2, 0x0e1,
};
#define CRC10_INITFCS 0x000 /* Initial FCS value */
#define CRC10_GOODFCS 0x000 /* Good final FCS value */
#define CRC10_FCS(fcs, c) ((((fcs) << 8) & 0x3ff) ^ crc10_table[((fcs) >> 2) & 0xff] ^ (c))
/**
 * fcs_compute10 - calculate 10 bit CRC across buffer
* @sp: pointer to buffer
* @len: number of bytes
* @fcs: starting FCS
*
 * Calculate the fcs using the PPP 10-bit CRC algorithm. Return the
 * new 10 bit FCS.
*/
static inline __u16 fcs_compute10(unsigned char *sp, int len, __u16 fcs)
{
for (; len-- > 0; fcs = CRC10_FCS(fcs, *sp++));
return fcs;
}
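/*
 * Illustrative sketch (not part of the driver): validate a received frame
 * per the trailer format described above and return the number of valid
 * data bytes, or -1 when the CRC over data plus trailer is not zero.
 */
static inline int safe_frame_data_len_example(unsigned char *frame, int len)
{
	if (len < 2)
		return -1;
	if (fcs_compute10(frame, len, CRC10_INITFCS) != CRC10_GOODFCS)
		return -1;

	return frame[len - 2] >> 2;	/* bits 7-2 of byte N-2 */
}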
static void safe_process_read_urb(struct urb *urb)
{
struct usb_serial_port *port = urb->context;
unsigned char *data = urb->transfer_buffer;
unsigned char length = urb->actual_length;
int actual_length;
__u16 fcs;
if (!length)
return;
if (!safe)
goto out;
if (length < 2) {
dev_err(&port->dev, "malformed packet\n");
return;
}
fcs = fcs_compute10(data, length, CRC10_INITFCS);
if (fcs) {
dev_err(&port->dev, "%s - bad CRC %x\n", __func__, fcs);
return;
}
actual_length = data[length - 2] >> 2;
if (actual_length > (length - 2)) {
dev_err(&port->dev, "%s - inconsistent lengths %d:%d\n",
__func__, actual_length, length);
return;
}
dev_info(&urb->dev->dev, "%s - actual: %d\n", __func__, actual_length);
length = actual_length;
out:
tty_insert_flip_string(&port->port, data, length);
tty_flip_buffer_push(&port->port);
}
static int safe_prepare_write_buffer(struct usb_serial_port *port,
void *dest, size_t size)
{
unsigned char *buf = dest;
int count;
int trailer_len;
int pkt_len;
__u16 fcs;
trailer_len = safe ? 2 : 0;
count = kfifo_out_locked(&port->write_fifo, buf, size - trailer_len,
&port->lock);
if (!safe)
return count;
/* pad if necessary */
if (padded) {
pkt_len = size;
memset(buf + count, '0', pkt_len - count - trailer_len);
} else {
pkt_len = count + trailer_len;
}
/* set count */
buf[pkt_len - 2] = count << 2;
buf[pkt_len - 1] = 0;
/* compute fcs and insert into trailer */
fcs = fcs_compute10(buf, pkt_len, CRC10_INITFCS);
buf[pkt_len - 2] |= fcs >> 8;
buf[pkt_len - 1] |= fcs & 0xff;
return pkt_len;
}
static int safe_startup(struct usb_serial *serial)
{
struct usb_interface_descriptor *desc;
if (serial->dev->descriptor.bDeviceClass != CDC_DEVICE_CLASS)
return -ENODEV;
desc = &serial->interface->cur_altsetting->desc;
if (desc->bInterfaceClass != LINEO_INTERFACE_CLASS)
return -ENODEV;
if (desc->bInterfaceSubClass != LINEO_INTERFACE_SUBCLASS_SAFESERIAL)
return -ENODEV;
switch (desc->bInterfaceProtocol) {
case LINEO_SAFESERIAL_CRC:
break;
case LINEO_SAFESERIAL_CRC_PADDED:
padded = true;
break;
default:
return -EINVAL;
}
return 0;
}
static struct usb_serial_driver safe_device = {
.driver = {
.owner = THIS_MODULE,
.name = "safe_serial",
},
.id_table = id_table,
.num_ports = 1,
.process_read_urb = safe_process_read_urb,
.prepare_write_buffer = safe_prepare_write_buffer,
.attach = safe_startup,
};
static struct usb_serial_driver * const serial_drivers[] = {
&safe_device, NULL
};
module_usb_serial_driver(serial_drivers, id_table);
| linux-master | drivers/usb/serial/safe_serial.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright 2007, Frank A Kingswood <[email protected]>
* Copyright 2007, Werner Cornelius <[email protected]>
* Copyright 2009, Boris Hajduk <[email protected]>
*
* ch341.c implements a serial port driver for the Winchiphead CH341.
*
* The CH341 device can be used to implement an RS232 asynchronous
* serial port, an IEEE-1284 parallel printer port or a memory-like
* interface. In all cases the CH341 supports an I2C interface as well.
* This driver only supports the asynchronous serial interface.
*/
#include <linux/kernel.h>
#include <linux/tty.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/usb.h>
#include <linux/usb/serial.h>
#include <linux/serial.h>
#include <asm/unaligned.h>
#define DEFAULT_BAUD_RATE 9600
#define DEFAULT_TIMEOUT 1000
/* flags for IO-Bits */
#define CH341_BIT_RTS (1 << 6)
#define CH341_BIT_DTR (1 << 5)
/******************************/
/* interrupt pipe definitions */
/******************************/
/* always 4 interrupt bytes */
/* first irq byte normally 0x08 */
/* second irq byte base 0x7d + below */
/* third irq byte base 0x94 + below */
/* fourth irq byte normally 0xee */
/* second interrupt byte */
#define CH341_MULT_STAT 0x04 /* multiple status since last interrupt event */
/* status returned in third interrupt answer byte, inverted in data
from irq */
#define CH341_BIT_CTS 0x01
#define CH341_BIT_DSR 0x02
#define CH341_BIT_RI 0x04
#define CH341_BIT_DCD 0x08
#define CH341_BITS_MODEM_STAT 0x0f /* all bits */
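/*
 * Illustrative sketch (not part of the driver): recover the modem-status
 * bits from the third byte of the four-byte interrupt packet described
 * above; the hardware reports them inverted.
 */
static inline u8 ch341_msr_from_irq_example(const u8 *intr_data)
{
	return ~intr_data[2] & CH341_BITS_MODEM_STAT;
}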
/* Break support - the information used to implement this was gleaned from
* the Net/FreeBSD uchcom.c driver by Takanori Watanabe. Domo arigato.
*/
#define CH341_REQ_READ_VERSION 0x5F
#define CH341_REQ_WRITE_REG 0x9A
#define CH341_REQ_READ_REG 0x95
#define CH341_REQ_SERIAL_INIT 0xA1
#define CH341_REQ_MODEM_CTRL 0xA4
#define CH341_REG_BREAK 0x05
#define CH341_REG_PRESCALER 0x12
#define CH341_REG_DIVISOR 0x13
#define CH341_REG_LCR 0x18
#define CH341_REG_LCR2 0x25
#define CH341_NBREAK_BITS 0x01
#define CH341_LCR_ENABLE_RX 0x80
#define CH341_LCR_ENABLE_TX 0x40
#define CH341_LCR_MARK_SPACE 0x20
#define CH341_LCR_PAR_EVEN 0x10
#define CH341_LCR_ENABLE_PAR 0x08
#define CH341_LCR_STOP_BITS_2 0x04
#define CH341_LCR_CS8 0x03
#define CH341_LCR_CS7 0x02
#define CH341_LCR_CS6 0x01
#define CH341_LCR_CS5 0x00
#define CH341_QUIRK_LIMITED_PRESCALER BIT(0)
#define CH341_QUIRK_SIMULATE_BREAK BIT(1)
static const struct usb_device_id id_table[] = {
{ USB_DEVICE(0x1a86, 0x5523) },
{ USB_DEVICE(0x1a86, 0x7522) },
{ USB_DEVICE(0x1a86, 0x7523) },
{ USB_DEVICE(0x2184, 0x0057) },
{ USB_DEVICE(0x4348, 0x5523) },
{ USB_DEVICE(0x9986, 0x7523) },
{ },
};
MODULE_DEVICE_TABLE(usb, id_table);
struct ch341_private {
spinlock_t lock; /* access lock */
unsigned baud_rate; /* set baud rate */
u8 mcr;
u8 msr;
u8 lcr;
unsigned long quirks;
u8 version;
unsigned long break_end;
};
static void ch341_set_termios(struct tty_struct *tty,
struct usb_serial_port *port,
const struct ktermios *old_termios);
static int ch341_control_out(struct usb_device *dev, u8 request,
u16 value, u16 index)
{
int r;
dev_dbg(&dev->dev, "%s - (%02x,%04x,%04x)\n", __func__,
request, value, index);
r = usb_control_msg(dev, usb_sndctrlpipe(dev, 0), request,
USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_OUT,
value, index, NULL, 0, DEFAULT_TIMEOUT);
if (r < 0)
dev_err(&dev->dev, "failed to send control message: %d\n", r);
return r;
}
static int ch341_control_in(struct usb_device *dev,
u8 request, u16 value, u16 index,
char *buf, unsigned bufsize)
{
int r;
dev_dbg(&dev->dev, "%s - (%02x,%04x,%04x,%u)\n", __func__,
request, value, index, bufsize);
r = usb_control_msg_recv(dev, 0, request,
USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_IN,
value, index, buf, bufsize, DEFAULT_TIMEOUT,
GFP_KERNEL);
if (r) {
dev_err(&dev->dev, "failed to receive control message: %d\n",
r);
return r;
}
return 0;
}
#define CH341_CLKRATE 48000000
#define CH341_CLK_DIV(ps, fact) (1 << (12 - 3 * (ps) - (fact)))
#define CH341_MIN_RATE(ps) (CH341_CLKRATE / (CH341_CLK_DIV((ps), 1) * 512))
static const speed_t ch341_min_rates[] = {
CH341_MIN_RATE(0),
CH341_MIN_RATE(1),
CH341_MIN_RATE(2),
CH341_MIN_RATE(3),
};
/* Supported range is 46 to 3000000 bps. */
#define CH341_MIN_BPS DIV_ROUND_UP(CH341_CLKRATE, CH341_CLK_DIV(0, 0) * 256)
#define CH341_MAX_BPS (CH341_CLKRATE / (CH341_CLK_DIV(3, 0) * 2))
/*
* The device line speed is given by the following equation:
*
* baudrate = 48000000 / (2^(12 - 3 * ps - fact) * div), where
*
* 0 <= ps <= 3,
* 0 <= fact <= 1,
* 2 <= div <= 256 if fact = 0, or
* 9 <= div <= 256 if fact = 1
 *		9 <= div <= 256 if fact = 1
 */
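/*
 * Illustrative sketch (not part of the driver): recover the resulting line
 * speed from a given prescaler, fact and divisor using the equation above.
 * For example ps = 3, fact = 0, div = 52 gives 48000000 / (8 * 52), about
 * 115384 bps, close to a requested 115200.
 */
static inline unsigned int ch341_rate_from_divisor_example(unsigned int ps,
							    unsigned int fact,
							    unsigned int div)
{
	return CH341_CLKRATE / ((1U << (12 - 3 * ps - fact)) * div);
}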
static int ch341_get_divisor(struct ch341_private *priv, speed_t speed)
{
unsigned int fact, div, clk_div;
bool force_fact0 = false;
int ps;
/*
* Clamp to supported range, this makes the (ps < 0) and (div < 2)
* sanity checks below redundant.
*/
speed = clamp_val(speed, CH341_MIN_BPS, CH341_MAX_BPS);
/*
* Start with highest possible base clock (fact = 1) that will give a
* divisor strictly less than 512.
*/
fact = 1;
for (ps = 3; ps >= 0; ps--) {
if (speed > ch341_min_rates[ps])
break;
}
if (ps < 0)
return -EINVAL;
/* Determine corresponding divisor, rounding down. */
clk_div = CH341_CLK_DIV(ps, fact);
div = CH341_CLKRATE / (clk_div * speed);
/* Some devices require a lower base clock if ps < 3. */
if (ps < 3 && (priv->quirks & CH341_QUIRK_LIMITED_PRESCALER))
force_fact0 = true;
/* Halve base clock (fact = 0) if required. */
if (div < 9 || div > 255 || force_fact0) {
div /= 2;
clk_div *= 2;
fact = 0;
}
if (div < 2)
return -EINVAL;
/*
* Pick next divisor if resulting rate is closer to the requested one,
* scale up to avoid rounding errors on low rates.
*/
if (16 * CH341_CLKRATE / (clk_div * div) - 16 * speed >=
16 * speed - 16 * CH341_CLKRATE / (clk_div * (div + 1)))
div++;
/*
* Prefer lower base clock (fact = 0) if even divisor.
*
* Note that this makes the receiver more tolerant to errors.
*/
if (fact == 1 && div % 2 == 0) {
div /= 2;
fact = 0;
}
return (0x100 - div) << 8 | fact << 2 | ps;
}
static int ch341_set_baudrate_lcr(struct usb_device *dev,
struct ch341_private *priv,
speed_t baud_rate, u8 lcr)
{
int val;
int r;
if (!baud_rate)
return -EINVAL;
val = ch341_get_divisor(priv, baud_rate);
if (val < 0)
return -EINVAL;
/*
* CH341A buffers data until a full endpoint-size packet (32 bytes)
* has been received unless bit 7 is set.
*
* At least one device with version 0x27 appears to have this bit
* inverted.
*/
if (priv->version > 0x27)
val |= BIT(7);
r = ch341_control_out(dev, CH341_REQ_WRITE_REG,
CH341_REG_DIVISOR << 8 | CH341_REG_PRESCALER,
val);
if (r)
return r;
/*
* Chip versions before version 0x30 as read using
* CH341_REQ_READ_VERSION used separate registers for line control
* (stop bits, parity and word length). Version 0x30 and above use
* CH341_REG_LCR only and CH341_REG_LCR2 is always set to zero.
*/
if (priv->version < 0x30)
return 0;
r = ch341_control_out(dev, CH341_REQ_WRITE_REG,
CH341_REG_LCR2 << 8 | CH341_REG_LCR, lcr);
if (r)
return r;
return r;
}
static int ch341_set_handshake(struct usb_device *dev, u8 control)
{
return ch341_control_out(dev, CH341_REQ_MODEM_CTRL, ~control, 0);
}
static int ch341_get_status(struct usb_device *dev, struct ch341_private *priv)
{
const unsigned int size = 2;
u8 buffer[2];
int r;
unsigned long flags;
r = ch341_control_in(dev, CH341_REQ_READ_REG, 0x0706, 0, buffer, size);
if (r)
return r;
spin_lock_irqsave(&priv->lock, flags);
priv->msr = (~(*buffer)) & CH341_BITS_MODEM_STAT;
spin_unlock_irqrestore(&priv->lock, flags);
return 0;
}
/* -------------------------------------------------------------------------- */
static int ch341_configure(struct usb_device *dev, struct ch341_private *priv)
{
const unsigned int size = 2;
u8 buffer[2];
int r;
/* expect two bytes 0x27 0x00 */
r = ch341_control_in(dev, CH341_REQ_READ_VERSION, 0, 0, buffer, size);
if (r)
return r;
priv->version = buffer[0];
dev_dbg(&dev->dev, "Chip version: 0x%02x\n", priv->version);
r = ch341_control_out(dev, CH341_REQ_SERIAL_INIT, 0, 0);
if (r < 0)
return r;
r = ch341_set_baudrate_lcr(dev, priv, priv->baud_rate, priv->lcr);
if (r < 0)
return r;
r = ch341_set_handshake(dev, priv->mcr);
if (r < 0)
return r;
return 0;
}
static int ch341_detect_quirks(struct usb_serial_port *port)
{
struct ch341_private *priv = usb_get_serial_port_data(port);
struct usb_device *udev = port->serial->dev;
const unsigned int size = 2;
unsigned long quirks = 0;
u8 buffer[2];
int r;
/*
* A subset of CH34x devices does not support all features. The
* prescaler is limited and there is no support for sending a RS232
* break condition. A read failure when trying to set up the latter is
* used to detect these devices.
*/
r = usb_control_msg_recv(udev, 0, CH341_REQ_READ_REG,
USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_IN,
CH341_REG_BREAK, 0, &buffer, size,
DEFAULT_TIMEOUT, GFP_KERNEL);
if (r == -EPIPE) {
dev_info(&port->dev, "break control not supported, using simulated break\n");
quirks = CH341_QUIRK_LIMITED_PRESCALER | CH341_QUIRK_SIMULATE_BREAK;
r = 0;
} else if (r) {
dev_err(&port->dev, "failed to read break control: %d\n", r);
}
if (quirks) {
dev_dbg(&port->dev, "enabling quirk flags: 0x%02lx\n", quirks);
priv->quirks |= quirks;
}
return r;
}
static int ch341_port_probe(struct usb_serial_port *port)
{
struct ch341_private *priv;
int r;
priv = kzalloc(sizeof(struct ch341_private), GFP_KERNEL);
if (!priv)
return -ENOMEM;
spin_lock_init(&priv->lock);
priv->baud_rate = DEFAULT_BAUD_RATE;
/*
* Some CH340 devices appear unable to change the initial LCR
* settings, so set a sane 8N1 default.
*/
priv->lcr = CH341_LCR_ENABLE_RX | CH341_LCR_ENABLE_TX | CH341_LCR_CS8;
r = ch341_configure(port->serial->dev, priv);
if (r < 0)
goto error;
usb_set_serial_port_data(port, priv);
r = ch341_detect_quirks(port);
if (r < 0)
goto error;
return 0;
error:
kfree(priv);
return r;
}
static void ch341_port_remove(struct usb_serial_port *port)
{
struct ch341_private *priv;
priv = usb_get_serial_port_data(port);
kfree(priv);
}
static int ch341_carrier_raised(struct usb_serial_port *port)
{
struct ch341_private *priv = usb_get_serial_port_data(port);
if (priv->msr & CH341_BIT_DCD)
return 1;
return 0;
}
static void ch341_dtr_rts(struct usb_serial_port *port, int on)
{
struct ch341_private *priv = usb_get_serial_port_data(port);
unsigned long flags;
/* drop DTR and RTS */
spin_lock_irqsave(&priv->lock, flags);
if (on)
priv->mcr |= CH341_BIT_RTS | CH341_BIT_DTR;
else
priv->mcr &= ~(CH341_BIT_RTS | CH341_BIT_DTR);
spin_unlock_irqrestore(&priv->lock, flags);
ch341_set_handshake(port->serial->dev, priv->mcr);
}
static void ch341_close(struct usb_serial_port *port)
{
usb_serial_generic_close(port);
usb_kill_urb(port->interrupt_in_urb);
}
/* open this device, set default parameters */
static int ch341_open(struct tty_struct *tty, struct usb_serial_port *port)
{
struct ch341_private *priv = usb_get_serial_port_data(port);
int r;
if (tty)
ch341_set_termios(tty, port, NULL);
dev_dbg(&port->dev, "%s - submitting interrupt urb\n", __func__);
r = usb_submit_urb(port->interrupt_in_urb, GFP_KERNEL);
if (r) {
dev_err(&port->dev, "%s - failed to submit interrupt urb: %d\n",
__func__, r);
return r;
}
r = ch341_get_status(port->serial->dev, priv);
if (r < 0) {
dev_err(&port->dev, "failed to read modem status: %d\n", r);
goto err_kill_interrupt_urb;
}
r = usb_serial_generic_open(tty, port);
if (r)
goto err_kill_interrupt_urb;
return 0;
err_kill_interrupt_urb:
usb_kill_urb(port->interrupt_in_urb);
return r;
}
/*
* old_termios contains the original termios settings and tty->termios
* contains the new settings to be used.
*/
static void ch341_set_termios(struct tty_struct *tty,
struct usb_serial_port *port,
const struct ktermios *old_termios)
{
struct ch341_private *priv = usb_get_serial_port_data(port);
unsigned baud_rate;
unsigned long flags;
u8 lcr;
int r;
/* redundant changes may cause the chip to lose bytes */
if (old_termios && !tty_termios_hw_change(&tty->termios, old_termios))
return;
baud_rate = tty_get_baud_rate(tty);
lcr = CH341_LCR_ENABLE_RX | CH341_LCR_ENABLE_TX;
switch (C_CSIZE(tty)) {
case CS5:
lcr |= CH341_LCR_CS5;
break;
case CS6:
lcr |= CH341_LCR_CS6;
break;
case CS7:
lcr |= CH341_LCR_CS7;
break;
case CS8:
lcr |= CH341_LCR_CS8;
break;
}
if (C_PARENB(tty)) {
lcr |= CH341_LCR_ENABLE_PAR;
if (C_PARODD(tty) == 0)
lcr |= CH341_LCR_PAR_EVEN;
if (C_CMSPAR(tty))
lcr |= CH341_LCR_MARK_SPACE;
}
if (C_CSTOPB(tty))
lcr |= CH341_LCR_STOP_BITS_2;
if (baud_rate) {
priv->baud_rate = baud_rate;
r = ch341_set_baudrate_lcr(port->serial->dev, priv,
priv->baud_rate, lcr);
if (r < 0 && old_termios) {
priv->baud_rate = tty_termios_baud_rate(old_termios);
tty_termios_copy_hw(&tty->termios, old_termios);
} else if (r == 0) {
priv->lcr = lcr;
}
}
spin_lock_irqsave(&priv->lock, flags);
if (C_BAUD(tty) == B0)
priv->mcr &= ~(CH341_BIT_DTR | CH341_BIT_RTS);
else if (old_termios && (old_termios->c_cflag & CBAUD) == B0)
priv->mcr |= (CH341_BIT_DTR | CH341_BIT_RTS);
spin_unlock_irqrestore(&priv->lock, flags);
ch341_set_handshake(port->serial->dev, priv->mcr);
}
/*
* A subset of all CH34x devices does not support a real break condition and
* reading CH341_REG_BREAK fails (see also ch341_detect_quirks). This function
* simulates a break condition by lowering the baud rate to the minimum
* supported by the hardware upon enabling the break condition and sending
* a NUL byte.
*
* Incoming data is corrupted while the break condition is being simulated.
*
* Normally the duration of the break condition can be controlled individually
* by userspace using TIOCSBRK and TIOCCBRK or by passing an argument to
* TCSBRKP. Due to how the simulation is implemented the duration can't be
* controlled. The duration is always about (1s / 46bd * 9bit) = 196ms.
*/
static int ch341_simulate_break(struct tty_struct *tty, int break_state)
{
struct usb_serial_port *port = tty->driver_data;
struct ch341_private *priv = usb_get_serial_port_data(port);
unsigned long now, delay;
int r, r2;
if (break_state != 0) {
dev_dbg(&port->dev, "enter break state requested\n");
r = ch341_set_baudrate_lcr(port->serial->dev, priv,
CH341_MIN_BPS,
CH341_LCR_ENABLE_RX | CH341_LCR_ENABLE_TX | CH341_LCR_CS8);
if (r < 0) {
dev_err(&port->dev,
"failed to change baud rate to %u: %d\n",
CH341_MIN_BPS, r);
goto restore;
}
r = tty_put_char(tty, '\0');
if (r < 0) {
dev_err(&port->dev,
"failed to write NUL byte for simulated break condition: %d\n",
r);
goto restore;
}
/*
* Compute expected transmission duration including safety
* margin. The original baud rate is only restored after the
* computed point in time.
*
* 11 bits = 1 start, 8 data, 1 stop, 1 margin
*/
priv->break_end = jiffies + (11 * HZ / CH341_MIN_BPS);
return 0;
}
dev_dbg(&port->dev, "leave break state requested\n");
now = jiffies;
if (time_before(now, priv->break_end)) {
/* Wait until NUL byte is written */
delay = priv->break_end - now;
dev_dbg(&port->dev,
"wait %d ms while transmitting NUL byte at %u baud\n",
jiffies_to_msecs(delay), CH341_MIN_BPS);
schedule_timeout_interruptible(delay);
}
r = 0;
restore:
/* Restore original baud rate */
r2 = ch341_set_baudrate_lcr(port->serial->dev, priv, priv->baud_rate,
priv->lcr);
if (r2 < 0) {
dev_err(&port->dev,
"restoring original baud rate of %u failed: %d\n",
priv->baud_rate, r2);
return r2;
}
return r;
}
static int ch341_break_ctl(struct tty_struct *tty, int break_state)
{
const uint16_t ch341_break_reg =
((uint16_t) CH341_REG_LCR << 8) | CH341_REG_BREAK;
struct usb_serial_port *port = tty->driver_data;
struct ch341_private *priv = usb_get_serial_port_data(port);
int r;
uint16_t reg_contents;
uint8_t break_reg[2];
if (priv->quirks & CH341_QUIRK_SIMULATE_BREAK)
return ch341_simulate_break(tty, break_state);
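/*
* The read/write register requests transfer two registers at a time, so
* the break and line control registers are accessed together here:
* break_reg[0] holds CH341_REG_BREAK and break_reg[1] holds
* CH341_REG_LCR, as encoded in ch341_break_reg above.
*/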
r = ch341_control_in(port->serial->dev, CH341_REQ_READ_REG,
ch341_break_reg, 0, break_reg, 2);
if (r) {
dev_err(&port->dev, "%s - USB control read error (%d)\n",
__func__, r);
if (r > 0)
r = -EIO;
return r;
}
dev_dbg(&port->dev, "%s - initial ch341 break register contents - reg1: %x, reg2: %x\n",
__func__, break_reg[0], break_reg[1]);
if (break_state != 0) {
dev_dbg(&port->dev, "%s - Enter break state requested\n", __func__);
break_reg[0] &= ~CH341_NBREAK_BITS;
break_reg[1] &= ~CH341_LCR_ENABLE_TX;
} else {
dev_dbg(&port->dev, "%s - Leave break state requested\n", __func__);
break_reg[0] |= CH341_NBREAK_BITS;
break_reg[1] |= CH341_LCR_ENABLE_TX;
}
dev_dbg(&port->dev, "%s - New ch341 break register contents - reg1: %x, reg2: %x\n",
__func__, break_reg[0], break_reg[1]);
reg_contents = get_unaligned_le16(break_reg);
r = ch341_control_out(port->serial->dev, CH341_REQ_WRITE_REG,
ch341_break_reg, reg_contents);
if (r < 0) {
dev_err(&port->dev, "%s - USB control write error (%d)\n",
__func__, r);
return r;
}
return 0;
}
static int ch341_tiocmset(struct tty_struct *tty,
unsigned int set, unsigned int clear)
{
struct usb_serial_port *port = tty->driver_data;
struct ch341_private *priv = usb_get_serial_port_data(port);
unsigned long flags;
u8 control;
spin_lock_irqsave(&priv->lock, flags);
if (set & TIOCM_RTS)
priv->mcr |= CH341_BIT_RTS;
if (set & TIOCM_DTR)
priv->mcr |= CH341_BIT_DTR;
if (clear & TIOCM_RTS)
priv->mcr &= ~CH341_BIT_RTS;
if (clear & TIOCM_DTR)
priv->mcr &= ~CH341_BIT_DTR;
control = priv->mcr;
spin_unlock_irqrestore(&priv->lock, flags);
return ch341_set_handshake(port->serial->dev, control);
}
static void ch341_update_status(struct usb_serial_port *port,
unsigned char *data, size_t len)
{
struct ch341_private *priv = usb_get_serial_port_data(port);
struct tty_struct *tty;
unsigned long flags;
u8 status;
u8 delta;
if (len < 4)
return;
status = ~data[2] & CH341_BITS_MODEM_STAT;
spin_lock_irqsave(&priv->lock, flags);
delta = status ^ priv->msr;
priv->msr = status;
spin_unlock_irqrestore(&priv->lock, flags);
if (data[1] & CH341_MULT_STAT)
dev_dbg(&port->dev, "%s - multiple status change\n", __func__);
if (!delta)
return;
if (delta & CH341_BIT_CTS)
port->icount.cts++;
if (delta & CH341_BIT_DSR)
port->icount.dsr++;
if (delta & CH341_BIT_RI)
port->icount.rng++;
if (delta & CH341_BIT_DCD) {
port->icount.dcd++;
tty = tty_port_tty_get(&port->port);
if (tty) {
usb_serial_handle_dcd_change(port, tty,
status & CH341_BIT_DCD);
tty_kref_put(tty);
}
}
wake_up_interruptible(&port->port.delta_msr_wait);
}
static void ch341_read_int_callback(struct urb *urb)
{
struct usb_serial_port *port = urb->context;
unsigned char *data = urb->transfer_buffer;
unsigned int len = urb->actual_length;
int status;
switch (urb->status) {
case 0:
/* success */
break;
case -ECONNRESET:
case -ENOENT:
case -ESHUTDOWN:
/* this urb is terminated, clean up */
dev_dbg(&urb->dev->dev, "%s - urb shutting down: %d\n",
__func__, urb->status);
return;
default:
dev_dbg(&urb->dev->dev, "%s - nonzero urb status: %d\n",
__func__, urb->status);
goto exit;
}
usb_serial_debug_data(&port->dev, __func__, len, data);
ch341_update_status(port, data, len);
exit:
status = usb_submit_urb(urb, GFP_ATOMIC);
if (status) {
dev_err(&urb->dev->dev, "%s - usb_submit_urb failed: %d\n",
__func__, status);
}
}
static int ch341_tiocmget(struct tty_struct *tty)
{
struct usb_serial_port *port = tty->driver_data;
struct ch341_private *priv = usb_get_serial_port_data(port);
unsigned long flags;
u8 mcr;
u8 status;
unsigned int result;
spin_lock_irqsave(&priv->lock, flags);
mcr = priv->mcr;
status = priv->msr;
spin_unlock_irqrestore(&priv->lock, flags);
result = ((mcr & CH341_BIT_DTR) ? TIOCM_DTR : 0)
| ((mcr & CH341_BIT_RTS) ? TIOCM_RTS : 0)
| ((status & CH341_BIT_CTS) ? TIOCM_CTS : 0)
| ((status & CH341_BIT_DSR) ? TIOCM_DSR : 0)
| ((status & CH341_BIT_RI) ? TIOCM_RI : 0)
| ((status & CH341_BIT_DCD) ? TIOCM_CD : 0);
dev_dbg(&port->dev, "%s - result = %x\n", __func__, result);
return result;
}
static int ch341_reset_resume(struct usb_serial *serial)
{
struct usb_serial_port *port = serial->port[0];
struct ch341_private *priv;
int ret;
priv = usb_get_serial_port_data(port);
if (!priv)
return 0;
/* reconfigure ch341 serial port after bus-reset */
ch341_configure(serial->dev, priv);
if (tty_port_initialized(&port->port)) {
ret = usb_submit_urb(port->interrupt_in_urb, GFP_NOIO);
if (ret) {
dev_err(&port->dev, "failed to submit interrupt urb: %d\n",
ret);
return ret;
}
ret = ch341_get_status(port->serial->dev, priv);
if (ret < 0) {
dev_err(&port->dev, "failed to read modem status: %d\n",
ret);
}
}
return usb_serial_generic_resume(serial);
}
static struct usb_serial_driver ch341_device = {
.driver = {
.owner = THIS_MODULE,
.name = "ch341-uart",
},
.id_table = id_table,
.num_ports = 1,
.open = ch341_open,
.dtr_rts = ch341_dtr_rts,
.carrier_raised = ch341_carrier_raised,
.close = ch341_close,
.set_termios = ch341_set_termios,
.break_ctl = ch341_break_ctl,
.tiocmget = ch341_tiocmget,
.tiocmset = ch341_tiocmset,
.tiocmiwait = usb_serial_generic_tiocmiwait,
.read_int_callback = ch341_read_int_callback,
.port_probe = ch341_port_probe,
.port_remove = ch341_port_remove,
.reset_resume = ch341_reset_resume,
};
static struct usb_serial_driver * const serial_drivers[] = {
&ch341_device, NULL
};
module_usb_serial_driver(serial_drivers, id_table);
MODULE_LICENSE("GPL v2");
| linux-master | drivers/usb/serial/ch341.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* KOBIL USB Smart Card Terminal Driver
*
* Copyright (C) 2002 KOBIL Systems GmbH
* Author: Thomas Wahrenbruch
*
* Contact: [email protected]
*
* This program is largely derived from work by the linux-usb group
* and associated source files. Please see the usb/serial files for
* individual credits and copyrights.
*
* Thanks to Greg Kroah-Hartman ([email protected]) for his help and
* patience.
*
* Supported readers: USB TWIN, KAAN Standard Plus and SecOVID Reader Plus
* (Adapter K), B1 Professional and KAAN Professional (Adapter B)
*/
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/tty.h>
#include <linux/tty_driver.h>
#include <linux/tty_flip.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/uaccess.h>
#include <linux/usb.h>
#include <linux/usb/serial.h>
#include <linux/ioctl.h>
#include "kobil_sct.h"
#define DRIVER_AUTHOR "KOBIL Systems GmbH - http://www.kobil.com"
#define DRIVER_DESC "KOBIL USB Smart Card Terminal Driver (experimental)"
#define KOBIL_VENDOR_ID 0x0D46
#define KOBIL_ADAPTER_B_PRODUCT_ID 0x2011
#define KOBIL_ADAPTER_K_PRODUCT_ID 0x2012
#define KOBIL_USBTWIN_PRODUCT_ID 0x0078
#define KOBIL_KAAN_SIM_PRODUCT_ID 0x0081
#define KOBIL_TIMEOUT 500
#define KOBIL_BUF_LENGTH 300
/* Function prototypes */
static int kobil_port_probe(struct usb_serial_port *port);
static void kobil_port_remove(struct usb_serial_port *port);
static int kobil_open(struct tty_struct *tty, struct usb_serial_port *port);
static void kobil_close(struct usb_serial_port *port);
static int kobil_write(struct tty_struct *tty, struct usb_serial_port *port,
const unsigned char *buf, int count);
static unsigned int kobil_write_room(struct tty_struct *tty);
static int kobil_ioctl(struct tty_struct *tty,
unsigned int cmd, unsigned long arg);
static int kobil_tiocmget(struct tty_struct *tty);
static int kobil_tiocmset(struct tty_struct *tty,
unsigned int set, unsigned int clear);
static void kobil_read_int_callback(struct urb *urb);
static void kobil_write_int_callback(struct urb *urb);
static void kobil_set_termios(struct tty_struct *tty,
struct usb_serial_port *port,
const struct ktermios *old);
static void kobil_init_termios(struct tty_struct *tty);
static const struct usb_device_id id_table[] = {
{ USB_DEVICE(KOBIL_VENDOR_ID, KOBIL_ADAPTER_B_PRODUCT_ID) },
{ USB_DEVICE(KOBIL_VENDOR_ID, KOBIL_ADAPTER_K_PRODUCT_ID) },
{ USB_DEVICE(KOBIL_VENDOR_ID, KOBIL_USBTWIN_PRODUCT_ID) },
{ USB_DEVICE(KOBIL_VENDOR_ID, KOBIL_KAAN_SIM_PRODUCT_ID) },
{ } /* Terminating entry */
};
MODULE_DEVICE_TABLE(usb, id_table);
static struct usb_serial_driver kobil_device = {
.driver = {
.owner = THIS_MODULE,
.name = "kobil",
},
.description = "KOBIL USB smart card terminal",
.id_table = id_table,
.num_ports = 1,
.num_interrupt_out = 1,
.port_probe = kobil_port_probe,
.port_remove = kobil_port_remove,
.ioctl = kobil_ioctl,
.set_termios = kobil_set_termios,
.init_termios = kobil_init_termios,
.tiocmget = kobil_tiocmget,
.tiocmset = kobil_tiocmset,
.open = kobil_open,
.close = kobil_close,
.write = kobil_write,
.write_room = kobil_write_room,
.read_int_callback = kobil_read_int_callback,
.write_int_callback = kobil_write_int_callback,
};
static struct usb_serial_driver * const serial_drivers[] = {
&kobil_device, NULL
};
struct kobil_private {
unsigned char buf[KOBIL_BUF_LENGTH]; /* buffer for the APDU to send */
int filled; /* number of bytes in buf */
int cur_pos; /* index of the next char to send in buf */
__u16 device_type;
};
static int kobil_port_probe(struct usb_serial_port *port)
{
struct usb_serial *serial = port->serial;
struct kobil_private *priv;
priv = kmalloc(sizeof(struct kobil_private), GFP_KERNEL);
if (!priv)
return -ENOMEM;
priv->filled = 0;
priv->cur_pos = 0;
priv->device_type = le16_to_cpu(serial->dev->descriptor.idProduct);
switch (priv->device_type) {
case KOBIL_ADAPTER_B_PRODUCT_ID:
dev_dbg(&serial->dev->dev, "KOBIL B1 PRO / KAAN PRO detected\n");
break;
case KOBIL_ADAPTER_K_PRODUCT_ID:
dev_dbg(&serial->dev->dev, "KOBIL KAAN Standard Plus / SecOVID Reader Plus detected\n");
break;
case KOBIL_USBTWIN_PRODUCT_ID:
dev_dbg(&serial->dev->dev, "KOBIL USBTWIN detected\n");
break;
case KOBIL_KAAN_SIM_PRODUCT_ID:
dev_dbg(&serial->dev->dev, "KOBIL KAAN SIM detected\n");
break;
}
usb_set_serial_port_data(port, priv);
return 0;
}
static void kobil_port_remove(struct usb_serial_port *port)
{
struct kobil_private *priv;
priv = usb_get_serial_port_data(port);
kfree(priv);
}
static void kobil_init_termios(struct tty_struct *tty)
{
/* Default to echo off and other sane device settings */
tty->termios.c_lflag = 0;
tty->termios.c_iflag &= ~(ISIG | ICANON | ECHO | IEXTEN | XCASE);
tty->termios.c_iflag |= IGNBRK | IGNPAR | IXOFF;
/* do NOT translate NL to CR-NL (0x0a -> 0x0d 0x0a) */
tty->termios.c_oflag &= ~ONLCR;
}
static int kobil_open(struct tty_struct *tty, struct usb_serial_port *port)
{
struct device *dev = &port->dev;
int result = 0;
struct kobil_private *priv;
unsigned char *transfer_buffer;
int transfer_buffer_length = 8;
priv = usb_get_serial_port_data(port);
/* allocate memory for transfer buffer */
transfer_buffer = kzalloc(transfer_buffer_length, GFP_KERNEL);
if (!transfer_buffer)
return -ENOMEM;
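/*
* Overview of the open sequence implemented below: query the hardware
* and firmware versions, configure 9600 baud with even parity and one
* stop bit and reset all queues on the adapter B and K readers, and
* finally start the interrupt-in URB for the device types that need it.
*/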
/* get hardware version */
result = usb_control_msg(port->serial->dev,
usb_rcvctrlpipe(port->serial->dev, 0),
SUSBCRequest_GetMisc,
USB_TYPE_VENDOR | USB_RECIP_ENDPOINT | USB_DIR_IN,
SUSBCR_MSC_GetHWVersion,
0,
transfer_buffer,
transfer_buffer_length,
KOBIL_TIMEOUT
);
dev_dbg(dev, "%s - Send get_HW_version URB returns: %i\n", __func__, result);
if (result >= 3) {
dev_dbg(dev, "Hardware version: %i.%i.%i\n", transfer_buffer[0],
transfer_buffer[1], transfer_buffer[2]);
}
/* get firmware version */
result = usb_control_msg(port->serial->dev,
usb_rcvctrlpipe(port->serial->dev, 0),
SUSBCRequest_GetMisc,
USB_TYPE_VENDOR | USB_RECIP_ENDPOINT | USB_DIR_IN,
SUSBCR_MSC_GetFWVersion,
0,
transfer_buffer,
transfer_buffer_length,
KOBIL_TIMEOUT
);
dev_dbg(dev, "%s - Send get_FW_version URB returns: %i\n", __func__, result);
if (result >= 3) {
dev_dbg(dev, "Firmware version: %i.%i.%i\n", transfer_buffer[0],
transfer_buffer[1], transfer_buffer[2]);
}
if (priv->device_type == KOBIL_ADAPTER_B_PRODUCT_ID ||
priv->device_type == KOBIL_ADAPTER_K_PRODUCT_ID) {
/* Setting Baudrate, Parity and Stopbits */
result = usb_control_msg(port->serial->dev,
usb_sndctrlpipe(port->serial->dev, 0),
SUSBCRequest_SetBaudRateParityAndStopBits,
USB_TYPE_VENDOR | USB_RECIP_ENDPOINT | USB_DIR_OUT,
SUSBCR_SBR_9600 | SUSBCR_SPASB_EvenParity |
SUSBCR_SPASB_1StopBit,
0,
NULL,
0,
KOBIL_TIMEOUT
);
dev_dbg(dev, "%s - Send set_baudrate URB returns: %i\n", __func__, result);
/* reset all queues */
result = usb_control_msg(port->serial->dev,
usb_sndctrlpipe(port->serial->dev, 0),
SUSBCRequest_Misc,
USB_TYPE_VENDOR | USB_RECIP_ENDPOINT | USB_DIR_OUT,
SUSBCR_MSC_ResetAllQueues,
0,
NULL,
0,
KOBIL_TIMEOUT
);
dev_dbg(dev, "%s - Send reset_all_queues URB returns: %i\n", __func__, result);
}
if (priv->device_type == KOBIL_USBTWIN_PRODUCT_ID ||
priv->device_type == KOBIL_ADAPTER_B_PRODUCT_ID ||
priv->device_type == KOBIL_KAAN_SIM_PRODUCT_ID) {
/* start reading (also Adapter B, because of the PnP string) */
result = usb_submit_urb(port->interrupt_in_urb, GFP_KERNEL);
dev_dbg(dev, "%s - Send read URB returns: %i\n", __func__, result);
}
kfree(transfer_buffer);
return 0;
}
static void kobil_close(struct usb_serial_port *port)
{
/* FIXME: Add rts/dtr methods */
usb_kill_urb(port->interrupt_out_urb);
usb_kill_urb(port->interrupt_in_urb);
}
static void kobil_read_int_callback(struct urb *urb)
{
int result;
struct usb_serial_port *port = urb->context;
unsigned char *data = urb->transfer_buffer;
int status = urb->status;
if (status) {
dev_dbg(&port->dev, "%s - Read int status not zero: %d\n", __func__, status);
return;
}
if (urb->actual_length) {
usb_serial_debug_data(&port->dev, __func__, urb->actual_length,
data);
tty_insert_flip_string(&port->port, data, urb->actual_length);
tty_flip_buffer_push(&port->port);
}
result = usb_submit_urb(port->interrupt_in_urb, GFP_ATOMIC);
dev_dbg(&port->dev, "%s - Send read URB returns: %i\n", __func__, result);
}
static void kobil_write_int_callback(struct urb *urb)
{
}
static int kobil_write(struct tty_struct *tty, struct usb_serial_port *port,
const unsigned char *buf, int count)
{
int length = 0;
int result = 0;
int todo = 0;
struct kobil_private *priv;
if (count == 0) {
dev_dbg(&port->dev, "%s - write request of 0 bytes\n", __func__);
return 0;
}
priv = usb_get_serial_port_data(port);
if (count > (KOBIL_BUF_LENGTH - priv->filled)) {
dev_dbg(&port->dev, "%s - Error: write request bigger than buffer size\n", __func__);
return -ENOMEM;
}
/* Copy data to buffer */
memcpy(priv->buf + priv->filled, buf, count);
usb_serial_debug_data(&port->dev, __func__, count, priv->buf + priv->filled);
priv->filled = priv->filled + count;
/*
* Only send complete blocks. TWIN, KAAN SIM and adapter K use the same
* protocol.
*/
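/*
* Interpretation of the condition below (not from the original
* comments): for the TWIN, KAAN SIM and adapter K protocol buf[1]
* appears to hold the payload length, so a complete block spans
* buf[1] + 3 bytes; adapter B seems to use buf[2] with one extra
* header byte (buf[2] + 4 bytes).
*/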
if (((priv->device_type != KOBIL_ADAPTER_B_PRODUCT_ID) && (priv->filled > 2) && (priv->filled >= (priv->buf[1] + 3))) ||
((priv->device_type == KOBIL_ADAPTER_B_PRODUCT_ID) && (priv->filled > 3) && (priv->filled >= (priv->buf[2] + 4)))) {
/* stop reading (except TWIN and KAAN SIM) */
if ((priv->device_type == KOBIL_ADAPTER_B_PRODUCT_ID)
|| (priv->device_type == KOBIL_ADAPTER_K_PRODUCT_ID))
usb_kill_urb(port->interrupt_in_urb);
todo = priv->filled - priv->cur_pos;
while (todo > 0) {
/* max 8 bytes in one urb (endpoint size) */
length = min(todo, port->interrupt_out_size);
/* copy data to transfer buffer */
memcpy(port->interrupt_out_buffer,
priv->buf + priv->cur_pos, length);
port->interrupt_out_urb->transfer_buffer_length = length;
priv->cur_pos = priv->cur_pos + length;
result = usb_submit_urb(port->interrupt_out_urb,
GFP_ATOMIC);
dev_dbg(&port->dev, "%s - Send write URB returns: %i\n", __func__, result);
todo = priv->filled - priv->cur_pos;
if (todo > 0)
msleep(24);
}
priv->filled = 0;
priv->cur_pos = 0;
/* start reading (except TWIN and KAAN SIM) */
if (priv->device_type == KOBIL_ADAPTER_B_PRODUCT_ID ||
priv->device_type == KOBIL_ADAPTER_K_PRODUCT_ID) {
result = usb_submit_urb(port->interrupt_in_urb,
GFP_ATOMIC);
dev_dbg(&port->dev, "%s - Send read URB returns: %i\n", __func__, result);
}
}
return count;
}
static unsigned int kobil_write_room(struct tty_struct *tty)
{
/* FIXME */
return 8;
}
static int kobil_tiocmget(struct tty_struct *tty)
{
struct usb_serial_port *port = tty->driver_data;
struct kobil_private *priv;
int result;
unsigned char *transfer_buffer;
int transfer_buffer_length = 8;
priv = usb_get_serial_port_data(port);
if (priv->device_type == KOBIL_USBTWIN_PRODUCT_ID
|| priv->device_type == KOBIL_KAAN_SIM_PRODUCT_ID) {
/* This device doesn't support ioctl calls */
return -EINVAL;
}
/* allocate memory for transfer buffer */
transfer_buffer = kzalloc(transfer_buffer_length, GFP_KERNEL);
if (!transfer_buffer)
return -ENOMEM;
result = usb_control_msg(port->serial->dev,
usb_rcvctrlpipe(port->serial->dev, 0),
SUSBCRequest_GetStatusLineState,
USB_TYPE_VENDOR | USB_RECIP_ENDPOINT | USB_DIR_IN,
0,
0,
transfer_buffer,
transfer_buffer_length,
KOBIL_TIMEOUT);
dev_dbg(&port->dev, "Send get_status_line_state URB returns: %i\n",
result);
if (result < 1) {
if (result >= 0)
result = -EIO;
goto out_free;
}
dev_dbg(&port->dev, "Statusline: %02x\n", transfer_buffer[0]);
result = 0;
if ((transfer_buffer[0] & SUSBCR_GSL_DSR) != 0)
result = TIOCM_DSR;
out_free:
kfree(transfer_buffer);
return result;
}
static int kobil_tiocmset(struct tty_struct *tty,
unsigned int set, unsigned int clear)
{
struct usb_serial_port *port = tty->driver_data;
struct device *dev = &port->dev;
struct kobil_private *priv;
int result;
int dtr = 0;
int rts = 0;
/* FIXME: locking ? */
priv = usb_get_serial_port_data(port);
if (priv->device_type == KOBIL_USBTWIN_PRODUCT_ID
|| priv->device_type == KOBIL_KAAN_SIM_PRODUCT_ID) {
/* This device doesn't support ioctl calls */
return -EINVAL;
}
if (set & TIOCM_RTS)
rts = 1;
if (set & TIOCM_DTR)
dtr = 1;
if (clear & TIOCM_RTS)
rts = 0;
if (clear & TIOCM_DTR)
dtr = 0;
if (priv->device_type == KOBIL_ADAPTER_B_PRODUCT_ID) {
if (dtr != 0)
dev_dbg(dev, "%s - Setting DTR\n", __func__);
else
dev_dbg(dev, "%s - Clearing DTR\n", __func__);
result = usb_control_msg(port->serial->dev,
usb_sndctrlpipe(port->serial->dev, 0),
SUSBCRequest_SetStatusLinesOrQueues,
USB_TYPE_VENDOR | USB_RECIP_ENDPOINT | USB_DIR_OUT,
((dtr != 0) ? SUSBCR_SSL_SETDTR : SUSBCR_SSL_CLRDTR),
0,
NULL,
0,
KOBIL_TIMEOUT);
} else {
if (rts != 0)
dev_dbg(dev, "%s - Setting RTS\n", __func__);
else
dev_dbg(dev, "%s - Clearing RTS\n", __func__);
result = usb_control_msg(port->serial->dev,
usb_sndctrlpipe(port->serial->dev, 0),
SUSBCRequest_SetStatusLinesOrQueues,
USB_TYPE_VENDOR | USB_RECIP_ENDPOINT | USB_DIR_OUT,
((rts != 0) ? SUSBCR_SSL_SETRTS : SUSBCR_SSL_CLRRTS),
0,
NULL,
0,
KOBIL_TIMEOUT);
}
dev_dbg(dev, "%s - Send set_status_line URB returns: %i\n", __func__, result);
return (result < 0) ? result : 0;
}
static void kobil_set_termios(struct tty_struct *tty,
struct usb_serial_port *port,
const struct ktermios *old)
{
struct kobil_private *priv;
int result;
unsigned short urb_val = 0;
int c_cflag = tty->termios.c_cflag;
speed_t speed;
priv = usb_get_serial_port_data(port);
if (priv->device_type == KOBIL_USBTWIN_PRODUCT_ID ||
priv->device_type == KOBIL_KAAN_SIM_PRODUCT_ID) {
/* This device doesn't support ioctl calls */
tty_termios_copy_hw(&tty->termios, old);
return;
}
speed = tty_get_baud_rate(tty);
switch (speed) {
case 1200:
urb_val = SUSBCR_SBR_1200;
break;
default:
speed = 9600;
fallthrough;
case 9600:
urb_val = SUSBCR_SBR_9600;
break;
}
urb_val |= (c_cflag & CSTOPB) ? SUSBCR_SPASB_2StopBits :
SUSBCR_SPASB_1StopBit;
if (c_cflag & PARENB) {
if (c_cflag & PARODD)
urb_val |= SUSBCR_SPASB_OddParity;
else
urb_val |= SUSBCR_SPASB_EvenParity;
} else
urb_val |= SUSBCR_SPASB_NoParity;
tty->termios.c_cflag &= ~CMSPAR;
tty_encode_baud_rate(tty, speed, speed);
result = usb_control_msg(port->serial->dev,
usb_sndctrlpipe(port->serial->dev, 0),
SUSBCRequest_SetBaudRateParityAndStopBits,
USB_TYPE_VENDOR | USB_RECIP_ENDPOINT | USB_DIR_OUT,
urb_val,
0,
NULL,
0,
KOBIL_TIMEOUT
);
if (result) {
dev_err(&port->dev, "failed to update line settings: %d\n",
result);
}
}
static int kobil_ioctl(struct tty_struct *tty,
unsigned int cmd, unsigned long arg)
{
struct usb_serial_port *port = tty->driver_data;
struct kobil_private *priv = usb_get_serial_port_data(port);
int result;
if (priv->device_type == KOBIL_USBTWIN_PRODUCT_ID ||
priv->device_type == KOBIL_KAAN_SIM_PRODUCT_ID)
/* This device doesn't support ioctl calls */
return -ENOIOCTLCMD;
switch (cmd) {
case TCFLSH:
result = usb_control_msg(port->serial->dev,
usb_sndctrlpipe(port->serial->dev, 0),
SUSBCRequest_Misc,
USB_TYPE_VENDOR | USB_RECIP_ENDPOINT | USB_DIR_OUT,
SUSBCR_MSC_ResetAllQueues,
0,
NULL,
0,
KOBIL_TIMEOUT
);
dev_dbg(&port->dev,
"%s - Send reset_all_queues (FLUSH) URB returns: %i\n",
__func__, result);
return (result < 0) ? -EIO: 0;
default:
return -ENOIOCTLCMD;
}
}
module_usb_serial_driver(serial_drivers, id_table);
MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL");
| linux-master | drivers/usb/serial/kobil_sct.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* MaxLinear/Exar USB to Serial driver
*
* Copyright (c) 2020 Manivannan Sadhasivam <[email protected]>
* Copyright (c) 2021 Johan Hovold <[email protected]>
*
* Based on the initial driver written by Patong Yang:
*
* https://lore.kernel.org/r/20180404070634.nhspvmxcjwfgjkcv@advantechmxl-desktop
*
* Copyright (c) 2018 Patong Yang <[email protected]>
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/tty.h>
#include <linux/usb.h>
#include <linux/usb/cdc.h>
#include <linux/usb/serial.h>
struct xr_txrx_clk_mask {
u16 tx;
u16 rx0;
u16 rx1;
};
#define XR_INT_OSC_HZ 48000000U
#define XR21V141X_MIN_SPEED 46U
#define XR21V141X_MAX_SPEED XR_INT_OSC_HZ
/* XR21V141X register blocks */
#define XR21V141X_UART_REG_BLOCK 0
#define XR21V141X_UM_REG_BLOCK 4
#define XR21V141X_UART_CUSTOM_BLOCK 0x66
/* XR21V141X UART registers */
#define XR21V141X_CLOCK_DIVISOR_0 0x04
#define XR21V141X_CLOCK_DIVISOR_1 0x05
#define XR21V141X_CLOCK_DIVISOR_2 0x06
#define XR21V141X_TX_CLOCK_MASK_0 0x07
#define XR21V141X_TX_CLOCK_MASK_1 0x08
#define XR21V141X_RX_CLOCK_MASK_0 0x09
#define XR21V141X_RX_CLOCK_MASK_1 0x0a
#define XR21V141X_REG_FORMAT 0x0b
/* XR21V141X UART Manager registers */
#define XR21V141X_UM_FIFO_ENABLE_REG 0x10
#define XR21V141X_UM_ENABLE_TX_FIFO 0x01
#define XR21V141X_UM_ENABLE_RX_FIFO 0x02
#define XR21V141X_UM_RX_FIFO_RESET 0x18
#define XR21V141X_UM_TX_FIFO_RESET 0x1c
#define XR_UART_ENABLE_TX 0x1
#define XR_UART_ENABLE_RX 0x2
#define XR_GPIO_RI BIT(0)
#define XR_GPIO_CD BIT(1)
#define XR_GPIO_DSR BIT(2)
#define XR_GPIO_DTR BIT(3)
#define XR_GPIO_CTS BIT(4)
#define XR_GPIO_RTS BIT(5)
#define XR_GPIO_CLK BIT(6)
#define XR_GPIO_XEN BIT(7)
#define XR_GPIO_TXT BIT(8)
#define XR_GPIO_RXT BIT(9)
#define XR_UART_DATA_MASK GENMASK(3, 0)
#define XR_UART_DATA_7 0x7
#define XR_UART_DATA_8 0x8
#define XR_UART_PARITY_MASK GENMASK(6, 4)
#define XR_UART_PARITY_SHIFT 4
#define XR_UART_PARITY_NONE (0x0 << XR_UART_PARITY_SHIFT)
#define XR_UART_PARITY_ODD (0x1 << XR_UART_PARITY_SHIFT)
#define XR_UART_PARITY_EVEN (0x2 << XR_UART_PARITY_SHIFT)
#define XR_UART_PARITY_MARK (0x3 << XR_UART_PARITY_SHIFT)
#define XR_UART_PARITY_SPACE (0x4 << XR_UART_PARITY_SHIFT)
#define XR_UART_STOP_MASK BIT(7)
#define XR_UART_STOP_SHIFT 7
#define XR_UART_STOP_1 (0x0 << XR_UART_STOP_SHIFT)
#define XR_UART_STOP_2 (0x1 << XR_UART_STOP_SHIFT)
#define XR_UART_FLOW_MODE_NONE 0x0
#define XR_UART_FLOW_MODE_HW 0x1
#define XR_UART_FLOW_MODE_SW 0x2
#define XR_GPIO_MODE_SEL_MASK GENMASK(2, 0)
#define XR_GPIO_MODE_SEL_RTS_CTS 0x1
#define XR_GPIO_MODE_SEL_DTR_DSR 0x2
#define XR_GPIO_MODE_SEL_RS485 0x3
#define XR_GPIO_MODE_SEL_RS485_ADDR 0x4
#define XR_GPIO_MODE_RS485_TX_H 0x8
#define XR_GPIO_MODE_TX_TOGGLE 0x100
#define XR_GPIO_MODE_RX_TOGGLE 0x200
#define XR_FIFO_RESET 0x1
#define XR_CUSTOM_DRIVER_ACTIVE 0x1
static int xr21v141x_uart_enable(struct usb_serial_port *port);
static int xr21v141x_uart_disable(struct usb_serial_port *port);
static int xr21v141x_fifo_reset(struct usb_serial_port *port);
static void xr21v141x_set_line_settings(struct tty_struct *tty,
struct usb_serial_port *port,
const struct ktermios *old_termios);
struct xr_type {
int reg_width;
u8 reg_recipient;
u8 set_reg;
u8 get_reg;
u16 uart_enable;
u16 flow_control;
u16 xon_char;
u16 xoff_char;
u16 tx_break;
u16 gpio_mode;
u16 gpio_direction;
u16 gpio_set;
u16 gpio_clear;
u16 gpio_status;
u16 tx_fifo_reset;
u16 rx_fifo_reset;
u16 custom_driver;
bool have_5_6_bit_mode;
bool have_xmit_toggle;
int (*enable)(struct usb_serial_port *port);
int (*disable)(struct usb_serial_port *port);
int (*fifo_reset)(struct usb_serial_port *port);
void (*set_line_settings)(struct tty_struct *tty,
struct usb_serial_port *port,
const struct ktermios *old_termios);
};
enum xr_type_id {
XR21V141X,
XR21B142X,
XR21B1411,
XR2280X,
XR_TYPE_COUNT,
};
static const struct xr_type xr_types[] = {
[XR21V141X] = {
.reg_width = 8,
.reg_recipient = USB_RECIP_DEVICE,
.set_reg = 0x00,
.get_reg = 0x01,
.uart_enable = 0x03,
.flow_control = 0x0c,
.xon_char = 0x10,
.xoff_char = 0x11,
.tx_break = 0x14,
.gpio_mode = 0x1a,
.gpio_direction = 0x1b,
.gpio_set = 0x1d,
.gpio_clear = 0x1e,
.gpio_status = 0x1f,
.enable = xr21v141x_uart_enable,
.disable = xr21v141x_uart_disable,
.fifo_reset = xr21v141x_fifo_reset,
.set_line_settings = xr21v141x_set_line_settings,
},
[XR21B142X] = {
.reg_width = 16,
.reg_recipient = USB_RECIP_INTERFACE,
.set_reg = 0x00,
.get_reg = 0x00,
.uart_enable = 0x00,
.flow_control = 0x06,
.xon_char = 0x07,
.xoff_char = 0x08,
.tx_break = 0x0a,
.gpio_mode = 0x0c,
.gpio_direction = 0x0d,
.gpio_set = 0x0e,
.gpio_clear = 0x0f,
.gpio_status = 0x10,
.tx_fifo_reset = 0x40,
.rx_fifo_reset = 0x43,
.custom_driver = 0x60,
.have_5_6_bit_mode = true,
.have_xmit_toggle = true,
},
[XR21B1411] = {
.reg_width = 12,
.reg_recipient = USB_RECIP_DEVICE,
.set_reg = 0x00,
.get_reg = 0x01,
.uart_enable = 0xc00,
.flow_control = 0xc06,
.xon_char = 0xc07,
.xoff_char = 0xc08,
.tx_break = 0xc0a,
.gpio_mode = 0xc0c,
.gpio_direction = 0xc0d,
.gpio_set = 0xc0e,
.gpio_clear = 0xc0f,
.gpio_status = 0xc10,
.tx_fifo_reset = 0xc80,
.rx_fifo_reset = 0xcc0,
.custom_driver = 0x20d,
},
[XR2280X] = {
.reg_width = 16,
.reg_recipient = USB_RECIP_DEVICE,
.set_reg = 0x05,
.get_reg = 0x05,
.uart_enable = 0x40,
.flow_control = 0x46,
.xon_char = 0x47,
.xoff_char = 0x48,
.tx_break = 0x4a,
.gpio_mode = 0x4c,
.gpio_direction = 0x4d,
.gpio_set = 0x4e,
.gpio_clear = 0x4f,
.gpio_status = 0x50,
.tx_fifo_reset = 0x60,
.rx_fifo_reset = 0x63,
.custom_driver = 0x81,
},
};
struct xr_data {
const struct xr_type *type;
u8 channel; /* zero-based index or interface number */
struct serial_rs485 rs485;
};
static int xr_set_reg(struct usb_serial_port *port, u8 channel, u16 reg, u16 val)
{
struct xr_data *data = usb_get_serial_port_data(port);
const struct xr_type *type = data->type;
struct usb_serial *serial = port->serial;
int ret;
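/*
* Registers are written using a vendor control request with the value
* in wValue and the channel (high byte) plus register address (low
* byte) in wIndex.
*/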
ret = usb_control_msg(serial->dev, usb_sndctrlpipe(serial->dev, 0),
type->set_reg,
USB_DIR_OUT | USB_TYPE_VENDOR | type->reg_recipient,
val, (channel << 8) | reg, NULL, 0,
USB_CTRL_SET_TIMEOUT);
if (ret < 0) {
dev_err(&port->dev, "Failed to set reg 0x%02x: %d\n", reg, ret);
return ret;
}
return 0;
}
static int xr_get_reg(struct usb_serial_port *port, u8 channel, u16 reg, u16 *val)
{
struct xr_data *data = usb_get_serial_port_data(port);
const struct xr_type *type = data->type;
struct usb_serial *serial = port->serial;
u8 *dmabuf;
int ret, len;
if (type->reg_width == 8)
len = 1;
else
len = 2;
dmabuf = kmalloc(len, GFP_KERNEL);
if (!dmabuf)
return -ENOMEM;
ret = usb_control_msg(serial->dev, usb_rcvctrlpipe(serial->dev, 0),
type->get_reg,
USB_DIR_IN | USB_TYPE_VENDOR | type->reg_recipient,
0, (channel << 8) | reg, dmabuf, len,
USB_CTRL_GET_TIMEOUT);
if (ret == len) {
if (len == 2)
*val = le16_to_cpup((__le16 *)dmabuf);
else
*val = *dmabuf;
ret = 0;
} else {
dev_err(&port->dev, "Failed to get reg 0x%02x: %d\n", reg, ret);
if (ret >= 0)
ret = -EIO;
}
kfree(dmabuf);
return ret;
}
static int xr_set_reg_uart(struct usb_serial_port *port, u16 reg, u16 val)
{
struct xr_data *data = usb_get_serial_port_data(port);
return xr_set_reg(port, data->channel, reg, val);
}
static int xr_get_reg_uart(struct usb_serial_port *port, u16 reg, u16 *val)
{
struct xr_data *data = usb_get_serial_port_data(port);
return xr_get_reg(port, data->channel, reg, val);
}
static int xr_set_reg_um(struct usb_serial_port *port, u8 reg_base, u8 val)
{
struct xr_data *data = usb_get_serial_port_data(port);
u8 reg;
reg = reg_base + data->channel;
return xr_set_reg(port, XR21V141X_UM_REG_BLOCK, reg, val);
}
static int __xr_uart_enable(struct usb_serial_port *port)
{
struct xr_data *data = usb_get_serial_port_data(port);
return xr_set_reg_uart(port, data->type->uart_enable,
XR_UART_ENABLE_TX | XR_UART_ENABLE_RX);
}
static int __xr_uart_disable(struct usb_serial_port *port)
{
struct xr_data *data = usb_get_serial_port_data(port);
return xr_set_reg_uart(port, data->type->uart_enable, 0);
}
/*
* According to datasheet, below is the recommended sequence for enabling UART
* module in XR21V141X:
*
* Enable Tx FIFO
* Enable Tx and Rx
* Enable Rx FIFO
*/
static int xr21v141x_uart_enable(struct usb_serial_port *port)
{
int ret;
ret = xr_set_reg_um(port, XR21V141X_UM_FIFO_ENABLE_REG,
XR21V141X_UM_ENABLE_TX_FIFO);
if (ret)
return ret;
ret = __xr_uart_enable(port);
if (ret)
return ret;
ret = xr_set_reg_um(port, XR21V141X_UM_FIFO_ENABLE_REG,
XR21V141X_UM_ENABLE_TX_FIFO | XR21V141X_UM_ENABLE_RX_FIFO);
if (ret)
__xr_uart_disable(port);
return ret;
}
static int xr21v141x_uart_disable(struct usb_serial_port *port)
{
int ret;
ret = __xr_uart_disable(port);
if (ret)
return ret;
ret = xr_set_reg_um(port, XR21V141X_UM_FIFO_ENABLE_REG, 0);
return ret;
}
static int xr_uart_enable(struct usb_serial_port *port)
{
struct xr_data *data = usb_get_serial_port_data(port);
if (data->type->enable)
return data->type->enable(port);
return __xr_uart_enable(port);
}
static int xr_uart_disable(struct usb_serial_port *port)
{
struct xr_data *data = usb_get_serial_port_data(port);
if (data->type->disable)
return data->type->disable(port);
return __xr_uart_disable(port);
}
static int xr21v141x_fifo_reset(struct usb_serial_port *port)
{
int ret;
ret = xr_set_reg_um(port, XR21V141X_UM_TX_FIFO_RESET, XR_FIFO_RESET);
if (ret)
return ret;
ret = xr_set_reg_um(port, XR21V141X_UM_RX_FIFO_RESET, XR_FIFO_RESET);
if (ret)
return ret;
return 0;
}
static int xr_fifo_reset(struct usb_serial_port *port)
{
struct xr_data *data = usb_get_serial_port_data(port);
int ret;
if (data->type->fifo_reset)
return data->type->fifo_reset(port);
ret = xr_set_reg_uart(port, data->type->tx_fifo_reset, XR_FIFO_RESET);
if (ret)
return ret;
ret = xr_set_reg_uart(port, data->type->rx_fifo_reset, XR_FIFO_RESET);
if (ret)
return ret;
return 0;
}
static int xr_tiocmget(struct tty_struct *tty)
{
struct usb_serial_port *port = tty->driver_data;
struct xr_data *data = usb_get_serial_port_data(port);
u16 status;
int ret;
ret = xr_get_reg_uart(port, data->type->gpio_status, &status);
if (ret)
return ret;
/*
* Modem control pins are active low, so reading '0' means it is active
* and '1' means not active.
*/
ret = ((status & XR_GPIO_DTR) ? 0 : TIOCM_DTR) |
((status & XR_GPIO_RTS) ? 0 : TIOCM_RTS) |
((status & XR_GPIO_CTS) ? 0 : TIOCM_CTS) |
((status & XR_GPIO_DSR) ? 0 : TIOCM_DSR) |
((status & XR_GPIO_RI) ? 0 : TIOCM_RI) |
((status & XR_GPIO_CD) ? 0 : TIOCM_CD);
return ret;
}
static int xr_tiocmset_port(struct usb_serial_port *port,
unsigned int set, unsigned int clear)
{
struct xr_data *data = usb_get_serial_port_data(port);
const struct xr_type *type = data->type;
u16 gpio_set = 0;
u16 gpio_clr = 0;
int ret = 0;
/* Modem control pins are active low, so set & clr are swapped */
if (set & TIOCM_RTS)
gpio_clr |= XR_GPIO_RTS;
if (set & TIOCM_DTR)
gpio_clr |= XR_GPIO_DTR;
if (clear & TIOCM_RTS)
gpio_set |= XR_GPIO_RTS;
if (clear & TIOCM_DTR)
gpio_set |= XR_GPIO_DTR;
/* Writing '0' to gpio_{set/clr} bits has no effect, so no read-modify-write is needed */
if (gpio_clr)
ret = xr_set_reg_uart(port, type->gpio_clear, gpio_clr);
if (gpio_set)
ret = xr_set_reg_uart(port, type->gpio_set, gpio_set);
return ret;
}
static int xr_tiocmset(struct tty_struct *tty,
unsigned int set, unsigned int clear)
{
struct usb_serial_port *port = tty->driver_data;
return xr_tiocmset_port(port, set, clear);
}
static void xr_dtr_rts(struct usb_serial_port *port, int on)
{
if (on)
xr_tiocmset_port(port, TIOCM_DTR | TIOCM_RTS, 0);
else
xr_tiocmset_port(port, 0, TIOCM_DTR | TIOCM_RTS);
}
static int xr_break_ctl(struct tty_struct *tty, int break_state)
{
struct usb_serial_port *port = tty->driver_data;
struct xr_data *data = usb_get_serial_port_data(port);
const struct xr_type *type = data->type;
u16 state;
if (break_state == 0)
state = 0;
else
state = GENMASK(type->reg_width - 1, 0);
dev_dbg(&port->dev, "Turning break %s\n", state == 0 ? "off" : "on");
return xr_set_reg_uart(port, type->tx_break, state);
}
/* Tx and Rx clock mask values obtained from section 3.3.4 of datasheet */
static const struct xr_txrx_clk_mask xr21v141x_txrx_clk_masks[] = {
{ 0x000, 0x000, 0x000 },
{ 0x000, 0x000, 0x000 },
{ 0x100, 0x000, 0x100 },
{ 0x020, 0x400, 0x020 },
{ 0x010, 0x100, 0x010 },
{ 0x208, 0x040, 0x208 },
{ 0x104, 0x820, 0x108 },
{ 0x844, 0x210, 0x884 },
{ 0x444, 0x110, 0x444 },
{ 0x122, 0x888, 0x224 },
{ 0x912, 0x448, 0x924 },
{ 0x492, 0x248, 0x492 },
{ 0x252, 0x928, 0x292 },
{ 0x94a, 0x4a4, 0xa52 },
{ 0x52a, 0xaa4, 0x54a },
{ 0xaaa, 0x954, 0x4aa },
{ 0xaaa, 0x554, 0xaaa },
{ 0x555, 0xad4, 0x5aa },
{ 0xb55, 0xab4, 0x55a },
{ 0x6b5, 0x5ac, 0xb56 },
{ 0x5b5, 0xd6c, 0x6d6 },
{ 0xb6d, 0xb6a, 0xdb6 },
{ 0x76d, 0x6da, 0xbb6 },
{ 0xedd, 0xdda, 0x76e },
{ 0xddd, 0xbba, 0xeee },
{ 0x7bb, 0xf7a, 0xdde },
{ 0xf7b, 0xef6, 0x7de },
{ 0xdf7, 0xbf6, 0xf7e },
{ 0x7f7, 0xfee, 0xefe },
{ 0xfdf, 0xfbe, 0x7fe },
{ 0xf7f, 0xefe, 0xffe },
{ 0xfff, 0xffe, 0xffd },
};
static int xr21v141x_set_baudrate(struct tty_struct *tty, struct usb_serial_port *port)
{
u32 divisor, baud, idx;
u16 tx_mask, rx_mask;
int ret;
baud = tty->termios.c_ospeed;
if (!baud)
return 0;
baud = clamp(baud, XR21V141X_MIN_SPEED, XR21V141X_MAX_SPEED);
divisor = XR_INT_OSC_HZ / baud;
idx = ((32 * XR_INT_OSC_HZ) / baud) & 0x1f;
tx_mask = xr21v141x_txrx_clk_masks[idx].tx;
if (divisor & 0x01)
rx_mask = xr21v141x_txrx_clk_masks[idx].rx1;
else
rx_mask = xr21v141x_txrx_clk_masks[idx].rx0;
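/*
* Illustrative example: for 115200 bps the integer divisor is
* 48000000 / 115200 = 416 and idx = (32 * 48000000 / 115200) & 0x1f = 21,
* so row 21 of the clock mask table is used (tx 0xb6d, rx0 0xb6a since
* the divisor is even).
*/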
dev_dbg(&port->dev, "Setting baud rate: %u\n", baud);
/*
* XR21V141X uses fractional baud rate generator with 48MHz internal
* oscillator and 19-bit programmable divisor. So theoretically it can
* generate most commonly used baud rates with high accuracy.
*/
ret = xr_set_reg_uart(port, XR21V141X_CLOCK_DIVISOR_0,
divisor & 0xff);
if (ret)
return ret;
ret = xr_set_reg_uart(port, XR21V141X_CLOCK_DIVISOR_1,
(divisor >> 8) & 0xff);
if (ret)
return ret;
ret = xr_set_reg_uart(port, XR21V141X_CLOCK_DIVISOR_2,
(divisor >> 16) & 0xff);
if (ret)
return ret;
ret = xr_set_reg_uart(port, XR21V141X_TX_CLOCK_MASK_0,
tx_mask & 0xff);
if (ret)
return ret;
ret = xr_set_reg_uart(port, XR21V141X_TX_CLOCK_MASK_1,
(tx_mask >> 8) & 0xff);
if (ret)
return ret;
ret = xr_set_reg_uart(port, XR21V141X_RX_CLOCK_MASK_0,
rx_mask & 0xff);
if (ret)
return ret;
ret = xr_set_reg_uart(port, XR21V141X_RX_CLOCK_MASK_1,
(rx_mask >> 8) & 0xff);
if (ret)
return ret;
tty_encode_baud_rate(tty, baud, baud);
return 0;
}
static void xr_set_flow_mode(struct tty_struct *tty,
struct usb_serial_port *port,
const struct ktermios *old_termios)
{
struct xr_data *data = usb_get_serial_port_data(port);
const struct xr_type *type = data->type;
u16 flow, gpio_mode;
bool rs485_enabled;
int ret;
ret = xr_get_reg_uart(port, type->gpio_mode, &gpio_mode);
if (ret)
return;
/*
* According to the datasheets, the UART needs to be disabled while
* writing to the FLOW_CONTROL register (XR21V141X), or any register
* but GPIO_SET, GPIO_CLEAR, TX_BREAK and ERROR_STATUS (XR21B142X).
*/
xr_uart_disable(port);
/* Set GPIO mode for controlling the pins manually by default. */
gpio_mode &= ~XR_GPIO_MODE_SEL_MASK;
rs485_enabled = !!(data->rs485.flags & SER_RS485_ENABLED);
if (rs485_enabled) {
dev_dbg(&port->dev, "Enabling RS-485\n");
gpio_mode |= XR_GPIO_MODE_SEL_RS485;
if (data->rs485.flags & SER_RS485_RTS_ON_SEND)
gpio_mode &= ~XR_GPIO_MODE_RS485_TX_H;
else
gpio_mode |= XR_GPIO_MODE_RS485_TX_H;
}
if (C_CRTSCTS(tty) && C_BAUD(tty) != B0 && !rs485_enabled) {
dev_dbg(&port->dev, "Enabling hardware flow ctrl\n");
gpio_mode |= XR_GPIO_MODE_SEL_RTS_CTS;
flow = XR_UART_FLOW_MODE_HW;
} else if (I_IXON(tty)) {
u8 start_char = START_CHAR(tty);
u8 stop_char = STOP_CHAR(tty);
dev_dbg(&port->dev, "Enabling sw flow ctrl\n");
flow = XR_UART_FLOW_MODE_SW;
xr_set_reg_uart(port, type->xon_char, start_char);
xr_set_reg_uart(port, type->xoff_char, stop_char);
} else {
dev_dbg(&port->dev, "Disabling flow ctrl\n");
flow = XR_UART_FLOW_MODE_NONE;
}
xr_set_reg_uart(port, type->flow_control, flow);
xr_set_reg_uart(port, type->gpio_mode, gpio_mode);
xr_uart_enable(port);
if (C_BAUD(tty) == B0)
xr_dtr_rts(port, 0);
else if (old_termios && (old_termios->c_cflag & CBAUD) == B0)
xr_dtr_rts(port, 1);
}
static void xr21v141x_set_line_settings(struct tty_struct *tty,
struct usb_serial_port *port,
const struct ktermios *old_termios)
{
struct ktermios *termios = &tty->termios;
u8 bits = 0;
int ret;
if (!old_termios || (tty->termios.c_ospeed != old_termios->c_ospeed))
xr21v141x_set_baudrate(tty, port);
switch (C_CSIZE(tty)) {
case CS5:
case CS6:
/* CS5 and CS6 are not supported, so just restore old setting */
termios->c_cflag &= ~CSIZE;
if (old_termios)
termios->c_cflag |= old_termios->c_cflag & CSIZE;
else
termios->c_cflag |= CS8;
if (C_CSIZE(tty) == CS7)
bits |= XR_UART_DATA_7;
else
bits |= XR_UART_DATA_8;
break;
case CS7:
bits |= XR_UART_DATA_7;
break;
case CS8:
default:
bits |= XR_UART_DATA_8;
break;
}
if (C_PARENB(tty)) {
if (C_CMSPAR(tty)) {
if (C_PARODD(tty))
bits |= XR_UART_PARITY_MARK;
else
bits |= XR_UART_PARITY_SPACE;
} else {
if (C_PARODD(tty))
bits |= XR_UART_PARITY_ODD;
else
bits |= XR_UART_PARITY_EVEN;
}
}
if (C_CSTOPB(tty))
bits |= XR_UART_STOP_2;
else
bits |= XR_UART_STOP_1;
ret = xr_set_reg_uart(port, XR21V141X_REG_FORMAT, bits);
if (ret)
return;
}
static void xr_cdc_set_line_coding(struct tty_struct *tty,
struct usb_serial_port *port,
const struct ktermios *old_termios)
{
struct xr_data *data = usb_get_serial_port_data(port);
struct usb_host_interface *alt = port->serial->interface->cur_altsetting;
struct usb_device *udev = port->serial->dev;
struct usb_cdc_line_coding *lc;
int ret;
lc = kzalloc(sizeof(*lc), GFP_KERNEL);
if (!lc)
return;
if (tty->termios.c_ospeed)
lc->dwDTERate = cpu_to_le32(tty->termios.c_ospeed);
else
lc->dwDTERate = cpu_to_le32(9600);
if (C_CSTOPB(tty))
lc->bCharFormat = USB_CDC_2_STOP_BITS;
else
lc->bCharFormat = USB_CDC_1_STOP_BITS;
if (C_PARENB(tty)) {
if (C_CMSPAR(tty)) {
if (C_PARODD(tty))
lc->bParityType = USB_CDC_MARK_PARITY;
else
lc->bParityType = USB_CDC_SPACE_PARITY;
} else {
if (C_PARODD(tty))
lc->bParityType = USB_CDC_ODD_PARITY;
else
lc->bParityType = USB_CDC_EVEN_PARITY;
}
} else {
lc->bParityType = USB_CDC_NO_PARITY;
}
if (!data->type->have_5_6_bit_mode &&
(C_CSIZE(tty) == CS5 || C_CSIZE(tty) == CS6)) {
tty->termios.c_cflag &= ~CSIZE;
if (old_termios)
tty->termios.c_cflag |= old_termios->c_cflag & CSIZE;
else
tty->termios.c_cflag |= CS8;
}
switch (C_CSIZE(tty)) {
case CS5:
lc->bDataBits = 5;
break;
case CS6:
lc->bDataBits = 6;
break;
case CS7:
lc->bDataBits = 7;
break;
case CS8:
default:
lc->bDataBits = 8;
break;
}
ret = usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
USB_CDC_REQ_SET_LINE_CODING,
USB_TYPE_CLASS | USB_RECIP_INTERFACE,
0, alt->desc.bInterfaceNumber,
lc, sizeof(*lc), USB_CTRL_SET_TIMEOUT);
if (ret < 0)
dev_err(&port->dev, "Failed to set line coding: %d\n", ret);
kfree(lc);
}
static void xr_sanitize_serial_rs485(struct serial_rs485 *rs485)
{
if (!(rs485->flags & SER_RS485_ENABLED)) {
memset(rs485, 0, sizeof(*rs485));
return;
}
/* RTS always toggles after TX */
if (rs485->flags & SER_RS485_RTS_ON_SEND)
rs485->flags &= ~SER_RS485_RTS_AFTER_SEND;
else
rs485->flags |= SER_RS485_RTS_AFTER_SEND;
/* Only the flags are implemented at the moment */
rs485->flags &= SER_RS485_ENABLED | SER_RS485_RTS_ON_SEND |
SER_RS485_RTS_AFTER_SEND;
rs485->delay_rts_before_send = 0;
rs485->delay_rts_after_send = 0;
memset(rs485->padding, 0, sizeof(rs485->padding));
}
static int xr_get_rs485_config(struct tty_struct *tty,
struct serial_rs485 __user *argp)
{
struct usb_serial_port *port = tty->driver_data;
struct xr_data *data = usb_get_serial_port_data(port);
down_read(&tty->termios_rwsem);
if (copy_to_user(argp, &data->rs485, sizeof(data->rs485))) {
up_read(&tty->termios_rwsem);
return -EFAULT;
}
up_read(&tty->termios_rwsem);
return 0;
}
static int xr_set_rs485_config(struct tty_struct *tty,
struct serial_rs485 __user *argp)
{
struct usb_serial_port *port = tty->driver_data;
struct xr_data *data = usb_get_serial_port_data(port);
struct serial_rs485 rs485;
if (copy_from_user(&rs485, argp, sizeof(rs485)))
return -EFAULT;
xr_sanitize_serial_rs485(&rs485);
down_write(&tty->termios_rwsem);
data->rs485 = rs485;
xr_set_flow_mode(tty, port, NULL);
up_write(&tty->termios_rwsem);
if (copy_to_user(argp, &rs485, sizeof(rs485)))
return -EFAULT;
return 0;
}
static int xr_ioctl(struct tty_struct *tty, unsigned int cmd, unsigned long arg)
{
void __user *argp = (void __user *)arg;
switch (cmd) {
case TIOCGRS485:
return xr_get_rs485_config(tty, argp);
case TIOCSRS485:
return xr_set_rs485_config(tty, argp);
}
return -ENOIOCTLCMD;
}
static void xr_set_termios(struct tty_struct *tty,
struct usb_serial_port *port,
const struct ktermios *old_termios)
{
struct xr_data *data = usb_get_serial_port_data(port);
/*
* XR21V141X does not have a CUSTOM_DRIVER flag and always enters CDC
* mode upon receiving CDC requests.
*/
if (data->type->set_line_settings)
data->type->set_line_settings(tty, port, old_termios);
else
xr_cdc_set_line_coding(tty, port, old_termios);
xr_set_flow_mode(tty, port, old_termios);
}
static int xr_open(struct tty_struct *tty, struct usb_serial_port *port)
{
int ret;
ret = xr_fifo_reset(port);
if (ret)
return ret;
ret = xr_uart_enable(port);
if (ret) {
dev_err(&port->dev, "Failed to enable UART\n");
return ret;
}
/* Setup termios */
if (tty)
xr_set_termios(tty, port, NULL);
ret = usb_serial_generic_open(tty, port);
if (ret) {
xr_uart_disable(port);
return ret;
}
return 0;
}
static void xr_close(struct usb_serial_port *port)
{
usb_serial_generic_close(port);
xr_uart_disable(port);
}
static int xr_probe(struct usb_serial *serial, const struct usb_device_id *id)
{
struct usb_interface *control = serial->interface;
struct usb_host_interface *alt = control->cur_altsetting;
struct usb_cdc_parsed_header hdrs;
struct usb_cdc_union_desc *desc;
struct usb_interface *data;
int ret;
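/*
* The driver is bound to the CDC control interface; the union
* functional descriptor identifies the corresponding data interface,
* which is claimed below so that both interfaces belong to this driver.
*/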
ret = cdc_parse_cdc_header(&hdrs, control, alt->extra, alt->extralen);
if (ret < 0)
return -ENODEV;
desc = hdrs.usb_cdc_union_desc;
if (!desc)
return -ENODEV;
data = usb_ifnum_to_if(serial->dev, desc->bSlaveInterface0);
if (!data)
return -ENODEV;
ret = usb_serial_claim_interface(serial, data);
if (ret)
return ret;
usb_set_serial_data(serial, (void *)id->driver_info);
return 0;
}
static int xr_gpio_init(struct usb_serial_port *port, const struct xr_type *type)
{
u16 mask, mode;
int ret;
/*
* Configure all pins as GPIO except for Receive and Transmit Toggle.
*/
mode = 0;
if (type->have_xmit_toggle)
mode |= XR_GPIO_MODE_RX_TOGGLE | XR_GPIO_MODE_TX_TOGGLE;
ret = xr_set_reg_uart(port, type->gpio_mode, mode);
if (ret)
return ret;
/*
* Configure DTR and RTS as outputs and make sure they are deasserted
* (active low), and configure RI, CD, DSR and CTS as inputs.
*/
mask = XR_GPIO_DTR | XR_GPIO_RTS;
ret = xr_set_reg_uart(port, type->gpio_direction, mask);
if (ret)
return ret;
ret = xr_set_reg_uart(port, type->gpio_set, mask);
if (ret)
return ret;
return 0;
}
static int xr_port_probe(struct usb_serial_port *port)
{
struct usb_interface_descriptor *desc;
const struct xr_type *type;
struct xr_data *data;
enum xr_type_id type_id;
int ret;
type_id = (int)(unsigned long)usb_get_serial_data(port->serial);
type = &xr_types[type_id];
data = kzalloc(sizeof(*data), GFP_KERNEL);
if (!data)
return -ENOMEM;
data->type = type;
desc = &port->serial->interface->cur_altsetting->desc;
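/*
* Note (interpretation, not from the original source): XR21V141X
* devices appear to expose a control and a data interface per UART
* channel, hence the interface number is halved; the other types map
* one channel per interface number.
*/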
if (type_id == XR21V141X)
data->channel = desc->bInterfaceNumber / 2;
else
data->channel = desc->bInterfaceNumber;
usb_set_serial_port_data(port, data);
if (type->custom_driver) {
ret = xr_set_reg_uart(port, type->custom_driver,
XR_CUSTOM_DRIVER_ACTIVE);
if (ret)
goto err_free;
}
ret = xr_gpio_init(port, type);
if (ret)
goto err_free;
return 0;
err_free:
kfree(data);
return ret;
}
static void xr_port_remove(struct usb_serial_port *port)
{
struct xr_data *data = usb_get_serial_port_data(port);
kfree(data);
}
#define XR_DEVICE(vid, pid, type) \
USB_DEVICE_INTERFACE_CLASS((vid), (pid), USB_CLASS_COMM), \
.driver_info = (type)
static const struct usb_device_id id_table[] = {
{ XR_DEVICE(0x04e2, 0x1400, XR2280X) },
{ XR_DEVICE(0x04e2, 0x1401, XR2280X) },
{ XR_DEVICE(0x04e2, 0x1402, XR2280X) },
{ XR_DEVICE(0x04e2, 0x1403, XR2280X) },
{ XR_DEVICE(0x04e2, 0x1410, XR21V141X) },
{ XR_DEVICE(0x04e2, 0x1411, XR21B1411) },
{ XR_DEVICE(0x04e2, 0x1412, XR21V141X) },
{ XR_DEVICE(0x04e2, 0x1414, XR21V141X) },
{ XR_DEVICE(0x04e2, 0x1420, XR21B142X) },
{ XR_DEVICE(0x04e2, 0x1422, XR21B142X) },
{ XR_DEVICE(0x04e2, 0x1424, XR21B142X) },
{ }
};
MODULE_DEVICE_TABLE(usb, id_table);
static struct usb_serial_driver xr_device = {
.driver = {
.owner = THIS_MODULE,
.name = "xr_serial",
},
.id_table = id_table,
.num_ports = 1,
.probe = xr_probe,
.port_probe = xr_port_probe,
.port_remove = xr_port_remove,
.open = xr_open,
.close = xr_close,
.break_ctl = xr_break_ctl,
.set_termios = xr_set_termios,
.tiocmget = xr_tiocmget,
.tiocmset = xr_tiocmset,
.ioctl = xr_ioctl,
.dtr_rts = xr_dtr_rts
};
static struct usb_serial_driver * const serial_drivers[] = {
&xr_device, NULL
};
module_usb_serial_driver(serial_drivers, id_table);
MODULE_AUTHOR("Manivannan Sadhasivam <[email protected]>");
MODULE_DESCRIPTION("MaxLinear/Exar USB to Serial driver");
MODULE_LICENSE("GPL");
| linux-master | drivers/usb/serial/xr_serial.c |
// SPDX-License-Identifier: GPL-2.0
/*
USB Driver for GSM modems
Copyright (C) 2005 Matthias Urlichs <[email protected]>
Portions copied from the Keyspan driver by Hugh Blemings <[email protected]>
History: see the git log.
Work sponsored by: Sigos GmbH, Germany <[email protected]>
This driver exists because the "normal" serial driver doesn't work too well
with GSM modems. Issues:
- data loss -- one single Receive URB is not nearly enough
- nonstandard flow control (Option devices)
- controlling the baud rate doesn't make sense
This driver is named "option" because the most common device it's
used for is a PC-Card (with an internal OHCI-USB interface, behind
which the GSM interface sits), made by Option Inc.
Some of the "one port" devices actually exhibit multiple USB instances
on the USB bus. This is not a bug, these ports are used for different
device features.
*/
#define DRIVER_AUTHOR "Matthias Urlichs <[email protected]>"
#define DRIVER_DESC "USB Driver for GSM modems"
#include <linux/kernel.h>
#include <linux/jiffies.h>
#include <linux/errno.h>
#include <linux/tty.h>
#include <linux/tty_flip.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/bitops.h>
#include <linux/usb.h>
#include <linux/usb/serial.h>
#include "usb-wwan.h"
/* Function prototypes */
static int option_probe(struct usb_serial *serial,
const struct usb_device_id *id);
static int option_attach(struct usb_serial *serial);
static void option_release(struct usb_serial *serial);
static void option_instat_callback(struct urb *urb);
/* Vendor and product IDs */
#define OPTION_VENDOR_ID 0x0AF0
#define OPTION_PRODUCT_COLT 0x5000
#define OPTION_PRODUCT_RICOLA 0x6000
#define OPTION_PRODUCT_RICOLA_LIGHT 0x6100
#define OPTION_PRODUCT_RICOLA_QUAD 0x6200
#define OPTION_PRODUCT_RICOLA_QUAD_LIGHT 0x6300
#define OPTION_PRODUCT_RICOLA_NDIS 0x6050
#define OPTION_PRODUCT_RICOLA_NDIS_LIGHT 0x6150
#define OPTION_PRODUCT_RICOLA_NDIS_QUAD 0x6250
#define OPTION_PRODUCT_RICOLA_NDIS_QUAD_LIGHT 0x6350
#define OPTION_PRODUCT_COBRA 0x6500
#define OPTION_PRODUCT_COBRA_BUS 0x6501
#define OPTION_PRODUCT_VIPER 0x6600
#define OPTION_PRODUCT_VIPER_BUS 0x6601
#define OPTION_PRODUCT_GT_MAX_READY 0x6701
#define OPTION_PRODUCT_FUJI_MODEM_LIGHT 0x6721
#define OPTION_PRODUCT_FUJI_MODEM_GT 0x6741
#define OPTION_PRODUCT_FUJI_MODEM_EX 0x6761
#define OPTION_PRODUCT_KOI_MODEM 0x6800
#define OPTION_PRODUCT_SCORPION_MODEM 0x6901
#define OPTION_PRODUCT_ETNA_MODEM 0x7001
#define OPTION_PRODUCT_ETNA_MODEM_LITE 0x7021
#define OPTION_PRODUCT_ETNA_MODEM_GT 0x7041
#define OPTION_PRODUCT_ETNA_MODEM_EX 0x7061
#define OPTION_PRODUCT_ETNA_KOI_MODEM 0x7100
#define OPTION_PRODUCT_GTM380_MODEM 0x7201
#define HUAWEI_VENDOR_ID 0x12D1
#define HUAWEI_PRODUCT_E173 0x140C
#define HUAWEI_PRODUCT_E1750 0x1406
#define HUAWEI_PRODUCT_K4505 0x1464
#define HUAWEI_PRODUCT_K3765 0x1465
#define HUAWEI_PRODUCT_K4605 0x14C6
#define HUAWEI_PRODUCT_E173S6 0x1C07
#define QUANTA_VENDOR_ID 0x0408
#define QUANTA_PRODUCT_Q101 0xEA02
#define QUANTA_PRODUCT_Q111 0xEA03
#define QUANTA_PRODUCT_GLX 0xEA04
#define QUANTA_PRODUCT_GKE 0xEA05
#define QUANTA_PRODUCT_GLE 0xEA06
#define NOVATELWIRELESS_VENDOR_ID 0x1410
/* YISO PRODUCTS */
#define YISO_VENDOR_ID 0x0EAB
#define YISO_PRODUCT_U893 0xC893
/*
* NOVATEL WIRELESS PRODUCTS
*
* Note from Novatel Wireless:
* If your Novatel modem does not work on linux, don't
* change the option module, but check our website. If
* that does not help, contact [email protected]
*/
/* MERLIN EVDO PRODUCTS */
#define NOVATELWIRELESS_PRODUCT_V640 0x1100
#define NOVATELWIRELESS_PRODUCT_V620 0x1110
#define NOVATELWIRELESS_PRODUCT_V740 0x1120
#define NOVATELWIRELESS_PRODUCT_V720 0x1130
/* MERLIN HSDPA/HSPA PRODUCTS */
#define NOVATELWIRELESS_PRODUCT_U730 0x1400
#define NOVATELWIRELESS_PRODUCT_U740 0x1410
#define NOVATELWIRELESS_PRODUCT_U870 0x1420
#define NOVATELWIRELESS_PRODUCT_XU870 0x1430
#define NOVATELWIRELESS_PRODUCT_X950D 0x1450
/* EXPEDITE PRODUCTS */
#define NOVATELWIRELESS_PRODUCT_EV620 0x2100
#define NOVATELWIRELESS_PRODUCT_ES720 0x2110
#define NOVATELWIRELESS_PRODUCT_E725 0x2120
#define NOVATELWIRELESS_PRODUCT_ES620 0x2130
#define NOVATELWIRELESS_PRODUCT_EU730 0x2400
#define NOVATELWIRELESS_PRODUCT_EU740 0x2410
#define NOVATELWIRELESS_PRODUCT_EU870D 0x2420
/* OVATION PRODUCTS */
#define NOVATELWIRELESS_PRODUCT_MC727 0x4100
#define NOVATELWIRELESS_PRODUCT_MC950D 0x4400
/*
* Note from Novatel Wireless:
* All PID in the 5xxx range are currently reserved for
* auto-install CDROMs, and should not be added to this
* module.
*
* #define NOVATELWIRELESS_PRODUCT_U727 0x5010
* #define NOVATELWIRELESS_PRODUCT_MC727_NEW 0x5100
*/
#define NOVATELWIRELESS_PRODUCT_OVMC760 0x6002
#define NOVATELWIRELESS_PRODUCT_MC780 0x6010
#define NOVATELWIRELESS_PRODUCT_EVDO_FULLSPEED 0x6000
#define NOVATELWIRELESS_PRODUCT_EVDO_HIGHSPEED 0x6001
#define NOVATELWIRELESS_PRODUCT_HSPA_FULLSPEED 0x7000
#define NOVATELWIRELESS_PRODUCT_HSPA_HIGHSPEED 0x7001
#define NOVATELWIRELESS_PRODUCT_HSPA_HIGHSPEED3 0x7003
#define NOVATELWIRELESS_PRODUCT_HSPA_HIGHSPEED4 0x7004
#define NOVATELWIRELESS_PRODUCT_HSPA_HIGHSPEED5 0x7005
#define NOVATELWIRELESS_PRODUCT_HSPA_HIGHSPEED6 0x7006
#define NOVATELWIRELESS_PRODUCT_HSPA_HIGHSPEED7 0x7007
#define NOVATELWIRELESS_PRODUCT_MC996D 0x7030
#define NOVATELWIRELESS_PRODUCT_MF3470 0x7041
#define NOVATELWIRELESS_PRODUCT_MC547 0x7042
#define NOVATELWIRELESS_PRODUCT_EVDO_EMBEDDED_FULLSPEED 0x8000
#define NOVATELWIRELESS_PRODUCT_EVDO_EMBEDDED_HIGHSPEED 0x8001
#define NOVATELWIRELESS_PRODUCT_HSPA_EMBEDDED_FULLSPEED 0x9000
#define NOVATELWIRELESS_PRODUCT_HSPA_EMBEDDED_HIGHSPEED 0x9001
#define NOVATELWIRELESS_PRODUCT_E362 0x9010
#define NOVATELWIRELESS_PRODUCT_E371 0x9011
#define NOVATELWIRELESS_PRODUCT_U620L 0x9022
#define NOVATELWIRELESS_PRODUCT_G2 0xA010
#define NOVATELWIRELESS_PRODUCT_MC551 0xB001
#define UBLOX_VENDOR_ID 0x1546
/* AMOI PRODUCTS */
#define AMOI_VENDOR_ID 0x1614
#define AMOI_PRODUCT_H01 0x0800
#define AMOI_PRODUCT_H01A 0x7002
#define AMOI_PRODUCT_H02 0x0802
#define AMOI_PRODUCT_SKYPEPHONE_S2 0x0407
#define DELL_VENDOR_ID 0x413C
/* Dell modems */
#define DELL_PRODUCT_5700_MINICARD 0x8114
#define DELL_PRODUCT_5500_MINICARD 0x8115
#define DELL_PRODUCT_5505_MINICARD 0x8116
#define DELL_PRODUCT_5700_EXPRESSCARD 0x8117
#define DELL_PRODUCT_5510_EXPRESSCARD 0x8118
#define DELL_PRODUCT_5700_MINICARD_SPRINT 0x8128
#define DELL_PRODUCT_5700_MINICARD_TELUS 0x8129
#define DELL_PRODUCT_5720_MINICARD_VZW 0x8133
#define DELL_PRODUCT_5720_MINICARD_SPRINT 0x8134
#define DELL_PRODUCT_5720_MINICARD_TELUS 0x8135
#define DELL_PRODUCT_5520_MINICARD_CINGULAR 0x8136
#define DELL_PRODUCT_5520_MINICARD_GENERIC_L 0x8137
#define DELL_PRODUCT_5520_MINICARD_GENERIC_I 0x8138
#define DELL_PRODUCT_5730_MINICARD_SPRINT 0x8180
#define DELL_PRODUCT_5730_MINICARD_TELUS 0x8181
#define DELL_PRODUCT_5730_MINICARD_VZW 0x8182
#define DELL_PRODUCT_5800_MINICARD_VZW 0x8195 /* Novatel E362 */
#define DELL_PRODUCT_5800_V2_MINICARD_VZW 0x8196 /* Novatel E362 */
#define DELL_PRODUCT_5804_MINICARD_ATT 0x819b /* Novatel E371 */
#define DELL_PRODUCT_5821E 0x81d7
#define DELL_PRODUCT_5821E_ESIM 0x81e0
#define DELL_PRODUCT_5829E_ESIM 0x81e4
#define DELL_PRODUCT_5829E 0x81e6
#define KYOCERA_VENDOR_ID 0x0c88
#define KYOCERA_PRODUCT_KPC650 0x17da
#define KYOCERA_PRODUCT_KPC680 0x180a
#define ANYDATA_VENDOR_ID 0x16d5
#define ANYDATA_PRODUCT_ADU_620UW 0x6202
#define ANYDATA_PRODUCT_ADU_E100A 0x6501
#define ANYDATA_PRODUCT_ADU_500A 0x6502
#define AXESSTEL_VENDOR_ID 0x1726
#define AXESSTEL_PRODUCT_MV110H 0x1000
#define BANDRICH_VENDOR_ID 0x1A8D
#define BANDRICH_PRODUCT_C100_1 0x1002
#define BANDRICH_PRODUCT_C100_2 0x1003
#define BANDRICH_PRODUCT_1004 0x1004
#define BANDRICH_PRODUCT_1005 0x1005
#define BANDRICH_PRODUCT_1006 0x1006
#define BANDRICH_PRODUCT_1007 0x1007
#define BANDRICH_PRODUCT_1008 0x1008
#define BANDRICH_PRODUCT_1009 0x1009
#define BANDRICH_PRODUCT_100A 0x100a
#define BANDRICH_PRODUCT_100B 0x100b
#define BANDRICH_PRODUCT_100C 0x100c
#define BANDRICH_PRODUCT_100D 0x100d
#define BANDRICH_PRODUCT_100E 0x100e
#define BANDRICH_PRODUCT_100F 0x100f
#define BANDRICH_PRODUCT_1010 0x1010
#define BANDRICH_PRODUCT_1011 0x1011
#define BANDRICH_PRODUCT_1012 0x1012
#define QUALCOMM_VENDOR_ID 0x05C6
/* These Quectel products use Qualcomm's vendor ID */
#define QUECTEL_PRODUCT_UC20 0x9003
#define QUECTEL_PRODUCT_UC15 0x9090
/* These u-blox products use Qualcomm's vendor ID */
#define UBLOX_PRODUCT_R410M 0x90b2
/* These Yuga products use Qualcomm's vendor ID */
#define YUGA_PRODUCT_CLM920_NC5 0x9625
#define QUECTEL_VENDOR_ID 0x2c7c
/* These Quectel products use Quectel's vendor ID */
#define QUECTEL_PRODUCT_EC21 0x0121
#define QUECTEL_PRODUCT_EM061K_LTA 0x0123
#define QUECTEL_PRODUCT_EM061K_LMS 0x0124
#define QUECTEL_PRODUCT_EC25 0x0125
#define QUECTEL_PRODUCT_EM060K_128 0x0128
#define QUECTEL_PRODUCT_EG91 0x0191
#define QUECTEL_PRODUCT_EG95 0x0195
#define QUECTEL_PRODUCT_BG96 0x0296
#define QUECTEL_PRODUCT_EP06 0x0306
#define QUECTEL_PRODUCT_EM05G 0x030a
#define QUECTEL_PRODUCT_EM060K 0x030b
#define QUECTEL_PRODUCT_EM05G_CS 0x030c
#define QUECTEL_PRODUCT_EM05GV2 0x030e
#define QUECTEL_PRODUCT_EM05CN_SG 0x0310
#define QUECTEL_PRODUCT_EM05G_SG 0x0311
#define QUECTEL_PRODUCT_EM05CN 0x0312
#define QUECTEL_PRODUCT_EM05G_GR 0x0313
#define QUECTEL_PRODUCT_EM05G_RS 0x0314
#define QUECTEL_PRODUCT_EM12 0x0512
#define QUECTEL_PRODUCT_RM500Q 0x0800
#define QUECTEL_PRODUCT_RM520N 0x0801
#define QUECTEL_PRODUCT_EC200U 0x0901
#define QUECTEL_PRODUCT_EC200S_CN 0x6002
#define QUECTEL_PRODUCT_EC200A 0x6005
#define QUECTEL_PRODUCT_EM061K_LWW 0x6008
#define QUECTEL_PRODUCT_EM061K_LCN 0x6009
#define QUECTEL_PRODUCT_EC200T 0x6026
#define QUECTEL_PRODUCT_RM500K 0x7001
#define CMOTECH_VENDOR_ID 0x16d8
#define CMOTECH_PRODUCT_6001 0x6001
#define CMOTECH_PRODUCT_CMU_300 0x6002
#define CMOTECH_PRODUCT_6003 0x6003
#define CMOTECH_PRODUCT_6004 0x6004
#define CMOTECH_PRODUCT_6005 0x6005
#define CMOTECH_PRODUCT_CGU_628A 0x6006
#define CMOTECH_PRODUCT_CHE_628S 0x6007
#define CMOTECH_PRODUCT_CMU_301 0x6008
#define CMOTECH_PRODUCT_CHU_628 0x6280
#define CMOTECH_PRODUCT_CHU_628S 0x6281
#define CMOTECH_PRODUCT_CDU_680 0x6803
#define CMOTECH_PRODUCT_CDU_685A 0x6804
#define CMOTECH_PRODUCT_CHU_720S 0x7001
#define CMOTECH_PRODUCT_7002 0x7002
#define CMOTECH_PRODUCT_CHU_629K 0x7003
#define CMOTECH_PRODUCT_7004 0x7004
#define CMOTECH_PRODUCT_7005 0x7005
#define CMOTECH_PRODUCT_CGU_629 0x7006
#define CMOTECH_PRODUCT_CHU_629S 0x700a
#define CMOTECH_PRODUCT_CHU_720I 0x7211
#define CMOTECH_PRODUCT_7212 0x7212
#define CMOTECH_PRODUCT_7213 0x7213
#define CMOTECH_PRODUCT_7251 0x7251
#define CMOTECH_PRODUCT_7252 0x7252
#define CMOTECH_PRODUCT_7253 0x7253
#define TELIT_VENDOR_ID 0x1bc7
#define TELIT_PRODUCT_UC864E 0x1003
#define TELIT_PRODUCT_UC864G 0x1004
#define TELIT_PRODUCT_CC864_DUAL 0x1005
#define TELIT_PRODUCT_CC864_SINGLE 0x1006
#define TELIT_PRODUCT_DE910_DUAL 0x1010
#define TELIT_PRODUCT_UE910_V2 0x1012
#define TELIT_PRODUCT_LE922_USBCFG1 0x1040
#define TELIT_PRODUCT_LE922_USBCFG2 0x1041
#define TELIT_PRODUCT_LE922_USBCFG0 0x1042
#define TELIT_PRODUCT_LE922_USBCFG3 0x1043
#define TELIT_PRODUCT_LE922_USBCFG5 0x1045
#define TELIT_PRODUCT_ME910 0x1100
#define TELIT_PRODUCT_ME910_DUAL_MODEM 0x1101
#define TELIT_PRODUCT_LE920 0x1200
#define TELIT_PRODUCT_LE910 0x1201
#define TELIT_PRODUCT_LE910_USBCFG4 0x1206
#define TELIT_PRODUCT_LE920A4_1207 0x1207
#define TELIT_PRODUCT_LE920A4_1208 0x1208
#define TELIT_PRODUCT_LE920A4_1211 0x1211
#define TELIT_PRODUCT_LE920A4_1212 0x1212
#define TELIT_PRODUCT_LE920A4_1213 0x1213
#define TELIT_PRODUCT_LE920A4_1214 0x1214
/* ZTE PRODUCTS */
#define ZTE_VENDOR_ID 0x19d2
#define ZTE_PRODUCT_MF622 0x0001
#define ZTE_PRODUCT_MF628 0x0015
#define ZTE_PRODUCT_MF626 0x0031
#define ZTE_PRODUCT_ZM8620_X 0x0396
#define ZTE_PRODUCT_ME3620_MBIM 0x0426
#define ZTE_PRODUCT_ME3620_X 0x1432
#define ZTE_PRODUCT_ME3620_L 0x1433
#define ZTE_PRODUCT_AC2726 0xfff1
#define ZTE_PRODUCT_MG880 0xfffd
#define ZTE_PRODUCT_CDMA_TECH 0xfffe
#define ZTE_PRODUCT_AC8710T 0xffff
#define ZTE_PRODUCT_MC2718 0xffe8
#define ZTE_PRODUCT_AD3812 0xffeb
#define ZTE_PRODUCT_MC2716 0xffed
#define BENQ_VENDOR_ID 0x04a5
#define BENQ_PRODUCT_H10 0x4068
#define DLINK_VENDOR_ID 0x1186
#define DLINK_PRODUCT_DWM_652 0x3e04
#define DLINK_PRODUCT_DWM_652_U5 0xce16
#define DLINK_PRODUCT_DWM_652_U5A 0xce1e
#define QISDA_VENDOR_ID 0x1da5
#define QISDA_PRODUCT_H21_4512 0x4512
#define QISDA_PRODUCT_H21_4523 0x4523
#define QISDA_PRODUCT_H20_4515 0x4515
#define QISDA_PRODUCT_H20_4518 0x4518
#define QISDA_PRODUCT_H20_4519 0x4519
/* TLAYTECH PRODUCTS */
#define TLAYTECH_VENDOR_ID 0x20B9
#define TLAYTECH_PRODUCT_TEU800 0x1682
/* TOSHIBA PRODUCTS */
#define TOSHIBA_VENDOR_ID 0x0930
#define TOSHIBA_PRODUCT_HSDPA_MINICARD 0x1302
#define TOSHIBA_PRODUCT_G450 0x0d45
#define ALINK_VENDOR_ID 0x1e0e
#define SIMCOM_PRODUCT_SIM7100E 0x9001 /* Yes, ALINK_VENDOR_ID */
#define ALINK_PRODUCT_PH300 0x9100
#define ALINK_PRODUCT_3GU 0x9200
/* ALCATEL PRODUCTS */
#define ALCATEL_VENDOR_ID 0x1bbb
#define ALCATEL_PRODUCT_X060S_X200 0x0000
#define ALCATEL_PRODUCT_X220_X500D 0x0017
#define ALCATEL_PRODUCT_L100V 0x011e
#define ALCATEL_PRODUCT_L800MA 0x0203
#define PIRELLI_VENDOR_ID 0x1266
#define PIRELLI_PRODUCT_C100_1 0x1002
#define PIRELLI_PRODUCT_C100_2 0x1003
#define PIRELLI_PRODUCT_1004 0x1004
#define PIRELLI_PRODUCT_1005 0x1005
#define PIRELLI_PRODUCT_1006 0x1006
#define PIRELLI_PRODUCT_1007 0x1007
#define PIRELLI_PRODUCT_1008 0x1008
#define PIRELLI_PRODUCT_1009 0x1009
#define PIRELLI_PRODUCT_100A 0x100a
#define PIRELLI_PRODUCT_100B 0x100b
#define PIRELLI_PRODUCT_100C 0x100c
#define PIRELLI_PRODUCT_100D 0x100d
#define PIRELLI_PRODUCT_100E 0x100e
#define PIRELLI_PRODUCT_100F 0x100f
#define PIRELLI_PRODUCT_1011 0x1011
#define PIRELLI_PRODUCT_1012 0x1012
/* Airplus products */
#define AIRPLUS_VENDOR_ID 0x1011
#define AIRPLUS_PRODUCT_MCD650 0x3198
/* Longcheer/Longsung vendor ID; makes whitelabel devices that
* many other vendors like 4G Systems, Alcatel, ChinaBird,
* Mobidata, etc sell under their own brand names.
*/
#define LONGCHEER_VENDOR_ID 0x1c9e
/* 4G Systems products */
/* This one was sold as the VW and Skoda "Carstick LTE" */
#define FOUR_G_SYSTEMS_PRODUCT_CARSTICK_LTE 0x7605
/* This is the 4G XS Stick W14 a.k.a. Mobilcom Debitel Surf-Stick *
* It seems to contain a Qualcomm QSC6240/6290 chipset */
#define FOUR_G_SYSTEMS_PRODUCT_W14 0x9603
#define FOUR_G_SYSTEMS_PRODUCT_W100 0x9b01
/* Fujisoft products */
#define FUJISOFT_PRODUCT_FS040U 0x9b02
/* iBall 3.5G connect wireless modem */
#define IBALL_3_5G_CONNECT 0x9605
/* Zoom */
#define ZOOM_PRODUCT_4597 0x9607
/* SpeedUp SU9800 usb 3g modem */
#define SPEEDUP_PRODUCT_SU9800 0x9800
/* Haier products */
#define HAIER_VENDOR_ID 0x201e
#define HAIER_PRODUCT_CE81B 0x10f8
#define HAIER_PRODUCT_CE100 0x2009
/* Gemalto's Cinterion products (formerly Siemens) */
#define SIEMENS_VENDOR_ID 0x0681
#define CINTERION_VENDOR_ID 0x1e2d
#define CINTERION_PRODUCT_HC25_MDMNET 0x0040
#define CINTERION_PRODUCT_HC25_MDM 0x0047
#define CINTERION_PRODUCT_HC28_MDMNET 0x004A /* same for HC28J */
#define CINTERION_PRODUCT_HC28_MDM 0x004C
#define CINTERION_PRODUCT_EU3_E 0x0051
#define CINTERION_PRODUCT_EU3_P 0x0052
#define CINTERION_PRODUCT_PH8 0x0053
#define CINTERION_PRODUCT_AHXX 0x0055
#define CINTERION_PRODUCT_PLXX 0x0060
#define CINTERION_PRODUCT_EXS82 0x006c
#define CINTERION_PRODUCT_PH8_2RMNET 0x0082
#define CINTERION_PRODUCT_PH8_AUDIO 0x0083
#define CINTERION_PRODUCT_AHXX_2RMNET 0x0084
#define CINTERION_PRODUCT_AHXX_AUDIO 0x0085
#define CINTERION_PRODUCT_CLS8 0x00b0
#define CINTERION_PRODUCT_MV31_MBIM 0x00b3
#define CINTERION_PRODUCT_MV31_RMNET 0x00b7
#define CINTERION_PRODUCT_MV31_2_MBIM 0x00b8
#define CINTERION_PRODUCT_MV31_2_RMNET 0x00b9
#define CINTERION_PRODUCT_MV32_WA 0x00f1
#define CINTERION_PRODUCT_MV32_WB 0x00f2
#define CINTERION_PRODUCT_MV32_WA_RMNET 0x00f3
#define CINTERION_PRODUCT_MV32_WB_RMNET 0x00f4
/* Olivetti products */
#define OLIVETTI_VENDOR_ID 0x0b3c
#define OLIVETTI_PRODUCT_OLICARD100 0xc000
#define OLIVETTI_PRODUCT_OLICARD120 0xc001
#define OLIVETTI_PRODUCT_OLICARD140 0xc002
#define OLIVETTI_PRODUCT_OLICARD145 0xc003
#define OLIVETTI_PRODUCT_OLICARD155 0xc004
#define OLIVETTI_PRODUCT_OLICARD200 0xc005
#define OLIVETTI_PRODUCT_OLICARD160 0xc00a
#define OLIVETTI_PRODUCT_OLICARD500 0xc00b
/* Celot products */
#define CELOT_VENDOR_ID 0x211f
#define CELOT_PRODUCT_CT680M 0x6801
/* Samsung products */
#define SAMSUNG_VENDOR_ID 0x04e8
#define SAMSUNG_PRODUCT_GT_B3730 0x6889
/* YUGA products www.yuga-info.com [email protected] */
#define YUGA_VENDOR_ID 0x257A
#define YUGA_PRODUCT_CEM600 0x1601
#define YUGA_PRODUCT_CEM610 0x1602
#define YUGA_PRODUCT_CEM500 0x1603
#define YUGA_PRODUCT_CEM510 0x1604
#define YUGA_PRODUCT_CEM800 0x1605
#define YUGA_PRODUCT_CEM900 0x1606
#define YUGA_PRODUCT_CEU818 0x1607
#define YUGA_PRODUCT_CEU816 0x1608
#define YUGA_PRODUCT_CEU828 0x1609
#define YUGA_PRODUCT_CEU826 0x160A
#define YUGA_PRODUCT_CEU518 0x160B
#define YUGA_PRODUCT_CEU516 0x160C
#define YUGA_PRODUCT_CEU528 0x160D
#define YUGA_PRODUCT_CEU526 0x160F
#define YUGA_PRODUCT_CEU881 0x161F
#define YUGA_PRODUCT_CEU882 0x162F
#define YUGA_PRODUCT_CWM600 0x2601
#define YUGA_PRODUCT_CWM610 0x2602
#define YUGA_PRODUCT_CWM500 0x2603
#define YUGA_PRODUCT_CWM510 0x2604
#define YUGA_PRODUCT_CWM800 0x2605
#define YUGA_PRODUCT_CWM900 0x2606
#define YUGA_PRODUCT_CWU718 0x2607
#define YUGA_PRODUCT_CWU716 0x2608
#define YUGA_PRODUCT_CWU728 0x2609
#define YUGA_PRODUCT_CWU726 0x260A
#define YUGA_PRODUCT_CWU518 0x260B
#define YUGA_PRODUCT_CWU516 0x260C
#define YUGA_PRODUCT_CWU528 0x260D
#define YUGA_PRODUCT_CWU581 0x260E
#define YUGA_PRODUCT_CWU526 0x260F
#define YUGA_PRODUCT_CWU582 0x261F
#define YUGA_PRODUCT_CWU583 0x262F
#define YUGA_PRODUCT_CLM600 0x3601
#define YUGA_PRODUCT_CLM610 0x3602
#define YUGA_PRODUCT_CLM500 0x3603
#define YUGA_PRODUCT_CLM510 0x3604
#define YUGA_PRODUCT_CLM800 0x3605
#define YUGA_PRODUCT_CLM900 0x3606
#define YUGA_PRODUCT_CLU718 0x3607
#define YUGA_PRODUCT_CLU716 0x3608
#define YUGA_PRODUCT_CLU728 0x3609
#define YUGA_PRODUCT_CLU726 0x360A
#define YUGA_PRODUCT_CLU518 0x360B
#define YUGA_PRODUCT_CLU516 0x360C
#define YUGA_PRODUCT_CLU528 0x360D
#define YUGA_PRODUCT_CLU526 0x360F
/* Viettel products */
#define VIETTEL_VENDOR_ID 0x2262
#define VIETTEL_PRODUCT_VT1000 0x0002
/* ZD Incorporated */
#define ZD_VENDOR_ID 0x0685
#define ZD_PRODUCT_7000 0x7000
/* LG products */
#define LG_VENDOR_ID 0x1004
#define LG_PRODUCT_L02C 0x618f
/* MediaTek products */
#define MEDIATEK_VENDOR_ID 0x0e8d
#define MEDIATEK_PRODUCT_DC_1COM 0x00a0
#define MEDIATEK_PRODUCT_DC_4COM 0x00a5
#define MEDIATEK_PRODUCT_DC_4COM2 0x00a7
#define MEDIATEK_PRODUCT_DC_5COM 0x00a4
#define MEDIATEK_PRODUCT_7208_1COM 0x7101
#define MEDIATEK_PRODUCT_7208_2COM 0x7102
#define MEDIATEK_PRODUCT_7103_2COM 0x7103
#define MEDIATEK_PRODUCT_7106_2COM 0x7106
#define MEDIATEK_PRODUCT_FP_1COM 0x0003
#define MEDIATEK_PRODUCT_FP_2COM 0x0023
#define MEDIATEK_PRODUCT_FPDC_1COM 0x0043
#define MEDIATEK_PRODUCT_FPDC_2COM 0x0033
/* Cellient products */
#define CELLIENT_VENDOR_ID 0x2692
#define CELLIENT_PRODUCT_MEN200 0x9005
#define CELLIENT_PRODUCT_MPL200 0x9025
/* Hyundai Petatel Inc. products */
#define PETATEL_VENDOR_ID 0x1ff4
#define PETATEL_PRODUCT_NP10T_600A 0x600a
#define PETATEL_PRODUCT_NP10T_600E 0x600e
/* TP-LINK Incorporated products */
#define TPLINK_VENDOR_ID 0x2357
#define TPLINK_PRODUCT_LTE 0x000D
#define TPLINK_PRODUCT_MA180 0x0201
/* Changhong products */
#define CHANGHONG_VENDOR_ID 0x2077
#define CHANGHONG_PRODUCT_CH690 0x7001
/* Inovia */
#define INOVIA_VENDOR_ID 0x20a6
#define INOVIA_SEW858 0x1105
/* VIA Telecom */
#define VIATELECOM_VENDOR_ID 0x15eb
#define VIATELECOM_PRODUCT_CDS7 0x0001
/* WeTelecom products */
#define WETELECOM_VENDOR_ID 0x22de
#define WETELECOM_PRODUCT_WMD200 0x6801
#define WETELECOM_PRODUCT_6802 0x6802
#define WETELECOM_PRODUCT_WMD300 0x6803
/* OPPO products */
#define OPPO_VENDOR_ID 0x22d9
#define OPPO_PRODUCT_R11 0x276c
/* Sierra Wireless products */
#define SIERRA_VENDOR_ID 0x1199
#define SIERRA_PRODUCT_EM9191 0x90d3
/* UNISOC (Spreadtrum) products */
#define UNISOC_VENDOR_ID 0x1782
/* TOZED LT70-C based on UNISOC SL8563 uses UNISOC's vendor ID */
#define TOZED_PRODUCT_LT70C 0x4055
/* Device flags */
/* Highest interface number which can be used with NCTRL() and RSVD() */
#define FLAG_IFNUM_MAX 7
/* Interface does not support modem-control requests */
#define NCTRL(ifnum) ((BIT(ifnum) & 0xff) << 8)
/* Interface is reserved */
#define RSVD(ifnum) ((BIT(ifnum) & 0xff) << 0)
/* Interface must have two endpoints */
#define NUMEP2 BIT(16)
/* Device needs ZLP */
#define ZLP BIT(17)
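/*
 * Illustrative sketch only (not part of the original driver): the per-device
 * driver_info word in the table below packs the RSVD() and NCTRL() masks
 * defined above, so probe/setup code can reject reserved interfaces and mark
 * interfaces that lack modem-control support roughly as shown here. The
 * helper names are hypothetical; the real checks live in option_probe() and
 * option_attach().
 */
static inline bool option_iface_is_reserved(unsigned long device_flags,
					    u8 ifnum)
{
	/* Only interfaces 0..FLAG_IFNUM_MAX can be encoded in the mask. */
	if (ifnum > FLAG_IFNUM_MAX)
		return false;

	return device_flags & RSVD(ifnum);
}

static inline bool option_iface_no_modem_ctrl(unsigned long device_flags,
					      u8 ifnum)
{
	if (ifnum > FLAG_IFNUM_MAX)
		return false;

	return device_flags & NCTRL(ifnum);
}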
static const struct usb_device_id option_ids[] = {
{ USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_COLT) },
{ USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_RICOLA) },
{ USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_RICOLA_LIGHT) },
{ USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_RICOLA_QUAD) },
{ USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_RICOLA_QUAD_LIGHT) },
{ USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_RICOLA_NDIS) },
{ USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_RICOLA_NDIS_LIGHT) },
{ USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_RICOLA_NDIS_QUAD) },
{ USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_RICOLA_NDIS_QUAD_LIGHT) },
{ USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_COBRA) },
{ USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_COBRA_BUS) },
{ USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_VIPER) },
{ USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_VIPER_BUS) },
{ USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_GT_MAX_READY) },
{ USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_FUJI_MODEM_LIGHT) },
{ USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_FUJI_MODEM_GT) },
{ USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_FUJI_MODEM_EX) },
{ USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_KOI_MODEM) },
{ USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_SCORPION_MODEM) },
{ USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_ETNA_MODEM) },
{ USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_ETNA_MODEM_LITE) },
{ USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_ETNA_MODEM_GT) },
{ USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_ETNA_MODEM_EX) },
{ USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_ETNA_KOI_MODEM) },
{ USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_GTM380_MODEM) },
{ USB_DEVICE(QUANTA_VENDOR_ID, QUANTA_PRODUCT_Q101) },
{ USB_DEVICE(QUANTA_VENDOR_ID, QUANTA_PRODUCT_Q111) },
{ USB_DEVICE(QUANTA_VENDOR_ID, QUANTA_PRODUCT_GLX) },
{ USB_DEVICE(QUANTA_VENDOR_ID, QUANTA_PRODUCT_GKE) },
{ USB_DEVICE(QUANTA_VENDOR_ID, QUANTA_PRODUCT_GLE) },
{ USB_DEVICE(QUANTA_VENDOR_ID, 0xea42),
.driver_info = RSVD(4) },
{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0x1c05, USB_CLASS_COMM, 0x02, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0x1c1f, USB_CLASS_COMM, 0x02, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0x1c23, USB_CLASS_COMM, 0x02, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E173, 0xff, 0xff, 0xff),
.driver_info = RSVD(1) },
{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E173S6, 0xff, 0xff, 0xff),
.driver_info = RSVD(1) },
{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1750, 0xff, 0xff, 0xff),
.driver_info = RSVD(2) },
{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0x1441, USB_CLASS_COMM, 0x02, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0x1442, USB_CLASS_COMM, 0x02, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K4505, 0xff, 0xff, 0xff),
.driver_info = RSVD(1) | RSVD(2) },
{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K3765, 0xff, 0xff, 0xff),
.driver_info = RSVD(1) | RSVD(2) },
{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0x14ac, 0xff, 0xff, 0xff), /* Huawei E1820 */
.driver_info = RSVD(1) },
{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K4605, 0xff, 0xff, 0xff),
.driver_info = RSVD(1) | RSVD(2) },
{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0xff, 0xff) },
{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x01) },
{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x02) },
{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x03) },
{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x04) },
{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x05) },
{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x06) },
{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x0A) },
{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x0B) },
{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x0D) },
{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x0E) },
{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x0F) },
{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x10) },
{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x12) },
{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x13) },
{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x14) },
{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x15) },
{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x17) },
{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x18) },
{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x19) },
{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x1A) },
{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x1B) },
{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x1C) },
{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x31) },
{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x32) },
{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x33) },
{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x34) },
{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x35) },
{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x36) },
{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x3A) },
{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x3B) },
{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x3D) },
{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x3E) },
{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x3F) },
{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x48) },
{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x49) },
{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x4A) },
{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x4B) },
{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x4C) },
{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x61) },
{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x62) },
{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x63) },
{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x64) },
{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x65) },
{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x66) },
{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x6A) },
{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x6B) },
{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x6D) },
{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x6E) },
{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x6F) },
{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x72) },
{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x73) },
{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x74) },
{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x75) },
{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x78) },
{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x79) },
{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x7A) },
{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x7B) },
{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x7C) },
{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x01) },
{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x02) },
{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x03) },
{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x04) },
{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x05) },
{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x06) },
{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x0A) },
{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x0B) },
{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x0D) },
{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x0E) },
{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x0F) },
{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x10) },
{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x12) },
{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x13) },
{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x14) },
{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x15) },
{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x17) },
{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x18) },
{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x19) },
{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x1A) },
{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x1B) },
{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x1C) },
{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x31) },
{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x32) },
{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x33) },
{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x34) },
{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x35) },
{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x36) },
{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x3A) },
{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x3B) },
{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x3D) },
{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x3E) },
{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x3F) },
{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x48) },
{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x49) },
{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x4A) },
{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x4B) },
{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x4C) },
{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x61) },
{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x62) },
{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x63) },
{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x64) },
{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x65) },
{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x66) },
{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x6A) },
{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x6B) },
{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x6D) },
{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x6E) },
{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x6F) },
{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x72) },
{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x73) },
{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x74) },
{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x75) },
{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x78) },
{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x79) },
{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x7A) },
{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x7B) },
{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x7C) },
{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x01) },
{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x02) },
{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x03) },
{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x04) },
{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x05) },
{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x06) },
{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x0A) },
{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x0B) },
{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x0D) },
{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x0E) },
{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x0F) },
{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x10) },
{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x12) },
{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x13) },
{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x14) },
{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x15) },
{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x17) },
{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x18) },
{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x19) },
{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x1A) },
{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x1B) },
{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x1C) },
{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x31) },
{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x32) },
{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x33) },
{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x34) },
{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x35) },
{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x36) },
{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x3A) },
{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x3B) },
{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x3D) },
{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x3E) },
{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x3F) },
{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x48) },
{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x49) },
{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x4A) },
{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x4B) },
{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x4C) },
{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x61) },
{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x62) },
{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x63) },
{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x64) },
{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x65) },
{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x66) },
{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x6A) },
{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x6B) },
{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x6D) },
{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x6E) },
{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x6F) },
{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x72) },
{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x73) },
{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x74) },
{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x75) },
{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x78) },
{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x79) },
{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x7A) },
{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x7B) },
{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x7C) },
{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x01) },
{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x02) },
{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x03) },
{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x04) },
{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x05) },
{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x06) },
{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x0A) },
{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x0B) },
{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x0D) },
{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x0E) },
{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x0F) },
{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x10) },
{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x12) },
{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x13) },
{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x14) },
{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x15) },
{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x17) },
{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x18) },
{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x19) },
{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x1A) },
{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x1B) },
{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x1C) },
{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x31) },
{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x32) },
{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x33) },
{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x34) },
{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x35) },
{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x36) },
{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x3A) },
{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x3B) },
{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x3D) },
{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x3E) },
{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x3F) },
{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x48) },
{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x49) },
{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x4A) },
{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x4B) },
{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x4C) },
{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x61) },
{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x62) },
{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x63) },
{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x64) },
{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x65) },
{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x66) },
{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x6A) },
{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x6B) },
{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x6D) },
{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x6E) },
{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x6F) },
{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x72) },
{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x73) },
{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x74) },
{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x75) },
{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x78) },
{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x79) },
{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x7A) },
{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x7B) },
{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x7C) },
{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x01) },
{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x02) },
{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x03) },
{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x04) },
{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x05) },
{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x06) },
{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x0A) },
{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x0B) },
{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x0D) },
{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x0E) },
{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x0F) },
{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x10) },
{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x12) },
{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x13) },
{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x14) },
{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x15) },
{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x17) },
{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x18) },
{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x19) },
{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x1A) },
{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x1B) },
{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x1C) },
{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x31) },
{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x32) },
{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x33) },
{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x34) },
{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x35) },
{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x36) },
{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x3A) },
{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x3B) },
{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x3D) },
{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x3E) },
{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x3F) },
{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x48) },
{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x49) },
{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x4A) },
{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x4B) },
{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x4C) },
{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x61) },
{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x62) },
{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x63) },
{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x64) },
{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x65) },
{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x66) },
{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x6A) },
{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x6B) },
{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x6D) },
{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x6E) },
{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x6F) },
{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x72) },
{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x73) },
{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x74) },
{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x75) },
{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x78) },
{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x79) },
{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x7A) },
{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x7B) },
{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x7C) },
{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x01) },
{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x02) },
{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x03) },
{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x04) },
{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x05) },
{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x06) },
{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x0A) },
{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x0B) },
{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x0D) },
{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x0E) },
{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x0F) },
{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x10) },
{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x12) },
{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x13) },
{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x14) },
{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x15) },
{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x17) },
{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x18) },
{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x19) },
{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x1A) },
{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x1B) },
{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x1C) },
{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x31) },
{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x32) },
{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x33) },
{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x34) },
{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x35) },
{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x36) },
{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x3A) },
{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x3B) },
{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x3D) },
{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x3E) },
{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x3F) },
{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x48) },
{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x49) },
{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x4A) },
{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x4B) },
{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x4C) },
{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x61) },
{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x62) },
{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x63) },
{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x64) },
{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x65) },
{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x66) },
{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x6A) },
{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x6B) },
{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x6D) },
{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x6E) },
{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x6F) },
{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x72) },
{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x73) },
{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x74) },
{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x75) },
{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x78) },
{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x79) },
{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x7A) },
{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x7B) },
{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x7C) },
/* Motorola devices */
{ USB_DEVICE_AND_INTERFACE_INFO(0x22b8, 0x2a70, 0xff, 0xff, 0xff) }, /* mdm6600 */
{ USB_DEVICE_AND_INTERFACE_INFO(0x22b8, 0x2e0a, 0xff, 0xff, 0xff) }, /* mdm9600 */
{ USB_DEVICE_AND_INTERFACE_INFO(0x22b8, 0x4281, 0x0a, 0x00, 0xfc) }, /* mdm ram dl */
{ USB_DEVICE_AND_INTERFACE_INFO(0x22b8, 0x900e, 0xff, 0xff, 0xff) }, /* mdm qc dl */
{ USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_V640) },
{ USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_V620) },
{ USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_V740) },
{ USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_V720) },
{ USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_U730) },
{ USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_U740) },
{ USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_U870) },
{ USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_XU870) },
{ USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_X950D) },
{ USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_EV620) },
{ USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_ES720) },
{ USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_E725) },
{ USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_ES620) },
{ USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_EU730) },
{ USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_EU740) },
{ USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_EU870D) },
{ USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_MC950D) },
{ USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_MC727) },
{ USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_OVMC760) },
{ USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_MC780) },
{ USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_EVDO_FULLSPEED) },
{ USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_HSPA_FULLSPEED) },
{ USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_EVDO_EMBEDDED_FULLSPEED) },
{ USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_HSPA_EMBEDDED_FULLSPEED) },
{ USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_EVDO_HIGHSPEED) },
{ USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_HSPA_HIGHSPEED) },
{ USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_HSPA_HIGHSPEED3) },
{ USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_HSPA_HIGHSPEED4) },
{ USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_HSPA_HIGHSPEED5) },
{ USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_HSPA_HIGHSPEED6) },
{ USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_HSPA_HIGHSPEED7) },
{ USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_MC996D) },
{ USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_MF3470) },
{ USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_MC547) },
{ USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_EVDO_EMBEDDED_HIGHSPEED) },
{ USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_HSPA_EMBEDDED_HIGHSPEED) },
{ USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_G2) },
/* Novatel Ovation MC551 a.k.a. Verizon USB551L */
{ USB_DEVICE_AND_INTERFACE_INFO(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_MC551, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_E362, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_E371, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_U620L, 0xff, 0x00, 0x00) },
{ USB_DEVICE(AMOI_VENDOR_ID, AMOI_PRODUCT_H01) },
{ USB_DEVICE(AMOI_VENDOR_ID, AMOI_PRODUCT_H01A) },
{ USB_DEVICE(AMOI_VENDOR_ID, AMOI_PRODUCT_H02) },
{ USB_DEVICE(AMOI_VENDOR_ID, AMOI_PRODUCT_SKYPEPHONE_S2) },
{ USB_DEVICE(DELL_VENDOR_ID, DELL_PRODUCT_5700_MINICARD) }, /* Dell Wireless 5700 Mobile Broadband CDMA/EVDO Mini-Card == Novatel Expedite EV620 CDMA/EV-DO */
{ USB_DEVICE(DELL_VENDOR_ID, DELL_PRODUCT_5500_MINICARD) }, /* Dell Wireless 5500 Mobile Broadband HSDPA Mini-Card == Novatel Expedite EU740 HSDPA/3G */
{ USB_DEVICE(DELL_VENDOR_ID, DELL_PRODUCT_5505_MINICARD) }, /* Dell Wireless 5505 Mobile Broadband HSDPA Mini-Card == Novatel Expedite EU740 HSDPA/3G */
{ USB_DEVICE(DELL_VENDOR_ID, DELL_PRODUCT_5700_EXPRESSCARD) }, /* Dell Wireless 5700 Mobile Broadband CDMA/EVDO ExpressCard == Novatel Merlin XV620 CDMA/EV-DO */
{ USB_DEVICE(DELL_VENDOR_ID, DELL_PRODUCT_5510_EXPRESSCARD) }, /* Dell Wireless 5510 Mobile Broadband HSDPA ExpressCard == Novatel Merlin XU870 HSDPA/3G */
{ USB_DEVICE(DELL_VENDOR_ID, DELL_PRODUCT_5700_MINICARD_SPRINT) }, /* Dell Wireless 5700 Mobile Broadband CDMA/EVDO Mini-Card == Novatel Expedite E720 CDMA/EV-DO */
{ USB_DEVICE(DELL_VENDOR_ID, DELL_PRODUCT_5700_MINICARD_TELUS) }, /* Dell Wireless 5700 Mobile Broadband CDMA/EVDO Mini-Card == Novatel Expedite ET620 CDMA/EV-DO */
{ USB_DEVICE(DELL_VENDOR_ID, DELL_PRODUCT_5720_MINICARD_VZW) }, /* Dell Wireless 5720 == Novatel EV620 CDMA/EV-DO */
{ USB_DEVICE(DELL_VENDOR_ID, DELL_PRODUCT_5720_MINICARD_SPRINT) }, /* Dell Wireless 5720 == Novatel EV620 CDMA/EV-DO */
{ USB_DEVICE(DELL_VENDOR_ID, DELL_PRODUCT_5720_MINICARD_TELUS) }, /* Dell Wireless 5720 == Novatel EV620 CDMA/EV-DO */
{ USB_DEVICE(DELL_VENDOR_ID, DELL_PRODUCT_5520_MINICARD_CINGULAR) }, /* Dell Wireless HSDPA 5520 == Novatel Expedite EU860D */
{ USB_DEVICE(DELL_VENDOR_ID, DELL_PRODUCT_5520_MINICARD_GENERIC_L) }, /* Dell Wireless HSDPA 5520 */
{ USB_DEVICE(DELL_VENDOR_ID, DELL_PRODUCT_5520_MINICARD_GENERIC_I) }, /* Dell Wireless 5520 Voda I Mobile Broadband (3G HSDPA) Minicard */
{ USB_DEVICE(DELL_VENDOR_ID, DELL_PRODUCT_5730_MINICARD_SPRINT) }, /* Dell Wireless 5730 Mobile Broadband EVDO/HSPA Mini-Card */
{ USB_DEVICE(DELL_VENDOR_ID, DELL_PRODUCT_5730_MINICARD_TELUS) }, /* Dell Wireless 5730 Mobile Broadband EVDO/HSPA Mini-Card */
{ USB_DEVICE(DELL_VENDOR_ID, DELL_PRODUCT_5730_MINICARD_VZW) }, /* Dell Wireless 5730 Mobile Broadband EVDO/HSPA Mini-Card */
{ USB_DEVICE_AND_INTERFACE_INFO(DELL_VENDOR_ID, DELL_PRODUCT_5800_MINICARD_VZW, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(DELL_VENDOR_ID, DELL_PRODUCT_5800_V2_MINICARD_VZW, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(DELL_VENDOR_ID, DELL_PRODUCT_5804_MINICARD_ATT, 0xff, 0xff, 0xff) },
{ USB_DEVICE(DELL_VENDOR_ID, DELL_PRODUCT_5821E),
.driver_info = RSVD(0) | RSVD(1) | RSVD(6) },
{ USB_DEVICE(DELL_VENDOR_ID, DELL_PRODUCT_5821E_ESIM),
.driver_info = RSVD(0) | RSVD(1) | RSVD(6) },
{ USB_DEVICE(DELL_VENDOR_ID, DELL_PRODUCT_5829E),
.driver_info = RSVD(0) | RSVD(6) },
{ USB_DEVICE(DELL_VENDOR_ID, DELL_PRODUCT_5829E_ESIM),
.driver_info = RSVD(0) | RSVD(6) },
{ USB_DEVICE(ANYDATA_VENDOR_ID, ANYDATA_PRODUCT_ADU_E100A) }, /* ADU-E100, ADU-310 */
{ USB_DEVICE(ANYDATA_VENDOR_ID, ANYDATA_PRODUCT_ADU_500A) },
{ USB_DEVICE(ANYDATA_VENDOR_ID, ANYDATA_PRODUCT_ADU_620UW) },
{ USB_DEVICE(AXESSTEL_VENDOR_ID, AXESSTEL_PRODUCT_MV110H) },
{ USB_DEVICE(YISO_VENDOR_ID, YISO_PRODUCT_U893) },
{ USB_DEVICE_INTERFACE_CLASS(BANDRICH_VENDOR_ID, BANDRICH_PRODUCT_C100_1, 0xff) },
{ USB_DEVICE_INTERFACE_CLASS(BANDRICH_VENDOR_ID, BANDRICH_PRODUCT_C100_2, 0xff) },
{ USB_DEVICE_INTERFACE_CLASS(BANDRICH_VENDOR_ID, BANDRICH_PRODUCT_1004, 0xff) },
{ USB_DEVICE_INTERFACE_CLASS(BANDRICH_VENDOR_ID, BANDRICH_PRODUCT_1005, 0xff) },
{ USB_DEVICE_INTERFACE_CLASS(BANDRICH_VENDOR_ID, BANDRICH_PRODUCT_1006, 0xff) },
{ USB_DEVICE_INTERFACE_CLASS(BANDRICH_VENDOR_ID, BANDRICH_PRODUCT_1007, 0xff) },
{ USB_DEVICE_INTERFACE_CLASS(BANDRICH_VENDOR_ID, BANDRICH_PRODUCT_1008, 0xff) },
{ USB_DEVICE_INTERFACE_CLASS(BANDRICH_VENDOR_ID, BANDRICH_PRODUCT_1009, 0xff) },
{ USB_DEVICE_INTERFACE_CLASS(BANDRICH_VENDOR_ID, BANDRICH_PRODUCT_100A, 0xff) },
{ USB_DEVICE_INTERFACE_CLASS(BANDRICH_VENDOR_ID, BANDRICH_PRODUCT_100B, 0xff) },
{ USB_DEVICE_INTERFACE_CLASS(BANDRICH_VENDOR_ID, BANDRICH_PRODUCT_100C, 0xff) },
{ USB_DEVICE_INTERFACE_CLASS(BANDRICH_VENDOR_ID, BANDRICH_PRODUCT_100D, 0xff) },
{ USB_DEVICE_INTERFACE_CLASS(BANDRICH_VENDOR_ID, BANDRICH_PRODUCT_100E, 0xff) },
{ USB_DEVICE_INTERFACE_CLASS(BANDRICH_VENDOR_ID, BANDRICH_PRODUCT_100F, 0xff) },
{ USB_DEVICE_INTERFACE_CLASS(BANDRICH_VENDOR_ID, BANDRICH_PRODUCT_1010, 0xff) },
{ USB_DEVICE_INTERFACE_CLASS(BANDRICH_VENDOR_ID, BANDRICH_PRODUCT_1011, 0xff) },
{ USB_DEVICE_INTERFACE_CLASS(BANDRICH_VENDOR_ID, BANDRICH_PRODUCT_1012, 0xff) },
{ USB_DEVICE(KYOCERA_VENDOR_ID, KYOCERA_PRODUCT_KPC650) },
{ USB_DEVICE(KYOCERA_VENDOR_ID, KYOCERA_PRODUCT_KPC680) },
{ USB_DEVICE(QUALCOMM_VENDOR_ID, 0x6000)}, /* ZTE AC8700 */
{ USB_DEVICE_AND_INTERFACE_INFO(QUALCOMM_VENDOR_ID, 0x6001, 0xff, 0xff, 0xff), /* 4G LTE usb-modem U901 */
.driver_info = RSVD(3) },
{ USB_DEVICE(QUALCOMM_VENDOR_ID, 0x6613)}, /* Onda H600/ZTE MF330 */
{ USB_DEVICE(QUALCOMM_VENDOR_ID, 0x0023)}, /* ONYX 3G device */
{ USB_DEVICE(QUALCOMM_VENDOR_ID, 0x9000), /* SIMCom SIM5218 */
.driver_info = NCTRL(0) | NCTRL(1) | NCTRL(2) | NCTRL(3) | RSVD(4) },
/* Quectel products using Qualcomm vendor ID */
{ USB_DEVICE(QUALCOMM_VENDOR_ID, QUECTEL_PRODUCT_UC15)},
{ USB_DEVICE(QUALCOMM_VENDOR_ID, QUECTEL_PRODUCT_UC20),
.driver_info = RSVD(4) },
/* Yuga products use Qualcomm vendor ID */
{ USB_DEVICE(QUALCOMM_VENDOR_ID, YUGA_PRODUCT_CLM920_NC5),
.driver_info = RSVD(1) | RSVD(4) },
/* u-blox products using Qualcomm vendor ID */
{ USB_DEVICE(QUALCOMM_VENDOR_ID, UBLOX_PRODUCT_R410M),
.driver_info = RSVD(1) | RSVD(3) },
{ USB_DEVICE(QUALCOMM_VENDOR_ID, 0x908b), /* u-blox LARA-R6 00B */
.driver_info = RSVD(4) },
{ USB_DEVICE(QUALCOMM_VENDOR_ID, 0x90fa),
.driver_info = RSVD(3) },
/* u-blox products */
{ USB_DEVICE(UBLOX_VENDOR_ID, 0x1311) }, /* u-blox LARA-R6 01B */
{ USB_DEVICE(UBLOX_VENDOR_ID, 0x1312), /* u-blox LARA-R6 01B (RMNET) */
.driver_info = RSVD(4) },
{ USB_DEVICE_INTERFACE_CLASS(UBLOX_VENDOR_ID, 0x1313, 0xff) }, /* u-blox LARA-R6 01B (ECM) */
{ USB_DEVICE(UBLOX_VENDOR_ID, 0x1341) }, /* u-blox LARA-L6 */
{ USB_DEVICE(UBLOX_VENDOR_ID, 0x1342), /* u-blox LARA-L6 (RMNET) */
.driver_info = RSVD(4) },
{ USB_DEVICE(UBLOX_VENDOR_ID, 0x1343), /* u-blox LARA-L6 (ECM) */
.driver_info = RSVD(4) },
/* Quectel products using Quectel vendor ID */
{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EC21, 0xff, 0xff, 0xff),
.driver_info = NUMEP2 },
{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EC21, 0xff, 0, 0) },
{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EC25, 0xff, 0xff, 0xff),
.driver_info = NUMEP2 },
{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EC25, 0xff, 0, 0) },
{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EG91, 0xff, 0xff, 0xff),
.driver_info = NUMEP2 },
{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EG91, 0xff, 0, 0) },
{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EG95, 0xff, 0xff, 0xff),
.driver_info = NUMEP2 },
{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EG95, 0xff, 0, 0) },
{ USB_DEVICE_INTERFACE_CLASS(QUECTEL_VENDOR_ID, 0x0203, 0xff), /* BG95-M3 */
.driver_info = ZLP },
{ USB_DEVICE(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_BG96),
.driver_info = RSVD(4) },
{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EP06, 0xff, 0xff, 0xff),
.driver_info = RSVD(1) | RSVD(2) | RSVD(3) | RSVD(4) | NUMEP2 },
{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EP06, 0xff, 0, 0) },
{ USB_DEVICE_INTERFACE_CLASS(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM05CN, 0xff),
.driver_info = RSVD(6) | ZLP },
{ USB_DEVICE_INTERFACE_CLASS(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM05CN_SG, 0xff),
.driver_info = RSVD(6) | ZLP },
{ USB_DEVICE_INTERFACE_CLASS(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM05G, 0xff),
.driver_info = RSVD(6) | ZLP },
{ USB_DEVICE_INTERFACE_CLASS(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM05GV2, 0xff),
.driver_info = RSVD(4) | ZLP },
{ USB_DEVICE_INTERFACE_CLASS(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM05G_CS, 0xff),
.driver_info = RSVD(6) | ZLP },
{ USB_DEVICE_INTERFACE_CLASS(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM05G_GR, 0xff),
.driver_info = RSVD(6) | ZLP },
{ USB_DEVICE_INTERFACE_CLASS(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM05G_RS, 0xff),
.driver_info = RSVD(6) | ZLP },
{ USB_DEVICE_INTERFACE_CLASS(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM05G_SG, 0xff),
.driver_info = RSVD(6) | ZLP },
{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM060K, 0xff, 0x00, 0x40) },
{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM060K, 0xff, 0xff, 0x30) },
{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM060K, 0xff, 0xff, 0x40) },
{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM060K_128, 0xff, 0xff, 0x30) },
{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM060K_128, 0xff, 0x00, 0x40) },
{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM060K_128, 0xff, 0xff, 0x40) },
{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM061K_LCN, 0xff, 0xff, 0x30) },
{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM061K_LCN, 0xff, 0x00, 0x40) },
{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM061K_LCN, 0xff, 0xff, 0x40) },
{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM061K_LMS, 0xff, 0xff, 0x30) },
{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM061K_LMS, 0xff, 0x00, 0x40) },
{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM061K_LMS, 0xff, 0xff, 0x40) },
{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM061K_LTA, 0xff, 0xff, 0x30) },
{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM061K_LTA, 0xff, 0x00, 0x40) },
{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM061K_LTA, 0xff, 0xff, 0x40) },
{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM061K_LWW, 0xff, 0xff, 0x30) },
{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM061K_LWW, 0xff, 0x00, 0x40) },
{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM061K_LWW, 0xff, 0xff, 0x40) },
{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM12, 0xff, 0xff, 0xff),
.driver_info = RSVD(1) | RSVD(2) | RSVD(3) | RSVD(4) | NUMEP2 },
{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM12, 0xff, 0, 0) },
{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, 0x0620, 0xff, 0xff, 0x30) }, /* EM160R-GL */
{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, 0x0620, 0xff, 0, 0) },
{ USB_DEVICE_INTERFACE_CLASS(QUECTEL_VENDOR_ID, 0x0700, 0xff), /* BG95 */
.driver_info = RSVD(3) | ZLP },
{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_RM500Q, 0xff, 0xff, 0x30) },
{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_RM500Q, 0xff, 0, 0) },
{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_RM500Q, 0xff, 0xff, 0x10),
.driver_info = ZLP },
{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_RM520N, 0xff, 0xff, 0x30) },
{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_RM520N, 0xff, 0, 0x40) },
{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_RM520N, 0xff, 0, 0) },
{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, 0x0900, 0xff, 0, 0), /* RM500U-CN */
.driver_info = ZLP },
{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EC200A, 0xff, 0, 0) },
{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EC200U, 0xff, 0, 0) },
{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EC200S_CN, 0xff, 0, 0) },
{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EC200T, 0xff, 0, 0) },
{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_RM500K, 0xff, 0x00, 0x00) },
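/* CMOTECH products */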
{ USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6001) },
{ USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CMU_300) },
{ USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6003),
.driver_info = RSVD(0) },
{ USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6004) },
{ USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6005) },
{ USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CGU_628A) },
{ USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CHE_628S),
.driver_info = RSVD(0) },
{ USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CMU_301),
.driver_info = RSVD(0) },
{ USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CHU_628),
.driver_info = RSVD(0) },
{ USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CHU_628S) },
{ USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CDU_680) },
{ USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CDU_685A) },
{ USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CHU_720S),
.driver_info = RSVD(0) },
{ USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_7002),
.driver_info = RSVD(0) },
{ USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CHU_629K),
.driver_info = RSVD(4) },
{ USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_7004),
.driver_info = RSVD(3) },
{ USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_7005) },
{ USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CGU_629),
.driver_info = RSVD(5) },
{ USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CHU_629S),
.driver_info = RSVD(4) },
{ USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CHU_720I),
.driver_info = RSVD(0) },
{ USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_7212),
.driver_info = RSVD(0) },
{ USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_7213),
.driver_info = RSVD(0) },
{ USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_7251),
.driver_info = RSVD(1) },
{ USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_7252),
.driver_info = RSVD(1) },
{ USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_7253),
.driver_info = RSVD(1) },
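/* Telit products */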
{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_UC864E) },
{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_UC864G) },
{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_CC864_DUAL) },
{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_CC864_SINGLE) },
{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_DE910_DUAL) },
{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_UE910_V2) },
{ USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1031, 0xff), /* Telit LE910C1-EUX */
.driver_info = NCTRL(0) | RSVD(3) },
{ USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1033, 0xff), /* Telit LE910C1-EUX (ECM) */
.driver_info = NCTRL(0) },
{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE922_USBCFG0),
.driver_info = RSVD(0) | RSVD(1) | NCTRL(2) | RSVD(3) },
{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE922_USBCFG1),
.driver_info = NCTRL(0) | RSVD(1) | RSVD(2) },
{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE922_USBCFG2),
.driver_info = NCTRL(0) | RSVD(1) | RSVD(2) | RSVD(3) },
{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE922_USBCFG3),
.driver_info = NCTRL(0) | RSVD(1) | RSVD(2) | RSVD(3) },
{ USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, TELIT_PRODUCT_LE922_USBCFG5, 0xff),
.driver_info = RSVD(0) | RSVD(1) | NCTRL(2) | RSVD(3) },
{ USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1050, 0xff), /* Telit FN980 (rmnet) */
.driver_info = NCTRL(0) | RSVD(1) | RSVD(2) },
{ USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1051, 0xff), /* Telit FN980 (MBIM) */
.driver_info = NCTRL(0) | RSVD(1) },
{ USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1052, 0xff), /* Telit FN980 (RNDIS) */
.driver_info = NCTRL(2) | RSVD(3) },
{ USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1053, 0xff), /* Telit FN980 (ECM) */
.driver_info = NCTRL(0) | RSVD(1) },
{ USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1054, 0xff), /* Telit FT980-KS */
.driver_info = NCTRL(2) | RSVD(3) },
{ USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1055, 0xff), /* Telit FN980 (PCIe) */
.driver_info = NCTRL(0) | RSVD(1) },
{ USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1056, 0xff), /* Telit FD980 */
.driver_info = NCTRL(2) | RSVD(3) },
{ USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1057, 0xff), /* Telit FN980 */
.driver_info = NCTRL(0) | RSVD(1) | RSVD(2) },
{ USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1058, 0xff), /* Telit FN980 (PCIe) */
.driver_info = NCTRL(0) | RSVD(1) },
{ USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1060, 0xff), /* Telit LN920 (rmnet) */
.driver_info = NCTRL(0) | RSVD(1) | RSVD(2) },
{ USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1061, 0xff), /* Telit LN920 (MBIM) */
.driver_info = NCTRL(0) | RSVD(1) },
{ USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1062, 0xff), /* Telit LN920 (RNDIS) */
.driver_info = NCTRL(2) | RSVD(3) },
{ USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1063, 0xff), /* Telit LN920 (ECM) */
.driver_info = NCTRL(0) | RSVD(1) },
{ USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1070, 0xff), /* Telit FN990 (rmnet) */
.driver_info = NCTRL(0) | RSVD(1) | RSVD(2) },
{ USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1071, 0xff), /* Telit FN990 (MBIM) */
.driver_info = NCTRL(0) | RSVD(1) },
{ USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1072, 0xff), /* Telit FN990 (RNDIS) */
.driver_info = NCTRL(2) | RSVD(3) },
{ USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1073, 0xff), /* Telit FN990 (ECM) */
.driver_info = NCTRL(0) | RSVD(1) },
{ USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1075, 0xff), /* Telit FN990 (PCIe) */
.driver_info = RSVD(0) },
{ USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1080, 0xff), /* Telit FE990 (rmnet) */
.driver_info = NCTRL(0) | RSVD(1) | RSVD(2) },
{ USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1081, 0xff), /* Telit FE990 (MBIM) */
.driver_info = NCTRL(0) | RSVD(1) },
{ USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1082, 0xff), /* Telit FE990 (RNDIS) */
.driver_info = NCTRL(2) | RSVD(3) },
{ USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1083, 0xff), /* Telit FE990 (ECM) */
.driver_info = NCTRL(0) | RSVD(1) },
{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_ME910),
.driver_info = NCTRL(0) | RSVD(1) | RSVD(3) },
{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_ME910_DUAL_MODEM),
.driver_info = NCTRL(0) | RSVD(3) },
{ USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1102, 0xff), /* Telit ME910 (ECM) */
.driver_info = NCTRL(0) },
{ USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x110a, 0xff), /* Telit ME910G1 */
.driver_info = NCTRL(0) | RSVD(3) },
{ USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x110b, 0xff), /* Telit ME910G1 (ECM) */
.driver_info = NCTRL(0) },
{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE910),
.driver_info = NCTRL(0) | RSVD(1) | RSVD(2) },
{ USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1203, 0xff), /* Telit LE910Cx (RNDIS) */
.driver_info = NCTRL(2) | RSVD(3) },
{ USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1204, 0xff), /* Telit LE910Cx (MBIM) */
.driver_info = NCTRL(0) | RSVD(1) },
{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE910_USBCFG4),
.driver_info = NCTRL(0) | RSVD(1) | RSVD(2) | RSVD(3) },
{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE920),
.driver_info = NCTRL(0) | RSVD(1) | RSVD(5) },
{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE920A4_1207) },
{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE920A4_1208),
.driver_info = NCTRL(0) | RSVD(1) },
{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE920A4_1211),
.driver_info = NCTRL(0) | RSVD(1) | RSVD(2) | RSVD(3) },
{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE920A4_1212),
.driver_info = NCTRL(0) | RSVD(1) },
{ USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, TELIT_PRODUCT_LE920A4_1213, 0xff) },
{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE920A4_1214),
.driver_info = NCTRL(0) | RSVD(1) | RSVD(2) | RSVD(3) },
{ USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1230, 0xff), /* Telit LE910Cx (rmnet) */
.driver_info = NCTRL(0) | RSVD(1) | RSVD(2) },
{ USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1231, 0xff), /* Telit LE910Cx (RNDIS) */
.driver_info = NCTRL(2) | RSVD(3) },
{ USB_DEVICE_AND_INTERFACE_INFO(TELIT_VENDOR_ID, 0x1250, 0xff, 0x00, 0x00) }, /* Telit LE910Cx (rmnet) */
{ USB_DEVICE(TELIT_VENDOR_ID, 0x1260),
.driver_info = NCTRL(0) | RSVD(1) | RSVD(2) },
{ USB_DEVICE(TELIT_VENDOR_ID, 0x1261),
.driver_info = NCTRL(0) | RSVD(1) | RSVD(2) },
{ USB_DEVICE(TELIT_VENDOR_ID, 0x1900), /* Telit LN940 (QMI) */
.driver_info = NCTRL(0) | RSVD(1) },
{ USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1901, 0xff), /* Telit LN940 (MBIM) */
.driver_info = NCTRL(0) },
{ USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x7010, 0xff), /* Telit LE910-S1 (RNDIS) */
.driver_info = NCTRL(2) },
{ USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x7011, 0xff), /* Telit LE910-S1 (ECM) */
.driver_info = NCTRL(2) },
{ USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x701a, 0xff), /* Telit LE910R1 (RNDIS) */
.driver_info = NCTRL(2) },
{ USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x701b, 0xff), /* Telit LE910R1 (ECM) */
.driver_info = NCTRL(2) },
{ USB_DEVICE(TELIT_VENDOR_ID, 0x9010), /* Telit SBL FN980 flashing device */
.driver_info = NCTRL(0) | ZLP },
{ USB_DEVICE(TELIT_VENDOR_ID, 0x9200), /* Telit LE910S1 flashing device */
.driver_info = NCTRL(0) | ZLP },
{ USB_DEVICE(TELIT_VENDOR_ID, 0x9201), /* Telit LE910R1 flashing device */
.driver_info = NCTRL(0) | ZLP },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_MF622, 0xff, 0xff, 0xff) }, /* ZTE WCDMA products */
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0002, 0xff, 0xff, 0xff),
.driver_info = RSVD(1) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0003, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0004, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0005, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0006, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0008, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0009, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x000a, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x000b, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x000c, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x000d, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x000e, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x000f, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0010, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0011, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0012, 0xff, 0xff, 0xff),
.driver_info = RSVD(1) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0013, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_MF628, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0016, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0017, 0xff, 0xff, 0xff),
.driver_info = RSVD(3) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0018, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0019, 0xff, 0xff, 0xff),
.driver_info = RSVD(3) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0020, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0021, 0xff, 0xff, 0xff),
.driver_info = RSVD(4) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0022, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0023, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0024, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0025, 0xff, 0xff, 0xff),
.driver_info = RSVD(1) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0028, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0029, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0030, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_MF626, 0xff, 0xff, 0xff),
.driver_info = NCTRL(0) | NCTRL(1) | RSVD(4) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0032, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0033, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0034, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0037, 0xff, 0xff, 0xff),
.driver_info = NCTRL(0) | NCTRL(1) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0038, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0039, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0040, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0042, 0xff, 0xff, 0xff),
.driver_info = RSVD(4) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0043, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0044, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0048, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0049, 0xff, 0xff, 0xff),
.driver_info = RSVD(5) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0050, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0051, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0052, 0xff, 0xff, 0xff),
.driver_info = RSVD(4) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0054, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0055, 0xff, 0xff, 0xff),
.driver_info = RSVD(1) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0056, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0057, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0058, 0xff, 0xff, 0xff),
.driver_info = RSVD(4) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0061, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0062, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0063, 0xff, 0xff, 0xff),
.driver_info = RSVD(4) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0064, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0065, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0066, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0067, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0069, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0076, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0077, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0078, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0079, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0082, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0083, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0086, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0087, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0088, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0089, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0090, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0091, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0092, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0093, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0094, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0095, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0096, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0097, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0104, 0xff, 0xff, 0xff),
.driver_info = RSVD(4) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0105, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0106, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0108, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0113, 0xff, 0xff, 0xff),
.driver_info = RSVD(5) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0117, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0118, 0xff, 0xff, 0xff),
.driver_info = RSVD(5) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0121, 0xff, 0xff, 0xff),
.driver_info = RSVD(5) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0122, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0123, 0xff, 0xff, 0xff),
.driver_info = RSVD(4) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0124, 0xff, 0xff, 0xff),
.driver_info = RSVD(5) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0125, 0xff, 0xff, 0xff),
.driver_info = RSVD(6) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0126, 0xff, 0xff, 0xff),
.driver_info = RSVD(5) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0128, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0135, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0136, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0137, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0139, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0142, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0143, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0144, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0145, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0148, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0151, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0153, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0155, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0156, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0157, 0xff, 0xff, 0xff),
.driver_info = RSVD(5) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0158, 0xff, 0xff, 0xff),
.driver_info = RSVD(3) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0159, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0161, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0162, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0164, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0165, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0167, 0xff, 0xff, 0xff),
.driver_info = RSVD(4) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0189, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0191, 0xff, 0xff, 0xff), /* ZTE EuFi890 */
.driver_info = RSVD(4) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0196, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0197, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0199, 0xff, 0xff, 0xff), /* ZTE MF820S */
.driver_info = RSVD(1) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0200, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0201, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0254, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0257, 0xff, 0xff, 0xff), /* ZTE MF821 */
.driver_info = RSVD(3) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0265, 0xff, 0xff, 0xff), /* ONDA MT8205 */
.driver_info = RSVD(4) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0284, 0xff, 0xff, 0xff), /* ZTE MF880 */
.driver_info = RSVD(4) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0317, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0326, 0xff, 0xff, 0xff),
.driver_info = RSVD(4) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0330, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0395, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0412, 0xff, 0xff, 0xff), /* Telewell TW-LTE 4G */
.driver_info = RSVD(4) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0414, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0417, 0xff, 0xff, 0xff) },
{ USB_DEVICE_INTERFACE_CLASS(ZTE_VENDOR_ID, 0x0601, 0xff) }, /* GosunCn ZTE WeLink ME3630 (RNDIS mode) */
{ USB_DEVICE_INTERFACE_CLASS(ZTE_VENDOR_ID, 0x0602, 0xff) }, /* GosunCn ZTE WeLink ME3630 (MBIM mode) */
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1008, 0xff, 0xff, 0xff),
.driver_info = RSVD(4) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1010, 0xff, 0xff, 0xff),
.driver_info = RSVD(4) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1012, 0xff, 0xff, 0xff),
.driver_info = RSVD(4) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1018, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1021, 0xff, 0xff, 0xff),
.driver_info = RSVD(2) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1057, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1058, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1059, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1060, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1061, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1062, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1063, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1064, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1065, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1066, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1067, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1068, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1069, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1070, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1071, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1072, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1073, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1074, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1075, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1076, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1077, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1078, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1079, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1080, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1081, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1082, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1083, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1084, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1085, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1086, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1087, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1088, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1089, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1090, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1091, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1092, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1093, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1094, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1095, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1096, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1097, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1098, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1099, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1100, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1101, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1102, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1103, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1104, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1105, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1106, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1107, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1108, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1109, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1110, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1111, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1112, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1113, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1114, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1115, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1116, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1117, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1118, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1119, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1120, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1121, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1122, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1123, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1124, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1125, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1126, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1127, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1128, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1129, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1130, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1131, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1132, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1133, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1134, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1135, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1136, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1137, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1138, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1139, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1140, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1141, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1142, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1143, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1144, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1145, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1146, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1147, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1148, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1149, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1150, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1151, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1152, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1153, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1154, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1155, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1156, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1157, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1158, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1159, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1160, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1161, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1162, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1163, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1164, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1165, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1166, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1167, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1168, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1169, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1170, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1244, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1245, 0xff, 0xff, 0xff),
.driver_info = RSVD(4) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1246, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1247, 0xff, 0xff, 0xff),
.driver_info = RSVD(4) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1248, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1249, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1250, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1251, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1252, 0xff, 0xff, 0xff),
.driver_info = RSVD(4) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1253, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1254, 0xff, 0xff, 0xff),
.driver_info = RSVD(4) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1255, 0xff, 0xff, 0xff),
.driver_info = RSVD(3) | RSVD(4) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1256, 0xff, 0xff, 0xff),
.driver_info = RSVD(4) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1257, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1258, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1259, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1260, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1261, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1262, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1263, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1264, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1265, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1266, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1267, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1268, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1269, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1270, 0xff, 0xff, 0xff),
.driver_info = RSVD(5) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1271, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1272, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1273, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1274, 0xff, 0xff, 0xff) },
{ USB_DEVICE(ZTE_VENDOR_ID, 0x1275), /* ZTE P685M */
.driver_info = RSVD(3) | RSVD(4) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1276, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1277, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1278, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1279, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1280, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1281, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1282, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1283, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1284, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1285, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1286, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1287, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1288, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1289, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1290, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1291, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1292, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1293, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1294, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1295, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1296, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1297, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1298, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1299, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1300, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1301, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1302, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1303, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1333, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1401, 0xff, 0xff, 0xff),
.driver_info = RSVD(2) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1402, 0xff, 0xff, 0xff),
.driver_info = RSVD(2) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1424, 0xff, 0xff, 0xff),
.driver_info = RSVD(2) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1425, 0xff, 0xff, 0xff),
.driver_info = RSVD(2) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1426, 0xff, 0xff, 0xff), /* ZTE MF91 */
.driver_info = RSVD(2) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1428, 0xff, 0xff, 0xff), /* Telewell TW-LTE 4G v2 */
.driver_info = RSVD(2) },
{ USB_DEVICE_INTERFACE_CLASS(ZTE_VENDOR_ID, 0x1476, 0xff) }, /* GosunCn ZTE WeLink ME3630 (ECM/NCM mode) */
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1481, 0xff, 0x00, 0x00) }, /* ZTE MF871A */
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1485, 0xff, 0xff, 0xff), /* ZTE MF286D */
.driver_info = RSVD(5) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1533, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1534, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1535, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1545, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1546, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1547, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1565, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1566, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1567, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1589, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1590, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1591, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1592, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1594, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1596, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1598, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1600, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x2002, 0xff, 0xff, 0xff),
.driver_info = NCTRL(0) | NCTRL(1) | NCTRL(2) | RSVD(4) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x2003, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0014, 0xff, 0xff, 0xff) }, /* ZTE CDMA products */
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0027, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0059, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0060, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0070, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0073, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0130, 0xff, 0xff, 0xff),
.driver_info = RSVD(1) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0133, 0xff, 0xff, 0xff),
.driver_info = RSVD(3) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0141, 0xff, 0xff, 0xff),
.driver_info = RSVD(5) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0147, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0152, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0168, 0xff, 0xff, 0xff),
.driver_info = RSVD(4) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0170, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0176, 0xff, 0xff, 0xff),
.driver_info = RSVD(3) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0178, 0xff, 0xff, 0xff),
.driver_info = RSVD(3) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff42, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff43, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff44, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff45, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff46, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff47, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff48, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff49, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff4a, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff4b, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff4c, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff4d, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff4e, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff4f, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff50, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff51, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff52, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff53, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff54, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff55, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff56, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff57, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff58, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff59, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff5a, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff5b, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff5c, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff5d, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff5e, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff5f, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff60, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff61, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff62, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff63, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff64, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff65, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff66, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff67, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff68, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff69, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff6a, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff6b, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff6c, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff6d, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff6e, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff6f, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff70, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff71, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff72, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff73, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff74, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff75, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff76, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff77, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff78, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff79, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff7a, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff7b, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff7c, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff7d, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff7e, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff7f, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff80, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff81, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff82, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff83, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff84, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff85, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff86, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff87, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff88, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff89, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff8a, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff8b, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff8c, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff8d, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff8e, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff8f, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff90, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff91, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff92, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff93, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff94, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff9f, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffa0, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffa1, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffa2, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffa3, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffa4, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffa5, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffa6, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffa7, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffa8, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffa9, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffaa, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffab, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffac, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffae, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffaf, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffb0, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffb1, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffb2, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffb3, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffb4, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffb5, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffb6, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffb7, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffb8, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffb9, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffba, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffbb, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffbc, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffbd, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffbe, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffbf, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffc0, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffc1, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffc2, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffc3, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffc4, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffc5, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffc6, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffc7, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffc8, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffc9, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffca, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffcb, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffcc, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffcd, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffce, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffcf, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffd0, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffd1, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffd2, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffd3, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffd4, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffd5, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffe9, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffec, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffee, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xfff6, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xfff7, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xfff8, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xfff9, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xfffb, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xfffc, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_MG880, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_CDMA_TECH, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_AC2726, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_AC8710T, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_MC2718, 0xff, 0xff, 0xff),
.driver_info = NCTRL(1) | NCTRL(2) | NCTRL(3) | NCTRL(4) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_AD3812, 0xff, 0xff, 0xff),
.driver_info = NCTRL(0) | NCTRL(1) | NCTRL(2) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_MC2716, 0xff, 0xff, 0xff),
.driver_info = NCTRL(1) | NCTRL(2) | NCTRL(3) },
{ USB_DEVICE(ZTE_VENDOR_ID, ZTE_PRODUCT_ME3620_L),
.driver_info = RSVD(3) | RSVD(4) | RSVD(5) },
{ USB_DEVICE(ZTE_VENDOR_ID, ZTE_PRODUCT_ME3620_MBIM),
.driver_info = RSVD(2) | RSVD(3) | RSVD(4) },
{ USB_DEVICE(ZTE_VENDOR_ID, ZTE_PRODUCT_ME3620_X),
.driver_info = RSVD(3) | RSVD(4) | RSVD(5) },
{ USB_DEVICE(ZTE_VENDOR_ID, ZTE_PRODUCT_ZM8620_X),
.driver_info = RSVD(3) | RSVD(4) | RSVD(5) },
{ USB_VENDOR_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff, 0x02, 0x01) },
{ USB_VENDOR_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff, 0x02, 0x05) },
{ USB_VENDOR_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff, 0x86, 0x10) },
{ USB_DEVICE(BENQ_VENDOR_ID, BENQ_PRODUCT_H10) },
{ USB_DEVICE(DLINK_VENDOR_ID, DLINK_PRODUCT_DWM_652) },
{ USB_DEVICE(ALINK_VENDOR_ID, DLINK_PRODUCT_DWM_652_U5) }, /* Yes, ALINK_VENDOR_ID */
{ USB_DEVICE(ALINK_VENDOR_ID, DLINK_PRODUCT_DWM_652_U5A) },
{ USB_DEVICE(QISDA_VENDOR_ID, QISDA_PRODUCT_H21_4512) },
{ USB_DEVICE(QISDA_VENDOR_ID, QISDA_PRODUCT_H21_4523) },
{ USB_DEVICE(QISDA_VENDOR_ID, QISDA_PRODUCT_H20_4515) },
{ USB_DEVICE(QISDA_VENDOR_ID, QISDA_PRODUCT_H20_4518) },
{ USB_DEVICE(QISDA_VENDOR_ID, QISDA_PRODUCT_H20_4519) },
{ USB_DEVICE(TOSHIBA_VENDOR_ID, TOSHIBA_PRODUCT_G450) },
{ USB_DEVICE(TOSHIBA_VENDOR_ID, TOSHIBA_PRODUCT_HSDPA_MINICARD) }, /* Toshiba 3G HSDPA == Novatel Expedite EU870D MiniCard */
{ USB_DEVICE(ALINK_VENDOR_ID, 0x9000) },
{ USB_DEVICE(ALINK_VENDOR_ID, ALINK_PRODUCT_PH300) },
{ USB_DEVICE_AND_INTERFACE_INFO(ALINK_VENDOR_ID, ALINK_PRODUCT_3GU, 0xff, 0xff, 0xff) },
{ USB_DEVICE(ALINK_VENDOR_ID, SIMCOM_PRODUCT_SIM7100E),
.driver_info = RSVD(5) | RSVD(6) },
{ USB_DEVICE_INTERFACE_CLASS(0x1e0e, 0x9003, 0xff) }, /* Simcom SIM7500/SIM7600 MBIM mode */
{ USB_DEVICE_INTERFACE_CLASS(0x1e0e, 0x9011, 0xff), /* Simcom SIM7500/SIM7600 RNDIS mode */
.driver_info = RSVD(7) },
{ USB_DEVICE_INTERFACE_CLASS(0x1e0e, 0x9205, 0xff) }, /* Simcom SIM7070/SIM7080/SIM7090 AT+ECM mode */
{ USB_DEVICE_INTERFACE_CLASS(0x1e0e, 0x9206, 0xff) }, /* Simcom SIM7070/SIM7080/SIM7090 AT-only mode */
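/* Alcatel products */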
{ USB_DEVICE(ALCATEL_VENDOR_ID, ALCATEL_PRODUCT_X060S_X200),
.driver_info = NCTRL(0) | NCTRL(1) | RSVD(4) },
{ USB_DEVICE(ALCATEL_VENDOR_ID, ALCATEL_PRODUCT_X220_X500D),
.driver_info = RSVD(6) },
{ USB_DEVICE(ALCATEL_VENDOR_ID, 0x0052),
.driver_info = RSVD(6) },
{ USB_DEVICE(ALCATEL_VENDOR_ID, 0x00b6),
.driver_info = RSVD(3) },
{ USB_DEVICE(ALCATEL_VENDOR_ID, 0x00b7),
.driver_info = RSVD(5) },
{ USB_DEVICE(ALCATEL_VENDOR_ID, ALCATEL_PRODUCT_L100V),
.driver_info = RSVD(4) },
{ USB_DEVICE(ALCATEL_VENDOR_ID, ALCATEL_PRODUCT_L800MA),
.driver_info = RSVD(2) },
{ USB_DEVICE(AIRPLUS_VENDOR_ID, AIRPLUS_PRODUCT_MCD650) },
{ USB_DEVICE(TLAYTECH_VENDOR_ID, TLAYTECH_PRODUCT_TEU800) },
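/* Longcheer products (vendor ID also used by 4G Systems, FujiSoft, SpeedUp, Zoom and iBall devices) */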
{ USB_DEVICE(LONGCHEER_VENDOR_ID, FOUR_G_SYSTEMS_PRODUCT_CARSTICK_LTE),
.driver_info = RSVD(0) },
{ USB_DEVICE(LONGCHEER_VENDOR_ID, FOUR_G_SYSTEMS_PRODUCT_W14),
.driver_info = NCTRL(0) | NCTRL(1) },
{ USB_DEVICE(LONGCHEER_VENDOR_ID, FOUR_G_SYSTEMS_PRODUCT_W100),
.driver_info = NCTRL(1) | NCTRL(2) | RSVD(3) },
{ USB_DEVICE(LONGCHEER_VENDOR_ID, FUJISOFT_PRODUCT_FS040U),
.driver_info = RSVD(3) },
{ USB_DEVICE_INTERFACE_CLASS(LONGCHEER_VENDOR_ID, SPEEDUP_PRODUCT_SU9800, 0xff) },
{ USB_DEVICE_INTERFACE_CLASS(LONGCHEER_VENDOR_ID, 0x9801, 0xff),
.driver_info = RSVD(3) },
{ USB_DEVICE_INTERFACE_CLASS(LONGCHEER_VENDOR_ID, 0x9803, 0xff),
.driver_info = RSVD(4) },
{ USB_DEVICE(LONGCHEER_VENDOR_ID, ZOOM_PRODUCT_4597) },
{ USB_DEVICE(LONGCHEER_VENDOR_ID, IBALL_3_5G_CONNECT) },
{ USB_DEVICE(HAIER_VENDOR_ID, HAIER_PRODUCT_CE100) },
{ USB_DEVICE_AND_INTERFACE_INFO(HAIER_VENDOR_ID, HAIER_PRODUCT_CE81B, 0xff, 0xff, 0xff) },
/* Pirelli */
{ USB_DEVICE_INTERFACE_CLASS(PIRELLI_VENDOR_ID, PIRELLI_PRODUCT_C100_1, 0xff) },
{ USB_DEVICE_INTERFACE_CLASS(PIRELLI_VENDOR_ID, PIRELLI_PRODUCT_C100_2, 0xff) },
{ USB_DEVICE_INTERFACE_CLASS(PIRELLI_VENDOR_ID, PIRELLI_PRODUCT_1004, 0xff) },
{ USB_DEVICE_INTERFACE_CLASS(PIRELLI_VENDOR_ID, PIRELLI_PRODUCT_1005, 0xff) },
{ USB_DEVICE_INTERFACE_CLASS(PIRELLI_VENDOR_ID, PIRELLI_PRODUCT_1006, 0xff) },
{ USB_DEVICE_INTERFACE_CLASS(PIRELLI_VENDOR_ID, PIRELLI_PRODUCT_1007, 0xff) },
{ USB_DEVICE_INTERFACE_CLASS(PIRELLI_VENDOR_ID, PIRELLI_PRODUCT_1008, 0xff) },
{ USB_DEVICE_INTERFACE_CLASS(PIRELLI_VENDOR_ID, PIRELLI_PRODUCT_1009, 0xff) },
{ USB_DEVICE_INTERFACE_CLASS(PIRELLI_VENDOR_ID, PIRELLI_PRODUCT_100A, 0xff) },
{ USB_DEVICE_INTERFACE_CLASS(PIRELLI_VENDOR_ID, PIRELLI_PRODUCT_100B, 0xff) },
{ USB_DEVICE_INTERFACE_CLASS(PIRELLI_VENDOR_ID, PIRELLI_PRODUCT_100C, 0xff) },
{ USB_DEVICE_INTERFACE_CLASS(PIRELLI_VENDOR_ID, PIRELLI_PRODUCT_100D, 0xff) },
{ USB_DEVICE_INTERFACE_CLASS(PIRELLI_VENDOR_ID, PIRELLI_PRODUCT_100E, 0xff) },
{ USB_DEVICE_INTERFACE_CLASS(PIRELLI_VENDOR_ID, PIRELLI_PRODUCT_100F, 0xff) },
{ USB_DEVICE_INTERFACE_CLASS(PIRELLI_VENDOR_ID, PIRELLI_PRODUCT_1011, 0xff) },
{ USB_DEVICE_INTERFACE_CLASS(PIRELLI_VENDOR_ID, PIRELLI_PRODUCT_1012, 0xff) },
/* Cinterion */
{ USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_EU3_E) },
{ USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_EU3_P) },
{ USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_PH8),
.driver_info = RSVD(4) },
{ USB_DEVICE_INTERFACE_CLASS(CINTERION_VENDOR_ID, CINTERION_PRODUCT_AHXX, 0xff) },
{ USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_PLXX),
.driver_info = RSVD(4) },
{ USB_DEVICE_INTERFACE_CLASS(CINTERION_VENDOR_ID, CINTERION_PRODUCT_PH8_2RMNET, 0xff),
.driver_info = RSVD(4) | RSVD(5) },
{ USB_DEVICE_INTERFACE_CLASS(CINTERION_VENDOR_ID, CINTERION_PRODUCT_PH8_AUDIO, 0xff),
.driver_info = RSVD(4) },
{ USB_DEVICE_INTERFACE_CLASS(CINTERION_VENDOR_ID, CINTERION_PRODUCT_AHXX_2RMNET, 0xff) },
{ USB_DEVICE_INTERFACE_CLASS(CINTERION_VENDOR_ID, CINTERION_PRODUCT_AHXX_AUDIO, 0xff) },
{ USB_DEVICE_INTERFACE_CLASS(CINTERION_VENDOR_ID, CINTERION_PRODUCT_CLS8, 0xff),
.driver_info = RSVD(0) | RSVD(4) },
{ USB_DEVICE_INTERFACE_CLASS(CINTERION_VENDOR_ID, CINTERION_PRODUCT_EXS82, 0xff) },
{ USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_HC28_MDM) },
{ USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_HC28_MDMNET) },
{ USB_DEVICE(SIEMENS_VENDOR_ID, CINTERION_PRODUCT_HC25_MDM) },
{ USB_DEVICE(SIEMENS_VENDOR_ID, CINTERION_PRODUCT_HC25_MDMNET) },
{ USB_DEVICE(SIEMENS_VENDOR_ID, CINTERION_PRODUCT_HC28_MDM) }, /* HC28 enumerates with Siemens or Cinterion VID depending on FW revision */
{ USB_DEVICE(SIEMENS_VENDOR_ID, CINTERION_PRODUCT_HC28_MDMNET) },
{ USB_DEVICE_INTERFACE_CLASS(CINTERION_VENDOR_ID, CINTERION_PRODUCT_MV31_MBIM, 0xff),
.driver_info = RSVD(3) },
{ USB_DEVICE_INTERFACE_CLASS(CINTERION_VENDOR_ID, CINTERION_PRODUCT_MV31_RMNET, 0xff),
.driver_info = RSVD(0) },
{ USB_DEVICE_INTERFACE_CLASS(CINTERION_VENDOR_ID, CINTERION_PRODUCT_MV31_2_MBIM, 0xff),
.driver_info = RSVD(3) },
{ USB_DEVICE_INTERFACE_CLASS(CINTERION_VENDOR_ID, CINTERION_PRODUCT_MV31_2_RMNET, 0xff),
.driver_info = RSVD(0) },
{ USB_DEVICE_INTERFACE_CLASS(CINTERION_VENDOR_ID, CINTERION_PRODUCT_MV32_WA, 0xff),
.driver_info = RSVD(3) },
{ USB_DEVICE_INTERFACE_CLASS(CINTERION_VENDOR_ID, CINTERION_PRODUCT_MV32_WA_RMNET, 0xff),
.driver_info = RSVD(0) },
{ USB_DEVICE_INTERFACE_CLASS(CINTERION_VENDOR_ID, CINTERION_PRODUCT_MV32_WB, 0xff),
.driver_info = RSVD(3) },
{ USB_DEVICE_INTERFACE_CLASS(CINTERION_VENDOR_ID, CINTERION_PRODUCT_MV32_WB_RMNET, 0xff),
.driver_info = RSVD(0) },
{ USB_DEVICE(OLIVETTI_VENDOR_ID, OLIVETTI_PRODUCT_OLICARD100),
.driver_info = RSVD(4) },
{ USB_DEVICE(OLIVETTI_VENDOR_ID, OLIVETTI_PRODUCT_OLICARD120),
.driver_info = RSVD(4) },
{ USB_DEVICE(OLIVETTI_VENDOR_ID, OLIVETTI_PRODUCT_OLICARD140),
.driver_info = RSVD(4) },
{ USB_DEVICE(OLIVETTI_VENDOR_ID, OLIVETTI_PRODUCT_OLICARD145) },
{ USB_DEVICE(OLIVETTI_VENDOR_ID, OLIVETTI_PRODUCT_OLICARD155),
.driver_info = RSVD(6) },
{ USB_DEVICE(OLIVETTI_VENDOR_ID, OLIVETTI_PRODUCT_OLICARD200),
.driver_info = RSVD(6) },
{ USB_DEVICE(OLIVETTI_VENDOR_ID, OLIVETTI_PRODUCT_OLICARD160),
.driver_info = RSVD(6) },
{ USB_DEVICE(OLIVETTI_VENDOR_ID, OLIVETTI_PRODUCT_OLICARD500),
.driver_info = RSVD(4) },
{ USB_DEVICE(CELOT_VENDOR_ID, CELOT_PRODUCT_CT680M) }, /* CT-650 CDMA 450 1xEVDO modem */
{ USB_DEVICE_AND_INTERFACE_INFO(SAMSUNG_VENDOR_ID, SAMSUNG_PRODUCT_GT_B3730, USB_CLASS_CDC_DATA, 0x00, 0x00) }, /* Samsung GT-B3730 LTE USB modem */
{ USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CEM600) },
{ USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CEM610) },
{ USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CEM500) },
{ USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CEM510) },
{ USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CEM800) },
{ USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CEM900) },
{ USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CEU818) },
{ USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CEU816) },
{ USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CEU828) },
{ USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CEU826) },
{ USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CEU518) },
{ USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CEU516) },
{ USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CEU528) },
{ USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CEU526) },
{ USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CWM600) },
{ USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CWM610) },
{ USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CWM500) },
{ USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CWM510) },
{ USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CWM800) },
{ USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CWM900) },
{ USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CWU718) },
{ USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CWU716) },
{ USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CWU728) },
{ USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CWU726) },
{ USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CWU518) },
{ USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CWU516) },
{ USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CWU528) },
{ USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CWU526) },
{ USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CLM600) },
{ USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CLM610) },
{ USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CLM500) },
{ USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CLM510) },
{ USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CLM800) },
{ USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CLM900) },
{ USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CLU718) },
{ USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CLU716) },
{ USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CLU728) },
{ USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CLU726) },
{ USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CLU518) },
{ USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CLU516) },
{ USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CLU528) },
{ USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CLU526) },
{ USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CEU881) },
{ USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CEU882) },
{ USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CWU581) },
{ USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CWU582) },
{ USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CWU583) },
{ USB_DEVICE_AND_INTERFACE_INFO(VIETTEL_VENDOR_ID, VIETTEL_PRODUCT_VT1000, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZD_VENDOR_ID, ZD_PRODUCT_7000, 0xff, 0xff, 0xff) },
{ USB_DEVICE(LG_VENDOR_ID, LG_PRODUCT_L02C) }, /* docomo L-02C modem */
{ USB_DEVICE_AND_INTERFACE_INFO(MEDIATEK_VENDOR_ID, 0x00a1, 0xff, 0x00, 0x00) },
{ USB_DEVICE_AND_INTERFACE_INFO(MEDIATEK_VENDOR_ID, 0x00a1, 0xff, 0x02, 0x01) },
{ USB_DEVICE_AND_INTERFACE_INFO(MEDIATEK_VENDOR_ID, 0x00a2, 0xff, 0x00, 0x00) },
{ USB_DEVICE_AND_INTERFACE_INFO(MEDIATEK_VENDOR_ID, 0x00a2, 0xff, 0x02, 0x01) }, /* MediaTek MT6276M modem & app port */
{ USB_DEVICE_AND_INTERFACE_INFO(MEDIATEK_VENDOR_ID, MEDIATEK_PRODUCT_DC_1COM, 0x0a, 0x00, 0x00) },
{ USB_DEVICE_AND_INTERFACE_INFO(MEDIATEK_VENDOR_ID, MEDIATEK_PRODUCT_DC_5COM, 0xff, 0x02, 0x01) },
{ USB_DEVICE_AND_INTERFACE_INFO(MEDIATEK_VENDOR_ID, MEDIATEK_PRODUCT_DC_5COM, 0xff, 0x00, 0x00) },
{ USB_DEVICE_AND_INTERFACE_INFO(MEDIATEK_VENDOR_ID, MEDIATEK_PRODUCT_DC_4COM, 0xff, 0x02, 0x01) },
{ USB_DEVICE_AND_INTERFACE_INFO(MEDIATEK_VENDOR_ID, MEDIATEK_PRODUCT_DC_4COM, 0xff, 0x00, 0x00) },
{ USB_DEVICE_AND_INTERFACE_INFO(MEDIATEK_VENDOR_ID, MEDIATEK_PRODUCT_7208_1COM, 0x02, 0x00, 0x00) },
{ USB_DEVICE_AND_INTERFACE_INFO(MEDIATEK_VENDOR_ID, MEDIATEK_PRODUCT_7208_2COM, 0x02, 0x02, 0x01) },
{ USB_DEVICE_AND_INTERFACE_INFO(MEDIATEK_VENDOR_ID, MEDIATEK_PRODUCT_FP_1COM, 0x0a, 0x00, 0x00) },
{ USB_DEVICE_AND_INTERFACE_INFO(MEDIATEK_VENDOR_ID, MEDIATEK_PRODUCT_FP_2COM, 0x0a, 0x00, 0x00) },
{ USB_DEVICE_AND_INTERFACE_INFO(MEDIATEK_VENDOR_ID, MEDIATEK_PRODUCT_FPDC_1COM, 0x0a, 0x00, 0x00) },
{ USB_DEVICE_AND_INTERFACE_INFO(MEDIATEK_VENDOR_ID, MEDIATEK_PRODUCT_FPDC_2COM, 0x0a, 0x00, 0x00) },
{ USB_DEVICE_AND_INTERFACE_INFO(MEDIATEK_VENDOR_ID, MEDIATEK_PRODUCT_7103_2COM, 0xff, 0x00, 0x00) },
{ USB_DEVICE_AND_INTERFACE_INFO(MEDIATEK_VENDOR_ID, MEDIATEK_PRODUCT_7106_2COM, 0x02, 0x02, 0x01) },
{ USB_DEVICE_AND_INTERFACE_INFO(MEDIATEK_VENDOR_ID, MEDIATEK_PRODUCT_DC_4COM2, 0xff, 0x02, 0x01) },
{ USB_DEVICE_AND_INTERFACE_INFO(MEDIATEK_VENDOR_ID, MEDIATEK_PRODUCT_DC_4COM2, 0xff, 0x00, 0x00) },
{ USB_DEVICE(CELLIENT_VENDOR_ID, CELLIENT_PRODUCT_MEN200) },
{ USB_DEVICE(CELLIENT_VENDOR_ID, CELLIENT_PRODUCT_MPL200),
.driver_info = RSVD(1) | RSVD(4) },
{ USB_DEVICE(PETATEL_VENDOR_ID, PETATEL_PRODUCT_NP10T_600A) },
{ USB_DEVICE(PETATEL_VENDOR_ID, PETATEL_PRODUCT_NP10T_600E) },
{ USB_DEVICE_AND_INTERFACE_INFO(TPLINK_VENDOR_ID, TPLINK_PRODUCT_LTE, 0xff, 0x00, 0x00) }, /* TP-Link LTE Module */
{ USB_DEVICE(TPLINK_VENDOR_ID, TPLINK_PRODUCT_MA180),
.driver_info = RSVD(4) },
{ USB_DEVICE(TPLINK_VENDOR_ID, 0x9000), /* TP-Link MA260 */
.driver_info = RSVD(4) },
{ USB_DEVICE(CHANGHONG_VENDOR_ID, CHANGHONG_PRODUCT_CH690) },
{ USB_DEVICE_INTERFACE_CLASS(0x2001, 0x7d01, 0xff) }, /* D-Link DWM-156 (variant) */
{ USB_DEVICE_INTERFACE_CLASS(0x2001, 0x7d02, 0xff) },
{ USB_DEVICE_INTERFACE_CLASS(0x2001, 0x7d03, 0xff) },
{ USB_DEVICE_INTERFACE_CLASS(0x2001, 0x7d04, 0xff), /* D-Link DWM-158 */
.driver_info = RSVD(4) | RSVD(5) },
{ USB_DEVICE_INTERFACE_CLASS(0x2001, 0x7d0e, 0xff) }, /* D-Link DWM-157 C1 */
{ USB_DEVICE_INTERFACE_CLASS(0x2001, 0x7e19, 0xff), /* D-Link DWM-221 B1 */
.driver_info = RSVD(4) },
{ USB_DEVICE_INTERFACE_CLASS(0x2001, 0x7e35, 0xff), /* D-Link DWM-222 */
.driver_info = RSVD(4) },
{ USB_DEVICE_INTERFACE_CLASS(0x2001, 0x7e3d, 0xff), /* D-Link DWM-222 A2 */
.driver_info = RSVD(4) },
{ USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x3e01, 0xff, 0xff, 0xff) }, /* D-Link DWM-152/C1 */
{ USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x3e02, 0xff, 0xff, 0xff) }, /* D-Link DWM-156/C1 */
{ USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x7e11, 0xff, 0xff, 0xff) }, /* D-Link DWM-156/A3 */
{ USB_DEVICE_INTERFACE_CLASS(0x1435, 0xd191, 0xff), /* Wistron Neweb D19Q1 */
.driver_info = RSVD(1) | RSVD(4) },
{ USB_DEVICE_INTERFACE_CLASS(0x1690, 0x7588, 0xff), /* ASKEY WWHC050 */
.driver_info = RSVD(1) | RSVD(4) },
{ USB_DEVICE_INTERFACE_CLASS(0x2020, 0x2031, 0xff), /* Olicard 600 */
.driver_info = RSVD(4) },
{ USB_DEVICE_INTERFACE_CLASS(0x2020, 0x2033, 0xff), /* BroadMobi BM806U */
.driver_info = RSVD(4) },
{ USB_DEVICE_INTERFACE_CLASS(0x2020, 0x2060, 0xff), /* BroadMobi BM818 */
.driver_info = RSVD(4) },
{ USB_DEVICE_INTERFACE_CLASS(0x2020, 0x4000, 0xff) }, /* OLICARD300 - MT6225 */
{ USB_DEVICE(INOVIA_VENDOR_ID, INOVIA_SEW858) },
{ USB_DEVICE(VIATELECOM_VENDOR_ID, VIATELECOM_PRODUCT_CDS7) },
{ USB_DEVICE_AND_INTERFACE_INFO(WETELECOM_VENDOR_ID, WETELECOM_PRODUCT_WMD200, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(WETELECOM_VENDOR_ID, WETELECOM_PRODUCT_6802, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(WETELECOM_VENDOR_ID, WETELECOM_PRODUCT_WMD300, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(0x03f0, 0x421d, 0xff, 0xff, 0xff) }, /* HP lt2523 (Novatel E371) */
{ USB_DEVICE_AND_INTERFACE_INFO(0x03f0, 0xa31d, 0xff, 0x06, 0x10) }, /* HP lt4132 (Huawei ME906s-158) */
{ USB_DEVICE_AND_INTERFACE_INFO(0x03f0, 0xa31d, 0xff, 0x06, 0x12) },
{ USB_DEVICE_AND_INTERFACE_INFO(0x03f0, 0xa31d, 0xff, 0x06, 0x13) },
{ USB_DEVICE_AND_INTERFACE_INFO(0x03f0, 0xa31d, 0xff, 0x06, 0x14) },
{ USB_DEVICE_AND_INTERFACE_INFO(0x03f0, 0xa31d, 0xff, 0x06, 0x1b) },
{ USB_DEVICE(0x0489, 0xe0b4), /* Foxconn T77W968 */
.driver_info = RSVD(0) | RSVD(1) | RSVD(6) },
{ USB_DEVICE(0x0489, 0xe0b5), /* Foxconn T77W968 ESIM */
.driver_info = RSVD(0) | RSVD(1) | RSVD(6) },
{ USB_DEVICE_INTERFACE_CLASS(0x0489, 0xe0db, 0xff), /* Foxconn T99W265 MBIM */
.driver_info = RSVD(3) },
{ USB_DEVICE_INTERFACE_CLASS(0x0489, 0xe0ee, 0xff), /* Foxconn T99W368 MBIM */
.driver_info = RSVD(3) },
{ USB_DEVICE_INTERFACE_CLASS(0x0489, 0xe0f0, 0xff), /* Foxconn T99W373 MBIM */
.driver_info = RSVD(3) },
{ USB_DEVICE(0x1508, 0x1001), /* Fibocom NL668 (IOT version) */
.driver_info = RSVD(4) | RSVD(5) | RSVD(6) },
{ USB_DEVICE(0x1782, 0x4d10) }, /* Fibocom L610 (AT mode) */
{ USB_DEVICE_INTERFACE_CLASS(0x1782, 0x4d11, 0xff) }, /* Fibocom L610 (ECM/RNDIS mode) */
{ USB_DEVICE(0x2cb7, 0x0104), /* Fibocom NL678 series */
.driver_info = RSVD(4) | RSVD(5) },
{ USB_DEVICE_INTERFACE_CLASS(0x2cb7, 0x0105, 0xff), /* Fibocom NL678 series */
.driver_info = RSVD(6) },
{ USB_DEVICE_INTERFACE_CLASS(0x2cb7, 0x0106, 0xff) }, /* Fibocom MA510 (ECM mode w/ diag intf.) */
{ USB_DEVICE_INTERFACE_CLASS(0x2cb7, 0x010a, 0xff) }, /* Fibocom MA510 (ECM mode) */
{ USB_DEVICE_AND_INTERFACE_INFO(0x2cb7, 0x010b, 0xff, 0xff, 0x30) }, /* Fibocom FG150 Diag */
{ USB_DEVICE_AND_INTERFACE_INFO(0x2cb7, 0x010b, 0xff, 0, 0) }, /* Fibocom FG150 AT */
{ USB_DEVICE_INTERFACE_CLASS(0x2cb7, 0x0111, 0xff) }, /* Fibocom FM160 (MBIM mode) */
{ USB_DEVICE_INTERFACE_CLASS(0x2cb7, 0x01a0, 0xff) }, /* Fibocom NL668-AM/NL652-EU (laptop MBIM) */
{ USB_DEVICE_INTERFACE_CLASS(0x2cb7, 0x01a2, 0xff) }, /* Fibocom FM101-GL (laptop MBIM) */
{ USB_DEVICE_INTERFACE_CLASS(0x2cb7, 0x01a4, 0xff), /* Fibocom FM101-GL (laptop MBIM) */
.driver_info = RSVD(4) },
{ USB_DEVICE_INTERFACE_CLASS(0x2df3, 0x9d03, 0xff) }, /* LongSung M5710 */
{ USB_DEVICE_INTERFACE_CLASS(0x305a, 0x1404, 0xff) }, /* GosunCn GM500 RNDIS */
{ USB_DEVICE_INTERFACE_CLASS(0x305a, 0x1405, 0xff) }, /* GosunCn GM500 MBIM */
{ USB_DEVICE_INTERFACE_CLASS(0x305a, 0x1406, 0xff) }, /* GosunCn GM500 ECM/NCM */
{ USB_DEVICE_AND_INTERFACE_INFO(OPPO_VENDOR_ID, OPPO_PRODUCT_R11, 0xff, 0xff, 0x30) },
{ USB_DEVICE_AND_INTERFACE_INFO(SIERRA_VENDOR_ID, SIERRA_PRODUCT_EM9191, 0xff, 0xff, 0x30) },
{ USB_DEVICE_AND_INTERFACE_INFO(SIERRA_VENDOR_ID, SIERRA_PRODUCT_EM9191, 0xff, 0, 0) },
{ USB_DEVICE_AND_INTERFACE_INFO(UNISOC_VENDOR_ID, TOZED_PRODUCT_LT70C, 0xff, 0, 0) },
{ } /* Terminating entry */
};
MODULE_DEVICE_TABLE(usb, option_ids);
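/*
* Note: on kernels built with usb-serial dynamic ID support, a modem that is
* not yet listed in option_ids can usually be bound at runtime without a
* rebuild, e.g. (hypothetical VID/PID shown):
*
*   echo "2cb7 ffff" > /sys/bus/usb-serial/drivers/option1/new_id
*/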
/* The card has three separate interfaces, which the serial driver
* recognizes separately, so each bound interface provides a single
* port (num_ports = 1).
*/
static struct usb_serial_driver option_1port_device = {
.driver = {
.owner = THIS_MODULE,
.name = "option1",
},
.description = "GSM modem (1-port)",
.id_table = option_ids,
.num_ports = 1,
.probe = option_probe,
.open = usb_wwan_open,
.close = usb_wwan_close,
.dtr_rts = usb_wwan_dtr_rts,
.write = usb_wwan_write,
.write_room = usb_wwan_write_room,
.chars_in_buffer = usb_wwan_chars_in_buffer,
.tiocmget = usb_wwan_tiocmget,
.tiocmset = usb_wwan_tiocmset,
.attach = option_attach,
.release = option_release,
.port_probe = usb_wwan_port_probe,
.port_remove = usb_wwan_port_remove,
.read_int_callback = option_instat_callback,
#ifdef CONFIG_PM
.suspend = usb_wwan_suspend,
.resume = usb_wwan_resume,
#endif
};
static struct usb_serial_driver * const serial_drivers[] = {
&option_1port_device, NULL
};
module_usb_serial_driver(serial_drivers, option_ids);
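/* Return true if interface ifnum is marked reserved (RSVD) in the device flags. */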
static bool iface_is_reserved(unsigned long device_flags, u8 ifnum)
{
if (ifnum > FLAG_IFNUM_MAX)
return false;
return device_flags & RSVD(ifnum);
}
static int option_probe(struct usb_serial *serial,
const struct usb_device_id *id)
{
struct usb_interface_descriptor *iface_desc =
&serial->interface->cur_altsetting->desc;
unsigned long device_flags = id->driver_info;
/* Never bind to the CD-ROM emulation interface */
if (iface_desc->bInterfaceClass == USB_CLASS_MASS_STORAGE)
return -ENODEV;
/*
* Don't bind reserved interfaces (like network ones) which often have
* the same class/subclass/protocol as the serial interfaces. Look at
* the Windows driver .INF files for reserved interface numbers.
*/
if (iface_is_reserved(device_flags, iface_desc->bInterfaceNumber))
return -ENODEV;
/*
* Allow matching on bNumEndpoints for devices whose interface numbers
* can change (e.g. Quectel EP06).
*/
if (device_flags & NUMEP2 && iface_desc->bNumEndpoints != 2)
return -ENODEV;
/* Store the device flags so we can use them during attach. */
usb_set_serial_data(serial, (void *)device_flags);
return 0;
}
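/*
* Return true if interface ifnum carries the NCTRL flag, i.e. it must not be
* sent modem-control (DTR/RTS) requests.
*/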
static bool iface_no_modem_control(unsigned long device_flags, u8 ifnum)
{
if (ifnum > FLAG_IFNUM_MAX)
return false;
return device_flags & NCTRL(ifnum);
}
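/*
* Allocate the per-interface private data and apply the NCTRL and ZLP device
* flags that option_probe() stashed in the serial data.
*/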
static int option_attach(struct usb_serial *serial)
{
struct usb_interface_descriptor *iface_desc;
struct usb_wwan_intf_private *data;
unsigned long device_flags;
data = kzalloc(sizeof(struct usb_wwan_intf_private), GFP_KERNEL);
if (!data)
return -ENOMEM;
/* Retrieve device flags stored at probe. */
device_flags = (unsigned long)usb_get_serial_data(serial);
iface_desc = &serial->interface->cur_altsetting->desc;
if (!iface_no_modem_control(device_flags, iface_desc->bInterfaceNumber))
data->use_send_setup = 1;
if (device_flags & ZLP)
data->use_zlp = 1;
spin_lock_init(&data->susp_lock);
usb_set_serial_data(serial, data);
return 0;
}
static void option_release(struct usb_serial *serial)
{
struct usb_wwan_intf_private *intfdata = usb_get_serial_data(serial);
kfree(intfdata);
}
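/*
* Interrupt-in completion handler: decode CDC SERIAL_STATE notifications
* (bRequestType 0xA1, bRequest 0x20) into CTS/DCD/DSR/RI state, hang up the
* tty on DCD loss and resubmit the URB unless the port is shutting down.
*/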
static void option_instat_callback(struct urb *urb)
{
int err;
int status = urb->status;
struct usb_serial_port *port = urb->context;
struct device *dev = &port->dev;
struct usb_wwan_port_private *portdata =
usb_get_serial_port_data(port);
dev_dbg(dev, "%s: urb %p port %p has data %p\n", __func__, urb, port, portdata);
if (status == 0) {
struct usb_ctrlrequest *req_pkt = urb->transfer_buffer;
if (!req_pkt) {
dev_dbg(dev, "%s: NULL req_pkt\n", __func__);
return;
}
if ((req_pkt->bRequestType == 0xA1) &&
(req_pkt->bRequest == 0x20)) {
int old_dcd_state;
unsigned char signals = *((unsigned char *)
urb->transfer_buffer +
sizeof(struct usb_ctrlrequest));
dev_dbg(dev, "%s: signal x%x\n", __func__, signals);
old_dcd_state = portdata->dcd_state;
portdata->cts_state = 1;
portdata->dcd_state = ((signals & 0x01) ? 1 : 0);
portdata->dsr_state = ((signals & 0x02) ? 1 : 0);
portdata->ri_state = ((signals & 0x08) ? 1 : 0);
if (old_dcd_state && !portdata->dcd_state)
tty_port_tty_hangup(&port->port, true);
} else {
dev_dbg(dev, "%s: type %x req %x\n", __func__,
req_pkt->bRequestType, req_pkt->bRequest);
}
} else if (status == -ENOENT || status == -ESHUTDOWN) {
dev_dbg(dev, "%s: urb stopped: %d\n", __func__, status);
} else
dev_dbg(dev, "%s: error %d\n", __func__, status);
/* Resubmit urb so we continue receiving IRQ data */
if (status != -ESHUTDOWN && status != -ENOENT) {
usb_mark_last_busy(port->serial->dev);
err = usb_submit_urb(urb, GFP_ATOMIC);
if (err)
dev_dbg(dev, "%s: resubmit intr urb failed. (%d)\n",
__func__, err);
}
}
MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL v2");
| linux-master | drivers/usb/serial/option.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* TI 3410/5052 USB Serial Driver
*
* Copyright (C) 2004 Texas Instruments
*
* This driver is based on the Linux io_ti driver, which is
* Copyright (C) 2000-2002 Inside Out Networks
* Copyright (C) 2001-2002 Greg Kroah-Hartman
*
* For questions or problems with this driver, contact Texas Instruments
* technical support, or Al Borchers <[email protected]>, or
* Peter Berger <[email protected]>.
*/
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/firmware.h>
#include <linux/slab.h>
#include <linux/tty.h>
#include <linux/tty_driver.h>
#include <linux/tty_flip.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/ioctl.h>
#include <linux/serial.h>
#include <linux/kfifo.h>
#include <linux/mutex.h>
#include <linux/uaccess.h>
#include <linux/usb.h>
#include <linux/usb/serial.h>
/* Configuration ids */
#define TI_BOOT_CONFIG 1
#define TI_ACTIVE_CONFIG 2
/* Vendor and product ids */
#define TI_VENDOR_ID 0x0451
#define IBM_VENDOR_ID 0x04b3
#define STARTECH_VENDOR_ID 0x14b0
#define TI_3410_PRODUCT_ID 0x3410
#define IBM_4543_PRODUCT_ID 0x4543
#define IBM_454B_PRODUCT_ID 0x454b
#define IBM_454C_PRODUCT_ID 0x454c
#define TI_3410_EZ430_ID 0xF430 /* TI ez430 development tool */
#define TI_5052_BOOT_PRODUCT_ID 0x5052 /* no EEPROM, no firmware */
#define TI_5152_BOOT_PRODUCT_ID 0x5152 /* no EEPROM, no firmware */
#define TI_5052_EEPROM_PRODUCT_ID 0x505A /* EEPROM, no firmware */
#define TI_5052_FIRMWARE_PRODUCT_ID 0x505F /* firmware is running */
#define FRI2_PRODUCT_ID 0x5053 /* Fish River Island II */
/* Multi-Tech vendor and product ids */
#define MTS_VENDOR_ID 0x06E0
#define MTS_GSM_NO_FW_PRODUCT_ID 0xF108
#define MTS_CDMA_NO_FW_PRODUCT_ID 0xF109
#define MTS_CDMA_PRODUCT_ID 0xF110
#define MTS_GSM_PRODUCT_ID 0xF111
#define MTS_EDGE_PRODUCT_ID 0xF112
#define MTS_MT9234MU_PRODUCT_ID 0xF114
#define MTS_MT9234ZBA_PRODUCT_ID 0xF115
#define MTS_MT9234ZBAOLD_PRODUCT_ID 0x0319
/* Abbott Diabetics vendor and product ids */
#define ABBOTT_VENDOR_ID 0x1a61
#define ABBOTT_STEREO_PLUG_ID 0x3410
#define ABBOTT_PRODUCT_ID ABBOTT_STEREO_PLUG_ID
#define ABBOTT_STRIP_PORT_ID 0x3420
/* Honeywell vendor and product IDs */
#define HONEYWELL_VENDOR_ID 0x10ac
#define HONEYWELL_HGI80_PRODUCT_ID 0x0102 /* Honeywell HGI80 */
/* Moxa UPORT 11x0 vendor and product IDs */
#define MXU1_VENDOR_ID 0x110a
#define MXU1_1110_PRODUCT_ID 0x1110
#define MXU1_1130_PRODUCT_ID 0x1130
#define MXU1_1150_PRODUCT_ID 0x1150
#define MXU1_1151_PRODUCT_ID 0x1151
#define MXU1_1131_PRODUCT_ID 0x1131
/* Commands */
#define TI_GET_VERSION 0x01
#define TI_GET_PORT_STATUS 0x02
#define TI_GET_PORT_DEV_INFO 0x03
#define TI_GET_CONFIG 0x04
#define TI_SET_CONFIG 0x05
#define TI_OPEN_PORT 0x06
#define TI_CLOSE_PORT 0x07
#define TI_START_PORT 0x08
#define TI_STOP_PORT 0x09
#define TI_TEST_PORT 0x0A
#define TI_PURGE_PORT 0x0B
#define TI_RESET_EXT_DEVICE 0x0C
#define TI_WRITE_DATA 0x80
#define TI_READ_DATA 0x81
#define TI_REQ_TYPE_CLASS 0x82
/* Module identifiers */
#define TI_I2C_PORT 0x01
#define TI_IEEE1284_PORT 0x02
#define TI_UART1_PORT 0x03
#define TI_UART2_PORT 0x04
#define TI_RAM_PORT 0x05
/* Modem status */
#define TI_MSR_DELTA_CTS 0x01
#define TI_MSR_DELTA_DSR 0x02
#define TI_MSR_DELTA_RI 0x04
#define TI_MSR_DELTA_CD 0x08
#define TI_MSR_CTS 0x10
#define TI_MSR_DSR 0x20
#define TI_MSR_RI 0x40
#define TI_MSR_CD 0x80
#define TI_MSR_DELTA_MASK 0x0F
#define TI_MSR_MASK 0xF0
/* Line status */
#define TI_LSR_OVERRUN_ERROR 0x01
#define TI_LSR_PARITY_ERROR 0x02
#define TI_LSR_FRAMING_ERROR 0x04
#define TI_LSR_BREAK 0x08
#define TI_LSR_ERROR 0x0F
#define TI_LSR_RX_FULL 0x10
#define TI_LSR_TX_EMPTY 0x20
#define TI_LSR_TX_EMPTY_BOTH 0x40
/* Line control */
#define TI_LCR_BREAK 0x40
/* Modem control */
#define TI_MCR_LOOP 0x04
#define TI_MCR_DTR 0x10
#define TI_MCR_RTS 0x20
/* Mask settings */
#define TI_UART_ENABLE_RTS_IN 0x0001
#define TI_UART_DISABLE_RTS 0x0002
#define TI_UART_ENABLE_PARITY_CHECKING 0x0008
#define TI_UART_ENABLE_DSR_OUT 0x0010
#define TI_UART_ENABLE_CTS_OUT 0x0020
#define TI_UART_ENABLE_X_OUT 0x0040
#define TI_UART_ENABLE_XA_OUT 0x0080
#define TI_UART_ENABLE_X_IN 0x0100
#define TI_UART_ENABLE_DTR_IN 0x0800
#define TI_UART_DISABLE_DTR 0x1000
#define TI_UART_ENABLE_MS_INTS 0x2000
#define TI_UART_ENABLE_AUTO_START_DMA 0x4000
/* Parity */
#define TI_UART_NO_PARITY 0x00
#define TI_UART_ODD_PARITY 0x01
#define TI_UART_EVEN_PARITY 0x02
#define TI_UART_MARK_PARITY 0x03
#define TI_UART_SPACE_PARITY 0x04
/* Stop bits */
#define TI_UART_1_STOP_BITS 0x00
#define TI_UART_1_5_STOP_BITS 0x01
#define TI_UART_2_STOP_BITS 0x02
/* Bits per character */
#define TI_UART_5_DATA_BITS 0x00
#define TI_UART_6_DATA_BITS 0x01
#define TI_UART_7_DATA_BITS 0x02
#define TI_UART_8_DATA_BITS 0x03
/* 232/485 modes */
#define TI_UART_232 0x00
#define TI_UART_485_RECEIVER_DISABLED 0x01
#define TI_UART_485_RECEIVER_ENABLED 0x02
/* Pipe transfer mode and timeout */
#define TI_PIPE_MODE_CONTINUOUS 0x01
#define TI_PIPE_MODE_MASK 0x03
#define TI_PIPE_TIMEOUT_MASK 0x7C
#define TI_PIPE_TIMEOUT_ENABLE 0x80
/* Config struct */
struct ti_uart_config {
__be16 wBaudRate;
__be16 wFlags;
u8 bDataBits;
u8 bParity;
u8 bStopBits;
char cXon;
char cXoff;
u8 bUartMode;
};
/* Get port status */
struct ti_port_status {
u8 bCmdCode;
u8 bModuleId;
u8 bErrorCode;
u8 bMSR;
u8 bLSR;
};
/* Purge modes */
#define TI_PURGE_OUTPUT 0x00
#define TI_PURGE_INPUT 0x80
/* Read/Write data */
#define TI_RW_DATA_ADDR_SFR 0x10
#define TI_RW_DATA_ADDR_IDATA 0x20
#define TI_RW_DATA_ADDR_XDATA 0x30
#define TI_RW_DATA_ADDR_CODE 0x40
#define TI_RW_DATA_ADDR_GPIO 0x50
#define TI_RW_DATA_ADDR_I2C 0x60
#define TI_RW_DATA_ADDR_FLASH 0x70
#define TI_RW_DATA_ADDR_DSP 0x80
#define TI_RW_DATA_UNSPECIFIED 0x00
#define TI_RW_DATA_BYTE 0x01
#define TI_RW_DATA_WORD 0x02
#define TI_RW_DATA_DOUBLE_WORD 0x04
struct ti_write_data_bytes {
u8 bAddrType;
u8 bDataType;
u8 bDataCounter;
__be16 wBaseAddrHi;
__be16 wBaseAddrLo;
u8 bData[];
} __packed;
struct ti_read_data_request {
u8 bAddrType;
u8 bDataType;
u8 bDataCounter;
__be16 wBaseAddrHi;
__be16 wBaseAddrLo;
} __packed;
struct ti_read_data_bytes {
u8 bCmdCode;
u8 bModuleId;
u8 bErrorCode;
u8 bData[];
};
/* Interrupt struct */
struct ti_interrupt {
u8 bICode;
u8 bIInfo;
};
/* Interrupt codes */
#define TI_CODE_HARDWARE_ERROR 0xFF
#define TI_CODE_DATA_ERROR 0x03
#define TI_CODE_MODEM_STATUS 0x04
/* Download firmware max packet size */
#define TI_DOWNLOAD_MAX_PACKET_SIZE 64
/* Firmware image header */
struct ti_firmware_header {
__le16 wLength;
u8 bCheckSum;
} __packed;
/* UART addresses */
#define TI_UART1_BASE_ADDR 0xFFA0 /* UART 1 base address */
#define TI_UART2_BASE_ADDR 0xFFB0 /* UART 2 base address */
#define TI_UART_OFFSET_LCR 0x0002 /* UART LCR register offset */
#define TI_UART_OFFSET_MCR 0x0004 /* UART MCR register offset */
#define TI_DRIVER_AUTHOR "Al Borchers <[email protected]>"
#define TI_DRIVER_DESC "TI USB 3410/5052 Serial Driver"
#define TI_FIRMWARE_BUF_SIZE 16284
#define TI_TRANSFER_TIMEOUT 2
/* read urb states */
#define TI_READ_URB_RUNNING 0
#define TI_READ_URB_STOPPING 1
#define TI_READ_URB_STOPPED 2
#define TI_EXTRA_VID_PID_COUNT 5
struct ti_port {
int tp_is_open;
u8 tp_msr;
u8 tp_shadow_mcr;
u8 tp_uart_mode; /* 232 or 485 modes */
unsigned int tp_uart_base_addr;
struct ti_device *tp_tdev;
struct usb_serial_port *tp_port;
spinlock_t tp_lock;
int tp_read_urb_state;
int tp_write_urb_in_use;
};
struct ti_device {
struct mutex td_open_close_lock;
int td_open_port_count;
struct usb_serial *td_serial;
int td_is_3410;
bool td_rs485_only;
};
static int ti_startup(struct usb_serial *serial);
static void ti_release(struct usb_serial *serial);
static int ti_port_probe(struct usb_serial_port *port);
static void ti_port_remove(struct usb_serial_port *port);
static int ti_open(struct tty_struct *tty, struct usb_serial_port *port);
static void ti_close(struct usb_serial_port *port);
static int ti_write(struct tty_struct *tty, struct usb_serial_port *port,
const unsigned char *data, int count);
static unsigned int ti_write_room(struct tty_struct *tty);
static unsigned int ti_chars_in_buffer(struct tty_struct *tty);
static bool ti_tx_empty(struct usb_serial_port *port);
static void ti_throttle(struct tty_struct *tty);
static void ti_unthrottle(struct tty_struct *tty);
static void ti_set_termios(struct tty_struct *tty,
struct usb_serial_port *port,
const struct ktermios *old_termios);
static int ti_tiocmget(struct tty_struct *tty);
static int ti_tiocmset(struct tty_struct *tty,
unsigned int set, unsigned int clear);
static int ti_break(struct tty_struct *tty, int break_state);
static void ti_interrupt_callback(struct urb *urb);
static void ti_bulk_in_callback(struct urb *urb);
static void ti_bulk_out_callback(struct urb *urb);
static void ti_recv(struct usb_serial_port *port, unsigned char *data,
int length);
static void ti_send(struct ti_port *tport);
static int ti_set_mcr(struct ti_port *tport, unsigned int mcr);
static int ti_get_lsr(struct ti_port *tport, u8 *lsr);
static void ti_get_serial_info(struct tty_struct *tty, struct serial_struct *ss);
static void ti_handle_new_msr(struct ti_port *tport, u8 msr);
static void ti_stop_read(struct ti_port *tport, struct tty_struct *tty);
static int ti_restart_read(struct ti_port *tport, struct tty_struct *tty);
static int ti_command_out_sync(struct usb_device *udev, u8 command,
u16 moduleid, u16 value, void *data, int size);
static int ti_command_in_sync(struct usb_device *udev, u8 command,
u16 moduleid, u16 value, void *data, int size);
static int ti_port_cmd_out(struct usb_serial_port *port, u8 command,
u16 value, void *data, int size);
static int ti_port_cmd_in(struct usb_serial_port *port, u8 command,
u16 value, void *data, int size);
static int ti_write_byte(struct usb_serial_port *port, struct ti_device *tdev,
unsigned long addr, u8 mask, u8 byte);
static int ti_download_firmware(struct ti_device *tdev);
static const struct usb_device_id ti_id_table_3410[] = {
{ USB_DEVICE(TI_VENDOR_ID, TI_3410_PRODUCT_ID) },
{ USB_DEVICE(TI_VENDOR_ID, TI_3410_EZ430_ID) },
{ USB_DEVICE(MTS_VENDOR_ID, MTS_GSM_NO_FW_PRODUCT_ID) },
{ USB_DEVICE(MTS_VENDOR_ID, MTS_CDMA_NO_FW_PRODUCT_ID) },
{ USB_DEVICE(MTS_VENDOR_ID, MTS_CDMA_PRODUCT_ID) },
{ USB_DEVICE(MTS_VENDOR_ID, MTS_GSM_PRODUCT_ID) },
{ USB_DEVICE(MTS_VENDOR_ID, MTS_EDGE_PRODUCT_ID) },
{ USB_DEVICE(MTS_VENDOR_ID, MTS_MT9234MU_PRODUCT_ID) },
{ USB_DEVICE(MTS_VENDOR_ID, MTS_MT9234ZBA_PRODUCT_ID) },
{ USB_DEVICE(MTS_VENDOR_ID, MTS_MT9234ZBAOLD_PRODUCT_ID) },
{ USB_DEVICE(IBM_VENDOR_ID, IBM_4543_PRODUCT_ID) },
{ USB_DEVICE(IBM_VENDOR_ID, IBM_454B_PRODUCT_ID) },
{ USB_DEVICE(IBM_VENDOR_ID, IBM_454C_PRODUCT_ID) },
{ USB_DEVICE(ABBOTT_VENDOR_ID, ABBOTT_STEREO_PLUG_ID) },
{ USB_DEVICE(ABBOTT_VENDOR_ID, ABBOTT_STRIP_PORT_ID) },
{ USB_DEVICE(TI_VENDOR_ID, FRI2_PRODUCT_ID) },
{ USB_DEVICE(HONEYWELL_VENDOR_ID, HONEYWELL_HGI80_PRODUCT_ID) },
{ USB_DEVICE(MXU1_VENDOR_ID, MXU1_1110_PRODUCT_ID) },
{ USB_DEVICE(MXU1_VENDOR_ID, MXU1_1130_PRODUCT_ID) },
{ USB_DEVICE(MXU1_VENDOR_ID, MXU1_1131_PRODUCT_ID) },
{ USB_DEVICE(MXU1_VENDOR_ID, MXU1_1150_PRODUCT_ID) },
{ USB_DEVICE(MXU1_VENDOR_ID, MXU1_1151_PRODUCT_ID) },
{ USB_DEVICE(STARTECH_VENDOR_ID, TI_3410_PRODUCT_ID) },
{ } /* terminator */
};
static const struct usb_device_id ti_id_table_5052[] = {
{ USB_DEVICE(TI_VENDOR_ID, TI_5052_BOOT_PRODUCT_ID) },
{ USB_DEVICE(TI_VENDOR_ID, TI_5152_BOOT_PRODUCT_ID) },
{ USB_DEVICE(TI_VENDOR_ID, TI_5052_EEPROM_PRODUCT_ID) },
{ USB_DEVICE(TI_VENDOR_ID, TI_5052_FIRMWARE_PRODUCT_ID) },
{ }
};
static const struct usb_device_id ti_id_table_combined[] = {
{ USB_DEVICE(TI_VENDOR_ID, TI_3410_PRODUCT_ID) },
{ USB_DEVICE(TI_VENDOR_ID, TI_3410_EZ430_ID) },
{ USB_DEVICE(MTS_VENDOR_ID, MTS_GSM_NO_FW_PRODUCT_ID) },
{ USB_DEVICE(MTS_VENDOR_ID, MTS_CDMA_NO_FW_PRODUCT_ID) },
{ USB_DEVICE(MTS_VENDOR_ID, MTS_CDMA_PRODUCT_ID) },
{ USB_DEVICE(MTS_VENDOR_ID, MTS_GSM_PRODUCT_ID) },
{ USB_DEVICE(MTS_VENDOR_ID, MTS_EDGE_PRODUCT_ID) },
{ USB_DEVICE(MTS_VENDOR_ID, MTS_MT9234MU_PRODUCT_ID) },
{ USB_DEVICE(MTS_VENDOR_ID, MTS_MT9234ZBA_PRODUCT_ID) },
{ USB_DEVICE(MTS_VENDOR_ID, MTS_MT9234ZBAOLD_PRODUCT_ID) },
{ USB_DEVICE(TI_VENDOR_ID, TI_5052_BOOT_PRODUCT_ID) },
{ USB_DEVICE(TI_VENDOR_ID, TI_5152_BOOT_PRODUCT_ID) },
{ USB_DEVICE(TI_VENDOR_ID, TI_5052_EEPROM_PRODUCT_ID) },
{ USB_DEVICE(TI_VENDOR_ID, TI_5052_FIRMWARE_PRODUCT_ID) },
{ USB_DEVICE(IBM_VENDOR_ID, IBM_4543_PRODUCT_ID) },
{ USB_DEVICE(IBM_VENDOR_ID, IBM_454B_PRODUCT_ID) },
{ USB_DEVICE(IBM_VENDOR_ID, IBM_454C_PRODUCT_ID) },
{ USB_DEVICE(ABBOTT_VENDOR_ID, ABBOTT_PRODUCT_ID) },
{ USB_DEVICE(ABBOTT_VENDOR_ID, ABBOTT_STRIP_PORT_ID) },
{ USB_DEVICE(TI_VENDOR_ID, FRI2_PRODUCT_ID) },
{ USB_DEVICE(HONEYWELL_VENDOR_ID, HONEYWELL_HGI80_PRODUCT_ID) },
{ USB_DEVICE(MXU1_VENDOR_ID, MXU1_1110_PRODUCT_ID) },
{ USB_DEVICE(MXU1_VENDOR_ID, MXU1_1130_PRODUCT_ID) },
{ USB_DEVICE(MXU1_VENDOR_ID, MXU1_1131_PRODUCT_ID) },
{ USB_DEVICE(MXU1_VENDOR_ID, MXU1_1150_PRODUCT_ID) },
{ USB_DEVICE(MXU1_VENDOR_ID, MXU1_1151_PRODUCT_ID) },
{ USB_DEVICE(STARTECH_VENDOR_ID, TI_3410_PRODUCT_ID) },
{ } /* terminator */
};
static struct usb_serial_driver ti_1port_device = {
.driver = {
.owner = THIS_MODULE,
.name = "ti_usb_3410_5052_1",
},
.description = "TI USB 3410 1 port adapter",
.id_table = ti_id_table_3410,
.num_ports = 1,
.num_bulk_out = 1,
.attach = ti_startup,
.release = ti_release,
.port_probe = ti_port_probe,
.port_remove = ti_port_remove,
.open = ti_open,
.close = ti_close,
.write = ti_write,
.write_room = ti_write_room,
.chars_in_buffer = ti_chars_in_buffer,
.tx_empty = ti_tx_empty,
.throttle = ti_throttle,
.unthrottle = ti_unthrottle,
.get_serial = ti_get_serial_info,
.set_termios = ti_set_termios,
.tiocmget = ti_tiocmget,
.tiocmset = ti_tiocmset,
.tiocmiwait = usb_serial_generic_tiocmiwait,
.get_icount = usb_serial_generic_get_icount,
.break_ctl = ti_break,
.read_int_callback = ti_interrupt_callback,
.read_bulk_callback = ti_bulk_in_callback,
.write_bulk_callback = ti_bulk_out_callback,
};
static struct usb_serial_driver ti_2port_device = {
.driver = {
.owner = THIS_MODULE,
.name = "ti_usb_3410_5052_2",
},
.description = "TI USB 5052 2 port adapter",
.id_table = ti_id_table_5052,
.num_ports = 2,
.num_bulk_out = 1,
.attach = ti_startup,
.release = ti_release,
.port_probe = ti_port_probe,
.port_remove = ti_port_remove,
.open = ti_open,
.close = ti_close,
.write = ti_write,
.write_room = ti_write_room,
.chars_in_buffer = ti_chars_in_buffer,
.tx_empty = ti_tx_empty,
.throttle = ti_throttle,
.unthrottle = ti_unthrottle,
.get_serial = ti_get_serial_info,
.set_termios = ti_set_termios,
.tiocmget = ti_tiocmget,
.tiocmset = ti_tiocmset,
.tiocmiwait = usb_serial_generic_tiocmiwait,
.get_icount = usb_serial_generic_get_icount,
.break_ctl = ti_break,
.read_int_callback = ti_interrupt_callback,
.read_bulk_callback = ti_bulk_in_callback,
.write_bulk_callback = ti_bulk_out_callback,
};
static struct usb_serial_driver * const serial_drivers[] = {
&ti_1port_device, &ti_2port_device, NULL
};
MODULE_AUTHOR(TI_DRIVER_AUTHOR);
MODULE_DESCRIPTION(TI_DRIVER_DESC);
MODULE_LICENSE("GPL");
MODULE_FIRMWARE("ti_3410.fw");
MODULE_FIRMWARE("ti_5052.fw");
MODULE_FIRMWARE("mts_cdma.fw");
MODULE_FIRMWARE("mts_gsm.fw");
MODULE_FIRMWARE("mts_edge.fw");
MODULE_FIRMWARE("mts_mt9234mu.fw");
MODULE_FIRMWARE("mts_mt9234zba.fw");
MODULE_FIRMWARE("moxa/moxa-1110.fw");
MODULE_FIRMWARE("moxa/moxa-1130.fw");
MODULE_FIRMWARE("moxa/moxa-1131.fw");
MODULE_FIRMWARE("moxa/moxa-1150.fw");
MODULE_FIRMWARE("moxa/moxa-1151.fw");
MODULE_DEVICE_TABLE(usb, ti_id_table_combined);
module_usb_serial_driver(serial_drivers, ti_id_table_combined);
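/*
* Bind-time setup: if the device is still in its boot configuration with a
* single endpoint, download the firmware and return -ENODEV so the device can
* re-enumerate; otherwise request the active configuration and validate that
* enough bulk endpoints are present.
*/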
static int ti_startup(struct usb_serial *serial)
{
struct ti_device *tdev;
struct usb_device *dev = serial->dev;
struct usb_host_interface *cur_altsetting;
int num_endpoints;
u16 vid, pid;
int status;
dev_dbg(&dev->dev,
"%s - product 0x%4X, num configurations %d, configuration value %d\n",
__func__, le16_to_cpu(dev->descriptor.idProduct),
dev->descriptor.bNumConfigurations,
dev->actconfig->desc.bConfigurationValue);
tdev = kzalloc(sizeof(struct ti_device), GFP_KERNEL);
if (!tdev)
return -ENOMEM;
mutex_init(&tdev->td_open_close_lock);
tdev->td_serial = serial;
usb_set_serial_data(serial, tdev);
/* determine device type */
if (serial->type == &ti_1port_device)
tdev->td_is_3410 = 1;
dev_dbg(&dev->dev, "%s - device type is %s\n", __func__,
tdev->td_is_3410 ? "3410" : "5052");
vid = le16_to_cpu(dev->descriptor.idVendor);
pid = le16_to_cpu(dev->descriptor.idProduct);
if (vid == MXU1_VENDOR_ID) {
switch (pid) {
case MXU1_1130_PRODUCT_ID:
case MXU1_1131_PRODUCT_ID:
tdev->td_rs485_only = true;
break;
}
}
cur_altsetting = serial->interface->cur_altsetting;
num_endpoints = cur_altsetting->desc.bNumEndpoints;
/* if we have only 1 configuration and 1 endpoint, download firmware */
if (dev->descriptor.bNumConfigurations == 1 && num_endpoints == 1) {
status = ti_download_firmware(tdev);
if (status != 0)
goto free_tdev;
/* 3410 must be reset, 5052 resets itself */
if (tdev->td_is_3410) {
msleep_interruptible(100);
usb_reset_device(dev);
}
status = -ENODEV;
goto free_tdev;
}
/* the second configuration must be set */
if (dev->actconfig->desc.bConfigurationValue == TI_BOOT_CONFIG) {
status = usb_driver_set_configuration(dev, TI_ACTIVE_CONFIG);
status = status ? status : -ENODEV;
goto free_tdev;
}
if (serial->num_bulk_in < serial->num_ports ||
serial->num_bulk_out < serial->num_ports) {
dev_err(&serial->interface->dev, "missing endpoints\n");
status = -ENODEV;
goto free_tdev;
}
return 0;
free_tdev:
kfree(tdev);
usb_set_serial_data(serial, NULL);
return status;
}
static void ti_release(struct usb_serial *serial)
{
struct ti_device *tdev = usb_get_serial_data(serial);
kfree(tdev);
}
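/*
* Per-port setup: pick the UART base address from the port index and default
* to RS-485 mode on RS-485-only models, RS-232 otherwise.
*/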
static int ti_port_probe(struct usb_serial_port *port)
{
struct ti_port *tport;
tport = kzalloc(sizeof(*tport), GFP_KERNEL);
if (!tport)
return -ENOMEM;
spin_lock_init(&tport->tp_lock);
if (port == port->serial->port[0])
tport->tp_uart_base_addr = TI_UART1_BASE_ADDR;
else
tport->tp_uart_base_addr = TI_UART2_BASE_ADDR;
tport->tp_port = port;
tport->tp_tdev = usb_get_serial_data(port->serial);
if (tport->tp_tdev->td_rs485_only)
tport->tp_uart_mode = TI_UART_485_RECEIVER_DISABLED;
else
tport->tp_uart_mode = TI_UART_232;
usb_set_serial_port_data(port, tport);
/*
* The TUSB5052 LSR does not tell when the transmitter shift register
* has emptied so add a one-character drain delay.
*/
if (!tport->tp_tdev->td_is_3410)
port->port.drain_delay = 1;
return 0;
}
static void ti_port_remove(struct usb_serial_port *port)
{
struct ti_port *tport;
tport = usb_get_serial_port_data(port);
kfree(tport);
}
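/*
* Open sequence expected by the TI firmware: start the shared interrupt URB on
* the first open, send OPEN/START, purge both directions, clear the bulk
* endpoint halts, send OPEN/START again and finally submit the read URB.
*/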
static int ti_open(struct tty_struct *tty, struct usb_serial_port *port)
{
struct ti_port *tport = usb_get_serial_port_data(port);
struct ti_device *tdev;
struct usb_device *dev;
struct urb *urb;
int status;
u16 open_settings;
open_settings = (TI_PIPE_MODE_CONTINUOUS |
TI_PIPE_TIMEOUT_ENABLE |
(TI_TRANSFER_TIMEOUT << 2));
dev = port->serial->dev;
tdev = tport->tp_tdev;
/* only one open on any port on a device at a time */
if (mutex_lock_interruptible(&tdev->td_open_close_lock))
return -ERESTARTSYS;
tport->tp_msr = 0;
tport->tp_shadow_mcr |= (TI_MCR_RTS | TI_MCR_DTR);
/* start interrupt urb the first time a port is opened on this device */
if (tdev->td_open_port_count == 0) {
dev_dbg(&port->dev, "%s - start interrupt in urb\n", __func__);
urb = tdev->td_serial->port[0]->interrupt_in_urb;
if (!urb) {
dev_err(&port->dev, "%s - no interrupt urb\n", __func__);
status = -EINVAL;
goto release_lock;
}
urb->context = tdev;
status = usb_submit_urb(urb, GFP_KERNEL);
if (status) {
dev_err(&port->dev, "%s - submit interrupt urb failed, %d\n", __func__, status);
goto release_lock;
}
}
if (tty)
ti_set_termios(tty, port, &tty->termios);
status = ti_port_cmd_out(port, TI_OPEN_PORT, open_settings, NULL, 0);
if (status) {
dev_err(&port->dev, "%s - cannot send open command, %d\n",
__func__, status);
goto unlink_int_urb;
}
status = ti_port_cmd_out(port, TI_START_PORT, 0, NULL, 0);
if (status) {
dev_err(&port->dev, "%s - cannot send start command, %d\n",
__func__, status);
goto unlink_int_urb;
}
status = ti_port_cmd_out(port, TI_PURGE_PORT, TI_PURGE_INPUT, NULL, 0);
if (status) {
dev_err(&port->dev, "%s - cannot clear input buffers, %d\n",
__func__, status);
goto unlink_int_urb;
}
status = ti_port_cmd_out(port, TI_PURGE_PORT, TI_PURGE_OUTPUT, NULL, 0);
if (status) {
dev_err(&port->dev, "%s - cannot clear output buffers, %d\n",
__func__, status);
goto unlink_int_urb;
}
/* reset the data toggle on the bulk endpoints to work around a bug in
* host controllers where things sometimes get out of sync */
usb_clear_halt(dev, port->write_urb->pipe);
usb_clear_halt(dev, port->read_urb->pipe);
if (tty)
ti_set_termios(tty, port, &tty->termios);
status = ti_port_cmd_out(port, TI_OPEN_PORT, open_settings, NULL, 0);
if (status) {
dev_err(&port->dev, "%s - cannot send open command (2), %d\n",
__func__, status);
goto unlink_int_urb;
}
status = ti_port_cmd_out(port, TI_START_PORT, 0, NULL, 0);
if (status) {
dev_err(&port->dev, "%s - cannot send start command (2), %d\n",
__func__, status);
goto unlink_int_urb;
}
/* start read urb */
urb = port->read_urb;
if (!urb) {
dev_err(&port->dev, "%s - no read urb\n", __func__);
status = -EINVAL;
goto unlink_int_urb;
}
tport->tp_read_urb_state = TI_READ_URB_RUNNING;
urb->context = tport;
status = usb_submit_urb(urb, GFP_KERNEL);
if (status) {
dev_err(&port->dev, "%s - submit read urb failed, %d\n",
__func__, status);
goto unlink_int_urb;
}
tport->tp_is_open = 1;
++tdev->td_open_port_count;
goto release_lock;
unlink_int_urb:
if (tdev->td_open_port_count == 0)
usb_kill_urb(port->serial->port[0]->interrupt_in_urb);
release_lock:
mutex_unlock(&tdev->td_open_close_lock);
return status;
}
static void ti_close(struct usb_serial_port *port)
{
struct ti_device *tdev;
struct ti_port *tport;
int status;
unsigned long flags;
tdev = usb_get_serial_data(port->serial);
tport = usb_get_serial_port_data(port);
tport->tp_is_open = 0;
usb_kill_urb(port->read_urb);
usb_kill_urb(port->write_urb);
tport->tp_write_urb_in_use = 0;
spin_lock_irqsave(&tport->tp_lock, flags);
kfifo_reset_out(&port->write_fifo);
spin_unlock_irqrestore(&tport->tp_lock, flags);
status = ti_port_cmd_out(port, TI_CLOSE_PORT, 0, NULL, 0);
if (status)
dev_err(&port->dev,
"%s - cannot send close port command, %d\n"
, __func__, status);
mutex_lock(&tdev->td_open_close_lock);
--tdev->td_open_port_count;
if (tdev->td_open_port_count == 0) {
/* last port is closed, shut down interrupt urb */
usb_kill_urb(port->serial->port[0]->interrupt_in_urb);
}
mutex_unlock(&tdev->td_open_close_lock);
}
static int ti_write(struct tty_struct *tty, struct usb_serial_port *port,
const unsigned char *data, int count)
{
struct ti_port *tport = usb_get_serial_port_data(port);
if (count == 0)
return 0;
if (!tport->tp_is_open)
return -ENODEV;
count = kfifo_in_locked(&port->write_fifo, data, count,
&tport->tp_lock);
ti_send(tport);
return count;
}
static unsigned int ti_write_room(struct tty_struct *tty)
{
struct usb_serial_port *port = tty->driver_data;
struct ti_port *tport = usb_get_serial_port_data(port);
unsigned int room;
unsigned long flags;
spin_lock_irqsave(&tport->tp_lock, flags);
room = kfifo_avail(&port->write_fifo);
spin_unlock_irqrestore(&tport->tp_lock, flags);
dev_dbg(&port->dev, "%s - returns %u\n", __func__, room);
return room;
}
static unsigned int ti_chars_in_buffer(struct tty_struct *tty)
{
struct usb_serial_port *port = tty->driver_data;
struct ti_port *tport = usb_get_serial_port_data(port);
unsigned int chars;
unsigned long flags;
spin_lock_irqsave(&tport->tp_lock, flags);
chars = kfifo_len(&port->write_fifo);
spin_unlock_irqrestore(&tport->tp_lock, flags);
dev_dbg(&port->dev, "%s - returns %u\n", __func__, chars);
return chars;
}
static bool ti_tx_empty(struct usb_serial_port *port)
{
struct ti_port *tport = usb_get_serial_port_data(port);
u8 lsr, mask;
int ret;
/*
* TUSB5052 does not have the TEMT bit to tell if the shift register
* is empty.
*/
if (tport->tp_tdev->td_is_3410)
mask = TI_LSR_TX_EMPTY_BOTH;
else
mask = TI_LSR_TX_EMPTY;
ret = ti_get_lsr(tport, &lsr);
if (!ret && !(lsr & mask))
return false;
return true;
}
static void ti_throttle(struct tty_struct *tty)
{
struct usb_serial_port *port = tty->driver_data;
struct ti_port *tport = usb_get_serial_port_data(port);
if (I_IXOFF(tty) || C_CRTSCTS(tty))
ti_stop_read(tport, tty);
}
static void ti_unthrottle(struct tty_struct *tty)
{
struct usb_serial_port *port = tty->driver_data;
struct ti_port *tport = usb_get_serial_port_data(port);
int status;
if (I_IXOFF(tty) || C_CRTSCTS(tty)) {
status = ti_restart_read(tport, tty);
if (status)
dev_err(&port->dev, "%s - cannot restart read, %d\n",
__func__, status);
}
}
static void ti_set_termios(struct tty_struct *tty,
struct usb_serial_port *port,
const struct ktermios *old_termios)
{
struct ti_port *tport = usb_get_serial_port_data(port);
struct ti_uart_config *config;
int baud;
int status;
unsigned int mcr;
u16 wbaudrate;
u16 wflags = 0;
config = kmalloc(sizeof(*config), GFP_KERNEL);
if (!config)
return;
/* these flags must be set */
wflags |= TI_UART_ENABLE_MS_INTS;
wflags |= TI_UART_ENABLE_AUTO_START_DMA;
config->bUartMode = tport->tp_uart_mode;
switch (C_CSIZE(tty)) {
case CS5:
config->bDataBits = TI_UART_5_DATA_BITS;
break;
case CS6:
config->bDataBits = TI_UART_6_DATA_BITS;
break;
case CS7:
config->bDataBits = TI_UART_7_DATA_BITS;
break;
default:
case CS8:
config->bDataBits = TI_UART_8_DATA_BITS;
break;
}
/* CMSPAR isn't supported by this driver */
tty->termios.c_cflag &= ~CMSPAR;
if (C_PARENB(tty)) {
if (C_PARODD(tty)) {
wflags |= TI_UART_ENABLE_PARITY_CHECKING;
config->bParity = TI_UART_ODD_PARITY;
} else {
wflags |= TI_UART_ENABLE_PARITY_CHECKING;
config->bParity = TI_UART_EVEN_PARITY;
}
} else {
wflags &= ~TI_UART_ENABLE_PARITY_CHECKING;
config->bParity = TI_UART_NO_PARITY;
}
if (C_CSTOPB(tty))
config->bStopBits = TI_UART_2_STOP_BITS;
else
config->bStopBits = TI_UART_1_STOP_BITS;
if (C_CRTSCTS(tty)) {
/* RTS flow control must be off to drop RTS for baud rate B0 */
if ((C_BAUD(tty)) != B0)
wflags |= TI_UART_ENABLE_RTS_IN;
wflags |= TI_UART_ENABLE_CTS_OUT;
} else {
ti_restart_read(tport, tty);
}
if (I_IXOFF(tty) || I_IXON(tty)) {
config->cXon = START_CHAR(tty);
config->cXoff = STOP_CHAR(tty);
if (I_IXOFF(tty))
wflags |= TI_UART_ENABLE_X_IN;
else
ti_restart_read(tport, tty);
if (I_IXON(tty))
wflags |= TI_UART_ENABLE_X_OUT;
}
baud = tty_get_baud_rate(tty);
if (!baud)
baud = 9600;
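/*
* The divisor below is rounded to the nearest integer, e.g. 9600 baud on the
* 3410 gives (923077 + 4800) / 9600 = 96.
*/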
if (tport->tp_tdev->td_is_3410)
wbaudrate = (923077 + baud/2) / baud;
else
wbaudrate = (461538 + baud/2) / baud;
/* FIXME: Should calculate resulting baud here and report it back */
if ((C_BAUD(tty)) != B0)
tty_encode_baud_rate(tty, baud, baud);
dev_dbg(&port->dev,
"%s - BaudRate=%d, wBaudRate=%d, wFlags=0x%04X, bDataBits=%d, bParity=%d, bStopBits=%d, cXon=%d, cXoff=%d, bUartMode=%d\n",
__func__, baud, wbaudrate, wflags,
config->bDataBits, config->bParity, config->bStopBits,
config->cXon, config->cXoff, config->bUartMode);
config->wBaudRate = cpu_to_be16(wbaudrate);
config->wFlags = cpu_to_be16(wflags);
status = ti_port_cmd_out(port, TI_SET_CONFIG, 0, config,
sizeof(*config));
if (status)
dev_err(&port->dev, "%s - cannot set config on port %d, %d\n",
__func__, port->port_number, status);
/* SET_CONFIG asserts RTS and DTR; restore them from the shadow MCR */
mcr = tport->tp_shadow_mcr;
/* if baud rate is B0, clear RTS and DTR */
if (C_BAUD(tty) == B0)
mcr &= ~(TI_MCR_DTR | TI_MCR_RTS);
status = ti_set_mcr(tport, mcr);
if (status)
dev_err(&port->dev, "%s - cannot set modem control on port %d, %d\n",
__func__, port->port_number, status);
kfree(config);
}
static int ti_tiocmget(struct tty_struct *tty)
{
struct usb_serial_port *port = tty->driver_data;
struct ti_port *tport = usb_get_serial_port_data(port);
unsigned int result;
unsigned int msr;
unsigned int mcr;
unsigned long flags;
spin_lock_irqsave(&tport->tp_lock, flags);
msr = tport->tp_msr;
mcr = tport->tp_shadow_mcr;
spin_unlock_irqrestore(&tport->tp_lock, flags);
result = ((mcr & TI_MCR_DTR) ? TIOCM_DTR : 0)
| ((mcr & TI_MCR_RTS) ? TIOCM_RTS : 0)
| ((mcr & TI_MCR_LOOP) ? TIOCM_LOOP : 0)
| ((msr & TI_MSR_CTS) ? TIOCM_CTS : 0)
| ((msr & TI_MSR_CD) ? TIOCM_CAR : 0)
| ((msr & TI_MSR_RI) ? TIOCM_RI : 0)
| ((msr & TI_MSR_DSR) ? TIOCM_DSR : 0);
dev_dbg(&port->dev, "%s - 0x%04X\n", __func__, result);
return result;
}
static int ti_tiocmset(struct tty_struct *tty,
unsigned int set, unsigned int clear)
{
struct usb_serial_port *port = tty->driver_data;
struct ti_port *tport = usb_get_serial_port_data(port);
unsigned int mcr;
unsigned long flags;
spin_lock_irqsave(&tport->tp_lock, flags);
mcr = tport->tp_shadow_mcr;
if (set & TIOCM_RTS)
mcr |= TI_MCR_RTS;
if (set & TIOCM_DTR)
mcr |= TI_MCR_DTR;
if (set & TIOCM_LOOP)
mcr |= TI_MCR_LOOP;
if (clear & TIOCM_RTS)
mcr &= ~TI_MCR_RTS;
if (clear & TIOCM_DTR)
mcr &= ~TI_MCR_DTR;
if (clear & TIOCM_LOOP)
mcr &= ~TI_MCR_LOOP;
spin_unlock_irqrestore(&tport->tp_lock, flags);
return ti_set_mcr(tport, mcr);
}
static int ti_break(struct tty_struct *tty, int break_state)
{
struct usb_serial_port *port = tty->driver_data;
struct ti_port *tport = usb_get_serial_port_data(port);
int status;
dev_dbg(&port->dev, "%s - state = %d\n", __func__, break_state);
status = ti_write_byte(port, tport->tp_tdev,
tport->tp_uart_base_addr + TI_UART_OFFSET_LCR,
TI_LCR_BREAK, break_state == -1 ? TI_LCR_BREAK : 0);
if (status) {
dev_dbg(&port->dev, "%s - error setting break, %d\n", __func__, status);
return status;
}
return 0;
}
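/*
* The interrupt status byte encodes the port number in bit 6 and the event
* code in the low nibble.
*/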
static int ti_get_port_from_code(unsigned char code)
{
return (code >> 6) & 0x01;
}
static int ti_get_func_from_code(unsigned char code)
{
return code & 0x0f;
}
static void ti_interrupt_callback(struct urb *urb)
{
struct ti_device *tdev = urb->context;
struct usb_serial_port *port;
struct usb_serial *serial = tdev->td_serial;
struct ti_port *tport;
struct device *dev = &urb->dev->dev;
unsigned char *data = urb->transfer_buffer;
int length = urb->actual_length;
int port_number;
int function;
int status = urb->status;
int retval;
u8 msr;
switch (status) {
case 0:
break;
case -ECONNRESET:
case -ENOENT:
case -ESHUTDOWN:
dev_dbg(dev, "%s - urb shutting down, %d\n", __func__, status);
return;
default:
dev_err(dev, "%s - nonzero urb status, %d\n", __func__, status);
goto exit;
}
if (length != 2) {
dev_dbg(dev, "%s - bad packet size, %d\n", __func__, length);
goto exit;
}
if (data[0] == TI_CODE_HARDWARE_ERROR) {
dev_err(dev, "%s - hardware error, %d\n", __func__, data[1]);
goto exit;
}
port_number = ti_get_port_from_code(data[0]);
function = ti_get_func_from_code(data[0]);
dev_dbg(dev, "%s - port_number %d, function %d, data 0x%02X\n",
__func__, port_number, function, data[1]);
if (port_number >= serial->num_ports) {
dev_err(dev, "%s - bad port number, %d\n",
__func__, port_number);
goto exit;
}
port = serial->port[port_number];
tport = usb_get_serial_port_data(port);
if (!tport)
goto exit;
switch (function) {
case TI_CODE_DATA_ERROR:
dev_err(dev, "%s - DATA ERROR, port %d, data 0x%02X\n",
__func__, port_number, data[1]);
break;
case TI_CODE_MODEM_STATUS:
msr = data[1];
dev_dbg(dev, "%s - port %d, msr 0x%02X\n", __func__, port_number, msr);
ti_handle_new_msr(tport, msr);
break;
default:
dev_err(dev, "%s - unknown interrupt code, 0x%02X\n",
__func__, data[1]);
break;
}
exit:
retval = usb_submit_urb(urb, GFP_ATOMIC);
if (retval)
dev_err(dev, "%s - resubmit interrupt urb failed, %d\n",
__func__, retval);
}
static void ti_bulk_in_callback(struct urb *urb)
{
struct ti_port *tport = urb->context;
struct usb_serial_port *port = tport->tp_port;
struct device *dev = &urb->dev->dev;
int status = urb->status;
unsigned long flags;
int retval = 0;
switch (status) {
case 0:
break;
case -ECONNRESET:
case -ENOENT:
case -ESHUTDOWN:
dev_dbg(dev, "%s - urb shutting down, %d\n", __func__, status);
return;
default:
dev_err(dev, "%s - nonzero urb status, %d\n",
__func__, status);
}
if (status == -EPIPE)
goto exit;
if (status) {
dev_err(dev, "%s - stopping read!\n", __func__);
return;
}
if (urb->actual_length) {
usb_serial_debug_data(dev, __func__, urb->actual_length,
urb->transfer_buffer);
if (!tport->tp_is_open)
dev_dbg(dev, "%s - port closed, dropping data\n",
__func__);
else
ti_recv(port, urb->transfer_buffer, urb->actual_length);
spin_lock_irqsave(&tport->tp_lock, flags);
port->icount.rx += urb->actual_length;
spin_unlock_irqrestore(&tport->tp_lock, flags);
}
exit:
/* continue to read unless stopping */
spin_lock_irqsave(&tport->tp_lock, flags);
if (tport->tp_read_urb_state == TI_READ_URB_RUNNING)
retval = usb_submit_urb(urb, GFP_ATOMIC);
else if (tport->tp_read_urb_state == TI_READ_URB_STOPPING)
tport->tp_read_urb_state = TI_READ_URB_STOPPED;
spin_unlock_irqrestore(&tport->tp_lock, flags);
if (retval)
dev_err(dev, "%s - resubmit read urb failed, %d\n",
__func__, retval);
}
static void ti_bulk_out_callback(struct urb *urb)
{
struct ti_port *tport = urb->context;
struct usb_serial_port *port = tport->tp_port;
int status = urb->status;
tport->tp_write_urb_in_use = 0;
switch (status) {
case 0:
break;
case -ECONNRESET:
case -ENOENT:
case -ESHUTDOWN:
dev_dbg(&port->dev, "%s - urb shutting down, %d\n", __func__, status);
return;
default:
dev_err_console(port, "%s - nonzero urb status, %d\n",
__func__, status);
}
/* send any buffered data */
ti_send(tport);
}
static void ti_recv(struct usb_serial_port *port, unsigned char *data,
int length)
{
int cnt;
do {
cnt = tty_insert_flip_string(&port->port, data, length);
if (cnt < length) {
dev_err(&port->dev, "%s - dropping data, %d bytes lost\n",
__func__, length - cnt);
if (cnt == 0)
break;
}
tty_flip_buffer_push(&port->port);
data += cnt;
length -= cnt;
} while (length > 0);
}
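/*
* Pull up to one bulk-out buffer's worth of data from the write fifo and
* submit it; bail out early if a write URB is already in flight.
*/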
static void ti_send(struct ti_port *tport)
{
int count, result;
struct usb_serial_port *port = tport->tp_port;
unsigned long flags;
spin_lock_irqsave(&tport->tp_lock, flags);
if (tport->tp_write_urb_in_use)
goto unlock;
count = kfifo_out(&port->write_fifo,
port->write_urb->transfer_buffer,
port->bulk_out_size);
if (count == 0)
goto unlock;
tport->tp_write_urb_in_use = 1;
spin_unlock_irqrestore(&tport->tp_lock, flags);
usb_serial_debug_data(&port->dev, __func__, count,
port->write_urb->transfer_buffer);
usb_fill_bulk_urb(port->write_urb, port->serial->dev,
usb_sndbulkpipe(port->serial->dev,
port->bulk_out_endpointAddress),
port->write_urb->transfer_buffer, count,
ti_bulk_out_callback, tport);
result = usb_submit_urb(port->write_urb, GFP_ATOMIC);
if (result) {
dev_err_console(port, "%s - submit write urb failed, %d\n",
__func__, result);
tport->tp_write_urb_in_use = 0;
/* TODO: reschedule ti_send */
} else {
spin_lock_irqsave(&tport->tp_lock, flags);
port->icount.tx += count;
spin_unlock_irqrestore(&tport->tp_lock, flags);
}
/* more room in the buffer for new writes, wakeup */
tty_port_tty_wakeup(&port->port);
return;
unlock:
spin_unlock_irqrestore(&tport->tp_lock, flags);
return;
}
static int ti_set_mcr(struct ti_port *tport, unsigned int mcr)
{
unsigned long flags;
int status;
status = ti_write_byte(tport->tp_port, tport->tp_tdev,
tport->tp_uart_base_addr + TI_UART_OFFSET_MCR,
TI_MCR_RTS | TI_MCR_DTR | TI_MCR_LOOP, mcr);
spin_lock_irqsave(&tport->tp_lock, flags);
if (!status)
tport->tp_shadow_mcr = mcr;
spin_unlock_irqrestore(&tport->tp_lock, flags);
return status;
}
static int ti_get_lsr(struct ti_port *tport, u8 *lsr)
{
int size, status;
struct usb_serial_port *port = tport->tp_port;
struct ti_port_status *data;
size = sizeof(struct ti_port_status);
data = kmalloc(size, GFP_KERNEL);
if (!data)
return -ENOMEM;
status = ti_port_cmd_in(port, TI_GET_PORT_STATUS, 0, data, size);
if (status) {
dev_err(&port->dev,
"%s - get port status command failed, %d\n",
__func__, status);
goto free_data;
}
dev_dbg(&port->dev, "%s - lsr 0x%02X\n", __func__, data->bLSR);
*lsr = data->bLSR;
free_data:
kfree(data);
return status;
}
static void ti_get_serial_info(struct tty_struct *tty, struct serial_struct *ss)
{
struct usb_serial_port *port = tty->driver_data;
struct ti_port *tport = usb_get_serial_port_data(port);
ss->baud_base = tport->tp_tdev->td_is_3410 ? 921600 : 460800;
}
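/*
* Record modem-status changes, latch the new MSR and, with hardware flow
* control enabled, wake up writers when CTS is asserted.
*/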
static void ti_handle_new_msr(struct ti_port *tport, u8 msr)
{
struct async_icount *icount;
struct tty_struct *tty;
unsigned long flags;
dev_dbg(&tport->tp_port->dev, "%s - msr 0x%02X\n", __func__, msr);
if (msr & TI_MSR_DELTA_MASK) {
spin_lock_irqsave(&tport->tp_lock, flags);
icount = &tport->tp_port->icount;
if (msr & TI_MSR_DELTA_CTS)
icount->cts++;
if (msr & TI_MSR_DELTA_DSR)
icount->dsr++;
if (msr & TI_MSR_DELTA_CD)
icount->dcd++;
if (msr & TI_MSR_DELTA_RI)
icount->rng++;
wake_up_interruptible(&tport->tp_port->port.delta_msr_wait);
spin_unlock_irqrestore(&tport->tp_lock, flags);
}
tport->tp_msr = msr & TI_MSR_MASK;
/* handle CTS flow control */
tty = tty_port_tty_get(&tport->tp_port->port);
if (tty && C_CRTSCTS(tty)) {
if (msr & TI_MSR_CTS)
tty_wakeup(tty);
}
tty_kref_put(tty);
}
static void ti_stop_read(struct ti_port *tport, struct tty_struct *tty)
{
unsigned long flags;
spin_lock_irqsave(&tport->tp_lock, flags);
if (tport->tp_read_urb_state == TI_READ_URB_RUNNING)
tport->tp_read_urb_state = TI_READ_URB_STOPPING;
spin_unlock_irqrestore(&tport->tp_lock, flags);
}
static int ti_restart_read(struct ti_port *tport, struct tty_struct *tty)
{
struct urb *urb;
int status = 0;
unsigned long flags;
spin_lock_irqsave(&tport->tp_lock, flags);
if (tport->tp_read_urb_state == TI_READ_URB_STOPPED) {
tport->tp_read_urb_state = TI_READ_URB_RUNNING;
urb = tport->tp_port->read_urb;
spin_unlock_irqrestore(&tport->tp_lock, flags);
urb->context = tport;
status = usb_submit_urb(urb, GFP_KERNEL);
} else {
tport->tp_read_urb_state = TI_READ_URB_RUNNING;
spin_unlock_irqrestore(&tport->tp_lock, flags);
}
return status;
}
static int ti_command_out_sync(struct usb_device *udev, u8 command,
u16 moduleid, u16 value, void *data, int size)
{
int status;
status = usb_control_msg(udev, usb_sndctrlpipe(udev, 0), command,
USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_OUT,
value, moduleid, data, size, 1000);
if (status < 0)
return status;
return 0;
}
static int ti_command_in_sync(struct usb_device *udev, u8 command,
u16 moduleid, u16 value, void *data, int size)
{
int status;
status = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0), command,
USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_IN,
value, moduleid, data, size, 1000);
if (status == size)
status = 0;
else if (status >= 0)
status = -ECOMM;
return status;
}
static int ti_port_cmd_out(struct usb_serial_port *port, u8 command,
u16 value, void *data, int size)
{
return ti_command_out_sync(port->serial->dev, command,
TI_UART1_PORT + port->port_number,
value, data, size);
}
static int ti_port_cmd_in(struct usb_serial_port *port, u8 command,
u16 value, void *data, int size)
{
return ti_command_in_sync(port->serial->dev, command,
TI_UART1_PORT + port->port_number,
value, data, size);
}
static int ti_write_byte(struct usb_serial_port *port,
struct ti_device *tdev, unsigned long addr,
u8 mask, u8 byte)
{
int status;
unsigned int size;
struct ti_write_data_bytes *data;
dev_dbg(&port->dev, "%s - addr 0x%08lX, mask 0x%02X, byte 0x%02X\n", __func__,
addr, mask, byte);
size = sizeof(struct ti_write_data_bytes) + 2;
data = kmalloc(size, GFP_KERNEL);
if (!data)
return -ENOMEM;
data->bAddrType = TI_RW_DATA_ADDR_XDATA;
data->bDataType = TI_RW_DATA_BYTE;
data->bDataCounter = 1;
data->wBaseAddrHi = cpu_to_be16(addr>>16);
data->wBaseAddrLo = cpu_to_be16(addr);
data->bData[0] = mask;
data->bData[1] = byte;
status = ti_command_out_sync(port->serial->dev, TI_WRITE_DATA,
TI_RAM_PORT, 0, data, size);
if (status < 0)
dev_err(&port->dev, "%s - failed, %d\n", __func__, status);
kfree(data);
return status;
}
static int ti_do_download(struct usb_device *dev, int pipe,
u8 *buffer, int size)
{
int pos;
u8 cs = 0;
int done;
struct ti_firmware_header *header;
int status = 0;
int len;
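	/* 8-bit additive checksum over the payload only (the bytes after the header) */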
for (pos = sizeof(struct ti_firmware_header); pos < size; pos++)
cs = (u8)(cs + buffer[pos]);
header = (struct ti_firmware_header *)buffer;
header->wLength = cpu_to_le16(size - sizeof(*header));
header->bCheckSum = cs;
dev_dbg(&dev->dev, "%s - downloading firmware\n", __func__);
for (pos = 0; pos < size; pos += done) {
len = min(size - pos, TI_DOWNLOAD_MAX_PACKET_SIZE);
status = usb_bulk_msg(dev, pipe, buffer + pos, len,
&done, 1000);
if (status)
break;
}
return status;
}
static int ti_download_firmware(struct ti_device *tdev)
{
int status;
int buffer_size;
u8 *buffer;
struct usb_device *dev = tdev->td_serial->dev;
unsigned int pipe = usb_sndbulkpipe(dev,
tdev->td_serial->port[0]->bulk_out_endpointAddress);
const struct firmware *fw_p;
char buf[32];
if (le16_to_cpu(dev->descriptor.idVendor) == MXU1_VENDOR_ID) {
snprintf(buf,
sizeof(buf),
"moxa/moxa-%04x.fw",
le16_to_cpu(dev->descriptor.idProduct));
status = request_firmware(&fw_p, buf, &dev->dev);
goto check_firmware;
}
/* try ID specific firmware first, then try generic firmware */
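	/*
	 * Illustrative example (IDs chosen for illustration only): idVendor
	 * 0x0451 and idProduct 0x3410 would yield "ti_usb-v0451-p3410.fw".
	 */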
sprintf(buf, "ti_usb-v%04x-p%04x.fw",
le16_to_cpu(dev->descriptor.idVendor),
le16_to_cpu(dev->descriptor.idProduct));
status = request_firmware(&fw_p, buf, &dev->dev);
if (status != 0) {
buf[0] = '\0';
if (le16_to_cpu(dev->descriptor.idVendor) == MTS_VENDOR_ID) {
switch (le16_to_cpu(dev->descriptor.idProduct)) {
case MTS_CDMA_PRODUCT_ID:
strcpy(buf, "mts_cdma.fw");
break;
case MTS_GSM_PRODUCT_ID:
strcpy(buf, "mts_gsm.fw");
break;
case MTS_EDGE_PRODUCT_ID:
strcpy(buf, "mts_edge.fw");
break;
case MTS_MT9234MU_PRODUCT_ID:
strcpy(buf, "mts_mt9234mu.fw");
break;
case MTS_MT9234ZBA_PRODUCT_ID:
strcpy(buf, "mts_mt9234zba.fw");
break;
case MTS_MT9234ZBAOLD_PRODUCT_ID:
strcpy(buf, "mts_mt9234zba.fw");
				break;
			}
}
if (buf[0] == '\0') {
if (tdev->td_is_3410)
strcpy(buf, "ti_3410.fw");
else
strcpy(buf, "ti_5052.fw");
}
status = request_firmware(&fw_p, buf, &dev->dev);
}
check_firmware:
if (status) {
dev_err(&dev->dev, "%s - firmware not found\n", __func__);
return -ENOENT;
}
if (fw_p->size > TI_FIRMWARE_BUF_SIZE) {
dev_err(&dev->dev, "%s - firmware too large %zu\n", __func__, fw_p->size);
release_firmware(fw_p);
return -ENOENT;
}
buffer_size = TI_FIRMWARE_BUF_SIZE + sizeof(struct ti_firmware_header);
buffer = kmalloc(buffer_size, GFP_KERNEL);
if (buffer) {
memcpy(buffer, fw_p->data, fw_p->size);
memset(buffer + fw_p->size, 0xff, buffer_size - fw_p->size);
status = ti_do_download(dev, pipe, buffer, fw_p->size);
kfree(buffer);
} else {
status = -ENOMEM;
}
release_firmware(fw_p);
if (status) {
dev_err(&dev->dev, "%s - error downloading firmware, %d\n",
__func__, status);
return status;
}
dev_dbg(&dev->dev, "%s - download successful\n", __func__);
return 0;
}
| linux-master | drivers/usb/serial/ti_usb_3410_5052.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* Infinity Unlimited USB Phoenix driver
*
* Copyright (C) 2010 James Courtier-Dutton ([email protected])
* Copyright (C) 2007 Alain Degreffe ([email protected])
*
* Original code taken from iuutool (Copyright (C) 2006 Juan Carlos Borrás)
*
* And tested with help of WB Electronics
*/
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/tty.h>
#include <linux/tty_driver.h>
#include <linux/tty_flip.h>
#include <linux/serial.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/spinlock.h>
#include <linux/uaccess.h>
#include <linux/usb.h>
#include <linux/usb/serial.h>
#include "iuu_phoenix.h"
#include <linux/random.h>
#define DRIVER_DESC "Infinity USB Unlimited Phoenix driver"
static const struct usb_device_id id_table[] = {
{USB_DEVICE(IUU_USB_VENDOR_ID, IUU_USB_PRODUCT_ID)},
{} /* Terminating entry */
};
MODULE_DEVICE_TABLE(usb, id_table);
/* turbo parameter */
static int boost = 100;
static int clockmode = 1;
static int cdmode = 1;
static int iuu_cardin;
static int iuu_cardout;
static bool xmas;
static int vcc_default = 5;
static int iuu_create_sysfs_attrs(struct usb_serial_port *port);
static int iuu_remove_sysfs_attrs(struct usb_serial_port *port);
static void read_rxcmd_callback(struct urb *urb);
struct iuu_private {
spinlock_t lock; /* store irq state */
u8 line_status;
	int tiostatus; /* store IUU UART signal state for the tiocmget call */
u8 reset; /* if 1 reset is needed */
int poll; /* number of poll */
u8 *writebuf; /* buffer for writing to device */
int writelen; /* num of byte to write to device */
u8 *buf; /* used for initialize speed */
u8 len;
int vcc; /* vcc (either 3 or 5 V) */
u32 boost;
u32 clk;
};
static int iuu_port_probe(struct usb_serial_port *port)
{
struct iuu_private *priv;
int ret;
priv = kzalloc(sizeof(struct iuu_private), GFP_KERNEL);
if (!priv)
return -ENOMEM;
priv->buf = kzalloc(256, GFP_KERNEL);
if (!priv->buf) {
kfree(priv);
return -ENOMEM;
}
priv->writebuf = kzalloc(256, GFP_KERNEL);
if (!priv->writebuf) {
kfree(priv->buf);
kfree(priv);
return -ENOMEM;
}
priv->vcc = vcc_default;
spin_lock_init(&priv->lock);
usb_set_serial_port_data(port, priv);
ret = iuu_create_sysfs_attrs(port);
if (ret) {
kfree(priv->writebuf);
kfree(priv->buf);
kfree(priv);
return ret;
}
return 0;
}
static void iuu_port_remove(struct usb_serial_port *port)
{
struct iuu_private *priv = usb_get_serial_port_data(port);
iuu_remove_sysfs_attrs(port);
kfree(priv->writebuf);
kfree(priv->buf);
kfree(priv);
}
static int iuu_tiocmset(struct tty_struct *tty,
unsigned int set, unsigned int clear)
{
struct usb_serial_port *port = tty->driver_data;
struct iuu_private *priv = usb_get_serial_port_data(port);
unsigned long flags;
/* FIXME: locking on tiomstatus */
dev_dbg(&port->dev, "%s msg : SET = 0x%04x, CLEAR = 0x%04x\n",
__func__, set, clear);
spin_lock_irqsave(&priv->lock, flags);
if ((set & TIOCM_RTS) && !(priv->tiostatus == TIOCM_RTS)) {
dev_dbg(&port->dev, "%s TIOCMSET RESET called !!!\n", __func__);
priv->reset = 1;
}
if (set & TIOCM_RTS)
priv->tiostatus = TIOCM_RTS;
spin_unlock_irqrestore(&priv->lock, flags);
return 0;
}
/* This is used to provide a carrier detect mechanism.
 * When a card is present, the response is 0x00.
 * When no card is present, the reader responds with TIOCM_CD.
 * This is known as the CD autodetect mechanism.
 */
static int iuu_tiocmget(struct tty_struct *tty)
{
struct usb_serial_port *port = tty->driver_data;
struct iuu_private *priv = usb_get_serial_port_data(port);
unsigned long flags;
int rc;
spin_lock_irqsave(&priv->lock, flags);
rc = priv->tiostatus;
spin_unlock_irqrestore(&priv->lock, flags);
return rc;
}
static void iuu_rxcmd(struct urb *urb)
{
struct usb_serial_port *port = urb->context;
int status = urb->status;
if (status) {
dev_dbg(&port->dev, "%s - status = %d\n", __func__, status);
/* error stop all */
return;
}
memset(port->write_urb->transfer_buffer, IUU_UART_RX, 1);
usb_fill_bulk_urb(port->write_urb, port->serial->dev,
usb_sndbulkpipe(port->serial->dev,
port->bulk_out_endpointAddress),
port->write_urb->transfer_buffer, 1,
read_rxcmd_callback, port);
usb_submit_urb(port->write_urb, GFP_ATOMIC);
}
static int iuu_reset(struct usb_serial_port *port, u8 wt)
{
struct iuu_private *priv = usb_get_serial_port_data(port);
int result;
char *buf_ptr = port->write_urb->transfer_buffer;
/* Prepare the reset sequence */
*buf_ptr++ = IUU_RST_SET;
*buf_ptr++ = IUU_DELAY_MS;
*buf_ptr++ = wt;
*buf_ptr = IUU_RST_CLEAR;
/* send the sequence */
usb_fill_bulk_urb(port->write_urb,
port->serial->dev,
usb_sndbulkpipe(port->serial->dev,
port->bulk_out_endpointAddress),
port->write_urb->transfer_buffer, 4, iuu_rxcmd, port);
result = usb_submit_urb(port->write_urb, GFP_ATOMIC);
priv->reset = 0;
return result;
}
/* Status Function
* Return value is
* 0x00 = no card
* 0x01 = smartcard
* 0x02 = sim card
*/
static void iuu_update_status_callback(struct urb *urb)
{
struct usb_serial_port *port = urb->context;
struct iuu_private *priv = usb_get_serial_port_data(port);
u8 *st;
int status = urb->status;
if (status) {
dev_dbg(&port->dev, "%s - status = %d\n", __func__, status);
/* error stop all */
return;
}
st = urb->transfer_buffer;
dev_dbg(&port->dev, "%s - enter\n", __func__);
if (urb->actual_length == 1) {
switch (st[0]) {
case 0x1:
priv->tiostatus = iuu_cardout;
break;
case 0x0:
priv->tiostatus = iuu_cardin;
break;
default:
priv->tiostatus = iuu_cardin;
}
}
iuu_rxcmd(urb);
}
static void iuu_status_callback(struct urb *urb)
{
struct usb_serial_port *port = urb->context;
int status = urb->status;
dev_dbg(&port->dev, "%s - status = %d\n", __func__, status);
usb_fill_bulk_urb(port->read_urb, port->serial->dev,
usb_rcvbulkpipe(port->serial->dev,
port->bulk_in_endpointAddress),
port->read_urb->transfer_buffer, 256,
iuu_update_status_callback, port);
usb_submit_urb(port->read_urb, GFP_ATOMIC);
}
static int iuu_status(struct usb_serial_port *port)
{
int result;
memset(port->write_urb->transfer_buffer, IUU_GET_STATE_REGISTER, 1);
usb_fill_bulk_urb(port->write_urb, port->serial->dev,
usb_sndbulkpipe(port->serial->dev,
port->bulk_out_endpointAddress),
port->write_urb->transfer_buffer, 1,
iuu_status_callback, port);
result = usb_submit_urb(port->write_urb, GFP_ATOMIC);
return result;
}
static int bulk_immediate(struct usb_serial_port *port, u8 *buf, u8 count)
{
int status;
struct usb_serial *serial = port->serial;
int actual = 0;
/* send the data out the bulk port */
status =
usb_bulk_msg(serial->dev,
usb_sndbulkpipe(serial->dev,
port->bulk_out_endpointAddress), buf,
count, &actual, 1000);
if (status != IUU_OPERATION_OK)
dev_dbg(&port->dev, "%s - error = %2x\n", __func__, status);
else
dev_dbg(&port->dev, "%s - write OK !\n", __func__);
return status;
}
static int read_immediate(struct usb_serial_port *port, u8 *buf, u8 count)
{
int status;
struct usb_serial *serial = port->serial;
int actual = 0;
/* send the data out the bulk port */
status =
usb_bulk_msg(serial->dev,
usb_rcvbulkpipe(serial->dev,
port->bulk_in_endpointAddress), buf,
count, &actual, 1000);
if (status != IUU_OPERATION_OK)
dev_dbg(&port->dev, "%s - error = %2x\n", __func__, status);
else
dev_dbg(&port->dev, "%s - read OK !\n", __func__);
return status;
}
static int iuu_led(struct usb_serial_port *port, unsigned int R,
unsigned int G, unsigned int B, u8 f)
{
int status;
u8 *buf;
buf = kmalloc(8, GFP_KERNEL);
if (!buf)
return -ENOMEM;
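	/* Command byte, then R, G and B as 16-bit little-endian values, then the frequency byte */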
buf[0] = IUU_SET_LED;
buf[1] = R & 0xFF;
buf[2] = (R >> 8) & 0xFF;
buf[3] = G & 0xFF;
buf[4] = (G >> 8) & 0xFF;
buf[5] = B & 0xFF;
buf[6] = (B >> 8) & 0xFF;
buf[7] = f;
status = bulk_immediate(port, buf, 8);
kfree(buf);
if (status != IUU_OPERATION_OK)
dev_dbg(&port->dev, "%s - led error status = %2x\n", __func__, status);
else
dev_dbg(&port->dev, "%s - led OK !\n", __func__);
return IUU_OPERATION_OK;
}
static void iuu_rgbf_fill_buffer(u8 *buf, u8 r1, u8 r2, u8 g1, u8 g2, u8 b1,
u8 b2, u8 freq)
{
*buf++ = IUU_SET_LED;
*buf++ = r1;
*buf++ = r2;
*buf++ = g1;
*buf++ = g2;
*buf++ = b1;
*buf++ = b2;
*buf = freq;
}
static void iuu_led_activity_on(struct urb *urb)
{
struct usb_serial_port *port = urb->context;
char *buf_ptr = port->write_urb->transfer_buffer;
if (xmas) {
buf_ptr[0] = IUU_SET_LED;
get_random_bytes(buf_ptr + 1, 6);
buf_ptr[7] = 1;
} else {
iuu_rgbf_fill_buffer(buf_ptr, 255, 255, 0, 0, 0, 0, 255);
}
usb_fill_bulk_urb(port->write_urb, port->serial->dev,
usb_sndbulkpipe(port->serial->dev,
port->bulk_out_endpointAddress),
port->write_urb->transfer_buffer, 8 ,
iuu_rxcmd, port);
usb_submit_urb(port->write_urb, GFP_ATOMIC);
}
static void iuu_led_activity_off(struct urb *urb)
{
struct usb_serial_port *port = urb->context;
char *buf_ptr = port->write_urb->transfer_buffer;
if (xmas) {
iuu_rxcmd(urb);
return;
}
iuu_rgbf_fill_buffer(buf_ptr, 0, 0, 255, 255, 0, 0, 255);
usb_fill_bulk_urb(port->write_urb, port->serial->dev,
usb_sndbulkpipe(port->serial->dev,
port->bulk_out_endpointAddress),
port->write_urb->transfer_buffer, 8 ,
iuu_rxcmd, port);
usb_submit_urb(port->write_urb, GFP_ATOMIC);
}
static int iuu_clk(struct usb_serial_port *port, int dwFrq)
{
int status;
struct iuu_private *priv = usb_get_serial_port_data(port);
int Count = 0;
u8 FrqGenAdr = 0x69;
u8 DIV = 0; /* 8bit */
u8 XDRV = 0; /* 8bit */
u8 PUMP = 0; /* 3bit */
u8 PBmsb = 0; /* 2bit */
u8 PBlsb = 0; /* 8bit */
u8 PO = 0; /* 1bit */
u8 Q = 0; /* 7bit */
/* 24bit = 3bytes */
unsigned int P = 0;
unsigned int P2 = 0;
int frq = (int)dwFrq;
if (frq == 0) {
priv->buf[Count++] = IUU_UART_WRITE_I2C;
priv->buf[Count++] = FrqGenAdr << 1;
priv->buf[Count++] = 0x09;
priv->buf[Count++] = 0x00;
status = bulk_immediate(port, (u8 *) priv->buf, Count);
if (status != 0) {
dev_dbg(&port->dev, "%s - write error\n", __func__);
return status;
}
} else if (frq == 3579000) {
DIV = 100;
P = 1193;
Q = 40;
XDRV = 0;
} else if (frq == 3680000) {
DIV = 105;
P = 161;
Q = 5;
XDRV = 0;
} else if (frq == 6000000) {
DIV = 66;
P = 66;
Q = 2;
XDRV = 0x28;
} else {
unsigned int result = 0;
unsigned int tmp = 0;
unsigned int check;
unsigned int check2;
char found = 0x00;
unsigned int lQ = 2;
unsigned int lP = 2055;
unsigned int lDiv = 4;
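		/*
		 * Brute-force search over lQ (2..47), lP (2055 down to 8) and
		 * lDiv (4..127) for the combination whose
		 * (12 MHz / lDiv) * (lP / lQ) comes closest to the requested
		 * frequency, subject to the range checks below
		 * (12 MHz / lQ >= 250 kHz and
		 * 100 MHz <= (12 MHz / lQ) * lP <= 400 MHz).
		 */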
for (lQ = 2; lQ <= 47 && !found; lQ++)
for (lP = 2055; lP >= 8 && !found; lP--)
for (lDiv = 4; lDiv <= 127 && !found; lDiv++) {
tmp = (12000000 / lDiv) * (lP / lQ);
if (abs((int)(tmp - frq)) <
abs((int)(frq - result))) {
check2 = (12000000 / lQ);
if (check2 < 250000)
continue;
check = (12000000 / lQ) * lP;
if (check > 400000000)
continue;
if (check < 100000000)
continue;
if (lDiv < 4 || lDiv > 127)
continue;
result = tmp;
P = lP;
DIV = lDiv;
Q = lQ;
if (result == frq)
found = 0x01;
}
}
}
P2 = ((P - PO) / 2) - 4;
PUMP = 0x04;
PBmsb = (P2 >> 8 & 0x03);
PBlsb = P2 & 0xFF;
PO = (P >> 10) & 0x01;
Q = Q - 2;
priv->buf[Count++] = IUU_UART_WRITE_I2C; /* 0x4C */
priv->buf[Count++] = FrqGenAdr << 1;
priv->buf[Count++] = 0x09;
priv->buf[Count++] = 0x20; /* Adr = 0x09 */
priv->buf[Count++] = IUU_UART_WRITE_I2C; /* 0x4C */
priv->buf[Count++] = FrqGenAdr << 1;
priv->buf[Count++] = 0x0C;
priv->buf[Count++] = DIV; /* Adr = 0x0C */
priv->buf[Count++] = IUU_UART_WRITE_I2C; /* 0x4C */
priv->buf[Count++] = FrqGenAdr << 1;
priv->buf[Count++] = 0x12;
priv->buf[Count++] = XDRV; /* Adr = 0x12 */
priv->buf[Count++] = IUU_UART_WRITE_I2C; /* 0x4C */
priv->buf[Count++] = FrqGenAdr << 1;
priv->buf[Count++] = 0x13;
priv->buf[Count++] = 0x6B; /* Adr = 0x13 */
priv->buf[Count++] = IUU_UART_WRITE_I2C; /* 0x4C */
priv->buf[Count++] = FrqGenAdr << 1;
priv->buf[Count++] = 0x40;
priv->buf[Count++] = (0xC0 | ((PUMP & 0x07) << 2)) |
(PBmsb & 0x03); /* Adr = 0x40 */
priv->buf[Count++] = IUU_UART_WRITE_I2C; /* 0x4C */
priv->buf[Count++] = FrqGenAdr << 1;
priv->buf[Count++] = 0x41;
priv->buf[Count++] = PBlsb; /* Adr = 0x41 */
priv->buf[Count++] = IUU_UART_WRITE_I2C; /* 0x4C */
priv->buf[Count++] = FrqGenAdr << 1;
priv->buf[Count++] = 0x42;
priv->buf[Count++] = Q | (((PO & 0x01) << 7)); /* Adr = 0x42 */
priv->buf[Count++] = IUU_UART_WRITE_I2C; /* 0x4C */
priv->buf[Count++] = FrqGenAdr << 1;
priv->buf[Count++] = 0x44;
priv->buf[Count++] = (char)0xFF; /* Adr = 0x44 */
priv->buf[Count++] = IUU_UART_WRITE_I2C; /* 0x4C */
priv->buf[Count++] = FrqGenAdr << 1;
priv->buf[Count++] = 0x45;
priv->buf[Count++] = (char)0xFE; /* Adr = 0x45 */
priv->buf[Count++] = IUU_UART_WRITE_I2C; /* 0x4C */
priv->buf[Count++] = FrqGenAdr << 1;
priv->buf[Count++] = 0x46;
priv->buf[Count++] = 0x7F; /* Adr = 0x46 */
priv->buf[Count++] = IUU_UART_WRITE_I2C; /* 0x4C */
priv->buf[Count++] = FrqGenAdr << 1;
priv->buf[Count++] = 0x47;
priv->buf[Count++] = (char)0x84; /* Adr = 0x47 */
status = bulk_immediate(port, (u8 *) priv->buf, Count);
if (status != IUU_OPERATION_OK)
dev_dbg(&port->dev, "%s - write error\n", __func__);
return status;
}
static int iuu_uart_flush(struct usb_serial_port *port)
{
struct device *dev = &port->dev;
int i;
int status;
u8 *rxcmd;
struct iuu_private *priv = usb_get_serial_port_data(port);
if (iuu_led(port, 0xF000, 0, 0, 0xFF) < 0)
return -EIO;
rxcmd = kmalloc(1, GFP_KERNEL);
if (!rxcmd)
return -ENOMEM;
rxcmd[0] = IUU_UART_RX;
for (i = 0; i < 2; i++) {
status = bulk_immediate(port, rxcmd, 1);
if (status != IUU_OPERATION_OK) {
dev_dbg(dev, "%s - uart_flush_write error\n", __func__);
goto out_free;
}
status = read_immediate(port, &priv->len, 1);
if (status != IUU_OPERATION_OK) {
dev_dbg(dev, "%s - uart_flush_read error\n", __func__);
goto out_free;
}
if (priv->len > 0) {
dev_dbg(dev, "%s - uart_flush datalen is : %i\n", __func__, priv->len);
status = read_immediate(port, priv->buf, priv->len);
if (status != IUU_OPERATION_OK) {
dev_dbg(dev, "%s - uart_flush_read error\n", __func__);
goto out_free;
}
}
}
dev_dbg(dev, "%s - uart_flush_read OK!\n", __func__);
iuu_led(port, 0, 0xF000, 0, 0xFF);
out_free:
kfree(rxcmd);
return status;
}
static void read_buf_callback(struct urb *urb)
{
struct usb_serial_port *port = urb->context;
unsigned char *data = urb->transfer_buffer;
int status = urb->status;
if (status) {
if (status == -EPROTO) {
/* reschedule needed */
}
return;
}
dev_dbg(&port->dev, "%s - %i chars to write\n", __func__, urb->actual_length);
if (urb->actual_length) {
tty_insert_flip_string(&port->port, data, urb->actual_length);
tty_flip_buffer_push(&port->port);
}
iuu_led_activity_on(urb);
}
static int iuu_bulk_write(struct usb_serial_port *port)
{
struct iuu_private *priv = usb_get_serial_port_data(port);
unsigned long flags;
int result;
int buf_len;
char *buf_ptr = port->write_urb->transfer_buffer;
spin_lock_irqsave(&priv->lock, flags);
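	/* 3-byte header (escape, TX command, payload length) followed by the buffered payload */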
*buf_ptr++ = IUU_UART_ESC;
*buf_ptr++ = IUU_UART_TX;
*buf_ptr++ = priv->writelen;
memcpy(buf_ptr, priv->writebuf, priv->writelen);
buf_len = priv->writelen;
priv->writelen = 0;
spin_unlock_irqrestore(&priv->lock, flags);
dev_dbg(&port->dev, "%s - writing %i chars : %*ph\n", __func__,
buf_len, buf_len, buf_ptr);
usb_fill_bulk_urb(port->write_urb, port->serial->dev,
usb_sndbulkpipe(port->serial->dev,
port->bulk_out_endpointAddress),
port->write_urb->transfer_buffer, buf_len + 3,
iuu_rxcmd, port);
result = usb_submit_urb(port->write_urb, GFP_ATOMIC);
usb_serial_port_softint(port);
return result;
}
static int iuu_read_buf(struct usb_serial_port *port, int len)
{
int result;
usb_fill_bulk_urb(port->read_urb, port->serial->dev,
usb_rcvbulkpipe(port->serial->dev,
port->bulk_in_endpointAddress),
port->read_urb->transfer_buffer, len,
read_buf_callback, port);
result = usb_submit_urb(port->read_urb, GFP_ATOMIC);
return result;
}
static void iuu_uart_read_callback(struct urb *urb)
{
struct usb_serial_port *port = urb->context;
struct iuu_private *priv = usb_get_serial_port_data(port);
unsigned long flags;
int status = urb->status;
int len = 0;
unsigned char *data = urb->transfer_buffer;
priv->poll++;
if (status) {
dev_dbg(&port->dev, "%s - status = %d\n", __func__, status);
/* error stop all */
return;
}
if (urb->actual_length == 1)
len = (int) data[0];
if (urb->actual_length > 1) {
dev_dbg(&port->dev, "%s - urb->actual_length = %i\n", __func__,
urb->actual_length);
return;
}
/* if len > 0 call readbuf */
if (len > 0) {
dev_dbg(&port->dev, "%s - call read buf - len to read is %i\n",
__func__, len);
status = iuu_read_buf(port, len);
return;
}
/* need to update status ? */
if (priv->poll > 99) {
status = iuu_status(port);
priv->poll = 0;
return;
}
/* reset waiting ? */
if (priv->reset == 1) {
status = iuu_reset(port, 0xC);
return;
}
/* Writebuf is waiting */
spin_lock_irqsave(&priv->lock, flags);
if (priv->writelen > 0) {
spin_unlock_irqrestore(&priv->lock, flags);
status = iuu_bulk_write(port);
return;
}
spin_unlock_irqrestore(&priv->lock, flags);
/* if nothing to write call again rxcmd */
dev_dbg(&port->dev, "%s - rxcmd recall\n", __func__);
iuu_led_activity_off(urb);
}
static int iuu_uart_write(struct tty_struct *tty, struct usb_serial_port *port,
const u8 *buf, int count)
{
struct iuu_private *priv = usb_get_serial_port_data(port);
unsigned long flags;
spin_lock_irqsave(&priv->lock, flags);
count = min(count, 256 - priv->writelen);
if (count == 0)
goto out;
/* fill the buffer */
memcpy(priv->writebuf + priv->writelen, buf, count);
priv->writelen += count;
out:
spin_unlock_irqrestore(&priv->lock, flags);
return count;
}
static void read_rxcmd_callback(struct urb *urb)
{
struct usb_serial_port *port = urb->context;
int result;
int status = urb->status;
if (status) {
/* error stop all */
return;
}
usb_fill_bulk_urb(port->read_urb, port->serial->dev,
usb_rcvbulkpipe(port->serial->dev,
port->bulk_in_endpointAddress),
port->read_urb->transfer_buffer, 256,
iuu_uart_read_callback, port);
result = usb_submit_urb(port->read_urb, GFP_ATOMIC);
dev_dbg(&port->dev, "%s - submit result = %d\n", __func__, result);
}
static int iuu_uart_on(struct usb_serial_port *port)
{
int status;
u8 *buf;
buf = kmalloc(4, GFP_KERNEL);
if (!buf)
return -ENOMEM;
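	/* Enable command, the 16-bit IUU_BAUD_9600 value (high byte first), then combined stop-bit/parity flags */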
buf[0] = IUU_UART_ENABLE;
buf[1] = (u8) ((IUU_BAUD_9600 >> 8) & 0x00FF);
buf[2] = (u8) (0x00FF & IUU_BAUD_9600);
buf[3] = (u8) (0x0F0 & IUU_ONE_STOP_BIT) | (0x07 & IUU_PARITY_EVEN);
status = bulk_immediate(port, buf, 4);
if (status != IUU_OPERATION_OK) {
dev_dbg(&port->dev, "%s - uart_on error\n", __func__);
goto uart_enable_failed;
}
/* iuu_reset() the card after iuu_uart_on() */
status = iuu_uart_flush(port);
if (status != IUU_OPERATION_OK)
dev_dbg(&port->dev, "%s - uart_flush error\n", __func__);
uart_enable_failed:
kfree(buf);
return status;
}
/* Disables the IUU UART (a.k.a. the Phoenix voiderface) */
static int iuu_uart_off(struct usb_serial_port *port)
{
int status;
u8 *buf;
buf = kmalloc(1, GFP_KERNEL);
if (!buf)
return -ENOMEM;
buf[0] = IUU_UART_DISABLE;
status = bulk_immediate(port, buf, 1);
if (status != IUU_OPERATION_OK)
dev_dbg(&port->dev, "%s - uart_off error\n", __func__);
kfree(buf);
return status;
}
static int iuu_uart_baud(struct usb_serial_port *port, u32 baud_base,
u32 *actual, u8 parity)
{
int status;
u32 baud;
u8 *dataout;
u8 DataCount = 0;
u8 T1Frekvens = 0;
u8 T1reload = 0;
unsigned int T1FrekvensHZ = 0;
dev_dbg(&port->dev, "%s - enter baud_base=%d\n", __func__, baud_base);
dataout = kmalloc(5, GFP_KERNEL);
if (!dataout)
return -ENOMEM;
/*baud = (((priv->clk / 35) * baud_base) / 100000); */
baud = baud_base;
if (baud < 1200 || baud > 230400) {
kfree(dataout);
return IUU_INVALID_PARAMETER;
}
if (baud > 977) {
T1Frekvens = 3;
T1FrekvensHZ = 500000;
}
if (baud > 3906) {
T1Frekvens = 2;
T1FrekvensHZ = 2000000;
}
if (baud > 11718) {
T1Frekvens = 1;
T1FrekvensHZ = 6000000;
}
if (baud > 46875) {
T1Frekvens = 0;
T1FrekvensHZ = 24000000;
}
T1reload = 256 - (u8) (T1FrekvensHZ / (baud * 2));
/* magic number here: ENTER_FIRMWARE_UPDATE; */
dataout[DataCount++] = IUU_UART_ESC;
/* magic number here: CHANGE_BAUD; */
dataout[DataCount++] = IUU_UART_CHANGE;
dataout[DataCount++] = T1Frekvens;
dataout[DataCount++] = T1reload;
*actual = (T1FrekvensHZ / (256 - T1reload)) / 2;
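	/*
	 * Rough worked example (illustrative): for baud_base 9600 the 2 MHz
	 * timer source is selected, T1reload = 256 - 2000000 / 19200 = 152,
	 * and *actual = (2000000 / 104) / 2 = 9615.
	 */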
switch (parity & 0x0F) {
case IUU_PARITY_NONE:
dataout[DataCount++] = 0x00;
break;
case IUU_PARITY_EVEN:
dataout[DataCount++] = 0x01;
break;
case IUU_PARITY_ODD:
dataout[DataCount++] = 0x02;
break;
case IUU_PARITY_MARK:
dataout[DataCount++] = 0x03;
break;
case IUU_PARITY_SPACE:
dataout[DataCount++] = 0x04;
break;
default:
kfree(dataout);
return IUU_INVALID_PARAMETER;
}
switch (parity & 0xF0) {
case IUU_ONE_STOP_BIT:
dataout[DataCount - 1] |= IUU_ONE_STOP_BIT;
break;
case IUU_TWO_STOP_BITS:
dataout[DataCount - 1] |= IUU_TWO_STOP_BITS;
break;
default:
kfree(dataout);
return IUU_INVALID_PARAMETER;
}
status = bulk_immediate(port, dataout, DataCount);
if (status != IUU_OPERATION_OK)
dev_dbg(&port->dev, "%s - uart_off error\n", __func__);
kfree(dataout);
return status;
}
static void iuu_set_termios(struct tty_struct *tty,
struct usb_serial_port *port,
const struct ktermios *old_termios)
{
const u32 supported_mask = CMSPAR|PARENB|PARODD;
struct iuu_private *priv = usb_get_serial_port_data(port);
unsigned int cflag = tty->termios.c_cflag;
int status;
u32 actual;
u32 parity;
int csize = CS7;
int baud;
u32 newval = cflag & supported_mask;
/* Just use the ospeed. ispeed should be the same. */
baud = tty->termios.c_ospeed;
dev_dbg(&port->dev, "%s - enter c_ospeed or baud=%d\n", __func__, baud);
/* compute the parity parameter */
parity = 0;
if (cflag & CMSPAR) { /* Using mark space */
if (cflag & PARODD)
parity |= IUU_PARITY_SPACE;
else
parity |= IUU_PARITY_MARK;
} else if (!(cflag & PARENB)) {
parity |= IUU_PARITY_NONE;
csize = CS8;
} else if (cflag & PARODD)
parity |= IUU_PARITY_ODD;
else
parity |= IUU_PARITY_EVEN;
parity |= (cflag & CSTOPB ? IUU_TWO_STOP_BITS : IUU_ONE_STOP_BIT);
/* set it */
status = iuu_uart_baud(port,
baud * priv->boost / 100,
&actual, parity);
	/* Set the termios value to the real one, so the user knows what has
	 * changed. We support few fields, so it's easiest to copy the old hw
	 * settings back over and then adjust them.
	 */
if (old_termios)
tty_termios_copy_hw(&tty->termios, old_termios);
if (status != 0) /* Set failed - return old bits */
return;
/* Re-encode speed, parity and csize */
tty_encode_baud_rate(tty, baud, baud);
tty->termios.c_cflag &= ~(supported_mask|CSIZE);
tty->termios.c_cflag |= newval | csize;
}
static void iuu_close(struct usb_serial_port *port)
{
/* iuu_led (port,255,0,0,0); */
iuu_uart_off(port);
usb_kill_urb(port->write_urb);
usb_kill_urb(port->read_urb);
iuu_led(port, 0, 0, 0xF000, 0xFF);
}
static void iuu_init_termios(struct tty_struct *tty)
{
tty->termios.c_cflag = B9600 | CS8 | CSTOPB | CREAD | PARENB | CLOCAL;
tty->termios.c_ispeed = 9600;
tty->termios.c_ospeed = 9600;
tty->termios.c_lflag = 0;
tty->termios.c_oflag = 0;
tty->termios.c_iflag = 0;
}
static int iuu_open(struct tty_struct *tty, struct usb_serial_port *port)
{
struct usb_serial *serial = port->serial;
struct device *dev = &port->dev;
int result;
int baud;
u32 actual;
struct iuu_private *priv = usb_get_serial_port_data(port);
baud = tty->termios.c_ospeed;
dev_dbg(dev, "%s - baud %d\n", __func__, baud);
usb_clear_halt(serial->dev, port->write_urb->pipe);
usb_clear_halt(serial->dev, port->read_urb->pipe);
priv->poll = 0;
#define SOUP(a, b, c, d) do { \
result = usb_control_msg(port->serial->dev, \
usb_sndctrlpipe(port->serial->dev, 0), \
b, a, c, d, NULL, 0, 1000); \
dev_dbg(dev, "0x%x:0x%x:0x%x:0x%x %d\n", a, b, c, d, result); } while (0)
/* This is not UART related but IUU USB driver related or something */
/* like that. Basically no IUU will accept any commands from the USB */
/* host unless it has received the following message */
/* sprintf(buf ,"%c%c%c%c",0x03,0x02,0x02,0x0); */
SOUP(0x03, 0x02, 0x02, 0x0);
iuu_led(port, 0xF000, 0xF000, 0, 0xFF);
iuu_uart_on(port);
if (boost < 100)
boost = 100;
priv->boost = boost;
switch (clockmode) {
	case 2: /* 3.680 MHz */
priv->clk = IUU_CLK_3680000;
iuu_clk(port, IUU_CLK_3680000 * boost / 100);
result =
iuu_uart_baud(port, baud * boost / 100, &actual,
IUU_PARITY_EVEN);
break;
	case 3: /* 6.00 MHz */
iuu_clk(port, IUU_CLK_6000000 * boost / 100);
priv->clk = IUU_CLK_6000000;
/* Ratio of 6000000 to 3500000 for baud 9600 */
result =
iuu_uart_baud(port, 16457 * boost / 100, &actual,
IUU_PARITY_EVEN);
break;
	default: /* 3.579 MHz */
iuu_clk(port, IUU_CLK_3579000 * boost / 100);
priv->clk = IUU_CLK_3579000;
result =
iuu_uart_baud(port, baud * boost / 100, &actual,
IUU_PARITY_EVEN);
}
/* set the cardin cardout signals */
switch (cdmode) {
case 0:
iuu_cardin = 0;
iuu_cardout = 0;
break;
case 1:
iuu_cardin = TIOCM_CD;
iuu_cardout = 0;
break;
case 2:
iuu_cardin = 0;
iuu_cardout = TIOCM_CD;
break;
case 3:
iuu_cardin = TIOCM_DSR;
iuu_cardout = 0;
break;
case 4:
iuu_cardin = 0;
iuu_cardout = TIOCM_DSR;
break;
case 5:
iuu_cardin = TIOCM_CTS;
iuu_cardout = 0;
break;
case 6:
iuu_cardin = 0;
iuu_cardout = TIOCM_CTS;
break;
case 7:
iuu_cardin = TIOCM_RNG;
iuu_cardout = 0;
break;
case 8:
iuu_cardin = 0;
iuu_cardout = TIOCM_RNG;
}
iuu_uart_flush(port);
dev_dbg(dev, "%s - initialization done\n", __func__);
memset(port->write_urb->transfer_buffer, IUU_UART_RX, 1);
usb_fill_bulk_urb(port->write_urb, port->serial->dev,
usb_sndbulkpipe(port->serial->dev,
port->bulk_out_endpointAddress),
port->write_urb->transfer_buffer, 1,
read_rxcmd_callback, port);
result = usb_submit_urb(port->write_urb, GFP_KERNEL);
if (result) {
dev_err(dev, "%s - failed submitting read urb, error %d\n", __func__, result);
iuu_close(port);
} else {
dev_dbg(dev, "%s - rxcmd OK\n", __func__);
}
return result;
}
/* how to change VCC */
static int iuu_vcc_set(struct usb_serial_port *port, unsigned int vcc)
{
int status;
u8 *buf;
buf = kmalloc(5, GFP_KERNEL);
if (!buf)
return -ENOMEM;
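	/* Command byte followed by the requested VCC as a 32-bit little-endian value */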
buf[0] = IUU_SET_VCC;
buf[1] = vcc & 0xFF;
buf[2] = (vcc >> 8) & 0xFF;
buf[3] = (vcc >> 16) & 0xFF;
buf[4] = (vcc >> 24) & 0xFF;
status = bulk_immediate(port, buf, 5);
kfree(buf);
if (status != IUU_OPERATION_OK)
dev_dbg(&port->dev, "%s - vcc error status = %2x\n", __func__, status);
else
dev_dbg(&port->dev, "%s - vcc OK !\n", __func__);
return status;
}
/*
* Sysfs Attributes
*/
static ssize_t vcc_mode_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct usb_serial_port *port = to_usb_serial_port(dev);
struct iuu_private *priv = usb_get_serial_port_data(port);
return sprintf(buf, "%d\n", priv->vcc);
}
static ssize_t vcc_mode_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t count)
{
struct usb_serial_port *port = to_usb_serial_port(dev);
struct iuu_private *priv = usb_get_serial_port_data(port);
unsigned long v;
if (kstrtoul(buf, 10, &v)) {
dev_err(dev, "%s - vcc_mode: %s is not a unsigned long\n",
__func__, buf);
goto fail_store_vcc_mode;
}
dev_dbg(dev, "%s: setting vcc_mode = %ld\n", __func__, v);
if ((v != 3) && (v != 5)) {
dev_err(dev, "%s - vcc_mode %ld is invalid\n", __func__, v);
} else {
iuu_vcc_set(port, v);
priv->vcc = v;
}
fail_store_vcc_mode:
return count;
}
static DEVICE_ATTR_RW(vcc_mode);
static int iuu_create_sysfs_attrs(struct usb_serial_port *port)
{
return device_create_file(&port->dev, &dev_attr_vcc_mode);
}
static int iuu_remove_sysfs_attrs(struct usb_serial_port *port)
{
device_remove_file(&port->dev, &dev_attr_vcc_mode);
return 0;
}
/*
* End Sysfs Attributes
*/
static struct usb_serial_driver iuu_device = {
.driver = {
.owner = THIS_MODULE,
.name = "iuu_phoenix",
},
.id_table = id_table,
.num_ports = 1,
.num_bulk_in = 1,
.num_bulk_out = 1,
.bulk_in_size = 512,
.bulk_out_size = 512,
.open = iuu_open,
.close = iuu_close,
.write = iuu_uart_write,
.read_bulk_callback = iuu_uart_read_callback,
.tiocmget = iuu_tiocmget,
.tiocmset = iuu_tiocmset,
.set_termios = iuu_set_termios,
.init_termios = iuu_init_termios,
.port_probe = iuu_port_probe,
.port_remove = iuu_port_remove,
};
static struct usb_serial_driver * const serial_drivers[] = {
&iuu_device, NULL
};
module_usb_serial_driver(serial_drivers, id_table);
MODULE_AUTHOR("Alain Degreffe [email protected]");
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL");
module_param(xmas, bool, 0644);
MODULE_PARM_DESC(xmas, "Xmas colors enabled or not");
module_param(boost, int, 0644);
MODULE_PARM_DESC(boost, "Card overclock boost (in percent 100-500)");
module_param(clockmode, int, 0644);
MODULE_PARM_DESC(clockmode, "Card clock mode (1=3.579 MHz, 2=3.680 MHz, "
"3=6 Mhz)");
module_param(cdmode, int, 0644);
MODULE_PARM_DESC(cdmode, "Card detect mode (0=none, 1=CD, 2=!CD, 3=DSR, "
"4=!DSR, 5=CTS, 6=!CTS, 7=RING, 8=!RING)");
module_param(vcc_default, int, 0644);
MODULE_PARM_DESC(vcc_default, "Set default VCC (either 3 for 3.3V or 5 "
"for 5V). Default to 5.");
| linux-master | drivers/usb/serial/iuu_phoenix.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Prolific PL2303 USB to serial adaptor driver
*
* Copyright (C) 2001-2007 Greg Kroah-Hartman ([email protected])
* Copyright (C) 2003 IBM Corp.
*
* Original driver for 2.2.x by anonymous
*
* See Documentation/usb/usb-serial.rst for more information on using this
* driver
*/
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/tty.h>
#include <linux/tty_driver.h>
#include <linux/tty_flip.h>
#include <linux/serial.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/spinlock.h>
#include <linux/uaccess.h>
#include <linux/usb.h>
#include <linux/usb/serial.h>
#include <asm/unaligned.h>
#include "pl2303.h"
#define PL2303_QUIRK_UART_STATE_IDX0 BIT(0)
#define PL2303_QUIRK_LEGACY BIT(1)
#define PL2303_QUIRK_ENDPOINT_HACK BIT(2)
static const struct usb_device_id id_table[] = {
{ USB_DEVICE(PL2303_VENDOR_ID, PL2303_PRODUCT_ID),
.driver_info = PL2303_QUIRK_ENDPOINT_HACK },
{ USB_DEVICE(PL2303_VENDOR_ID, PL2303_PRODUCT_ID_RSAQ2) },
{ USB_DEVICE(PL2303_VENDOR_ID, PL2303_PRODUCT_ID_DCU11) },
{ USB_DEVICE(PL2303_VENDOR_ID, PL2303_PRODUCT_ID_RSAQ3) },
{ USB_DEVICE(PL2303_VENDOR_ID, PL2303_PRODUCT_ID_CHILITAG) },
{ USB_DEVICE(PL2303_VENDOR_ID, PL2303_PRODUCT_ID_PHAROS) },
{ USB_DEVICE(PL2303_VENDOR_ID, PL2303_PRODUCT_ID_ALDIGA) },
{ USB_DEVICE(PL2303_VENDOR_ID, PL2303_PRODUCT_ID_MMX) },
{ USB_DEVICE(PL2303_VENDOR_ID, PL2303_PRODUCT_ID_GPRS) },
{ USB_DEVICE(PL2303_VENDOR_ID, PL2303_PRODUCT_ID_HCR331) },
{ USB_DEVICE(PL2303_VENDOR_ID, PL2303_PRODUCT_ID_MOTOROLA) },
{ USB_DEVICE(PL2303_VENDOR_ID, PL2303_PRODUCT_ID_ZTEK) },
{ USB_DEVICE(PL2303_VENDOR_ID, PL2303_PRODUCT_ID_TB) },
{ USB_DEVICE(PL2303_VENDOR_ID, PL2303_PRODUCT_ID_GC) },
{ USB_DEVICE(PL2303_VENDOR_ID, PL2303_PRODUCT_ID_GB) },
{ USB_DEVICE(PL2303_VENDOR_ID, PL2303_PRODUCT_ID_GT) },
{ USB_DEVICE(PL2303_VENDOR_ID, PL2303_PRODUCT_ID_GL) },
{ USB_DEVICE(PL2303_VENDOR_ID, PL2303_PRODUCT_ID_GE) },
{ USB_DEVICE(PL2303_VENDOR_ID, PL2303_PRODUCT_ID_GS) },
{ USB_DEVICE(IODATA_VENDOR_ID, IODATA_PRODUCT_ID) },
{ USB_DEVICE(IODATA_VENDOR_ID, IODATA_PRODUCT_ID_RSAQ5) },
{ USB_DEVICE(ATEN_VENDOR_ID, ATEN_PRODUCT_ID),
.driver_info = PL2303_QUIRK_ENDPOINT_HACK },
{ USB_DEVICE(ATEN_VENDOR_ID, ATEN_PRODUCT_UC485),
.driver_info = PL2303_QUIRK_ENDPOINT_HACK },
{ USB_DEVICE(ATEN_VENDOR_ID, ATEN_PRODUCT_UC232B),
.driver_info = PL2303_QUIRK_ENDPOINT_HACK },
{ USB_DEVICE(ATEN_VENDOR_ID, ATEN_PRODUCT_ID2) },
{ USB_DEVICE(ATEN_VENDOR_ID2, ATEN_PRODUCT_ID) },
{ USB_DEVICE(ELCOM_VENDOR_ID, ELCOM_PRODUCT_ID) },
{ USB_DEVICE(ELCOM_VENDOR_ID, ELCOM_PRODUCT_ID_UCSGT) },
{ USB_DEVICE(ITEGNO_VENDOR_ID, ITEGNO_PRODUCT_ID) },
{ USB_DEVICE(ITEGNO_VENDOR_ID, ITEGNO_PRODUCT_ID_2080) },
{ USB_DEVICE(MA620_VENDOR_ID, MA620_PRODUCT_ID) },
{ USB_DEVICE(RATOC_VENDOR_ID, RATOC_PRODUCT_ID) },
{ USB_DEVICE(TRIPP_VENDOR_ID, TRIPP_PRODUCT_ID) },
{ USB_DEVICE(RADIOSHACK_VENDOR_ID, RADIOSHACK_PRODUCT_ID) },
{ USB_DEVICE(DCU10_VENDOR_ID, DCU10_PRODUCT_ID) },
{ USB_DEVICE(SITECOM_VENDOR_ID, SITECOM_PRODUCT_ID) },
{ USB_DEVICE(ALCATEL_VENDOR_ID, ALCATEL_PRODUCT_ID) },
{ USB_DEVICE(SIEMENS_VENDOR_ID, SIEMENS_PRODUCT_ID_SX1),
.driver_info = PL2303_QUIRK_UART_STATE_IDX0 },
{ USB_DEVICE(SIEMENS_VENDOR_ID, SIEMENS_PRODUCT_ID_X65),
.driver_info = PL2303_QUIRK_UART_STATE_IDX0 },
{ USB_DEVICE(SIEMENS_VENDOR_ID, SIEMENS_PRODUCT_ID_X75),
.driver_info = PL2303_QUIRK_UART_STATE_IDX0 },
{ USB_DEVICE(SIEMENS_VENDOR_ID, SIEMENS_PRODUCT_ID_EF81),
.driver_info = PL2303_QUIRK_ENDPOINT_HACK },
{ USB_DEVICE(BENQ_VENDOR_ID, BENQ_PRODUCT_ID_S81) }, /* Benq/Siemens S81 */
{ USB_DEVICE(SYNTECH_VENDOR_ID, SYNTECH_PRODUCT_ID) },
{ USB_DEVICE(NOKIA_CA42_VENDOR_ID, NOKIA_CA42_PRODUCT_ID) },
{ USB_DEVICE(CA_42_CA42_VENDOR_ID, CA_42_CA42_PRODUCT_ID) },
{ USB_DEVICE(SAGEM_VENDOR_ID, SAGEM_PRODUCT_ID) },
{ USB_DEVICE(LEADTEK_VENDOR_ID, LEADTEK_9531_PRODUCT_ID) },
{ USB_DEVICE(SPEEDDRAGON_VENDOR_ID, SPEEDDRAGON_PRODUCT_ID) },
{ USB_DEVICE(DATAPILOT_U2_VENDOR_ID, DATAPILOT_U2_PRODUCT_ID) },
{ USB_DEVICE(BELKIN_VENDOR_ID, BELKIN_PRODUCT_ID) },
{ USB_DEVICE(ALCOR_VENDOR_ID, ALCOR_PRODUCT_ID),
.driver_info = PL2303_QUIRK_ENDPOINT_HACK },
{ USB_DEVICE(WS002IN_VENDOR_ID, WS002IN_PRODUCT_ID) },
{ USB_DEVICE(COREGA_VENDOR_ID, COREGA_PRODUCT_ID) },
{ USB_DEVICE(YCCABLE_VENDOR_ID, YCCABLE_PRODUCT_ID) },
{ USB_DEVICE(SUPERIAL_VENDOR_ID, SUPERIAL_PRODUCT_ID) },
{ USB_DEVICE(HP_VENDOR_ID, HP_LD220_PRODUCT_ID) },
{ USB_DEVICE(HP_VENDOR_ID, HP_LD220TA_PRODUCT_ID) },
{ USB_DEVICE(HP_VENDOR_ID, HP_LD381_PRODUCT_ID) },
{ USB_DEVICE(HP_VENDOR_ID, HP_LD381GC_PRODUCT_ID) },
{ USB_DEVICE(HP_VENDOR_ID, HP_LD960_PRODUCT_ID) },
{ USB_DEVICE(HP_VENDOR_ID, HP_LD960TA_PRODUCT_ID) },
{ USB_DEVICE(HP_VENDOR_ID, HP_LCM220_PRODUCT_ID) },
{ USB_DEVICE(HP_VENDOR_ID, HP_LCM960_PRODUCT_ID) },
{ USB_DEVICE(HP_VENDOR_ID, HP_LM920_PRODUCT_ID) },
{ USB_DEVICE(HP_VENDOR_ID, HP_LM930_PRODUCT_ID) },
{ USB_DEVICE(HP_VENDOR_ID, HP_LM940_PRODUCT_ID) },
{ USB_DEVICE(HP_VENDOR_ID, HP_TD620_PRODUCT_ID) },
{ USB_DEVICE(CRESSI_VENDOR_ID, CRESSI_EDY_PRODUCT_ID) },
{ USB_DEVICE(ZEAGLE_VENDOR_ID, ZEAGLE_N2ITION3_PRODUCT_ID) },
{ USB_DEVICE(SONY_VENDOR_ID, SONY_QN3USB_PRODUCT_ID) },
{ USB_DEVICE(SANWA_VENDOR_ID, SANWA_PRODUCT_ID) },
{ USB_DEVICE(ADLINK_VENDOR_ID, ADLINK_ND6530_PRODUCT_ID) },
{ USB_DEVICE(ADLINK_VENDOR_ID, ADLINK_ND6530GC_PRODUCT_ID) },
{ USB_DEVICE(SMART_VENDOR_ID, SMART_PRODUCT_ID) },
{ USB_DEVICE(AT_VENDOR_ID, AT_VTKIT3_PRODUCT_ID) },
{ USB_DEVICE(IBM_VENDOR_ID, IBM_PRODUCT_ID) },
{ } /* Terminating entry */
};
MODULE_DEVICE_TABLE(usb, id_table);
#define SET_LINE_REQUEST_TYPE 0x21
#define SET_LINE_REQUEST 0x20
#define SET_CONTROL_REQUEST_TYPE 0x21
#define SET_CONTROL_REQUEST 0x22
#define CONTROL_DTR 0x01
#define CONTROL_RTS 0x02
#define BREAK_REQUEST_TYPE 0x21
#define BREAK_REQUEST 0x23
#define BREAK_ON 0xffff
#define BREAK_OFF 0x0000
#define GET_LINE_REQUEST_TYPE 0xa1
#define GET_LINE_REQUEST 0x21
#define VENDOR_WRITE_REQUEST_TYPE 0x40
#define VENDOR_WRITE_REQUEST 0x01
#define VENDOR_WRITE_NREQUEST 0x80
#define VENDOR_READ_REQUEST_TYPE 0xc0
#define VENDOR_READ_REQUEST 0x01
#define VENDOR_READ_NREQUEST 0x81
#define UART_STATE_INDEX 8
#define UART_STATE_MSR_MASK 0x8b
#define UART_STATE_TRANSIENT_MASK 0x74
#define UART_DCD 0x01
#define UART_DSR 0x02
#define UART_BREAK_ERROR 0x04
#define UART_RING 0x08
#define UART_FRAME_ERROR 0x10
#define UART_PARITY_ERROR 0x20
#define UART_OVERRUN_ERROR 0x40
#define UART_CTS 0x80
#define PL2303_FLOWCTRL_MASK 0xf0
#define PL2303_READ_TYPE_HX_STATUS 0x8080
#define PL2303_HXN_RESET_REG 0x07
#define PL2303_HXN_RESET_UPSTREAM_PIPE 0x02
#define PL2303_HXN_RESET_DOWNSTREAM_PIPE 0x01
#define PL2303_HXN_FLOWCTRL_REG 0x0a
#define PL2303_HXN_FLOWCTRL_MASK 0x1c
#define PL2303_HXN_FLOWCTRL_NONE 0x1c
#define PL2303_HXN_FLOWCTRL_RTS_CTS 0x18
#define PL2303_HXN_FLOWCTRL_XON_XOFF 0x0c
static int pl2303_set_break(struct usb_serial_port *port, bool enable);
enum pl2303_type {
TYPE_H,
TYPE_HX,
TYPE_TA,
TYPE_TB,
TYPE_HXD,
TYPE_HXN,
TYPE_COUNT
};
struct pl2303_type_data {
const char *name;
speed_t max_baud_rate;
unsigned long quirks;
unsigned int no_autoxonxoff:1;
unsigned int no_divisors:1;
unsigned int alt_divisors:1;
};
struct pl2303_serial_private {
const struct pl2303_type_data *type;
unsigned long quirks;
};
struct pl2303_private {
spinlock_t lock;
u8 line_control;
u8 line_status;
u8 line_settings[7];
};
static const struct pl2303_type_data pl2303_type_data[TYPE_COUNT] = {
[TYPE_H] = {
.name = "H",
.max_baud_rate = 1228800,
.quirks = PL2303_QUIRK_LEGACY,
.no_autoxonxoff = true,
},
[TYPE_HX] = {
.name = "HX",
.max_baud_rate = 6000000,
},
[TYPE_TA] = {
.name = "TA",
.max_baud_rate = 6000000,
.alt_divisors = true,
},
[TYPE_TB] = {
.name = "TB",
.max_baud_rate = 12000000,
.alt_divisors = true,
},
[TYPE_HXD] = {
.name = "HXD",
.max_baud_rate = 12000000,
},
[TYPE_HXN] = {
.name = "G",
.max_baud_rate = 12000000,
.no_divisors = true,
},
};
static int pl2303_vendor_read(struct usb_serial *serial, u16 value,
unsigned char buf[1])
{
struct pl2303_serial_private *spriv = usb_get_serial_data(serial);
struct device *dev = &serial->interface->dev;
u8 request;
int res;
if (spriv->type == &pl2303_type_data[TYPE_HXN])
request = VENDOR_READ_NREQUEST;
else
request = VENDOR_READ_REQUEST;
res = usb_control_msg(serial->dev, usb_rcvctrlpipe(serial->dev, 0),
request, VENDOR_READ_REQUEST_TYPE,
value, 0, buf, 1, 100);
if (res != 1) {
dev_err(dev, "%s - failed to read [%04x]: %d\n", __func__,
value, res);
if (res >= 0)
res = -EIO;
return res;
}
dev_dbg(dev, "%s - [%04x] = %02x\n", __func__, value, buf[0]);
return 0;
}
static int pl2303_vendor_write(struct usb_serial *serial, u16 value, u16 index)
{
struct pl2303_serial_private *spriv = usb_get_serial_data(serial);
struct device *dev = &serial->interface->dev;
u8 request;
int res;
dev_dbg(dev, "%s - [%04x] = %02x\n", __func__, value, index);
if (spriv->type == &pl2303_type_data[TYPE_HXN])
request = VENDOR_WRITE_NREQUEST;
else
request = VENDOR_WRITE_REQUEST;
res = usb_control_msg(serial->dev, usb_sndctrlpipe(serial->dev, 0),
request, VENDOR_WRITE_REQUEST_TYPE,
value, index, NULL, 0, 100);
if (res) {
dev_err(dev, "%s - failed to write [%04x]: %d\n", __func__,
value, res);
return res;
}
return 0;
}
static int pl2303_update_reg(struct usb_serial *serial, u8 reg, u8 mask, u8 val)
{
struct pl2303_serial_private *spriv = usb_get_serial_data(serial);
int ret = 0;
u8 *buf;
buf = kmalloc(1, GFP_KERNEL);
if (!buf)
return -ENOMEM;
if (spriv->type == &pl2303_type_data[TYPE_HXN])
ret = pl2303_vendor_read(serial, reg, buf);
else
ret = pl2303_vendor_read(serial, reg | 0x80, buf);
if (ret)
goto out_free;
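	/* Read-modify-write: only the bits selected by 'mask' are changed. */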
*buf &= ~mask;
*buf |= val & mask;
ret = pl2303_vendor_write(serial, reg, *buf);
out_free:
kfree(buf);
return ret;
}
static int pl2303_probe(struct usb_serial *serial,
const struct usb_device_id *id)
{
usb_set_serial_data(serial, (void *)id->driver_info);
return 0;
}
/*
* Use interrupt endpoint from first interface if available.
*
* This is needed due to the looney way its endpoints are set up.
*/
static int pl2303_endpoint_hack(struct usb_serial *serial,
struct usb_serial_endpoints *epds)
{
struct usb_interface *interface = serial->interface;
struct usb_device *dev = serial->dev;
struct device *ddev = &interface->dev;
struct usb_host_interface *iface_desc;
struct usb_endpoint_descriptor *endpoint;
unsigned int i;
if (interface == dev->actconfig->interface[0])
return 0;
/* check out the endpoints of the other interface */
iface_desc = dev->actconfig->interface[0]->cur_altsetting;
for (i = 0; i < iface_desc->desc.bNumEndpoints; ++i) {
endpoint = &iface_desc->endpoint[i].desc;
if (!usb_endpoint_is_int_in(endpoint))
continue;
dev_dbg(ddev, "found interrupt in on separate interface\n");
if (epds->num_interrupt_in < ARRAY_SIZE(epds->interrupt_in))
epds->interrupt_in[epds->num_interrupt_in++] = endpoint;
}
return 0;
}
static int pl2303_calc_num_ports(struct usb_serial *serial,
struct usb_serial_endpoints *epds)
{
unsigned long quirks = (unsigned long)usb_get_serial_data(serial);
struct device *dev = &serial->interface->dev;
int ret;
if (quirks & PL2303_QUIRK_ENDPOINT_HACK) {
ret = pl2303_endpoint_hack(serial, epds);
if (ret)
return ret;
}
if (epds->num_interrupt_in < 1) {
dev_err(dev, "required interrupt-in endpoint missing\n");
return -ENODEV;
}
return 1;
}
static bool pl2303_supports_hx_status(struct usb_serial *serial)
{
int ret;
u8 buf;
ret = usb_control_msg_recv(serial->dev, 0, VENDOR_READ_REQUEST,
VENDOR_READ_REQUEST_TYPE, PL2303_READ_TYPE_HX_STATUS,
0, &buf, 1, 100, GFP_KERNEL);
return ret == 0;
}
static int pl2303_detect_type(struct usb_serial *serial)
{
struct usb_device_descriptor *desc = &serial->dev->descriptor;
u16 bcdDevice, bcdUSB;
/*
* Legacy PL2303H, variants 0 and 1 (difference unknown).
*/
if (desc->bDeviceClass == 0x02)
return TYPE_H; /* variant 0 */
if (desc->bMaxPacketSize0 != 0x40) {
if (desc->bDeviceClass == 0x00 || desc->bDeviceClass == 0xff)
return TYPE_H; /* variant 1 */
return TYPE_H; /* variant 0 */
}
bcdDevice = le16_to_cpu(desc->bcdDevice);
bcdUSB = le16_to_cpu(desc->bcdUSB);
switch (bcdUSB) {
case 0x101:
/* USB 1.0.1? Let's assume they meant 1.1... */
fallthrough;
case 0x110:
switch (bcdDevice) {
case 0x300:
return TYPE_HX;
case 0x400:
return TYPE_HXD;
default:
return TYPE_HX;
}
break;
case 0x200:
switch (bcdDevice) {
case 0x100: /* GC */
case 0x105:
return TYPE_HXN;
case 0x300: /* GT / TA */
if (pl2303_supports_hx_status(serial))
return TYPE_TA;
fallthrough;
case 0x305:
case 0x400: /* GL */
case 0x405:
return TYPE_HXN;
case 0x500: /* GE / TB */
if (pl2303_supports_hx_status(serial))
return TYPE_TB;
fallthrough;
case 0x505:
case 0x600: /* GS */
case 0x605:
case 0x700: /* GR */
case 0x705:
return TYPE_HXN;
}
break;
}
dev_err(&serial->interface->dev,
"unknown device type, please report to [email protected]\n");
return -ENODEV;
}
static int pl2303_startup(struct usb_serial *serial)
{
struct pl2303_serial_private *spriv;
enum pl2303_type type;
unsigned char *buf;
int ret;
ret = pl2303_detect_type(serial);
if (ret < 0)
return ret;
type = ret;
dev_dbg(&serial->interface->dev, "device type: %s\n", pl2303_type_data[type].name);
spriv = kzalloc(sizeof(*spriv), GFP_KERNEL);
if (!spriv)
return -ENOMEM;
spriv->type = &pl2303_type_data[type];
spriv->quirks = (unsigned long)usb_get_serial_data(serial);
spriv->quirks |= spriv->type->quirks;
usb_set_serial_data(serial, spriv);
if (type != TYPE_HXN) {
buf = kmalloc(1, GFP_KERNEL);
if (!buf) {
kfree(spriv);
return -ENOMEM;
}
pl2303_vendor_read(serial, 0x8484, buf);
pl2303_vendor_write(serial, 0x0404, 0);
pl2303_vendor_read(serial, 0x8484, buf);
pl2303_vendor_read(serial, 0x8383, buf);
pl2303_vendor_read(serial, 0x8484, buf);
pl2303_vendor_write(serial, 0x0404, 1);
pl2303_vendor_read(serial, 0x8484, buf);
pl2303_vendor_read(serial, 0x8383, buf);
pl2303_vendor_write(serial, 0, 1);
pl2303_vendor_write(serial, 1, 0);
if (spriv->quirks & PL2303_QUIRK_LEGACY)
pl2303_vendor_write(serial, 2, 0x24);
else
pl2303_vendor_write(serial, 2, 0x44);
kfree(buf);
}
return 0;
}
static void pl2303_release(struct usb_serial *serial)
{
struct pl2303_serial_private *spriv = usb_get_serial_data(serial);
kfree(spriv);
}
static int pl2303_port_probe(struct usb_serial_port *port)
{
struct pl2303_private *priv;
priv = kzalloc(sizeof(*priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
spin_lock_init(&priv->lock);
usb_set_serial_port_data(port, priv);
port->port.drain_delay = 256;
return 0;
}
static void pl2303_port_remove(struct usb_serial_port *port)
{
struct pl2303_private *priv = usb_get_serial_port_data(port);
kfree(priv);
}
static int pl2303_set_control_lines(struct usb_serial_port *port, u8 value)
{
struct usb_device *dev = port->serial->dev;
int retval;
dev_dbg(&port->dev, "%s - %02x\n", __func__, value);
retval = usb_control_msg(dev, usb_sndctrlpipe(dev, 0),
SET_CONTROL_REQUEST, SET_CONTROL_REQUEST_TYPE,
value, 0, NULL, 0, 100);
if (retval)
dev_err(&port->dev, "%s - failed: %d\n", __func__, retval);
return retval;
}
/*
* Returns the nearest supported baud rate that can be set directly without
* using divisors.
*/
static speed_t pl2303_get_supported_baud_rate(speed_t baud)
{
static const speed_t baud_sup[] = {
75, 150, 300, 600, 1200, 1800, 2400, 3600, 4800, 7200, 9600,
14400, 19200, 28800, 38400, 57600, 115200, 230400, 460800,
614400, 921600, 1228800, 2457600, 3000000, 6000000
};
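	/*
	 * Example (illustrative): a request for 250000 falls between 230400
	 * and 460800 and is rounded to the closer value, 230400.
	 */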
unsigned i;
for (i = 0; i < ARRAY_SIZE(baud_sup); ++i) {
if (baud_sup[i] > baud)
break;
}
if (i == ARRAY_SIZE(baud_sup))
baud = baud_sup[i - 1];
else if (i > 0 && (baud_sup[i] - baud) > (baud - baud_sup[i - 1]))
baud = baud_sup[i - 1];
else
baud = baud_sup[i];
return baud;
}
/*
* NOTE: If unsupported baud rates are set directly, the PL2303 seems to
* use 9600 baud.
*/
static speed_t pl2303_encode_baud_rate_direct(unsigned char buf[4],
speed_t baud)
{
put_unaligned_le32(baud, buf);
return baud;
}
static speed_t pl2303_encode_baud_rate_divisor(unsigned char buf[4],
speed_t baud)
{
unsigned int baseline, mantissa, exponent;
/*
* Apparently the formula is:
* baudrate = 12M * 32 / (mantissa * 4^exponent)
* where
* mantissa = buf[8:0]
* exponent = buf[11:9]
*/
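	/*
	 * Rough worked example (illustrative): for 250000 baud,
	 * mantissa = 384000000 / 250000 = 1536, which is reduced once
	 * (>> 2) to 384 with exponent 1; the resulting rate is
	 * (384000000 / 384) >> 2 = 250000 exactly.
	 */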
baseline = 12000000 * 32;
mantissa = baseline / baud;
if (mantissa == 0)
mantissa = 1; /* Avoid dividing by zero if baud > 32*12M. */
exponent = 0;
while (mantissa >= 512) {
if (exponent < 7) {
mantissa >>= 2; /* divide by 4 */
exponent++;
} else {
/* Exponent is maxed. Trim mantissa and leave. */
mantissa = 511;
break;
}
}
buf[3] = 0x80;
buf[2] = 0;
buf[1] = exponent << 1 | mantissa >> 8;
buf[0] = mantissa & 0xff;
/* Calculate and return the exact baud rate. */
baud = (baseline / mantissa) >> (exponent << 1);
return baud;
}
static speed_t pl2303_encode_baud_rate_divisor_alt(unsigned char buf[4],
speed_t baud)
{
unsigned int baseline, mantissa, exponent;
/*
* Apparently, for the TA version the formula is:
* baudrate = 12M * 32 / (mantissa * 2^exponent)
* where
* mantissa = buf[10:0]
* exponent = buf[15:13 16]
*/
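	/*
	 * Rough worked example (illustrative): for 250000 baud,
	 * mantissa = 384000000 / 250000 = 1536 (already < 2048), the
	 * exponent stays 0, and the resulting rate is
	 * (384000000 / 1536) >> 0 = 250000 exactly.
	 */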
baseline = 12000000 * 32;
mantissa = baseline / baud;
if (mantissa == 0)
mantissa = 1; /* Avoid dividing by zero if baud > 32*12M. */
exponent = 0;
while (mantissa >= 2048) {
if (exponent < 15) {
mantissa >>= 1; /* divide by 2 */
exponent++;
} else {
/* Exponent is maxed. Trim mantissa and leave. */
mantissa = 2047;
break;
}
}
buf[3] = 0x80;
buf[2] = exponent & 0x01;
buf[1] = (exponent & ~0x01) << 4 | mantissa >> 8;
buf[0] = mantissa & 0xff;
/* Calculate and return the exact baud rate. */
baud = (baseline / mantissa) >> exponent;
return baud;
}
static void pl2303_encode_baud_rate(struct tty_struct *tty,
struct usb_serial_port *port,
u8 buf[4])
{
struct usb_serial *serial = port->serial;
struct pl2303_serial_private *spriv = usb_get_serial_data(serial);
speed_t baud_sup;
speed_t baud;
baud = tty_get_baud_rate(tty);
dev_dbg(&port->dev, "baud requested = %u\n", baud);
if (!baud)
return;
if (spriv->type->max_baud_rate)
baud = min_t(speed_t, baud, spriv->type->max_baud_rate);
/*
* Use direct method for supported baud rates, otherwise use divisors.
* Newer chip types do not support divisor encoding.
*/
if (spriv->type->no_divisors)
baud_sup = baud;
else
baud_sup = pl2303_get_supported_baud_rate(baud);
if (baud == baud_sup)
baud = pl2303_encode_baud_rate_direct(buf, baud);
else if (spriv->type->alt_divisors)
baud = pl2303_encode_baud_rate_divisor_alt(buf, baud);
else
baud = pl2303_encode_baud_rate_divisor(buf, baud);
/* Save resulting baud rate */
tty_encode_baud_rate(tty, baud, baud);
dev_dbg(&port->dev, "baud set = %u\n", baud);
}
static int pl2303_get_line_request(struct usb_serial_port *port,
unsigned char buf[7])
{
struct usb_device *udev = port->serial->dev;
int ret;
ret = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0),
GET_LINE_REQUEST, GET_LINE_REQUEST_TYPE,
0, 0, buf, 7, 100);
if (ret != 7) {
dev_err(&port->dev, "%s - failed: %d\n", __func__, ret);
if (ret >= 0)
ret = -EIO;
return ret;
}
dev_dbg(&port->dev, "%s - %7ph\n", __func__, buf);
return 0;
}
static int pl2303_set_line_request(struct usb_serial_port *port,
unsigned char buf[7])
{
struct usb_device *udev = port->serial->dev;
int ret;
ret = usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
SET_LINE_REQUEST, SET_LINE_REQUEST_TYPE,
0, 0, buf, 7, 100);
if (ret < 0) {
dev_err(&port->dev, "%s - failed: %d\n", __func__, ret);
return ret;
}
dev_dbg(&port->dev, "%s - %7ph\n", __func__, buf);
return 0;
}
static bool pl2303_termios_change(const struct ktermios *a, const struct ktermios *b)
{
bool ixon_change;
ixon_change = ((a->c_iflag ^ b->c_iflag) & (IXON | IXANY)) ||
a->c_cc[VSTART] != b->c_cc[VSTART] ||
a->c_cc[VSTOP] != b->c_cc[VSTOP];
return tty_termios_hw_change(a, b) || ixon_change;
}
static bool pl2303_enable_xonxoff(struct tty_struct *tty, const struct pl2303_type_data *type)
{
if (!I_IXON(tty) || I_IXANY(tty))
return false;
if (START_CHAR(tty) != 0x11 || STOP_CHAR(tty) != 0x13)
return false;
if (type->no_autoxonxoff)
return false;
return true;
}
static void pl2303_set_termios(struct tty_struct *tty,
struct usb_serial_port *port,
const struct ktermios *old_termios)
{
struct usb_serial *serial = port->serial;
struct pl2303_serial_private *spriv = usb_get_serial_data(serial);
struct pl2303_private *priv = usb_get_serial_port_data(port);
unsigned long flags;
unsigned char *buf;
int ret;
u8 control;
if (old_termios && !pl2303_termios_change(&tty->termios, old_termios))
return;
buf = kzalloc(7, GFP_KERNEL);
if (!buf) {
/* Report back no change occurred */
if (old_termios)
tty->termios = *old_termios;
return;
}
pl2303_get_line_request(port, buf);
buf[6] = tty_get_char_size(tty->termios.c_cflag);
dev_dbg(&port->dev, "data bits = %d\n", buf[6]);
/* For reference buf[0]:buf[3] baud rate value */
pl2303_encode_baud_rate(tty, port, &buf[0]);
	/* For reference buf[4]=0 is 1 stop bit */
/* For reference buf[4]=1 is 1.5 stop bits */
/* For reference buf[4]=2 is 2 stop bits */
if (C_CSTOPB(tty)) {
/*
* NOTE: Comply with "real" UARTs / RS232:
* use 1.5 instead of 2 stop bits with 5 data bits
*/
if (C_CSIZE(tty) == CS5) {
buf[4] = 1;
dev_dbg(&port->dev, "stop bits = 1.5\n");
} else {
buf[4] = 2;
dev_dbg(&port->dev, "stop bits = 2\n");
}
} else {
buf[4] = 0;
dev_dbg(&port->dev, "stop bits = 1\n");
}
if (C_PARENB(tty)) {
		/* For reference buf[5]=0 is no parity */
/* For reference buf[5]=1 is odd parity */
/* For reference buf[5]=2 is even parity */
/* For reference buf[5]=3 is mark parity */
/* For reference buf[5]=4 is space parity */
if (C_PARODD(tty)) {
if (C_CMSPAR(tty)) {
buf[5] = 3;
dev_dbg(&port->dev, "parity = mark\n");
} else {
buf[5] = 1;
dev_dbg(&port->dev, "parity = odd\n");
}
} else {
if (C_CMSPAR(tty)) {
buf[5] = 4;
dev_dbg(&port->dev, "parity = space\n");
} else {
buf[5] = 2;
dev_dbg(&port->dev, "parity = even\n");
}
}
} else {
buf[5] = 0;
dev_dbg(&port->dev, "parity = none\n");
}
/*
* Some PL2303 are known to lose bytes if you change serial settings
* even to the same values as before. Thus we actually need to filter
* in this specific case.
*
* Note that the tty_termios_hw_change check above is not sufficient
* as a previously requested baud rate may differ from the one
* actually used (and stored in old_termios).
*
* NOTE: No additional locking needed for line_settings as it is
* only used in set_termios, which is serialised against itself.
*/
if (!old_termios || memcmp(buf, priv->line_settings, 7)) {
ret = pl2303_set_line_request(port, buf);
if (!ret)
memcpy(priv->line_settings, buf, 7);
}
/* change control lines if we are switching to or from B0 */
spin_lock_irqsave(&priv->lock, flags);
control = priv->line_control;
if (C_BAUD(tty) == B0)
priv->line_control &= ~(CONTROL_DTR | CONTROL_RTS);
else if (old_termios && (old_termios->c_cflag & CBAUD) == B0)
priv->line_control |= (CONTROL_DTR | CONTROL_RTS);
if (control != priv->line_control) {
control = priv->line_control;
spin_unlock_irqrestore(&priv->lock, flags);
pl2303_set_control_lines(port, control);
} else {
spin_unlock_irqrestore(&priv->lock, flags);
}
if (C_CRTSCTS(tty)) {
if (spriv->quirks & PL2303_QUIRK_LEGACY) {
pl2303_update_reg(serial, 0, PL2303_FLOWCTRL_MASK, 0x40);
} else if (spriv->type == &pl2303_type_data[TYPE_HXN]) {
pl2303_update_reg(serial, PL2303_HXN_FLOWCTRL_REG,
PL2303_HXN_FLOWCTRL_MASK,
PL2303_HXN_FLOWCTRL_RTS_CTS);
} else {
pl2303_update_reg(serial, 0, PL2303_FLOWCTRL_MASK, 0x60);
}
} else if (pl2303_enable_xonxoff(tty, spriv->type)) {
if (spriv->type == &pl2303_type_data[TYPE_HXN]) {
pl2303_update_reg(serial, PL2303_HXN_FLOWCTRL_REG,
PL2303_HXN_FLOWCTRL_MASK,
PL2303_HXN_FLOWCTRL_XON_XOFF);
} else {
pl2303_update_reg(serial, 0, PL2303_FLOWCTRL_MASK, 0xc0);
}
} else {
if (spriv->type == &pl2303_type_data[TYPE_HXN]) {
pl2303_update_reg(serial, PL2303_HXN_FLOWCTRL_REG,
PL2303_HXN_FLOWCTRL_MASK,
PL2303_HXN_FLOWCTRL_NONE);
} else {
pl2303_update_reg(serial, 0, PL2303_FLOWCTRL_MASK, 0);
}
}
kfree(buf);
}
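/*
* Illustrative sketch of the 7-byte line request assembled in
* pl2303_set_termios() above. The byte layout follows the "For reference"
* comments in that function; the concrete values are an assumed 9600 8N1
* example (using the direct baud-rate encoding), not taken from vendor
* documentation:
*
*	buf[0..3] = 80 25 00 00		9600 baud, little endian
*	buf[4]    = 0			1 stop bit
*	buf[5]    = 0			no parity
*	buf[6]    = 8			8 data bits
*/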
static void pl2303_dtr_rts(struct usb_serial_port *port, int on)
{
struct pl2303_private *priv = usb_get_serial_port_data(port);
unsigned long flags;
u8 control;
spin_lock_irqsave(&priv->lock, flags);
if (on)
priv->line_control |= (CONTROL_DTR | CONTROL_RTS);
else
priv->line_control &= ~(CONTROL_DTR | CONTROL_RTS);
control = priv->line_control;
spin_unlock_irqrestore(&priv->lock, flags);
pl2303_set_control_lines(port, control);
}
static void pl2303_close(struct usb_serial_port *port)
{
usb_serial_generic_close(port);
usb_kill_urb(port->interrupt_in_urb);
pl2303_set_break(port, false);
}
static int pl2303_open(struct tty_struct *tty, struct usb_serial_port *port)
{
struct usb_serial *serial = port->serial;
struct pl2303_serial_private *spriv = usb_get_serial_data(serial);
int result;
if (spriv->quirks & PL2303_QUIRK_LEGACY) {
usb_clear_halt(serial->dev, port->write_urb->pipe);
usb_clear_halt(serial->dev, port->read_urb->pipe);
} else {
/* reset upstream data pipes */
if (spriv->type == &pl2303_type_data[TYPE_HXN]) {
pl2303_vendor_write(serial, PL2303_HXN_RESET_REG,
PL2303_HXN_RESET_UPSTREAM_PIPE |
PL2303_HXN_RESET_DOWNSTREAM_PIPE);
} else {
pl2303_vendor_write(serial, 8, 0);
pl2303_vendor_write(serial, 9, 0);
}
}
/* Setup termios */
if (tty)
pl2303_set_termios(tty, port, NULL);
result = usb_submit_urb(port->interrupt_in_urb, GFP_KERNEL);
if (result) {
dev_err(&port->dev, "failed to submit interrupt urb: %d\n",
result);
return result;
}
result = usb_serial_generic_open(tty, port);
if (result) {
usb_kill_urb(port->interrupt_in_urb);
return result;
}
return 0;
}
static int pl2303_tiocmset(struct tty_struct *tty,
unsigned int set, unsigned int clear)
{
struct usb_serial_port *port = tty->driver_data;
struct pl2303_private *priv = usb_get_serial_port_data(port);
unsigned long flags;
u8 control;
int ret;
spin_lock_irqsave(&priv->lock, flags);
if (set & TIOCM_RTS)
priv->line_control |= CONTROL_RTS;
if (set & TIOCM_DTR)
priv->line_control |= CONTROL_DTR;
if (clear & TIOCM_RTS)
priv->line_control &= ~CONTROL_RTS;
if (clear & TIOCM_DTR)
priv->line_control &= ~CONTROL_DTR;
control = priv->line_control;
spin_unlock_irqrestore(&priv->lock, flags);
ret = pl2303_set_control_lines(port, control);
if (ret)
return usb_translate_errors(ret);
return 0;
}
static int pl2303_tiocmget(struct tty_struct *tty)
{
struct usb_serial_port *port = tty->driver_data;
struct pl2303_private *priv = usb_get_serial_port_data(port);
unsigned long flags;
unsigned int mcr;
unsigned int status;
unsigned int result;
spin_lock_irqsave(&priv->lock, flags);
mcr = priv->line_control;
status = priv->line_status;
spin_unlock_irqrestore(&priv->lock, flags);
result = ((mcr & CONTROL_DTR) ? TIOCM_DTR : 0)
| ((mcr & CONTROL_RTS) ? TIOCM_RTS : 0)
| ((status & UART_CTS) ? TIOCM_CTS : 0)
| ((status & UART_DSR) ? TIOCM_DSR : 0)
| ((status & UART_RING) ? TIOCM_RI : 0)
| ((status & UART_DCD) ? TIOCM_CD : 0);
dev_dbg(&port->dev, "%s - result = %x\n", __func__, result);
return result;
}
static int pl2303_carrier_raised(struct usb_serial_port *port)
{
struct pl2303_private *priv = usb_get_serial_port_data(port);
if (priv->line_status & UART_DCD)
return 1;
return 0;
}
static int pl2303_set_break(struct usb_serial_port *port, bool enable)
{
struct usb_serial *serial = port->serial;
u16 state;
int result;
if (enable)
state = BREAK_ON;
else
state = BREAK_OFF;
dev_dbg(&port->dev, "%s - turning break %s\n", __func__,
state == BREAK_OFF ? "off" : "on");
result = usb_control_msg(serial->dev, usb_sndctrlpipe(serial->dev, 0),
BREAK_REQUEST, BREAK_REQUEST_TYPE, state,
0, NULL, 0, 100);
if (result) {
dev_err(&port->dev, "error sending break = %d\n", result);
return result;
}
return 0;
}
static int pl2303_break_ctl(struct tty_struct *tty, int state)
{
struct usb_serial_port *port = tty->driver_data;
return pl2303_set_break(port, state);
}
static void pl2303_update_line_status(struct usb_serial_port *port,
unsigned char *data,
unsigned int actual_length)
{
struct usb_serial *serial = port->serial;
struct pl2303_serial_private *spriv = usb_get_serial_data(serial);
struct pl2303_private *priv = usb_get_serial_port_data(port);
struct tty_struct *tty;
unsigned long flags;
unsigned int status_idx = UART_STATE_INDEX;
u8 status;
u8 delta;
if (spriv->quirks & PL2303_QUIRK_UART_STATE_IDX0)
status_idx = 0;
if (actual_length < status_idx + 1)
return;
status = data[status_idx];
/* Save off the uart status for others to look at */
spin_lock_irqsave(&priv->lock, flags);
delta = priv->line_status ^ status;
priv->line_status = status;
spin_unlock_irqrestore(&priv->lock, flags);
if (status & UART_BREAK_ERROR)
usb_serial_handle_break(port);
if (delta & UART_STATE_MSR_MASK) {
if (delta & UART_CTS)
port->icount.cts++;
if (delta & UART_DSR)
port->icount.dsr++;
if (delta & UART_RING)
port->icount.rng++;
if (delta & UART_DCD) {
port->icount.dcd++;
tty = tty_port_tty_get(&port->port);
if (tty) {
usb_serial_handle_dcd_change(port, tty,
status & UART_DCD);
tty_kref_put(tty);
}
}
wake_up_interruptible(&port->port.delta_msr_wait);
}
}
static void pl2303_read_int_callback(struct urb *urb)
{
struct usb_serial_port *port = urb->context;
unsigned char *data = urb->transfer_buffer;
unsigned int actual_length = urb->actual_length;
int status = urb->status;
int retval;
switch (status) {
case 0:
/* success */
break;
case -ECONNRESET:
case -ENOENT:
case -ESHUTDOWN:
/* this urb is terminated, clean up */
dev_dbg(&port->dev, "%s - urb shutting down with status: %d\n",
__func__, status);
return;
default:
dev_dbg(&port->dev, "%s - nonzero urb status received: %d\n",
__func__, status);
goto exit;
}
usb_serial_debug_data(&port->dev, __func__,
urb->actual_length, urb->transfer_buffer);
pl2303_update_line_status(port, data, actual_length);
exit:
retval = usb_submit_urb(urb, GFP_ATOMIC);
if (retval) {
dev_err(&port->dev,
"%s - usb_submit_urb failed with result %d\n",
__func__, retval);
}
}
static void pl2303_process_read_urb(struct urb *urb)
{
struct usb_serial_port *port = urb->context;
struct pl2303_private *priv = usb_get_serial_port_data(port);
unsigned char *data = urb->transfer_buffer;
char tty_flag = TTY_NORMAL;
unsigned long flags;
u8 line_status;
int i;
/* update line status */
spin_lock_irqsave(&priv->lock, flags);
line_status = priv->line_status;
priv->line_status &= ~UART_STATE_TRANSIENT_MASK;
spin_unlock_irqrestore(&priv->lock, flags);
if (!urb->actual_length)
return;
/*
* Break takes precedence over parity, which takes precedence over
* framing errors.
*/
if (line_status & UART_BREAK_ERROR)
tty_flag = TTY_BREAK;
else if (line_status & UART_PARITY_ERROR)
tty_flag = TTY_PARITY;
else if (line_status & UART_FRAME_ERROR)
tty_flag = TTY_FRAME;
if (tty_flag != TTY_NORMAL)
dev_dbg(&port->dev, "%s - tty_flag = %d\n", __func__,
tty_flag);
/* overrun is special, not associated with a char */
if (line_status & UART_OVERRUN_ERROR)
tty_insert_flip_char(&port->port, 0, TTY_OVERRUN);
if (port->sysrq) {
for (i = 0; i < urb->actual_length; ++i)
if (!usb_serial_handle_sysrq_char(port, data[i]))
tty_insert_flip_char(&port->port, data[i],
tty_flag);
} else {
tty_insert_flip_string_fixed_flag(&port->port, data, tty_flag,
urb->actual_length);
}
tty_flip_buffer_push(&port->port);
}
static struct usb_serial_driver pl2303_device = {
.driver = {
.owner = THIS_MODULE,
.name = "pl2303",
},
.id_table = id_table,
.num_bulk_in = 1,
.num_bulk_out = 1,
.num_interrupt_in = 0, /* see pl2303_calc_num_ports */
.bulk_in_size = 256,
.bulk_out_size = 256,
.open = pl2303_open,
.close = pl2303_close,
.dtr_rts = pl2303_dtr_rts,
.carrier_raised = pl2303_carrier_raised,
.break_ctl = pl2303_break_ctl,
.set_termios = pl2303_set_termios,
.tiocmget = pl2303_tiocmget,
.tiocmset = pl2303_tiocmset,
.tiocmiwait = usb_serial_generic_tiocmiwait,
.process_read_urb = pl2303_process_read_urb,
.read_int_callback = pl2303_read_int_callback,
.probe = pl2303_probe,
.calc_num_ports = pl2303_calc_num_ports,
.attach = pl2303_startup,
.release = pl2303_release,
.port_probe = pl2303_port_probe,
.port_remove = pl2303_port_remove,
};
static struct usb_serial_driver * const serial_drivers[] = {
&pl2303_device, NULL
};
module_usb_serial_driver(serial_drivers, id_table);
MODULE_DESCRIPTION("Prolific PL2303 USB to serial adaptor driver");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/usb/serial/pl2303.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* USB ConnectTech WhiteHEAT driver
*
* Copyright (C) 2002
* Connect Tech Inc.
*
* Copyright (C) 1999 - 2001
* Greg Kroah-Hartman ([email protected])
*
* See Documentation/usb/usb-serial.rst for more information on using this
* driver
*/
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/tty.h>
#include <linux/tty_driver.h>
#include <linux/tty_flip.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/mutex.h>
#include <linux/uaccess.h>
#include <asm/termbits.h>
#include <linux/usb.h>
#include <linux/serial_reg.h>
#include <linux/serial.h>
#include <linux/usb/serial.h>
#include <linux/usb/ezusb.h>
#include "whiteheat.h" /* WhiteHEAT specific commands */
/*
* Version Information
*/
#define DRIVER_AUTHOR "Greg Kroah-Hartman <[email protected]>, Stuart MacDonald <[email protected]>"
#define DRIVER_DESC "USB ConnectTech WhiteHEAT driver"
#define CONNECT_TECH_VENDOR_ID 0x0710
#define CONNECT_TECH_FAKE_WHITE_HEAT_ID 0x0001
#define CONNECT_TECH_WHITE_HEAT_ID 0x8001
/*
ID tables for whiteheat are unusual, because we want to do different
things for different versions of the device. Eventually, this
will be doable from a single table. But, for now, we define two
separate ID tables, and then a third table that combines them
just for the purpose of exporting the autoloading information.
*/
static const struct usb_device_id id_table_std[] = {
{ USB_DEVICE(CONNECT_TECH_VENDOR_ID, CONNECT_TECH_WHITE_HEAT_ID) },
{ } /* Terminating entry */
};
static const struct usb_device_id id_table_prerenumeration[] = {
{ USB_DEVICE(CONNECT_TECH_VENDOR_ID, CONNECT_TECH_FAKE_WHITE_HEAT_ID) },
{ } /* Terminating entry */
};
static const struct usb_device_id id_table_combined[] = {
{ USB_DEVICE(CONNECT_TECH_VENDOR_ID, CONNECT_TECH_WHITE_HEAT_ID) },
{ USB_DEVICE(CONNECT_TECH_VENDOR_ID, CONNECT_TECH_FAKE_WHITE_HEAT_ID) },
{ } /* Terminating entry */
};
MODULE_DEVICE_TABLE(usb, id_table_combined);
/* function prototypes for the Connect Tech WhiteHEAT prerenumeration device */
static int whiteheat_firmware_download(struct usb_serial *serial,
const struct usb_device_id *id);
static int whiteheat_firmware_attach(struct usb_serial *serial);
/* function prototypes for the Connect Tech WhiteHEAT serial converter */
static int whiteheat_attach(struct usb_serial *serial);
static void whiteheat_release(struct usb_serial *serial);
static int whiteheat_port_probe(struct usb_serial_port *port);
static void whiteheat_port_remove(struct usb_serial_port *port);
static int whiteheat_open(struct tty_struct *tty,
struct usb_serial_port *port);
static void whiteheat_close(struct usb_serial_port *port);
static void whiteheat_get_serial(struct tty_struct *tty,
struct serial_struct *ss);
static void whiteheat_set_termios(struct tty_struct *tty,
struct usb_serial_port *port,
const struct ktermios *old_termios);
static int whiteheat_tiocmget(struct tty_struct *tty);
static int whiteheat_tiocmset(struct tty_struct *tty,
unsigned int set, unsigned int clear);
static int whiteheat_break_ctl(struct tty_struct *tty, int break_state);
static struct usb_serial_driver whiteheat_fake_device = {
.driver = {
.owner = THIS_MODULE,
.name = "whiteheatnofirm",
},
.description = "Connect Tech - WhiteHEAT - (prerenumeration)",
.id_table = id_table_prerenumeration,
.num_ports = 1,
.probe = whiteheat_firmware_download,
.attach = whiteheat_firmware_attach,
};
static struct usb_serial_driver whiteheat_device = {
.driver = {
.owner = THIS_MODULE,
.name = "whiteheat",
},
.description = "Connect Tech - WhiteHEAT",
.id_table = id_table_std,
.num_ports = 4,
.num_bulk_in = 5,
.num_bulk_out = 5,
.attach = whiteheat_attach,
.release = whiteheat_release,
.port_probe = whiteheat_port_probe,
.port_remove = whiteheat_port_remove,
.open = whiteheat_open,
.close = whiteheat_close,
.get_serial = whiteheat_get_serial,
.set_termios = whiteheat_set_termios,
.break_ctl = whiteheat_break_ctl,
.tiocmget = whiteheat_tiocmget,
.tiocmset = whiteheat_tiocmset,
.throttle = usb_serial_generic_throttle,
.unthrottle = usb_serial_generic_unthrottle,
};
static struct usb_serial_driver * const serial_drivers[] = {
&whiteheat_fake_device, &whiteheat_device, NULL
};
struct whiteheat_command_private {
struct mutex mutex;
__u8 port_running;
__u8 command_finished;
wait_queue_head_t wait_command; /* for handling sleeping whilst
waiting for a command to
finish */
__u8 result_buffer[64];
};
struct whiteheat_private {
__u8 mcr; /* FIXME: no locking on mcr */
};
/* local function prototypes */
static int start_command_port(struct usb_serial *serial);
static void stop_command_port(struct usb_serial *serial);
static void command_port_write_callback(struct urb *urb);
static void command_port_read_callback(struct urb *urb);
static int firm_send_command(struct usb_serial_port *port, __u8 command,
__u8 *data, __u8 datasize);
static int firm_open(struct usb_serial_port *port);
static int firm_close(struct usb_serial_port *port);
static void firm_setup_port(struct tty_struct *tty);
static int firm_set_rts(struct usb_serial_port *port, __u8 onoff);
static int firm_set_dtr(struct usb_serial_port *port, __u8 onoff);
static int firm_set_break(struct usb_serial_port *port, __u8 onoff);
static int firm_purge(struct usb_serial_port *port, __u8 rxtx);
static int firm_get_dtr_rts(struct usb_serial_port *port);
static int firm_report_tx_done(struct usb_serial_port *port);
#define COMMAND_PORT 4
#define COMMAND_TIMEOUT (2*HZ) /* 2 second timeout for a command */
#define COMMAND_TIMEOUT_MS 2000
/*****************************************************************************
* Connect Tech's White Heat prerenumeration driver functions
*****************************************************************************/
/* steps to download the firmware to the WhiteHEAT device:
- hold the reset (by writing to the reset bit of the CPUCS register)
- download the VEND_AX.HEX file to the chip using VENDOR_REQUEST-ANCHOR_LOAD
- release the reset (by writing to the CPUCS register)
- download the WH.HEX file for all addresses greater than 0x1b3f using
VENDOR_REQUEST-ANCHOR_EXTERNAL_RAM_LOAD
- hold the reset
- download the WH.HEX file for all addresses less than 0x1b40 using
VENDOR_REQUEST_ANCHOR_LOAD
- release the reset
- the device renumerates itself and comes up as a new device id once
the firmware download has completed.
*/
static int whiteheat_firmware_download(struct usb_serial *serial,
const struct usb_device_id *id)
{
int response;
response = ezusb_fx1_ihex_firmware_download(serial->dev, "whiteheat_loader.fw");
if (response >= 0) {
response = ezusb_fx1_ihex_firmware_download(serial->dev, "whiteheat.fw");
if (response >= 0)
return 0;
}
return -ENOENT;
}
static int whiteheat_firmware_attach(struct usb_serial *serial)
{
/* We want this device to fail to have a driver assigned to it */
return 1;
}
/*****************************************************************************
* Connect Tech's White Heat serial driver functions
*****************************************************************************/
static int whiteheat_attach(struct usb_serial *serial)
{
struct usb_serial_port *command_port;
struct whiteheat_command_private *command_info;
struct whiteheat_hw_info *hw_info;
int pipe;
int ret;
int alen;
__u8 *command;
__u8 *result;
command_port = serial->port[COMMAND_PORT];
pipe = usb_sndbulkpipe(serial->dev,
command_port->bulk_out_endpointAddress);
command = kmalloc(2, GFP_KERNEL);
if (!command)
goto no_command_buffer;
command[0] = WHITEHEAT_GET_HW_INFO;
command[1] = 0;
result = kmalloc(sizeof(*hw_info) + 1, GFP_KERNEL);
if (!result)
goto no_result_buffer;
/*
* When the module is reloaded the firmware is still there and
* the endpoints are still in the usb core unchanged. This is the
* unlinking bug in disguise. Same for the call below.
*/
usb_clear_halt(serial->dev, pipe);
ret = usb_bulk_msg(serial->dev, pipe, command, 2,
&alen, COMMAND_TIMEOUT_MS);
if (ret) {
dev_err(&serial->dev->dev, "%s: Couldn't send command [%d]\n",
serial->type->description, ret);
goto no_firmware;
} else if (alen != 2) {
dev_err(&serial->dev->dev, "%s: Send command incomplete [%d]\n",
serial->type->description, alen);
goto no_firmware;
}
pipe = usb_rcvbulkpipe(serial->dev,
command_port->bulk_in_endpointAddress);
/* See the comment on the usb_clear_halt() above */
usb_clear_halt(serial->dev, pipe);
ret = usb_bulk_msg(serial->dev, pipe, result,
sizeof(*hw_info) + 1, &alen, COMMAND_TIMEOUT_MS);
if (ret) {
dev_err(&serial->dev->dev, "%s: Couldn't get results [%d]\n",
serial->type->description, ret);
goto no_firmware;
} else if (alen != sizeof(*hw_info) + 1) {
dev_err(&serial->dev->dev, "%s: Get results incomplete [%d]\n",
serial->type->description, alen);
goto no_firmware;
} else if (result[0] != command[0]) {
dev_err(&serial->dev->dev, "%s: Command failed [%d]\n",
serial->type->description, result[0]);
goto no_firmware;
}
hw_info = (struct whiteheat_hw_info *)&result[1];
dev_info(&serial->dev->dev, "%s: Firmware v%d.%02d\n",
serial->type->description,
hw_info->sw_major_rev, hw_info->sw_minor_rev);
command_info = kmalloc(sizeof(struct whiteheat_command_private),
GFP_KERNEL);
if (!command_info)
goto no_command_private;
mutex_init(&command_info->mutex);
command_info->port_running = 0;
init_waitqueue_head(&command_info->wait_command);
usb_set_serial_port_data(command_port, command_info);
command_port->write_urb->complete = command_port_write_callback;
command_port->read_urb->complete = command_port_read_callback;
kfree(result);
kfree(command);
return 0;
no_firmware:
/* Firmware likely not running */
dev_err(&serial->dev->dev,
"%s: Unable to retrieve firmware version, try replugging\n",
serial->type->description);
dev_err(&serial->dev->dev,
"%s: If the firmware is not running (status led not blinking)\n",
serial->type->description);
dev_err(&serial->dev->dev,
"%s: please contact [email protected]\n",
serial->type->description);
kfree(result);
kfree(command);
return -ENODEV;
no_command_private:
kfree(result);
no_result_buffer:
kfree(command);
no_command_buffer:
return -ENOMEM;
}
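/*
* Rough sketch of the GET_HW_INFO exchange performed by whiteheat_attach()
* above (symbolic only; the numeric opcode values live in whiteheat.h and
* are not repeated here):
*
*	bulk out: { WHITEHEAT_GET_HW_INFO, 0 }
*	bulk in:  { WHITEHEAT_GET_HW_INFO, struct whiteheat_hw_info }
*
* result[0] must echo the command byte; the firmware revision is then read
* from hw_info->sw_major_rev and hw_info->sw_minor_rev.
*/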
static void whiteheat_release(struct usb_serial *serial)
{
struct usb_serial_port *command_port;
/* free up our private data for our command port */
command_port = serial->port[COMMAND_PORT];
kfree(usb_get_serial_port_data(command_port));
}
static int whiteheat_port_probe(struct usb_serial_port *port)
{
struct whiteheat_private *info;
info = kzalloc(sizeof(*info), GFP_KERNEL);
if (!info)
return -ENOMEM;
usb_set_serial_port_data(port, info);
return 0;
}
static void whiteheat_port_remove(struct usb_serial_port *port)
{
struct whiteheat_private *info;
info = usb_get_serial_port_data(port);
kfree(info);
}
static int whiteheat_open(struct tty_struct *tty, struct usb_serial_port *port)
{
int retval;
retval = start_command_port(port->serial);
if (retval)
goto exit;
/* send an open port command */
retval = firm_open(port);
if (retval) {
stop_command_port(port->serial);
goto exit;
}
retval = firm_purge(port, WHITEHEAT_PURGE_RX | WHITEHEAT_PURGE_TX);
if (retval) {
firm_close(port);
stop_command_port(port->serial);
goto exit;
}
if (tty)
firm_setup_port(tty);
/* Work around HCD bugs */
usb_clear_halt(port->serial->dev, port->read_urb->pipe);
usb_clear_halt(port->serial->dev, port->write_urb->pipe);
retval = usb_serial_generic_open(tty, port);
if (retval) {
firm_close(port);
stop_command_port(port->serial);
goto exit;
}
exit:
return retval;
}
static void whiteheat_close(struct usb_serial_port *port)
{
firm_report_tx_done(port);
firm_close(port);
usb_serial_generic_close(port);
stop_command_port(port->serial);
}
static int whiteheat_tiocmget(struct tty_struct *tty)
{
struct usb_serial_port *port = tty->driver_data;
struct whiteheat_private *info = usb_get_serial_port_data(port);
unsigned int modem_signals = 0;
firm_get_dtr_rts(port);
if (info->mcr & UART_MCR_DTR)
modem_signals |= TIOCM_DTR;
if (info->mcr & UART_MCR_RTS)
modem_signals |= TIOCM_RTS;
return modem_signals;
}
static int whiteheat_tiocmset(struct tty_struct *tty,
unsigned int set, unsigned int clear)
{
struct usb_serial_port *port = tty->driver_data;
struct whiteheat_private *info = usb_get_serial_port_data(port);
if (set & TIOCM_RTS)
info->mcr |= UART_MCR_RTS;
if (set & TIOCM_DTR)
info->mcr |= UART_MCR_DTR;
if (clear & TIOCM_RTS)
info->mcr &= ~UART_MCR_RTS;
if (clear & TIOCM_DTR)
info->mcr &= ~UART_MCR_DTR;
firm_set_dtr(port, info->mcr & UART_MCR_DTR);
firm_set_rts(port, info->mcr & UART_MCR_RTS);
return 0;
}
static void whiteheat_get_serial(struct tty_struct *tty, struct serial_struct *ss)
{
ss->baud_base = 460800;
}
static void whiteheat_set_termios(struct tty_struct *tty,
struct usb_serial_port *port,
const struct ktermios *old_termios)
{
firm_setup_port(tty);
}
static int whiteheat_break_ctl(struct tty_struct *tty, int break_state)
{
struct usb_serial_port *port = tty->driver_data;
return firm_set_break(port, break_state);
}
/*****************************************************************************
* Connect Tech's White Heat callback routines
*****************************************************************************/
static void command_port_write_callback(struct urb *urb)
{
int status = urb->status;
if (status) {
dev_dbg(&urb->dev->dev, "nonzero urb status: %d\n", status);
return;
}
}
static void command_port_read_callback(struct urb *urb)
{
struct usb_serial_port *command_port = urb->context;
struct whiteheat_command_private *command_info;
int status = urb->status;
unsigned char *data = urb->transfer_buffer;
int result;
command_info = usb_get_serial_port_data(command_port);
if (!command_info) {
dev_dbg(&urb->dev->dev, "%s - command_info is NULL, exiting.\n", __func__);
return;
}
if (!urb->actual_length) {
dev_dbg(&urb->dev->dev, "%s - empty response, exiting.\n", __func__);
return;
}
if (status) {
dev_dbg(&urb->dev->dev, "%s - nonzero urb status: %d\n", __func__, status);
if (status != -ENOENT)
command_info->command_finished = WHITEHEAT_CMD_FAILURE;
wake_up(&command_info->wait_command);
return;
}
usb_serial_debug_data(&command_port->dev, __func__, urb->actual_length, data);
if (data[0] == WHITEHEAT_CMD_COMPLETE) {
command_info->command_finished = WHITEHEAT_CMD_COMPLETE;
wake_up(&command_info->wait_command);
} else if (data[0] == WHITEHEAT_CMD_FAILURE) {
command_info->command_finished = WHITEHEAT_CMD_FAILURE;
wake_up(&command_info->wait_command);
} else if (data[0] == WHITEHEAT_EVENT) {
/* These are unsolicited reports from the firmware, hence there is
no waiting command to wake up */
dev_dbg(&urb->dev->dev, "%s - event received\n", __func__);
} else if ((data[0] == WHITEHEAT_GET_DTR_RTS) &&
(urb->actual_length - 1 <= sizeof(command_info->result_buffer))) {
memcpy(command_info->result_buffer, &data[1],
urb->actual_length - 1);
command_info->command_finished = WHITEHEAT_CMD_COMPLETE;
wake_up(&command_info->wait_command);
} else
dev_dbg(&urb->dev->dev, "%s - bad reply from firmware\n", __func__);
/* Continue trying to always read */
result = usb_submit_urb(command_port->read_urb, GFP_ATOMIC);
if (result)
dev_dbg(&urb->dev->dev, "%s - failed resubmitting read urb, error %d\n",
__func__, result);
}
/*****************************************************************************
* Connect Tech's White Heat firmware interface
*****************************************************************************/
static int firm_send_command(struct usb_serial_port *port, __u8 command,
__u8 *data, __u8 datasize)
{
struct usb_serial_port *command_port;
struct whiteheat_command_private *command_info;
struct whiteheat_private *info;
struct device *dev = &port->dev;
__u8 *transfer_buffer;
int retval = 0;
int t;
dev_dbg(dev, "%s - command %d\n", __func__, command);
command_port = port->serial->port[COMMAND_PORT];
command_info = usb_get_serial_port_data(command_port);
if (command_port->bulk_out_size < datasize + 1)
return -EIO;
mutex_lock(&command_info->mutex);
command_info->command_finished = false;
transfer_buffer = (__u8 *)command_port->write_urb->transfer_buffer;
transfer_buffer[0] = command;
memcpy(&transfer_buffer[1], data, datasize);
command_port->write_urb->transfer_buffer_length = datasize + 1;
retval = usb_submit_urb(command_port->write_urb, GFP_NOIO);
if (retval) {
dev_dbg(dev, "%s - submit urb failed\n", __func__);
goto exit;
}
/* wait for the command to complete */
t = wait_event_timeout(command_info->wait_command,
(bool)command_info->command_finished, COMMAND_TIMEOUT);
if (!t)
usb_kill_urb(command_port->write_urb);
if (command_info->command_finished == false) {
dev_dbg(dev, "%s - command timed out.\n", __func__);
retval = -ETIMEDOUT;
goto exit;
}
if (command_info->command_finished == WHITEHEAT_CMD_FAILURE) {
dev_dbg(dev, "%s - command failed.\n", __func__);
retval = -EIO;
goto exit;
}
if (command_info->command_finished == WHITEHEAT_CMD_COMPLETE) {
dev_dbg(dev, "%s - command completed.\n", __func__);
switch (command) {
case WHITEHEAT_GET_DTR_RTS:
info = usb_get_serial_port_data(port);
info->mcr = command_info->result_buffer[0];
break;
}
}
exit:
mutex_unlock(&command_info->mutex);
return retval;
}
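/*
* Illustrative framing of a command as built by firm_send_command() (an
* assumed example; the actual opcode values are defined in whiteheat.h).
* A firm_set_rts(port, 1) call for the first port would submit a write URB
* whose payload is
*
*	{ WHITEHEAT_SET_RTS, .port = 1, .state = 1 }
*
* i.e. one opcode byte followed by the command-specific structure, sent on
* the dedicated command port (COMMAND_PORT).
*/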
static int firm_open(struct usb_serial_port *port)
{
struct whiteheat_simple open_command;
open_command.port = port->port_number + 1;
return firm_send_command(port, WHITEHEAT_OPEN,
(__u8 *)&open_command, sizeof(open_command));
}
static int firm_close(struct usb_serial_port *port)
{
struct whiteheat_simple close_command;
close_command.port = port->port_number + 1;
return firm_send_command(port, WHITEHEAT_CLOSE,
(__u8 *)&close_command, sizeof(close_command));
}
static void firm_setup_port(struct tty_struct *tty)
{
struct usb_serial_port *port = tty->driver_data;
struct device *dev = &port->dev;
struct whiteheat_port_settings port_settings;
unsigned int cflag = tty->termios.c_cflag;
speed_t baud;
port_settings.port = port->port_number + 1;
port_settings.bits = tty_get_char_size(cflag);
dev_dbg(dev, "%s - data bits = %d\n", __func__, port_settings.bits);
/* determine the parity */
if (cflag & PARENB)
if (cflag & CMSPAR)
if (cflag & PARODD)
port_settings.parity = WHITEHEAT_PAR_MARK;
else
port_settings.parity = WHITEHEAT_PAR_SPACE;
else
if (cflag & PARODD)
port_settings.parity = WHITEHEAT_PAR_ODD;
else
port_settings.parity = WHITEHEAT_PAR_EVEN;
else
port_settings.parity = WHITEHEAT_PAR_NONE;
dev_dbg(dev, "%s - parity = %c\n", __func__, port_settings.parity);
/* figure out the stop bits requested */
if (cflag & CSTOPB)
port_settings.stop = 2;
else
port_settings.stop = 1;
dev_dbg(dev, "%s - stop bits = %d\n", __func__, port_settings.stop);
/* figure out the flow control settings */
if (cflag & CRTSCTS)
port_settings.hflow = (WHITEHEAT_HFLOW_CTS |
WHITEHEAT_HFLOW_RTS);
else
port_settings.hflow = WHITEHEAT_HFLOW_NONE;
dev_dbg(dev, "%s - hardware flow control = %s %s %s %s\n", __func__,
(port_settings.hflow & WHITEHEAT_HFLOW_CTS) ? "CTS" : "",
(port_settings.hflow & WHITEHEAT_HFLOW_RTS) ? "RTS" : "",
(port_settings.hflow & WHITEHEAT_HFLOW_DSR) ? "DSR" : "",
(port_settings.hflow & WHITEHEAT_HFLOW_DTR) ? "DTR" : "");
/* determine software flow control */
if (I_IXOFF(tty))
port_settings.sflow = WHITEHEAT_SFLOW_RXTX;
else
port_settings.sflow = WHITEHEAT_SFLOW_NONE;
dev_dbg(dev, "%s - software flow control = %c\n", __func__, port_settings.sflow);
port_settings.xon = START_CHAR(tty);
port_settings.xoff = STOP_CHAR(tty);
dev_dbg(dev, "%s - XON = %2x, XOFF = %2x\n", __func__, port_settings.xon, port_settings.xoff);
/* get the baud rate wanted */
baud = tty_get_baud_rate(tty);
port_settings.baud = cpu_to_le32(baud);
dev_dbg(dev, "%s - baud rate = %u\n", __func__, baud);
/* fixme: should set validated settings */
tty_encode_baud_rate(tty, baud, baud);
/* handle any settings that aren't specified in the tty structure */
port_settings.lloop = 0;
/* now send the message to the device */
firm_send_command(port, WHITEHEAT_SETUP_PORT,
(__u8 *)&port_settings, sizeof(port_settings));
}
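/*
* Illustrative port_settings for a 115200 8N1 line with RTS/CTS flow
* control, built only from the fields firm_setup_port() fills in above
* (assumed example values, not from the firmware documentation):
*
*	.port   = port_number + 1
*	.bits   = 8
*	.parity = WHITEHEAT_PAR_NONE
*	.stop   = 1
*	.hflow  = WHITEHEAT_HFLOW_CTS | WHITEHEAT_HFLOW_RTS
*	.sflow  = WHITEHEAT_SFLOW_NONE
*	.xon    = 0x11
*	.xoff   = 0x13
*	.baud   = cpu_to_le32(115200)
*	.lloop  = 0
*/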
static int firm_set_rts(struct usb_serial_port *port, __u8 onoff)
{
struct whiteheat_set_rdb rts_command;
rts_command.port = port->port_number + 1;
rts_command.state = onoff;
return firm_send_command(port, WHITEHEAT_SET_RTS,
(__u8 *)&rts_command, sizeof(rts_command));
}
static int firm_set_dtr(struct usb_serial_port *port, __u8 onoff)
{
struct whiteheat_set_rdb dtr_command;
dtr_command.port = port->port_number + 1;
dtr_command.state = onoff;
return firm_send_command(port, WHITEHEAT_SET_DTR,
(__u8 *)&dtr_command, sizeof(dtr_command));
}
static int firm_set_break(struct usb_serial_port *port, __u8 onoff)
{
struct whiteheat_set_rdb break_command;
break_command.port = port->port_number + 1;
break_command.state = onoff;
return firm_send_command(port, WHITEHEAT_SET_BREAK,
(__u8 *)&break_command, sizeof(break_command));
}
static int firm_purge(struct usb_serial_port *port, __u8 rxtx)
{
struct whiteheat_purge purge_command;
purge_command.port = port->port_number + 1;
purge_command.what = rxtx;
return firm_send_command(port, WHITEHEAT_PURGE,
(__u8 *)&purge_command, sizeof(purge_command));
}
static int firm_get_dtr_rts(struct usb_serial_port *port)
{
struct whiteheat_simple get_dr_command;
get_dr_command.port = port->port_number + 1;
return firm_send_command(port, WHITEHEAT_GET_DTR_RTS,
(__u8 *)&get_dr_command, sizeof(get_dr_command));
}
static int firm_report_tx_done(struct usb_serial_port *port)
{
struct whiteheat_simple close_command;
close_command.port = port->port_number + 1;
return firm_send_command(port, WHITEHEAT_REPORT_TX_DONE,
(__u8 *)&close_command, sizeof(close_command));
}
/*****************************************************************************
* Connect Tech's White Heat utility functions
*****************************************************************************/
static int start_command_port(struct usb_serial *serial)
{
struct usb_serial_port *command_port;
struct whiteheat_command_private *command_info;
int retval = 0;
command_port = serial->port[COMMAND_PORT];
command_info = usb_get_serial_port_data(command_port);
mutex_lock(&command_info->mutex);
if (!command_info->port_running) {
/* Work around HCD bugs */
usb_clear_halt(serial->dev, command_port->read_urb->pipe);
retval = usb_submit_urb(command_port->read_urb, GFP_KERNEL);
if (retval) {
dev_err(&serial->dev->dev,
"%s - failed submitting read urb, error %d\n",
__func__, retval);
goto exit;
}
}
command_info->port_running++;
exit:
mutex_unlock(&command_info->mutex);
return retval;
}
static void stop_command_port(struct usb_serial *serial)
{
struct usb_serial_port *command_port;
struct whiteheat_command_private *command_info;
command_port = serial->port[COMMAND_PORT];
command_info = usb_get_serial_port_data(command_port);
mutex_lock(&command_info->mutex);
command_info->port_running--;
if (!command_info->port_running)
usb_kill_urb(command_port->read_urb);
mutex_unlock(&command_info->mutex);
}
module_usb_serial_driver(serial_drivers, id_table_combined);
MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL");
MODULE_FIRMWARE("whiteheat.fw");
MODULE_FIRMWARE("whiteheat_loader.fw");
| linux-master | drivers/usb/serial/whiteheat.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* USB Keyspan PDA / Xircom / Entrega Converter driver
*
* Copyright (C) 1999 - 2001 Greg Kroah-Hartman <[email protected]>
* Copyright (C) 1999, 2000 Brian Warner <[email protected]>
* Copyright (C) 2000 Al Borchers <[email protected]>
* Copyright (C) 2020 Johan Hovold <[email protected]>
*
* See Documentation/usb/usb-serial.rst for more information on using this
* driver
*/
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/tty.h>
#include <linux/tty_driver.h>
#include <linux/tty_flip.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>
#include <linux/uaccess.h>
#include <linux/usb.h>
#include <linux/usb/serial.h>
#include <linux/usb/ezusb.h>
#define DRIVER_AUTHOR "Brian Warner <[email protected]>, Johan Hovold <[email protected]>"
#define DRIVER_DESC "USB Keyspan PDA Converter driver"
#define KEYSPAN_TX_THRESHOLD 128
struct keyspan_pda_private {
int tx_room;
struct work_struct unthrottle_work;
struct usb_serial *serial;
struct usb_serial_port *port;
};
static int keyspan_pda_write_start(struct usb_serial_port *port);
#define KEYSPAN_VENDOR_ID 0x06cd
#define KEYSPAN_PDA_FAKE_ID 0x0103
#define KEYSPAN_PDA_ID 0x0104 /* no clue */
/* For Xircom PGSDB9 and older Entrega version of the same device */
#define XIRCOM_VENDOR_ID 0x085a
#define XIRCOM_FAKE_ID 0x8027
#define XIRCOM_FAKE_ID_2 0x8025 /* "PGMFHUB" serial */
#define ENTREGA_VENDOR_ID 0x1645
#define ENTREGA_FAKE_ID 0x8093
static const struct usb_device_id id_table_combined[] = {
{ USB_DEVICE(KEYSPAN_VENDOR_ID, KEYSPAN_PDA_FAKE_ID) },
{ USB_DEVICE(XIRCOM_VENDOR_ID, XIRCOM_FAKE_ID) },
{ USB_DEVICE(XIRCOM_VENDOR_ID, XIRCOM_FAKE_ID_2) },
{ USB_DEVICE(ENTREGA_VENDOR_ID, ENTREGA_FAKE_ID) },
{ USB_DEVICE(KEYSPAN_VENDOR_ID, KEYSPAN_PDA_ID) },
{ } /* Terminating entry */
};
MODULE_DEVICE_TABLE(usb, id_table_combined);
static const struct usb_device_id id_table_std[] = {
{ USB_DEVICE(KEYSPAN_VENDOR_ID, KEYSPAN_PDA_ID) },
{ } /* Terminating entry */
};
static const struct usb_device_id id_table_fake[] = {
{ USB_DEVICE(KEYSPAN_VENDOR_ID, KEYSPAN_PDA_FAKE_ID) },
{ USB_DEVICE(XIRCOM_VENDOR_ID, XIRCOM_FAKE_ID) },
{ USB_DEVICE(XIRCOM_VENDOR_ID, XIRCOM_FAKE_ID_2) },
{ USB_DEVICE(ENTREGA_VENDOR_ID, ENTREGA_FAKE_ID) },
{ } /* Terminating entry */
};
static int keyspan_pda_get_write_room(struct keyspan_pda_private *priv)
{
struct usb_serial_port *port = priv->port;
struct usb_serial *serial = port->serial;
u8 room;
int rc;
rc = usb_control_msg_recv(serial->dev,
0,
6, /* write_room */
USB_TYPE_VENDOR | USB_RECIP_INTERFACE | USB_DIR_IN,
0, /* value: 0 means "remaining room" */
0, /* index */
&room,
1,
2000,
GFP_KERNEL);
if (rc) {
dev_dbg(&port->dev, "roomquery failed: %d\n", rc);
return rc;
}
dev_dbg(&port->dev, "roomquery says %d\n", room);
return room;
}
static void keyspan_pda_request_unthrottle(struct work_struct *work)
{
struct keyspan_pda_private *priv =
container_of(work, struct keyspan_pda_private, unthrottle_work);
struct usb_serial_port *port = priv->port;
struct usb_serial *serial = port->serial;
unsigned long flags;
int result;
dev_dbg(&port->dev, "%s\n", __func__);
/*
* Ask the device to tell us when the tx buffer becomes
* sufficiently empty.
*/
result = usb_control_msg(serial->dev,
usb_sndctrlpipe(serial->dev, 0),
7, /* request_unthrottle */
USB_TYPE_VENDOR | USB_RECIP_INTERFACE
| USB_DIR_OUT,
KEYSPAN_TX_THRESHOLD,
0, /* index */
NULL,
0,
2000);
if (result < 0)
dev_dbg(&serial->dev->dev, "%s - error %d from usb_control_msg\n",
__func__, result);
/*
* Need to check available space after requesting notification in case
* buffer is already empty so that no notification is sent.
*/
result = keyspan_pda_get_write_room(priv);
if (result > KEYSPAN_TX_THRESHOLD) {
spin_lock_irqsave(&port->lock, flags);
priv->tx_room = max(priv->tx_room, result);
spin_unlock_irqrestore(&port->lock, flags);
usb_serial_port_softint(port);
}
}
static void keyspan_pda_rx_interrupt(struct urb *urb)
{
struct usb_serial_port *port = urb->context;
unsigned char *data = urb->transfer_buffer;
unsigned int len = urb->actual_length;
int retval;
int status = urb->status;
struct keyspan_pda_private *priv;
unsigned long flags;
priv = usb_get_serial_port_data(port);
switch (status) {
case 0:
/* success */
break;
case -ECONNRESET:
case -ENOENT:
case -ESHUTDOWN:
/* this urb is terminated, clean up */
dev_dbg(&urb->dev->dev, "%s - urb shutting down with status: %d\n", __func__, status);
return;
default:
dev_dbg(&urb->dev->dev, "%s - nonzero urb status received: %d\n", __func__, status);
goto exit;
}
if (len < 1) {
dev_warn(&port->dev, "short message received\n");
goto exit;
}
/* see if the message is data or a status interrupt */
switch (data[0]) {
case 0:
/* rest of message is rx data */
if (len < 2)
break;
tty_insert_flip_string(&port->port, data + 1, len - 1);
tty_flip_buffer_push(&port->port);
break;
case 1:
/* status interrupt */
if (len < 2) {
dev_warn(&port->dev, "short interrupt message received\n");
break;
}
dev_dbg(&port->dev, "rx int, d1=%d\n", data[1]);
switch (data[1]) {
case 1: /* modemline change */
break;
case 2: /* tx unthrottle interrupt */
spin_lock_irqsave(&port->lock, flags);
priv->tx_room = max(priv->tx_room, KEYSPAN_TX_THRESHOLD);
spin_unlock_irqrestore(&port->lock, flags);
keyspan_pda_write_start(port);
usb_serial_port_softint(port);
break;
default:
break;
}
break;
default:
break;
}
exit:
retval = usb_submit_urb(urb, GFP_ATOMIC);
if (retval)
dev_err(&port->dev,
"%s - usb_submit_urb failed with result %d\n",
__func__, retval);
}
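/*
* Sketch of the interrupt-in packet format decoded by
* keyspan_pda_rx_interrupt() above (assumed example bytes, not from a
* protocol specification):
*
*	00 68 69	data[0] == 0: two received characters, 'h' 'i'
*	01 02		data[0] == 1, data[1] == 2: tx unthrottle event
*	01 01		data[0] == 1, data[1] == 1: modem line change
*/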
static void keyspan_pda_rx_throttle(struct tty_struct *tty)
{
struct usb_serial_port *port = tty->driver_data;
/*
* Stop receiving characters. We just turn off the URB request, and
* let chars pile up in the device. If we're doing hardware
* flow control, the device will signal the other end when its buffer
* fills up. If we're doing XON/XOFF, this would be a good time to
* send an XOFF, although it might make sense to foist that off upon
* the device too.
*/
usb_kill_urb(port->interrupt_in_urb);
}
static void keyspan_pda_rx_unthrottle(struct tty_struct *tty)
{
struct usb_serial_port *port = tty->driver_data;
/* just restart the receive interrupt URB */
if (usb_submit_urb(port->interrupt_in_urb, GFP_KERNEL))
dev_dbg(&port->dev, "usb_submit_urb(read urb) failed\n");
}
static speed_t keyspan_pda_setbaud(struct usb_serial *serial, speed_t baud)
{
int rc;
int bindex;
switch (baud) {
case 110:
bindex = 0;
break;
case 300:
bindex = 1;
break;
case 1200:
bindex = 2;
break;
case 2400:
bindex = 3;
break;
case 4800:
bindex = 4;
break;
case 9600:
bindex = 5;
break;
case 19200:
bindex = 6;
break;
case 38400:
bindex = 7;
break;
case 57600:
bindex = 8;
break;
case 115200:
bindex = 9;
break;
default:
bindex = 5; /* Default to 9600 */
baud = 9600;
}
rc = usb_control_msg(serial->dev, usb_sndctrlpipe(serial->dev, 0),
0, /* set baud */
USB_TYPE_VENDOR
| USB_RECIP_INTERFACE
| USB_DIR_OUT, /* type */
bindex, /* value */
0, /* index */
NULL, /* &data */
0, /* size */
2000); /* timeout */
if (rc < 0)
return 0;
return baud;
}
static int keyspan_pda_break_ctl(struct tty_struct *tty, int break_state)
{
struct usb_serial_port *port = tty->driver_data;
struct usb_serial *serial = port->serial;
int value;
int result;
if (break_state == -1)
value = 1; /* start break */
else
value = 0; /* clear break */
result = usb_control_msg(serial->dev, usb_sndctrlpipe(serial->dev, 0),
4, /* set break */
USB_TYPE_VENDOR | USB_RECIP_INTERFACE | USB_DIR_OUT,
value, 0, NULL, 0, 2000);
if (result < 0) {
dev_dbg(&port->dev, "%s - error %d from usb_control_msg\n",
__func__, result);
return result;
}
return 0;
}
static void keyspan_pda_set_termios(struct tty_struct *tty,
struct usb_serial_port *port,
const struct ktermios *old_termios)
{
struct usb_serial *serial = port->serial;
speed_t speed;
/*
* cflag specifies lots of stuff: number of stop bits, parity, number
* of data bits, baud. What can the device actually handle?:
* CSTOPB (1 stop bit or 2)
* PARENB (parity)
* CSIZE (5bit .. 8bit)
* There is minimal hw support for parity (a PSW bit seems to hold the
* parity of whatever is in the accumulator). The UART either deals
* with 10 bits (start, 8 data, stop) or 11 bits (start, 8 data,
* 1 special, stop). So, with firmware changes, we could do:
* 8N1: 10 bit
* 8N2: 11 bit, extra bit always (mark?)
* 8[EOMS]1: 11 bit, extra bit is parity
* 7[EOMS]1: 10 bit, b0/b7 is parity
* 7[EOMS]2: 11 bit, b0/b7 is parity, extra bit always (mark?)
*
* HW flow control is dictated by the tty->termios.c_cflags & CRTSCTS
* bit.
*
* For now, just do baud.
*/
speed = tty_get_baud_rate(tty);
speed = keyspan_pda_setbaud(serial, speed);
if (speed == 0) {
dev_dbg(&port->dev, "can't handle requested baud rate\n");
/* It hasn't changed so.. */
speed = tty_termios_baud_rate(old_termios);
}
/*
* Only speed can change so copy the old h/w parameters then encode
* the new speed.
*/
tty_termios_copy_hw(&tty->termios, old_termios);
tty_encode_baud_rate(tty, speed, speed);
}
/*
* Modem control pins: DTR and RTS are outputs and can be controlled.
* DCD, RI, DSR, CTS are inputs and can be read. All outputs can also be
* read. The byte passed is: DTR(b7) DCD RI DSR CTS RTS(b2) unused unused.
*/
static int keyspan_pda_get_modem_info(struct usb_serial *serial,
unsigned char *value)
{
int rc;
u8 data;
rc = usb_control_msg_recv(serial->dev, 0,
3, /* get pins */
USB_TYPE_VENDOR | USB_RECIP_INTERFACE | USB_DIR_IN,
0,
0,
&data,
1,
2000,
GFP_KERNEL);
if (rc == 0)
*value = data;
return rc;
}
static int keyspan_pda_set_modem_info(struct usb_serial *serial,
unsigned char value)
{
int rc;
rc = usb_control_msg(serial->dev, usb_sndctrlpipe(serial->dev, 0),
3, /* set pins */
USB_TYPE_VENDOR|USB_RECIP_INTERFACE|USB_DIR_OUT,
value, 0, NULL, 0, 2000);
return rc;
}
static int keyspan_pda_tiocmget(struct tty_struct *tty)
{
struct usb_serial_port *port = tty->driver_data;
struct usb_serial *serial = port->serial;
int rc;
unsigned char status;
int value;
rc = keyspan_pda_get_modem_info(serial, &status);
if (rc < 0)
return rc;
value = ((status & BIT(7)) ? TIOCM_DTR : 0) |
((status & BIT(6)) ? TIOCM_CAR : 0) |
((status & BIT(5)) ? TIOCM_RNG : 0) |
((status & BIT(4)) ? TIOCM_DSR : 0) |
((status & BIT(3)) ? TIOCM_CTS : 0) |
((status & BIT(2)) ? TIOCM_RTS : 0);
return value;
}
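/*
* Worked example of the pin byte decoded above (assumed value): a status
* byte of 0x9c has b7 (DTR), b4 (DSR), b3 (CTS) and b2 (RTS) set, so
* tiocmget reports TIOCM_DTR | TIOCM_DSR | TIOCM_CTS | TIOCM_RTS.
*/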
static int keyspan_pda_tiocmset(struct tty_struct *tty,
unsigned int set, unsigned int clear)
{
struct usb_serial_port *port = tty->driver_data;
struct usb_serial *serial = port->serial;
int rc;
unsigned char status;
rc = keyspan_pda_get_modem_info(serial, &status);
if (rc < 0)
return rc;
if (set & TIOCM_RTS)
status |= BIT(2);
if (set & TIOCM_DTR)
status |= BIT(7);
if (clear & TIOCM_RTS)
status &= ~BIT(2);
if (clear & TIOCM_DTR)
status &= ~BIT(7);
rc = keyspan_pda_set_modem_info(serial, status);
return rc;
}
static int keyspan_pda_write_start(struct usb_serial_port *port)
{
struct keyspan_pda_private *priv = usb_get_serial_port_data(port);
unsigned long flags;
struct urb *urb;
int count;
int room;
int rc;
/*
* Guess how much room is left in the device's ring buffer. If our
* write will result in no room left, ask the device to give us an
* interrupt when the room available rises above a threshold but also
* query how much room is currently available (in case our guess was
* too conservative and the buffer is already empty when the
* unthrottle work is scheduled).
*/
/*
* We might block because of:
* the TX urb is in-flight (wait until it completes)
* the device is full (wait until it says there is room)
*/
spin_lock_irqsave(&port->lock, flags);
room = priv->tx_room;
count = kfifo_len(&port->write_fifo);
if (!test_bit(0, &port->write_urbs_free) || count == 0 || room == 0) {
spin_unlock_irqrestore(&port->lock, flags);
return 0;
}
__clear_bit(0, &port->write_urbs_free);
if (count > room)
count = room;
if (count > port->bulk_out_size)
count = port->bulk_out_size;
urb = port->write_urb;
count = kfifo_out(&port->write_fifo, urb->transfer_buffer, count);
urb->transfer_buffer_length = count;
port->tx_bytes += count;
priv->tx_room -= count;
spin_unlock_irqrestore(&port->lock, flags);
dev_dbg(&port->dev, "%s - count = %d, txroom = %d\n", __func__, count, room);
rc = usb_submit_urb(urb, GFP_ATOMIC);
if (rc) {
dev_dbg(&port->dev, "usb_submit_urb(write bulk) failed\n");
spin_lock_irqsave(&port->lock, flags);
port->tx_bytes -= count;
priv->tx_room = max(priv->tx_room, room + count);
__set_bit(0, &port->write_urbs_free);
spin_unlock_irqrestore(&port->lock, flags);
return rc;
}
if (count == room)
schedule_work(&priv->unthrottle_work);
return count;
}
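/*
* Worked example of the flow accounting above (assumed numbers, and
* assuming bulk_out_size is at least 64): with tx_room == 64 and 200 bytes
* queued in the write fifo, a single 64-byte URB is submitted and tx_room
* drops to 0. Because count == room, the unthrottle work is scheduled,
* which asks the device to raise a tx unthrottle interrupt once roughly
* KEYSPAN_TX_THRESHOLD (128) bytes of its ring buffer are free again.
*/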
static void keyspan_pda_write_bulk_callback(struct urb *urb)
{
struct usb_serial_port *port = urb->context;
unsigned long flags;
spin_lock_irqsave(&port->lock, flags);
port->tx_bytes -= urb->transfer_buffer_length;
__set_bit(0, &port->write_urbs_free);
spin_unlock_irqrestore(&port->lock, flags);
keyspan_pda_write_start(port);
usb_serial_port_softint(port);
}
static int keyspan_pda_write(struct tty_struct *tty, struct usb_serial_port *port,
const unsigned char *buf, int count)
{
int rc;
dev_dbg(&port->dev, "%s - count = %d\n", __func__, count);
if (!count)
return 0;
count = kfifo_in_locked(&port->write_fifo, buf, count, &port->lock);
rc = keyspan_pda_write_start(port);
if (rc)
return rc;
return count;
}
static void keyspan_pda_dtr_rts(struct usb_serial_port *port, int on)
{
struct usb_serial *serial = port->serial;
if (on)
keyspan_pda_set_modem_info(serial, BIT(7) | BIT(2));
else
keyspan_pda_set_modem_info(serial, 0);
}
static int keyspan_pda_open(struct tty_struct *tty,
struct usb_serial_port *port)
{
struct keyspan_pda_private *priv = usb_get_serial_port_data(port);
int rc;
/* find out how much room is in the Tx ring */
rc = keyspan_pda_get_write_room(priv);
if (rc < 0)
return rc;
spin_lock_irq(&port->lock);
priv->tx_room = rc;
spin_unlock_irq(&port->lock);
rc = usb_submit_urb(port->interrupt_in_urb, GFP_KERNEL);
if (rc) {
dev_dbg(&port->dev, "%s - usb_submit_urb(read int) failed\n", __func__);
return rc;
}
return 0;
}
static void keyspan_pda_close(struct usb_serial_port *port)
{
struct keyspan_pda_private *priv = usb_get_serial_port_data(port);
/*
* Stop the interrupt URB first as its completion handler may submit
* the write URB.
*/
usb_kill_urb(port->interrupt_in_urb);
usb_kill_urb(port->write_urb);
cancel_work_sync(&priv->unthrottle_work);
spin_lock_irq(&port->lock);
kfifo_reset(&port->write_fifo);
spin_unlock_irq(&port->lock);
}
/* download the firmware to a "fake" device (pre-renumeration) */
static int keyspan_pda_fake_startup(struct usb_serial *serial)
{
unsigned int vid = le16_to_cpu(serial->dev->descriptor.idVendor);
const char *fw_name;
/* download the firmware here ... */
ezusb_fx1_set_reset(serial->dev, 1);
switch (vid) {
case KEYSPAN_VENDOR_ID:
fw_name = "keyspan_pda/keyspan_pda.fw";
break;
case XIRCOM_VENDOR_ID:
case ENTREGA_VENDOR_ID:
fw_name = "keyspan_pda/xircom_pgs.fw";
break;
default:
dev_err(&serial->dev->dev, "%s: unknown vendor, aborting.\n",
__func__);
return -ENODEV;
}
if (ezusb_fx1_ihex_firmware_download(serial->dev, fw_name) < 0) {
dev_err(&serial->dev->dev, "failed to load firmware \"%s\"\n",
fw_name);
return -ENOENT;
}
/*
* After downloading firmware renumeration will occur in a moment and
* the new device will bind to the real driver.
*/
/* We want this device to fail to have a driver assigned to it. */
return 1;
}
MODULE_FIRMWARE("keyspan_pda/keyspan_pda.fw");
MODULE_FIRMWARE("keyspan_pda/xircom_pgs.fw");
static int keyspan_pda_port_probe(struct usb_serial_port *port)
{
struct keyspan_pda_private *priv;
priv = kmalloc(sizeof(struct keyspan_pda_private), GFP_KERNEL);
if (!priv)
return -ENOMEM;
INIT_WORK(&priv->unthrottle_work, keyspan_pda_request_unthrottle);
priv->port = port;
usb_set_serial_port_data(port, priv);
return 0;
}
static void keyspan_pda_port_remove(struct usb_serial_port *port)
{
struct keyspan_pda_private *priv;
priv = usb_get_serial_port_data(port);
kfree(priv);
}
static struct usb_serial_driver keyspan_pda_fake_device = {
.driver = {
.owner = THIS_MODULE,
.name = "keyspan_pda_pre",
},
.description = "Keyspan PDA - (prerenumeration)",
.id_table = id_table_fake,
.num_ports = 1,
.attach = keyspan_pda_fake_startup,
};
static struct usb_serial_driver keyspan_pda_device = {
.driver = {
.owner = THIS_MODULE,
.name = "keyspan_pda",
},
.description = "Keyspan PDA",
.id_table = id_table_std,
.num_ports = 1,
.num_bulk_out = 1,
.num_interrupt_in = 1,
.dtr_rts = keyspan_pda_dtr_rts,
.open = keyspan_pda_open,
.close = keyspan_pda_close,
.write = keyspan_pda_write,
.write_bulk_callback = keyspan_pda_write_bulk_callback,
.read_int_callback = keyspan_pda_rx_interrupt,
.throttle = keyspan_pda_rx_throttle,
.unthrottle = keyspan_pda_rx_unthrottle,
.set_termios = keyspan_pda_set_termios,
.break_ctl = keyspan_pda_break_ctl,
.tiocmget = keyspan_pda_tiocmget,
.tiocmset = keyspan_pda_tiocmset,
.port_probe = keyspan_pda_port_probe,
.port_remove = keyspan_pda_port_remove,
};
static struct usb_serial_driver * const serial_drivers[] = {
&keyspan_pda_device,
&keyspan_pda_fake_device,
NULL
};
module_usb_serial_driver(serial_drivers, id_table_combined);
MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL");
| linux-master | drivers/usb/serial/keyspan_pda.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* Copyright (C) 2009 by Bart Hartgers ([email protected])
* Original version:
* Copyright (C) 2006
* Simon Schulz (ark3116_driver <at> auctionant.de)
*
* ark3116
* - implements a driver for the arkmicro ark3116 chipset (vendor=0x6547,
* productid=0x0232) (used in a datacable called KQ-U8A)
*
* Supports full modem status lines, break, hardware flow control. Does not
* support software flow control, since I do not know how to enable it in hw.
*
* This driver is an essentially new implementation. I initially dug
* into the old ark3116.c driver and suddenly realized the ark3116 is
* a 16450 with a USB interface glued to it. See comments at the
* bottom of this file.
*/
#include <linux/kernel.h>
#include <linux/ioctl.h>
#include <linux/tty.h>
#include <linux/slab.h>
#include <linux/tty_flip.h>
#include <linux/module.h>
#include <linux/usb.h>
#include <linux/usb/serial.h>
#include <linux/serial.h>
#include <linux/serial_reg.h>
#include <linux/uaccess.h>
#include <linux/mutex.h>
#include <linux/spinlock.h>
#define DRIVER_AUTHOR "Bart Hartgers <[email protected]>"
#define DRIVER_DESC "USB ARK3116 serial/IrDA driver"
#define DRIVER_DEV_DESC "ARK3116 RS232/IrDA"
#define DRIVER_NAME "ark3116"
/* usb timeout of 1 second */
#define ARK_TIMEOUT 1000
static const struct usb_device_id id_table[] = {
{ USB_DEVICE(0x6547, 0x0232) },
{ USB_DEVICE(0x18ec, 0x3118) }, /* USB to IrDA adapter */
{ },
};
MODULE_DEVICE_TABLE(usb, id_table);
static int is_irda(struct usb_serial *serial)
{
struct usb_device *dev = serial->dev;
if (le16_to_cpu(dev->descriptor.idVendor) == 0x18ec &&
le16_to_cpu(dev->descriptor.idProduct) == 0x3118)
return 1;
return 0;
}
struct ark3116_private {
int irda; /* 1 for irda device */
/* protects hw register updates */
struct mutex hw_lock;
int quot; /* baudrate divisor */
__u32 lcr; /* line control register value */
__u32 hcr; /* handshake control register (0x8)
* value */
__u32 mcr; /* modem control register value */
/* protects the status values below */
spinlock_t status_lock;
__u32 msr; /* modem status register value */
__u32 lsr; /* line status register value */
};
static int ark3116_write_reg(struct usb_serial *serial,
unsigned reg, __u8 val)
{
int result;
/* 0xfe 0x40 are magic values taken from original driver */
result = usb_control_msg(serial->dev,
usb_sndctrlpipe(serial->dev, 0),
0xfe, 0x40, val, reg,
NULL, 0, ARK_TIMEOUT);
if (result)
return result;
return 0;
}
static int ark3116_read_reg(struct usb_serial *serial,
unsigned reg, unsigned char *buf)
{
int result;
/* 0xfe 0xc0 are magic values taken from original driver */
result = usb_control_msg(serial->dev,
usb_rcvctrlpipe(serial->dev, 0),
0xfe, 0xc0, 0, reg,
buf, 1, ARK_TIMEOUT);
if (result < 1) {
dev_err(&serial->interface->dev,
"failed to read register %u: %d\n",
reg, result);
if (result >= 0)
result = -EIO;
return result;
}
return 0;
}
static inline int calc_divisor(int bps)
{
/* Original ark3116 made some exceptions in rounding here
* because windows did the same. Assume that is not really
* necessary.
* Crystal is 12MHz, probably because of USB, but we divide by 4?
*/
return (12000000 + 2*bps) / (4*bps);
}
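/*
* Worked example for calc_divisor() (plain arithmetic based on the 12 MHz
* crystal and divide-by-4 mentioned above):
*
*	calc_divisor(9600)   = (12000000 + 19200)  / 38400  = 313
*	calc_divisor(115200) = (12000000 + 230400) / 460800 = 26
*
* i.e. the divisor is 3 MHz / bps rounded to the nearest integer.
*/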
static int ark3116_port_probe(struct usb_serial_port *port)
{
struct usb_serial *serial = port->serial;
struct ark3116_private *priv;
priv = kzalloc(sizeof(*priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
mutex_init(&priv->hw_lock);
spin_lock_init(&priv->status_lock);
priv->irda = is_irda(serial);
usb_set_serial_port_data(port, priv);
/* setup the hardware */
ark3116_write_reg(serial, UART_IER, 0);
/* disable DMA */
ark3116_write_reg(serial, UART_FCR, 0);
/* handshake control */
priv->hcr = 0;
ark3116_write_reg(serial, 0x8, 0);
/* modem control */
priv->mcr = 0;
ark3116_write_reg(serial, UART_MCR, 0);
if (!(priv->irda)) {
ark3116_write_reg(serial, 0xb, 0);
} else {
ark3116_write_reg(serial, 0xb, 1);
ark3116_write_reg(serial, 0xc, 0);
ark3116_write_reg(serial, 0xd, 0x41);
ark3116_write_reg(serial, 0xa, 1);
}
/* setup baudrate */
ark3116_write_reg(serial, UART_LCR, UART_LCR_DLAB);
/* setup for 9600 8N1 */
priv->quot = calc_divisor(9600);
ark3116_write_reg(serial, UART_DLL, priv->quot & 0xff);
ark3116_write_reg(serial, UART_DLM, (priv->quot>>8) & 0xff);
priv->lcr = UART_LCR_WLEN8;
ark3116_write_reg(serial, UART_LCR, UART_LCR_WLEN8);
ark3116_write_reg(serial, 0xe, 0);
if (priv->irda)
ark3116_write_reg(serial, 0x9, 0);
dev_info(&port->dev, "using %s mode\n", priv->irda ? "IrDA" : "RS232");
return 0;
}
static void ark3116_port_remove(struct usb_serial_port *port)
{
struct ark3116_private *priv = usb_get_serial_port_data(port);
/* device is closed, so URBs and DMA should be down */
mutex_destroy(&priv->hw_lock);
kfree(priv);
}
static void ark3116_set_termios(struct tty_struct *tty,
struct usb_serial_port *port,
const struct ktermios *old_termios)
{
struct usb_serial *serial = port->serial;
struct ark3116_private *priv = usb_get_serial_port_data(port);
struct ktermios *termios = &tty->termios;
unsigned int cflag = termios->c_cflag;
int bps = tty_get_baud_rate(tty);
int quot;
__u8 lcr, hcr, eval;
/* set data bit count */
lcr = UART_LCR_WLEN(tty_get_char_size(cflag));
if (cflag & CSTOPB)
lcr |= UART_LCR_STOP;
if (cflag & PARENB)
lcr |= UART_LCR_PARITY;
if (!(cflag & PARODD))
lcr |= UART_LCR_EPAR;
if (cflag & CMSPAR)
lcr |= UART_LCR_SPAR;
/* handshake control */
hcr = (cflag & CRTSCTS) ? 0x03 : 0x00;
/* calc baudrate */
dev_dbg(&port->dev, "%s - setting bps to %d\n", __func__, bps);
eval = 0;
switch (bps) {
case 0:
quot = calc_divisor(9600);
break;
default:
if ((bps < 75) || (bps > 3000000))
bps = 9600;
quot = calc_divisor(bps);
break;
case 460800:
eval = 1;
quot = calc_divisor(bps);
break;
case 921600:
eval = 2;
quot = calc_divisor(bps);
break;
}
/* Update state: synchronize */
mutex_lock(&priv->hw_lock);
/* keep old LCR_SBC bit */
lcr |= (priv->lcr & UART_LCR_SBC);
dev_dbg(&port->dev, "%s - setting hcr:0x%02x,lcr:0x%02x,quot:%d\n",
__func__, hcr, lcr, quot);
/* handshake control */
if (priv->hcr != hcr) {
priv->hcr = hcr;
ark3116_write_reg(serial, 0x8, hcr);
}
/* baudrate */
if (priv->quot != quot) {
priv->quot = quot;
priv->lcr = lcr; /* need to write lcr anyway */
/* disable DMA since transmit/receive is
* shadowed by UART_DLL
*/
ark3116_write_reg(serial, UART_FCR, 0);
ark3116_write_reg(serial, UART_LCR,
lcr|UART_LCR_DLAB);
ark3116_write_reg(serial, UART_DLL, quot & 0xff);
ark3116_write_reg(serial, UART_DLM, (quot>>8) & 0xff);
/* restore lcr */
ark3116_write_reg(serial, UART_LCR, lcr);
/* magic baudrate thingy: not sure what it does,
* but windows does this as well.
*/
ark3116_write_reg(serial, 0xe, eval);
/* enable DMA */
ark3116_write_reg(serial, UART_FCR, UART_FCR_DMA_SELECT);
} else if (priv->lcr != lcr) {
priv->lcr = lcr;
ark3116_write_reg(serial, UART_LCR, lcr);
}
mutex_unlock(&priv->hw_lock);
/* check for software flow control */
if (I_IXOFF(tty) || I_IXON(tty)) {
dev_warn(&port->dev,
"software flow control not implemented\n");
}
/* Don't rewrite B0 */
if (tty_termios_baud_rate(termios))
tty_termios_encode_baud_rate(termios, bps, bps);
}
static void ark3116_close(struct usb_serial_port *port)
{
struct usb_serial *serial = port->serial;
/* disable DMA */
ark3116_write_reg(serial, UART_FCR, 0);
/* deactivate interrupts */
ark3116_write_reg(serial, UART_IER, 0);
usb_serial_generic_close(port);
usb_kill_urb(port->interrupt_in_urb);
}
static int ark3116_open(struct tty_struct *tty, struct usb_serial_port *port)
{
struct ark3116_private *priv = usb_get_serial_port_data(port);
struct usb_serial *serial = port->serial;
unsigned char *buf;
int result;
buf = kmalloc(1, GFP_KERNEL);
if (buf == NULL)
return -ENOMEM;
result = usb_serial_generic_open(tty, port);
if (result) {
dev_dbg(&port->dev,
"%s - usb_serial_generic_open failed: %d\n",
__func__, result);
goto err_free;
}
/* remove any data still left: also clears error state */
ark3116_read_reg(serial, UART_RX, buf);
/* read modem status */
result = ark3116_read_reg(serial, UART_MSR, buf);
if (result)
goto err_close;
priv->msr = *buf;
/* read line status */
result = ark3116_read_reg(serial, UART_LSR, buf);
if (result)
goto err_close;
priv->lsr = *buf;
result = usb_submit_urb(port->interrupt_in_urb, GFP_KERNEL);
if (result) {
dev_err(&port->dev, "submit irq_in urb failed %d\n",
result);
goto err_close;
}
/* activate interrupts */
ark3116_write_reg(port->serial, UART_IER, UART_IER_MSI|UART_IER_RLSI);
/* enable DMA */
ark3116_write_reg(port->serial, UART_FCR, UART_FCR_DMA_SELECT);
/* setup termios */
if (tty)
ark3116_set_termios(tty, port, NULL);
kfree(buf);
return 0;
err_close:
usb_serial_generic_close(port);
err_free:
kfree(buf);
return result;
}
static int ark3116_tiocmget(struct tty_struct *tty)
{
struct usb_serial_port *port = tty->driver_data;
struct ark3116_private *priv = usb_get_serial_port_data(port);
__u32 status;
__u32 ctrl;
unsigned long flags;
mutex_lock(&priv->hw_lock);
ctrl = priv->mcr;
mutex_unlock(&priv->hw_lock);
spin_lock_irqsave(&priv->status_lock, flags);
status = priv->msr;
spin_unlock_irqrestore(&priv->status_lock, flags);
return (status & UART_MSR_DSR ? TIOCM_DSR : 0) |
(status & UART_MSR_CTS ? TIOCM_CTS : 0) |
(status & UART_MSR_RI ? TIOCM_RI : 0) |
(status & UART_MSR_DCD ? TIOCM_CD : 0) |
(ctrl & UART_MCR_DTR ? TIOCM_DTR : 0) |
(ctrl & UART_MCR_RTS ? TIOCM_RTS : 0) |
(ctrl & UART_MCR_OUT1 ? TIOCM_OUT1 : 0) |
(ctrl & UART_MCR_OUT2 ? TIOCM_OUT2 : 0);
}
static int ark3116_tiocmset(struct tty_struct *tty,
unsigned set, unsigned clr)
{
struct usb_serial_port *port = tty->driver_data;
struct ark3116_private *priv = usb_get_serial_port_data(port);
/* we need to take the mutex here, to make sure that the value
* in priv->mcr is actually the one that is in the hardware
*/
mutex_lock(&priv->hw_lock);
if (set & TIOCM_RTS)
priv->mcr |= UART_MCR_RTS;
if (set & TIOCM_DTR)
priv->mcr |= UART_MCR_DTR;
if (set & TIOCM_OUT1)
priv->mcr |= UART_MCR_OUT1;
if (set & TIOCM_OUT2)
priv->mcr |= UART_MCR_OUT2;
if (clr & TIOCM_RTS)
priv->mcr &= ~UART_MCR_RTS;
if (clr & TIOCM_DTR)
priv->mcr &= ~UART_MCR_DTR;
if (clr & TIOCM_OUT1)
priv->mcr &= ~UART_MCR_OUT1;
if (clr & TIOCM_OUT2)
priv->mcr &= ~UART_MCR_OUT2;
ark3116_write_reg(port->serial, UART_MCR, priv->mcr);
mutex_unlock(&priv->hw_lock);
return 0;
}
static int ark3116_break_ctl(struct tty_struct *tty, int break_state)
{
struct usb_serial_port *port = tty->driver_data;
struct ark3116_private *priv = usb_get_serial_port_data(port);
int ret;
/* LCR is also used for other things: protect access */
mutex_lock(&priv->hw_lock);
if (break_state)
priv->lcr |= UART_LCR_SBC;
else
priv->lcr &= ~UART_LCR_SBC;
ret = ark3116_write_reg(port->serial, UART_LCR, priv->lcr);
mutex_unlock(&priv->hw_lock);
return ret;
}
static void ark3116_update_msr(struct usb_serial_port *port, __u8 msr)
{
struct ark3116_private *priv = usb_get_serial_port_data(port);
unsigned long flags;
spin_lock_irqsave(&priv->status_lock, flags);
priv->msr = msr;
spin_unlock_irqrestore(&priv->status_lock, flags);
if (msr & UART_MSR_ANY_DELTA) {
/* update input line counters */
if (msr & UART_MSR_DCTS)
port->icount.cts++;
if (msr & UART_MSR_DDSR)
port->icount.dsr++;
if (msr & UART_MSR_DDCD)
port->icount.dcd++;
if (msr & UART_MSR_TERI)
port->icount.rng++;
wake_up_interruptible(&port->port.delta_msr_wait);
}
}
static void ark3116_update_lsr(struct usb_serial_port *port, __u8 lsr)
{
struct ark3116_private *priv = usb_get_serial_port_data(port);
unsigned long flags;
spin_lock_irqsave(&priv->status_lock, flags);
/* combine bits */
priv->lsr |= lsr;
spin_unlock_irqrestore(&priv->status_lock, flags);
if (lsr&UART_LSR_BRK_ERROR_BITS) {
if (lsr & UART_LSR_BI)
port->icount.brk++;
if (lsr & UART_LSR_FE)
port->icount.frame++;
if (lsr & UART_LSR_PE)
port->icount.parity++;
if (lsr & UART_LSR_OE)
port->icount.overrun++;
}
}
static void ark3116_read_int_callback(struct urb *urb)
{
struct usb_serial_port *port = urb->context;
int status = urb->status;
const __u8 *data = urb->transfer_buffer;
int result;
switch (status) {
case -ECONNRESET:
case -ENOENT:
case -ESHUTDOWN:
/* this urb is terminated, clean up */
dev_dbg(&port->dev, "%s - urb shutting down with status: %d\n",
__func__, status);
return;
default:
dev_dbg(&port->dev, "%s - nonzero urb status received: %d\n",
__func__, status);
break;
case 0: /* success */
/* discovered this by trial and error... */
if ((urb->actual_length == 4) && (data[0] == 0xe8)) {
const __u8 id = data[1]&UART_IIR_ID;
dev_dbg(&port->dev, "%s: iir=%02x\n", __func__, data[1]);
if (id == UART_IIR_MSI) {
dev_dbg(&port->dev, "%s: msr=%02x\n",
__func__, data[3]);
ark3116_update_msr(port, data[3]);
break;
} else if (id == UART_IIR_RLSI) {
dev_dbg(&port->dev, "%s: lsr=%02x\n",
__func__, data[2]);
ark3116_update_lsr(port, data[2]);
break;
}
}
/*
* Not sure what this data meant...
*/
usb_serial_debug_data(&port->dev, __func__,
urb->actual_length,
urb->transfer_buffer);
break;
}
result = usb_submit_urb(urb, GFP_ATOMIC);
if (result)
dev_err(&port->dev, "failed to resubmit interrupt urb: %d\n",
result);
}
/* Data comes in via the bulk (data) URB, errors/interrupts via the int URB.
* This means that we cannot be sure which data byte has an associated error
* condition, so we report an error for all data in the next bulk read.
*
* Actually, there might even be a window between the bulk data leaving the
* ark and reading/resetting the lsr in the read_bulk_callback where an
* interrupt for the next data block could come in.
* Without some kind of ordering on the ark, we would have to report the
* error for the next block of data as well...
* For now, let's pretend this can't happen.
*/
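/*
* Editor's note (illustration, not part of the original driver): the
* scheme described above can be seen in the two paths below. The
* interrupt callback ORs incoming error bits into priv->lsr via
* ark3116_update_lsr(), and ark3116_process_read_urb() then latches and
* clears those bits under status_lock before flagging the next block of
* bulk data.
*/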
static void ark3116_process_read_urb(struct urb *urb)
{
struct usb_serial_port *port = urb->context;
struct ark3116_private *priv = usb_get_serial_port_data(port);
unsigned char *data = urb->transfer_buffer;
char tty_flag = TTY_NORMAL;
unsigned long flags;
__u32 lsr;
/* update line status */
spin_lock_irqsave(&priv->status_lock, flags);
lsr = priv->lsr;
priv->lsr &= ~UART_LSR_BRK_ERROR_BITS;
spin_unlock_irqrestore(&priv->status_lock, flags);
if (!urb->actual_length)
return;
if (lsr & UART_LSR_BRK_ERROR_BITS) {
if (lsr & UART_LSR_BI)
tty_flag = TTY_BREAK;
else if (lsr & UART_LSR_PE)
tty_flag = TTY_PARITY;
else if (lsr & UART_LSR_FE)
tty_flag = TTY_FRAME;
/* overrun is special, not associated with a char */
if (lsr & UART_LSR_OE)
tty_insert_flip_char(&port->port, 0, TTY_OVERRUN);
}
tty_insert_flip_string_fixed_flag(&port->port, data, tty_flag,
urb->actual_length);
tty_flip_buffer_push(&port->port);
}
static struct usb_serial_driver ark3116_device = {
.driver = {
.owner = THIS_MODULE,
.name = "ark3116",
},
.id_table = id_table,
.num_ports = 1,
.num_bulk_in = 1,
.num_bulk_out = 1,
.num_interrupt_in = 1,
.port_probe = ark3116_port_probe,
.port_remove = ark3116_port_remove,
.set_termios = ark3116_set_termios,
.tiocmget = ark3116_tiocmget,
.tiocmset = ark3116_tiocmset,
.tiocmiwait = usb_serial_generic_tiocmiwait,
.get_icount = usb_serial_generic_get_icount,
.open = ark3116_open,
.close = ark3116_close,
.break_ctl = ark3116_break_ctl,
.read_int_callback = ark3116_read_int_callback,
.process_read_urb = ark3116_process_read_urb,
};
static struct usb_serial_driver * const serial_drivers[] = {
&ark3116_device, NULL
};
module_usb_serial_driver(serial_drivers, id_table);
MODULE_LICENSE("GPL");
MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);
/*
* The following describes what I learned from studying the old
* ark3116.c driver, disassembling the Windows driver, and some lucky
* guesses. Since I do not have any datasheet or other
* documentation, inaccuracies are almost guaranteed.
*
* Some specs for the ARK3116 can be found here:
* http://web.archive.org/web/20060318000438/
* www.arkmicro.com/en/products/view.php?id=10
* On that page, 2 GPIO pins are mentioned: I assume these are the
* OUT1 and OUT2 pins of the UART, so I added support for those
* through the MCR. Since the pins are not available on my hardware,
* I could not verify this.
* Also, it states there is "on-chip hardware flow control". I have
* discovered how to enable that. Unfortunately, I do not know how to
* enable XON/XOFF (software) flow control, which would need support
* from the chip as well to work. Because of the wording on the web
* page there is a real possibility the chip simply does not support
* software flow control.
*
* I got my ark3116 as part of a mobile phone adapter cable. On the
* PCB, the following numbered contacts are present:
*
* 1:- +5V
* 2:o DTR
* 3:i RX
* 4:i DCD
* 5:o RTS
* 6:o TX
* 7:i RI
* 8:i DSR
* 10:- 0V
* 11:i CTS
*
* On my chip, all signals seem to be 3.3V, but 5V tolerant. But that
* may be different for the one you have ;-).
*
* The Windows driver limits the registers to 0-F, so I assume there
* are actually 16 present on the device.
*
* On a UART interrupt, 4 bytes of data come in on the interrupt
* endpoint. The bytes are 0xe8 IIR LSR MSR.
*
* The baudrate seems to be generated from the 12MHz crystal, using
* 4-times subsampling. So quot=12e6/(4*baud). Also see description
* of register E.
*
* Registers 0-7:
* These seem to be the same as for a regular 16450. The FCR is set
* to UART_FCR_DMA_SELECT (0x8), I guess to enable transfers between
* the UART and the USB bridge/DMA engine.
*
* Register 8:
* By trial and error, I found out that bit 0 enables hardware CTS,
* stopping TX when CTS is +5V. Bit 1 does the same for RTS, making
* RTS +5V when the 3116 cannot transfer the data to the USB bus
* (verified by disabling the reading URB). Note that as far as I can
* tell, the Windows driver does NOT use this, so there might be some
* hardware bug or something.
*
* According to a patch provided here
* https://lore.kernel.org/lkml/[email protected]
* the ARK3116 can also be used as an IrDA dongle. Since I do not have
* such a thing, I could not investigate that aspect. However, I can
* speculate ;-).
*
* - IrDA encodes data differently than RS232. Most likely, one of
* the bits in registers 9..E enables the IR ENDEC (encoder/decoder).
* - Depending on the IR transceiver, the input and output need to be
* inverted, so there are probably bits for that as well.
* - IrDA is half-duplex, so there should be a bit for selecting that.
*
* This still leaves at least two registers unaccounted for. Perhaps
* the chip can do XON/XOFF or CRC in HW?
*
* Register 9:
* Set to 0x00 for IrDA, when the baudrate is initialised.
*
* Register A:
* Set to 0x01 for IrDA, at init.
*
* Register B:
* Set to 0x01 for IrDA, 0x00 for RS232, at init.
*
* Register C:
* Set to 0x00 for IrDA, at init.
*
* Register D:
* Set to 0x41 for IrDA, at init.
*
* Register E:
* Some kind of baudrate override. The Windows driver seems to set
* this to 0x00 for normal baudrates, 0x01 for 460800, 0x02 for 921600.
* Since 460800 and 921600 cannot be obtained by dividing 3 MHz by an integer,
* it could be some kind of subdivisor thingy.
* However, it does not seem to do anything: selecting 921600 (divisor 3,
* reg E=2) still gets 1 MHz. I also checked whether registers 9, C or F would
* work, but they don't.
*
* Register F: unknown
*/
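/*
* Editor's illustration (not part of the original driver), assuming the
* 12 MHz crystal and 4x subsampling described above are correct:
*
*   quot = 12000000 / (4 * baud)
*
* so asking for 921600 baud gives quot = 3 (the divisor the register E
* note above mentions), and the real line rate is
* 12000000 / (4 * 3) = 1 MHz, matching the observation that 921600 is
* never actually reached. The 4-byte interrupt packets mentioned above
* decode as data[0] = 0xe8 (marker), data[1] = IIR, data[2] = LSR,
* data[3] = MSR, which is exactly how ark3116_read_int_callback()
* consumes them.
*/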
| linux-master | drivers/usb/serial/ark3116.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Fintek F81232 USB to serial adaptor driver
* Fintek F81532A/534A/535/536 USB to 2/4/8/12 serial adaptor driver
*
* Copyright (C) 2012 Greg Kroah-Hartman ([email protected])
* Copyright (C) 2012 Linux Foundation
*/
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/tty.h>
#include <linux/tty_driver.h>
#include <linux/tty_flip.h>
#include <linux/serial.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/mutex.h>
#include <linux/uaccess.h>
#include <linux/usb.h>
#include <linux/usb/serial.h>
#include <linux/serial_reg.h>
#define F81232_ID \
{ USB_DEVICE(0x1934, 0x0706) } /* 1 port UART device */
#define F81534A_SERIES_ID \
{ USB_DEVICE(0x2c42, 0x1602) }, /* In-Box 2 port UART device */ \
{ USB_DEVICE(0x2c42, 0x1604) }, /* In-Box 4 port UART device */ \
{ USB_DEVICE(0x2c42, 0x1605) }, /* In-Box 8 port UART device */ \
{ USB_DEVICE(0x2c42, 0x1606) }, /* In-Box 12 port UART device */ \
{ USB_DEVICE(0x2c42, 0x1608) }, /* Non-Flash type */ \
{ USB_DEVICE(0x2c42, 0x1632) }, /* 2 port UART device */ \
{ USB_DEVICE(0x2c42, 0x1634) }, /* 4 port UART device */ \
{ USB_DEVICE(0x2c42, 0x1635) }, /* 8 port UART device */ \
{ USB_DEVICE(0x2c42, 0x1636) } /* 12 port UART device */
#define F81534A_CTRL_ID \
{ USB_DEVICE(0x2c42, 0x16f8) } /* Global control device */
static const struct usb_device_id f81232_id_table[] = {
F81232_ID,
{ } /* Terminating entry */
};
static const struct usb_device_id f81534a_id_table[] = {
F81534A_SERIES_ID,
{ } /* Terminating entry */
};
static const struct usb_device_id f81534a_ctrl_id_table[] = {
F81534A_CTRL_ID,
{ } /* Terminating entry */
};
static const struct usb_device_id combined_id_table[] = {
F81232_ID,
F81534A_SERIES_ID,
F81534A_CTRL_ID,
{ } /* Terminating entry */
};
MODULE_DEVICE_TABLE(usb, combined_id_table);
/* Maximum baudrate for F81232 */
#define F81232_MAX_BAUDRATE 1500000
#define F81232_DEF_BAUDRATE 9600
/* USB Control EP parameter */
#define F81232_REGISTER_REQUEST 0xa0
#define F81232_GET_REGISTER 0xc0
#define F81232_SET_REGISTER 0x40
#define F81534A_ACCESS_REG_RETRY 2
#define SERIAL_BASE_ADDRESS 0x0120
#define RECEIVE_BUFFER_REGISTER (0x00 + SERIAL_BASE_ADDRESS)
#define INTERRUPT_ENABLE_REGISTER (0x01 + SERIAL_BASE_ADDRESS)
#define FIFO_CONTROL_REGISTER (0x02 + SERIAL_BASE_ADDRESS)
#define LINE_CONTROL_REGISTER (0x03 + SERIAL_BASE_ADDRESS)
#define MODEM_CONTROL_REGISTER (0x04 + SERIAL_BASE_ADDRESS)
#define LINE_STATUS_REGISTER (0x05 + SERIAL_BASE_ADDRESS)
#define MODEM_STATUS_REGISTER (0x06 + SERIAL_BASE_ADDRESS)
/*
* F81232 Clock registers (106h)
*
* Bit1-0: Clock source selector
* 00: 1.846MHz.
* 01: 18.46MHz.
* 10: 24MHz.
* 11: 14.77MHz.
*/
#define F81232_CLK_REGISTER 0x106
#define F81232_CLK_1_846_MHZ 0
#define F81232_CLK_18_46_MHZ BIT(0)
#define F81232_CLK_24_MHZ BIT(1)
#define F81232_CLK_14_77_MHZ (BIT(1) | BIT(0))
#define F81232_CLK_MASK GENMASK(1, 0)
#define F81534A_MODE_REG 0x107
#define F81534A_TRIGGER_MASK GENMASK(3, 2)
#define F81534A_TRIGGER_MULTIPLE_4X BIT(3)
#define F81534A_FIFO_128BYTE (BIT(1) | BIT(0))
/* Serial port self GPIO control, 2bytes [control&output data][input data] */
#define F81534A_GPIO_REG 0x10e
#define F81534A_GPIO_MODE2_DIR BIT(6) /* 1: input, 0: output */
#define F81534A_GPIO_MODE1_DIR BIT(5)
#define F81534A_GPIO_MODE0_DIR BIT(4)
#define F81534A_GPIO_MODE2_OUTPUT BIT(2)
#define F81534A_GPIO_MODE1_OUTPUT BIT(1)
#define F81534A_GPIO_MODE0_OUTPUT BIT(0)
#define F81534A_CTRL_CMD_ENABLE_PORT 0x116
struct f81232_private {
struct mutex lock;
u8 modem_control;
u8 modem_status;
u8 shadow_lcr;
speed_t baud_base;
struct work_struct lsr_work;
struct work_struct interrupt_work;
struct usb_serial_port *port;
};
static u32 const baudrate_table[] = { 115200, 921600, 1152000, 1500000 };
static u8 const clock_table[] = { F81232_CLK_1_846_MHZ, F81232_CLK_14_77_MHZ,
F81232_CLK_18_46_MHZ, F81232_CLK_24_MHZ };
static int calc_baud_divisor(speed_t baudrate, speed_t clockrate)
{
return DIV_ROUND_CLOSEST(clockrate, baudrate);
}
static int f81232_get_register(struct usb_serial_port *port, u16 reg, u8 *val)
{
int status;
struct usb_device *dev = port->serial->dev;
status = usb_control_msg_recv(dev,
0,
F81232_REGISTER_REQUEST,
F81232_GET_REGISTER,
reg,
0,
val,
sizeof(*val),
USB_CTRL_GET_TIMEOUT,
GFP_KERNEL);
if (status) {
dev_err(&port->dev, "%s failed status: %d\n", __func__, status);
status = usb_translate_errors(status);
}
return status;
}
static int f81232_set_register(struct usb_serial_port *port, u16 reg, u8 val)
{
int status;
struct usb_device *dev = port->serial->dev;
status = usb_control_msg_send(dev,
0,
F81232_REGISTER_REQUEST,
F81232_SET_REGISTER,
reg,
0,
&val,
sizeof(val),
USB_CTRL_SET_TIMEOUT,
GFP_KERNEL);
if (status) {
dev_err(&port->dev, "%s failed status: %d\n", __func__, status);
status = usb_translate_errors(status);
}
return status;
}
static int f81232_set_mask_register(struct usb_serial_port *port, u16 reg,
u8 mask, u8 val)
{
int status;
u8 tmp;
status = f81232_get_register(port, reg, &tmp);
if (status)
return status;
tmp = (tmp & ~mask) | (val & mask);
return f81232_set_register(port, reg, tmp);
}
static void f81232_read_msr(struct usb_serial_port *port)
{
int status;
u8 current_msr;
struct tty_struct *tty;
struct f81232_private *priv = usb_get_serial_port_data(port);
mutex_lock(&priv->lock);
status = f81232_get_register(port, MODEM_STATUS_REGISTER,
&current_msr);
if (status) {
dev_err(&port->dev, "%s fail, status: %d\n", __func__, status);
mutex_unlock(&priv->lock);
return;
}
if (!(current_msr & UART_MSR_ANY_DELTA)) {
mutex_unlock(&priv->lock);
return;
}
priv->modem_status = current_msr;
if (current_msr & UART_MSR_DCTS)
port->icount.cts++;
if (current_msr & UART_MSR_DDSR)
port->icount.dsr++;
if (current_msr & UART_MSR_TERI)
port->icount.rng++;
if (current_msr & UART_MSR_DDCD) {
port->icount.dcd++;
tty = tty_port_tty_get(&port->port);
if (tty) {
usb_serial_handle_dcd_change(port, tty,
current_msr & UART_MSR_DCD);
tty_kref_put(tty);
}
}
wake_up_interruptible(&port->port.delta_msr_wait);
mutex_unlock(&priv->lock);
}
static int f81232_set_mctrl(struct usb_serial_port *port,
unsigned int set, unsigned int clear)
{
u8 val;
int status;
struct f81232_private *priv = usb_get_serial_port_data(port);
if (((set | clear) & (TIOCM_DTR | TIOCM_RTS)) == 0)
return 0; /* no change */
/* 'set' takes precedence over 'clear' */
clear &= ~set;
/* force enable interrupt with OUT2 */
mutex_lock(&priv->lock);
val = UART_MCR_OUT2 | priv->modem_control;
if (clear & TIOCM_DTR)
val &= ~UART_MCR_DTR;
if (clear & TIOCM_RTS)
val &= ~UART_MCR_RTS;
if (set & TIOCM_DTR)
val |= UART_MCR_DTR;
if (set & TIOCM_RTS)
val |= UART_MCR_RTS;
dev_dbg(&port->dev, "%s new:%02x old:%02x\n", __func__,
val, priv->modem_control);
status = f81232_set_register(port, MODEM_CONTROL_REGISTER, val);
if (status) {
dev_err(&port->dev, "%s set MCR status < 0\n", __func__);
mutex_unlock(&priv->lock);
return status;
}
priv->modem_control = val;
mutex_unlock(&priv->lock);
return 0;
}
static void f81232_update_line_status(struct usb_serial_port *port,
unsigned char *data,
size_t actual_length)
{
struct f81232_private *priv = usb_get_serial_port_data(port);
if (!actual_length)
return;
switch (data[0] & 0x07) {
case 0x00: /* msr change */
dev_dbg(&port->dev, "IIR: MSR Change: %02x\n", data[0]);
schedule_work(&priv->interrupt_work);
break;
case 0x02: /* tx-empty */
break;
case 0x04: /* rx data available */
break;
case 0x06: /* lsr change */
/* we can ignore it; the LSR is read from the bulk-in data */
dev_dbg(&port->dev, "IIR: LSR Change: %02x\n", data[0]);
break;
}
}
static void f81232_read_int_callback(struct urb *urb)
{
struct usb_serial_port *port = urb->context;
unsigned char *data = urb->transfer_buffer;
unsigned int actual_length = urb->actual_length;
int status = urb->status;
int retval;
switch (status) {
case 0:
/* success */
break;
case -ECONNRESET:
case -ENOENT:
case -ESHUTDOWN:
/* this urb is terminated, clean up */
dev_dbg(&port->dev, "%s - urb shutting down with status: %d\n",
__func__, status);
return;
default:
dev_dbg(&port->dev, "%s - nonzero urb status received: %d\n",
__func__, status);
goto exit;
}
usb_serial_debug_data(&port->dev, __func__,
urb->actual_length, urb->transfer_buffer);
f81232_update_line_status(port, data, actual_length);
exit:
retval = usb_submit_urb(urb, GFP_ATOMIC);
if (retval)
dev_err(&urb->dev->dev,
"%s - usb_submit_urb failed with result %d\n",
__func__, retval);
}
static char f81232_handle_lsr(struct usb_serial_port *port, u8 lsr)
{
struct f81232_private *priv = usb_get_serial_port_data(port);
char tty_flag = TTY_NORMAL;
if (!(lsr & UART_LSR_BRK_ERROR_BITS))
return tty_flag;
if (lsr & UART_LSR_BI) {
tty_flag = TTY_BREAK;
port->icount.brk++;
usb_serial_handle_break(port);
} else if (lsr & UART_LSR_PE) {
tty_flag = TTY_PARITY;
port->icount.parity++;
} else if (lsr & UART_LSR_FE) {
tty_flag = TTY_FRAME;
port->icount.frame++;
}
if (lsr & UART_LSR_OE) {
port->icount.overrun++;
schedule_work(&priv->lsr_work);
tty_insert_flip_char(&port->port, 0, TTY_OVERRUN);
}
return tty_flag;
}
static void f81232_process_read_urb(struct urb *urb)
{
struct usb_serial_port *port = urb->context;
unsigned char *data = urb->transfer_buffer;
char tty_flag;
unsigned int i;
u8 lsr;
/*
* When opening the port we get a 1-byte packet with the current LSR,
* which we discard.
*/
if ((urb->actual_length < 2) || (urb->actual_length % 2))
return;
/* bulk-in data: [LSR(1Byte)+DATA(1Byte)][LSR(1Byte)+DATA(1Byte)]... */
for (i = 0; i < urb->actual_length; i += 2) {
lsr = data[i];
tty_flag = f81232_handle_lsr(port, lsr);
if (port->sysrq) {
if (usb_serial_handle_sysrq_char(port, data[i + 1]))
continue;
}
tty_insert_flip_char(&port->port, data[i + 1], tty_flag);
}
tty_flip_buffer_push(&port->port);
}
static void f81534a_process_read_urb(struct urb *urb)
{
struct usb_serial_port *port = urb->context;
unsigned char *data = urb->transfer_buffer;
char tty_flag;
unsigned int i;
u8 lsr;
u8 len;
if (urb->actual_length < 3) {
dev_err(&port->dev, "short message received: %d\n",
urb->actual_length);
return;
}
len = data[0];
if (len != urb->actual_length) {
dev_err(&port->dev, "malformed message received: %d (%d)\n",
urb->actual_length, len);
return;
}
/* bulk-in data: [LEN][Data.....][LSR] */
lsr = data[len - 1];
tty_flag = f81232_handle_lsr(port, lsr);
if (port->sysrq) {
for (i = 1; i < len - 1; ++i) {
if (!usb_serial_handle_sysrq_char(port, data[i])) {
tty_insert_flip_char(&port->port, data[i],
tty_flag);
}
}
} else {
tty_insert_flip_string_fixed_flag(&port->port, &data[1],
tty_flag, len - 2);
}
tty_flip_buffer_push(&port->port);
}
static int f81232_break_ctl(struct tty_struct *tty, int break_state)
{
struct usb_serial_port *port = tty->driver_data;
struct f81232_private *priv = usb_get_serial_port_data(port);
int status;
mutex_lock(&priv->lock);
if (break_state)
priv->shadow_lcr |= UART_LCR_SBC;
else
priv->shadow_lcr &= ~UART_LCR_SBC;
status = f81232_set_register(port, LINE_CONTROL_REGISTER,
priv->shadow_lcr);
if (status)
dev_err(&port->dev, "set break failed: %d\n", status);
mutex_unlock(&priv->lock);
return status;
}
static int f81232_find_clk(speed_t baudrate)
{
int idx;
for (idx = 0; idx < ARRAY_SIZE(baudrate_table); ++idx) {
if (baudrate <= baudrate_table[idx] &&
baudrate_table[idx] % baudrate == 0)
return idx;
}
return -EINVAL;
}
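/*
* Editor's illustration (not part of the original driver), based on the
* tables above: f81232_find_clk(38400) returns index 0 because
* 38400 <= 115200 and 115200 % 38400 == 0, so the 1.846 MHz clock is
* selected, baud_base becomes 115200 and
* calc_baud_divisor(38400, 115200) = DIV_ROUND_CLOSEST(115200, 38400) = 3.
* A rate with no evenly dividing table entry (e.g. 256000) yields -EINVAL,
* and f81232_set_baudrate() then falls back to the old or default rate.
*/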
static void f81232_set_baudrate(struct tty_struct *tty,
struct usb_serial_port *port, speed_t baudrate,
speed_t old_baudrate)
{
struct f81232_private *priv = usb_get_serial_port_data(port);
u8 lcr;
int divisor;
int status = 0;
int i;
int idx;
speed_t baud_list[] = { baudrate, old_baudrate, F81232_DEF_BAUDRATE };
for (i = 0; i < ARRAY_SIZE(baud_list); ++i) {
baudrate = baud_list[i];
if (baudrate == 0) {
tty_encode_baud_rate(tty, 0, 0);
return;
}
idx = f81232_find_clk(baudrate);
if (idx >= 0) {
tty_encode_baud_rate(tty, baudrate, baudrate);
break;
}
}
if (idx < 0)
return;
priv->baud_base = baudrate_table[idx];
divisor = calc_baud_divisor(baudrate, priv->baud_base);
status = f81232_set_mask_register(port, F81232_CLK_REGISTER,
F81232_CLK_MASK, clock_table[idx]);
if (status) {
dev_err(&port->dev, "%s failed to set CLK_REG: %d\n",
__func__, status);
return;
}
status = f81232_get_register(port, LINE_CONTROL_REGISTER,
&lcr); /* get LCR */
if (status) {
dev_err(&port->dev, "%s failed to get LCR: %d\n",
__func__, status);
return;
}
status = f81232_set_register(port, LINE_CONTROL_REGISTER,
lcr | UART_LCR_DLAB); /* Enable DLAB */
if (status) {
dev_err(&port->dev, "%s failed to set DLAB: %d\n",
__func__, status);
return;
}
status = f81232_set_register(port, RECEIVE_BUFFER_REGISTER,
divisor & 0x00ff); /* low */
if (status) {
dev_err(&port->dev, "%s failed to set baudrate LSB: %d\n",
__func__, status);
goto reapply_lcr;
}
status = f81232_set_register(port, INTERRUPT_ENABLE_REGISTER,
(divisor & 0xff00) >> 8); /* high */
if (status) {
dev_err(&port->dev, "%s failed to set baudrate MSB: %d\n",
__func__, status);
}
reapply_lcr:
status = f81232_set_register(port, LINE_CONTROL_REGISTER,
lcr & ~UART_LCR_DLAB);
if (status) {
dev_err(&port->dev, "%s failed to set DLAB: %d\n",
__func__, status);
}
}
static int f81232_port_enable(struct usb_serial_port *port)
{
u8 val;
int status;
/* FIFO on, trigger 8, clear TX/RX */
val = UART_FCR_TRIGGER_8 | UART_FCR_ENABLE_FIFO | UART_FCR_CLEAR_RCVR |
UART_FCR_CLEAR_XMIT;
status = f81232_set_register(port, FIFO_CONTROL_REGISTER, val);
if (status) {
dev_err(&port->dev, "%s failed to set FCR: %d\n",
__func__, status);
return status;
}
/* MSR interrupt only; the LSR is read from the bulk-in odd bytes */
status = f81232_set_register(port, INTERRUPT_ENABLE_REGISTER,
UART_IER_MSI);
if (status) {
dev_err(&port->dev, "%s failed to set IER: %d\n",
__func__, status);
return status;
}
return 0;
}
static int f81232_port_disable(struct usb_serial_port *port)
{
int status;
status = f81232_set_register(port, INTERRUPT_ENABLE_REGISTER, 0);
if (status) {
dev_err(&port->dev, "%s failed to set IER: %d\n",
__func__, status);
return status;
}
return 0;
}
static void f81232_set_termios(struct tty_struct *tty,
struct usb_serial_port *port,
const struct ktermios *old_termios)
{
struct f81232_private *priv = usb_get_serial_port_data(port);
u8 new_lcr = 0;
int status = 0;
speed_t baudrate;
speed_t old_baud;
/* Don't change anything if nothing has changed */
if (old_termios && !tty_termios_hw_change(&tty->termios, old_termios))
return;
if (C_BAUD(tty) == B0)
f81232_set_mctrl(port, 0, TIOCM_DTR | TIOCM_RTS);
else if (old_termios && (old_termios->c_cflag & CBAUD) == B0)
f81232_set_mctrl(port, TIOCM_DTR | TIOCM_RTS, 0);
baudrate = tty_get_baud_rate(tty);
if (baudrate > 0) {
if (old_termios)
old_baud = tty_termios_baud_rate(old_termios);
else
old_baud = F81232_DEF_BAUDRATE;
f81232_set_baudrate(tty, port, baudrate, old_baud);
}
if (C_PARENB(tty)) {
new_lcr |= UART_LCR_PARITY;
if (!C_PARODD(tty))
new_lcr |= UART_LCR_EPAR;
if (C_CMSPAR(tty))
new_lcr |= UART_LCR_SPAR;
}
if (C_CSTOPB(tty))
new_lcr |= UART_LCR_STOP;
new_lcr |= UART_LCR_WLEN(tty_get_char_size(tty->termios.c_cflag));
mutex_lock(&priv->lock);
new_lcr |= (priv->shadow_lcr & UART_LCR_SBC);
status = f81232_set_register(port, LINE_CONTROL_REGISTER, new_lcr);
if (status) {
dev_err(&port->dev, "%s failed to set LCR: %d\n",
__func__, status);
}
priv->shadow_lcr = new_lcr;
mutex_unlock(&priv->lock);
}
static int f81232_tiocmget(struct tty_struct *tty)
{
int r;
struct usb_serial_port *port = tty->driver_data;
struct f81232_private *port_priv = usb_get_serial_port_data(port);
u8 mcr, msr;
/* force get current MSR changed state */
f81232_read_msr(port);
mutex_lock(&port_priv->lock);
mcr = port_priv->modem_control;
msr = port_priv->modem_status;
mutex_unlock(&port_priv->lock);
r = (mcr & UART_MCR_DTR ? TIOCM_DTR : 0) |
(mcr & UART_MCR_RTS ? TIOCM_RTS : 0) |
(msr & UART_MSR_CTS ? TIOCM_CTS : 0) |
(msr & UART_MSR_DCD ? TIOCM_CAR : 0) |
(msr & UART_MSR_RI ? TIOCM_RI : 0) |
(msr & UART_MSR_DSR ? TIOCM_DSR : 0);
return r;
}
static int f81232_tiocmset(struct tty_struct *tty,
unsigned int set, unsigned int clear)
{
struct usb_serial_port *port = tty->driver_data;
return f81232_set_mctrl(port, set, clear);
}
static int f81232_open(struct tty_struct *tty, struct usb_serial_port *port)
{
int result;
result = f81232_port_enable(port);
if (result)
return result;
/* Setup termios */
if (tty)
f81232_set_termios(tty, port, NULL);
result = usb_submit_urb(port->interrupt_in_urb, GFP_KERNEL);
if (result) {
dev_err(&port->dev, "%s - failed submitting interrupt urb,"
" error %d\n", __func__, result);
return result;
}
result = usb_serial_generic_open(tty, port);
if (result) {
usb_kill_urb(port->interrupt_in_urb);
return result;
}
return 0;
}
static int f81534a_open(struct tty_struct *tty, struct usb_serial_port *port)
{
int status;
u8 mask;
u8 val;
val = F81534A_TRIGGER_MULTIPLE_4X | F81534A_FIFO_128BYTE;
mask = F81534A_TRIGGER_MASK | F81534A_FIFO_128BYTE;
status = f81232_set_mask_register(port, F81534A_MODE_REG, mask, val);
if (status) {
dev_err(&port->dev, "failed to set MODE_REG: %d\n", status);
return status;
}
return f81232_open(tty, port);
}
static void f81232_close(struct usb_serial_port *port)
{
struct f81232_private *port_priv = usb_get_serial_port_data(port);
f81232_port_disable(port);
usb_serial_generic_close(port);
usb_kill_urb(port->interrupt_in_urb);
flush_work(&port_priv->interrupt_work);
flush_work(&port_priv->lsr_work);
}
static void f81232_dtr_rts(struct usb_serial_port *port, int on)
{
if (on)
f81232_set_mctrl(port, TIOCM_DTR | TIOCM_RTS, 0);
else
f81232_set_mctrl(port, 0, TIOCM_DTR | TIOCM_RTS);
}
static bool f81232_tx_empty(struct usb_serial_port *port)
{
int status;
u8 tmp;
status = f81232_get_register(port, LINE_STATUS_REGISTER, &tmp);
if (!status) {
if ((tmp & UART_LSR_TEMT) != UART_LSR_TEMT)
return false;
}
return true;
}
static int f81232_carrier_raised(struct usb_serial_port *port)
{
u8 msr;
struct f81232_private *priv = usb_get_serial_port_data(port);
mutex_lock(&priv->lock);
msr = priv->modem_status;
mutex_unlock(&priv->lock);
if (msr & UART_MSR_DCD)
return 1;
return 0;
}
static void f81232_get_serial(struct tty_struct *tty, struct serial_struct *ss)
{
struct usb_serial_port *port = tty->driver_data;
struct f81232_private *priv = usb_get_serial_port_data(port);
ss->baud_base = priv->baud_base;
}
static void f81232_interrupt_work(struct work_struct *work)
{
struct f81232_private *priv =
container_of(work, struct f81232_private, interrupt_work);
f81232_read_msr(priv->port);
}
static void f81232_lsr_worker(struct work_struct *work)
{
struct f81232_private *priv;
struct usb_serial_port *port;
int status;
u8 tmp;
priv = container_of(work, struct f81232_private, lsr_work);
port = priv->port;
status = f81232_get_register(port, LINE_STATUS_REGISTER, &tmp);
if (status)
dev_warn(&port->dev, "read LSR failed: %d\n", status);
}
static int f81534a_ctrl_set_register(struct usb_interface *intf, u16 reg,
u16 size, void *val)
{
struct usb_device *dev = interface_to_usbdev(intf);
int retry = F81534A_ACCESS_REG_RETRY;
int status;
while (retry--) {
status = usb_control_msg_send(dev,
0,
F81232_REGISTER_REQUEST,
F81232_SET_REGISTER,
reg,
0,
val,
size,
USB_CTRL_SET_TIMEOUT,
GFP_KERNEL);
if (status) {
status = usb_translate_errors(status);
if (status == -EIO)
continue;
}
break;
}
if (status) {
dev_err(&intf->dev, "failed to set register 0x%x: %d\n",
reg, status);
}
return status;
}
static int f81534a_ctrl_enable_all_ports(struct usb_interface *intf, bool en)
{
unsigned char enable[2] = {0};
int status;
/*
* Enable all available serial ports, defined as follows:
* bit 15 : Reset behavior (when HUB got soft reset)
* 0: maintain all serial port enabled state.
* 1: disable all serial port.
* bits 0~11: serial port enable bits.
*/
if (en) {
enable[0] = 0xff;
enable[1] = 0x8f;
}
status = f81534a_ctrl_set_register(intf, F81534A_CTRL_CMD_ENABLE_PORT,
sizeof(enable), enable);
if (status)
dev_err(&intf->dev, "failed to enable ports: %d\n", status);
return status;
}
static int f81534a_ctrl_probe(struct usb_interface *intf,
const struct usb_device_id *id)
{
return f81534a_ctrl_enable_all_ports(intf, true);
}
static void f81534a_ctrl_disconnect(struct usb_interface *intf)
{
f81534a_ctrl_enable_all_ports(intf, false);
}
static int f81534a_ctrl_resume(struct usb_interface *intf)
{
return f81534a_ctrl_enable_all_ports(intf, true);
}
static int f81232_port_probe(struct usb_serial_port *port)
{
struct f81232_private *priv;
priv = devm_kzalloc(&port->dev, sizeof(*priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
mutex_init(&priv->lock);
INIT_WORK(&priv->interrupt_work, f81232_interrupt_work);
INIT_WORK(&priv->lsr_work, f81232_lsr_worker);
usb_set_serial_port_data(port, priv);
priv->port = port;
return 0;
}
static int f81534a_port_probe(struct usb_serial_port *port)
{
int status;
/* tri-state with pull-high, default RS232 Mode */
status = f81232_set_register(port, F81534A_GPIO_REG,
F81534A_GPIO_MODE2_DIR);
if (status)
return status;
return f81232_port_probe(port);
}
static int f81232_suspend(struct usb_serial *serial, pm_message_t message)
{
struct usb_serial_port *port = serial->port[0];
struct f81232_private *port_priv = usb_get_serial_port_data(port);
int i;
for (i = 0; i < ARRAY_SIZE(port->read_urbs); ++i)
usb_kill_urb(port->read_urbs[i]);
usb_kill_urb(port->interrupt_in_urb);
if (port_priv) {
flush_work(&port_priv->interrupt_work);
flush_work(&port_priv->lsr_work);
}
return 0;
}
static int f81232_resume(struct usb_serial *serial)
{
struct usb_serial_port *port = serial->port[0];
int result;
if (tty_port_initialized(&port->port)) {
result = usb_submit_urb(port->interrupt_in_urb, GFP_NOIO);
if (result) {
dev_err(&port->dev, "submit interrupt urb failed: %d\n",
result);
return result;
}
}
return usb_serial_generic_resume(serial);
}
static struct usb_serial_driver f81232_device = {
.driver = {
.owner = THIS_MODULE,
.name = "f81232",
},
.id_table = f81232_id_table,
.num_ports = 1,
.bulk_in_size = 256,
.bulk_out_size = 256,
.open = f81232_open,
.close = f81232_close,
.dtr_rts = f81232_dtr_rts,
.carrier_raised = f81232_carrier_raised,
.get_serial = f81232_get_serial,
.break_ctl = f81232_break_ctl,
.set_termios = f81232_set_termios,
.tiocmget = f81232_tiocmget,
.tiocmset = f81232_tiocmset,
.tiocmiwait = usb_serial_generic_tiocmiwait,
.tx_empty = f81232_tx_empty,
.process_read_urb = f81232_process_read_urb,
.read_int_callback = f81232_read_int_callback,
.port_probe = f81232_port_probe,
.suspend = f81232_suspend,
.resume = f81232_resume,
};
static struct usb_serial_driver f81534a_device = {
.driver = {
.owner = THIS_MODULE,
.name = "f81534a",
},
.id_table = f81534a_id_table,
.num_ports = 1,
.open = f81534a_open,
.close = f81232_close,
.dtr_rts = f81232_dtr_rts,
.carrier_raised = f81232_carrier_raised,
.get_serial = f81232_get_serial,
.break_ctl = f81232_break_ctl,
.set_termios = f81232_set_termios,
.tiocmget = f81232_tiocmget,
.tiocmset = f81232_tiocmset,
.tiocmiwait = usb_serial_generic_tiocmiwait,
.tx_empty = f81232_tx_empty,
.process_read_urb = f81534a_process_read_urb,
.read_int_callback = f81232_read_int_callback,
.port_probe = f81534a_port_probe,
.suspend = f81232_suspend,
.resume = f81232_resume,
};
static struct usb_serial_driver * const serial_drivers[] = {
&f81232_device,
&f81534a_device,
NULL,
};
static struct usb_driver f81534a_ctrl_driver = {
.name = "f81534a_ctrl",
.id_table = f81534a_ctrl_id_table,
.probe = f81534a_ctrl_probe,
.disconnect = f81534a_ctrl_disconnect,
.resume = f81534a_ctrl_resume,
};
static int __init f81232_init(void)
{
int status;
status = usb_register_driver(&f81534a_ctrl_driver, THIS_MODULE,
KBUILD_MODNAME);
if (status)
return status;
status = usb_serial_register_drivers(serial_drivers, KBUILD_MODNAME,
combined_id_table);
if (status) {
usb_deregister(&f81534a_ctrl_driver);
return status;
}
return 0;
}
static void __exit f81232_exit(void)
{
usb_serial_deregister_drivers(serial_drivers);
usb_deregister(&f81534a_ctrl_driver);
}
module_init(f81232_init);
module_exit(f81232_exit);
MODULE_DESCRIPTION("Fintek F81232/532A/534A/535/536 USB to serial driver");
MODULE_AUTHOR("Greg Kroah-Hartman <[email protected]>");
MODULE_AUTHOR("Peter Hong <[email protected]>");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/usb/serial/f81232.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* spcp8x5 USB to serial adaptor driver
*
* Copyright (C) 2010-2013 Johan Hovold ([email protected])
* Copyright (C) 2006 Linxb ([email protected])
* Copyright (C) 2006 S1 Corp.
*
* Original driver for 2.6.10 pl2303 driver by
* Greg Kroah-Hartman ([email protected])
* Changes for 2.6.20 by Harald Klein <[email protected]>
*/
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/tty.h>
#include <linux/tty_driver.h>
#include <linux/tty_flip.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/usb.h>
#include <linux/usb/serial.h>
#define DRIVER_DESC "SPCP8x5 USB to serial adaptor driver"
#define SPCP825_QUIRK_NO_UART_STATUS 0x01
#define SPCP825_QUIRK_NO_WORK_MODE 0x02
#define SPCP8x5_007_VID 0x04FC
#define SPCP8x5_007_PID 0x0201
#define SPCP8x5_008_VID 0x04fc
#define SPCP8x5_008_PID 0x0235
#define SPCP8x5_PHILIPS_VID 0x0471
#define SPCP8x5_PHILIPS_PID 0x081e
#define SPCP8x5_INTERMATIC_VID 0x04FC
#define SPCP8x5_INTERMATIC_PID 0x0204
#define SPCP8x5_835_VID 0x04fc
#define SPCP8x5_835_PID 0x0231
static const struct usb_device_id id_table[] = {
{ USB_DEVICE(SPCP8x5_PHILIPS_VID, SPCP8x5_PHILIPS_PID) },
{ USB_DEVICE(SPCP8x5_INTERMATIC_VID, SPCP8x5_INTERMATIC_PID) },
{ USB_DEVICE(SPCP8x5_835_VID, SPCP8x5_835_PID) },
{ USB_DEVICE(SPCP8x5_008_VID, SPCP8x5_008_PID) },
{ USB_DEVICE(SPCP8x5_007_VID, SPCP8x5_007_PID),
.driver_info = SPCP825_QUIRK_NO_UART_STATUS |
SPCP825_QUIRK_NO_WORK_MODE },
{ } /* Terminating entry */
};
MODULE_DEVICE_TABLE(usb, id_table);
struct spcp8x5_usb_ctrl_arg {
u8 type;
u8 cmd;
u8 cmd_type;
u16 value;
u16 index;
u16 length;
};
/* spcp8x5 spec register define */
#define MCR_CONTROL_LINE_RTS 0x02
#define MCR_CONTROL_LINE_DTR 0x01
#define MCR_DTR 0x01
#define MCR_RTS 0x02
#define MSR_STATUS_LINE_DCD 0x80
#define MSR_STATUS_LINE_RI 0x40
#define MSR_STATUS_LINE_DSR 0x20
#define MSR_STATUS_LINE_CTS 0x10
/* vendor commands; we define these ourselves */
#define SET_DEFAULT 0x40
#define SET_DEFAULT_TYPE 0x20
#define SET_UART_FORMAT 0x40
#define SET_UART_FORMAT_TYPE 0x21
#define SET_UART_FORMAT_SIZE_5 0x00
#define SET_UART_FORMAT_SIZE_6 0x01
#define SET_UART_FORMAT_SIZE_7 0x02
#define SET_UART_FORMAT_SIZE_8 0x03
#define SET_UART_FORMAT_STOP_1 0x00
#define SET_UART_FORMAT_STOP_2 0x04
#define SET_UART_FORMAT_PAR_NONE 0x00
#define SET_UART_FORMAT_PAR_ODD 0x10
#define SET_UART_FORMAT_PAR_EVEN 0x30
#define SET_UART_FORMAT_PAR_MASK 0xD0
#define SET_UART_FORMAT_PAR_SPACE 0x90
#define GET_UART_STATUS_TYPE 0xc0
#define GET_UART_STATUS 0x22
#define GET_UART_STATUS_MSR 0x06
#define SET_UART_STATUS 0x40
#define SET_UART_STATUS_TYPE 0x23
#define SET_UART_STATUS_MCR 0x0004
#define SET_UART_STATUS_MCR_DTR 0x01
#define SET_UART_STATUS_MCR_RTS 0x02
#define SET_UART_STATUS_MCR_LOOP 0x10
#define SET_WORKING_MODE 0x40
#define SET_WORKING_MODE_TYPE 0x24
#define SET_WORKING_MODE_U2C 0x00
#define SET_WORKING_MODE_RS485 0x01
#define SET_WORKING_MODE_PDMA 0x02
#define SET_WORKING_MODE_SPP 0x03
#define SET_FLOWCTL_CHAR 0x40
#define SET_FLOWCTL_CHAR_TYPE 0x25
#define GET_VERSION 0xc0
#define GET_VERSION_TYPE 0x26
#define SET_REGISTER 0x40
#define SET_REGISTER_TYPE 0x27
#define GET_REGISTER 0xc0
#define GET_REGISTER_TYPE 0x28
#define SET_RAM 0x40
#define SET_RAM_TYPE 0x31
#define GET_RAM 0xc0
#define GET_RAM_TYPE 0x32
/* how come ??? */
#define UART_STATE 0x08
#define UART_STATE_TRANSIENT_MASK 0x75
#define UART_DCD 0x01
#define UART_DSR 0x02
#define UART_BREAK_ERROR 0x04
#define UART_RING 0x08
#define UART_FRAME_ERROR 0x10
#define UART_PARITY_ERROR 0x20
#define UART_OVERRUN_ERROR 0x40
#define UART_CTS 0x80
struct spcp8x5_private {
unsigned quirks;
spinlock_t lock;
u8 line_control;
};
static int spcp8x5_probe(struct usb_serial *serial,
const struct usb_device_id *id)
{
usb_set_serial_data(serial, (void *)id);
return 0;
}
static int spcp8x5_port_probe(struct usb_serial_port *port)
{
const struct usb_device_id *id = usb_get_serial_data(port->serial);
struct spcp8x5_private *priv;
priv = kzalloc(sizeof(*priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
spin_lock_init(&priv->lock);
priv->quirks = id->driver_info;
usb_set_serial_port_data(port, priv);
port->port.drain_delay = 256;
return 0;
}
static void spcp8x5_port_remove(struct usb_serial_port *port)
{
struct spcp8x5_private *priv;
priv = usb_get_serial_port_data(port);
kfree(priv);
}
static int spcp8x5_set_ctrl_line(struct usb_serial_port *port, u8 mcr)
{
struct spcp8x5_private *priv = usb_get_serial_port_data(port);
struct usb_device *dev = port->serial->dev;
int retval;
if (priv->quirks & SPCP825_QUIRK_NO_UART_STATUS)
return -EPERM;
retval = usb_control_msg(dev, usb_sndctrlpipe(dev, 0),
SET_UART_STATUS_TYPE, SET_UART_STATUS,
mcr, 0x04, NULL, 0, 100);
if (retval != 0) {
dev_err(&port->dev, "failed to set control lines: %d\n",
retval);
}
return retval;
}
static int spcp8x5_get_msr(struct usb_serial_port *port, u8 *status)
{
struct spcp8x5_private *priv = usb_get_serial_port_data(port);
struct usb_device *dev = port->serial->dev;
u8 *buf;
int ret;
if (priv->quirks & SPCP825_QUIRK_NO_UART_STATUS)
return -EPERM;
buf = kzalloc(1, GFP_KERNEL);
if (!buf)
return -ENOMEM;
ret = usb_control_msg(dev, usb_rcvctrlpipe(dev, 0),
GET_UART_STATUS, GET_UART_STATUS_TYPE,
0, GET_UART_STATUS_MSR, buf, 1, 100);
if (ret < 1) {
dev_err(&port->dev, "failed to get modem status: %d\n", ret);
if (ret >= 0)
ret = -EIO;
goto out;
}
dev_dbg(&port->dev, "0xc0:0x22:0:6 %d - 0x%02x\n", ret, *buf);
*status = *buf;
ret = 0;
out:
kfree(buf);
return ret;
}
static void spcp8x5_set_work_mode(struct usb_serial_port *port, u16 value,
u16 index)
{
struct spcp8x5_private *priv = usb_get_serial_port_data(port);
struct usb_device *dev = port->serial->dev;
int ret;
if (priv->quirks & SPCP825_QUIRK_NO_WORK_MODE)
return;
ret = usb_control_msg(dev, usb_sndctrlpipe(dev, 0),
SET_WORKING_MODE_TYPE, SET_WORKING_MODE,
value, index, NULL, 0, 100);
dev_dbg(&port->dev, "value = %#x , index = %#x\n", value, index);
if (ret < 0)
dev_err(&port->dev, "failed to set work mode: %d\n", ret);
}
static int spcp8x5_carrier_raised(struct usb_serial_port *port)
{
u8 msr;
int ret;
ret = spcp8x5_get_msr(port, &msr);
if (ret || msr & MSR_STATUS_LINE_DCD)
return 1;
return 0;
}
static void spcp8x5_dtr_rts(struct usb_serial_port *port, int on)
{
struct spcp8x5_private *priv = usb_get_serial_port_data(port);
unsigned long flags;
u8 control;
spin_lock_irqsave(&priv->lock, flags);
if (on)
priv->line_control = MCR_CONTROL_LINE_DTR
| MCR_CONTROL_LINE_RTS;
else
priv->line_control &= ~(MCR_CONTROL_LINE_DTR
| MCR_CONTROL_LINE_RTS);
control = priv->line_control;
spin_unlock_irqrestore(&priv->lock, flags);
spcp8x5_set_ctrl_line(port, control);
}
static void spcp8x5_init_termios(struct tty_struct *tty)
{
tty_encode_baud_rate(tty, 115200, 115200);
}
static void spcp8x5_set_termios(struct tty_struct *tty,
struct usb_serial_port *port,
const struct ktermios *old_termios)
{
struct usb_serial *serial = port->serial;
struct spcp8x5_private *priv = usb_get_serial_port_data(port);
unsigned long flags;
unsigned int cflag = tty->termios.c_cflag;
unsigned short uartdata;
unsigned char buf[2] = {0, 0};
int baud;
int i;
u8 control;
/* check that they really want us to change something */
if (old_termios && !tty_termios_hw_change(&tty->termios, old_termios))
return;
/* set DTR/RTS active */
spin_lock_irqsave(&priv->lock, flags);
control = priv->line_control;
if (old_termios && (old_termios->c_cflag & CBAUD) == B0) {
priv->line_control |= MCR_DTR;
if (!(old_termios->c_cflag & CRTSCTS))
priv->line_control |= MCR_RTS;
}
if (control != priv->line_control) {
control = priv->line_control;
spin_unlock_irqrestore(&priv->lock, flags);
spcp8x5_set_ctrl_line(port, control);
} else {
spin_unlock_irqrestore(&priv->lock, flags);
}
/* Set Baud Rate */
baud = tty_get_baud_rate(tty);
switch (baud) {
case 300: buf[0] = 0x00; break;
case 600: buf[0] = 0x01; break;
case 1200: buf[0] = 0x02; break;
case 2400: buf[0] = 0x03; break;
case 4800: buf[0] = 0x04; break;
case 9600: buf[0] = 0x05; break;
case 19200: buf[0] = 0x07; break;
case 38400: buf[0] = 0x09; break;
case 57600: buf[0] = 0x0a; break;
case 115200: buf[0] = 0x0b; break;
case 230400: buf[0] = 0x0c; break;
case 460800: buf[0] = 0x0d; break;
case 921600: buf[0] = 0x0e; break;
/* case 1200000: buf[0] = 0x0f; break; */
/* case 2400000: buf[0] = 0x10; break; */
case 3000000: buf[0] = 0x11; break;
/* case 6000000: buf[0] = 0x12; break; */
case 0:
case 1000000:
buf[0] = 0x0b; break;
default:
/* fall back to the 9600 setting so the hardware matches the message */
buf[0] = 0x05;
dev_err(&port->dev, "unsupported baudrate, using 9600\n");
}
/* Set Data Length : 00:5bit, 01:6bit, 10:7bit, 11:8bit */
switch (cflag & CSIZE) {
case CS5:
buf[1] |= SET_UART_FORMAT_SIZE_5;
break;
case CS6:
buf[1] |= SET_UART_FORMAT_SIZE_6;
break;
case CS7:
buf[1] |= SET_UART_FORMAT_SIZE_7;
break;
default:
case CS8:
buf[1] |= SET_UART_FORMAT_SIZE_8;
break;
}
/* Set Stop bit2 : 0:1bit 1:2bit */
buf[1] |= (cflag & CSTOPB) ? SET_UART_FORMAT_STOP_2 :
SET_UART_FORMAT_STOP_1;
/* Set Parity bit3-4 01:Odd 11:Even */
if (cflag & PARENB) {
buf[1] |= (cflag & PARODD) ?
SET_UART_FORMAT_PAR_ODD : SET_UART_FORMAT_PAR_EVEN;
} else {
buf[1] |= SET_UART_FORMAT_PAR_NONE;
}
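/*
* Editor's illustration (assumption based on the mappings above): for
* 115200 8N1 this gives buf[0] = 0x0b and buf[1] = 0x03, so the 16-bit
* value sent below is 0x030b.
*/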
uartdata = buf[0] | buf[1]<<8;
i = usb_control_msg(serial->dev, usb_sndctrlpipe(serial->dev, 0),
SET_UART_FORMAT_TYPE, SET_UART_FORMAT,
uartdata, 0, NULL, 0, 100);
if (i < 0)
dev_err(&port->dev, "Set UART format %#x failed (error = %d)\n",
uartdata, i);
dev_dbg(&port->dev, "0x21:0x40:0:0 %d\n", i);
if (cflag & CRTSCTS) {
/* enable hardware flow control */
spcp8x5_set_work_mode(port, 0x000a, SET_WORKING_MODE_U2C);
}
}
static int spcp8x5_open(struct tty_struct *tty, struct usb_serial_port *port)
{
struct usb_serial *serial = port->serial;
struct spcp8x5_private *priv = usb_get_serial_port_data(port);
int ret;
usb_clear_halt(serial->dev, port->write_urb->pipe);
usb_clear_halt(serial->dev, port->read_urb->pipe);
ret = usb_control_msg(serial->dev, usb_sndctrlpipe(serial->dev, 0),
0x09, 0x00,
0x01, 0x00, NULL, 0x00, 100);
if (ret)
return ret;
spcp8x5_set_ctrl_line(port, priv->line_control);
if (tty)
spcp8x5_set_termios(tty, port, NULL);
return usb_serial_generic_open(tty, port);
}
static int spcp8x5_tiocmset(struct tty_struct *tty,
unsigned int set, unsigned int clear)
{
struct usb_serial_port *port = tty->driver_data;
struct spcp8x5_private *priv = usb_get_serial_port_data(port);
unsigned long flags;
u8 control;
spin_lock_irqsave(&priv->lock, flags);
if (set & TIOCM_RTS)
priv->line_control |= MCR_RTS;
if (set & TIOCM_DTR)
priv->line_control |= MCR_DTR;
if (clear & TIOCM_RTS)
priv->line_control &= ~MCR_RTS;
if (clear & TIOCM_DTR)
priv->line_control &= ~MCR_DTR;
control = priv->line_control;
spin_unlock_irqrestore(&priv->lock, flags);
return spcp8x5_set_ctrl_line(port, control);
}
static int spcp8x5_tiocmget(struct tty_struct *tty)
{
struct usb_serial_port *port = tty->driver_data;
struct spcp8x5_private *priv = usb_get_serial_port_data(port);
unsigned long flags;
unsigned int mcr;
u8 status;
unsigned int result;
result = spcp8x5_get_msr(port, &status);
if (result)
return result;
spin_lock_irqsave(&priv->lock, flags);
mcr = priv->line_control;
spin_unlock_irqrestore(&priv->lock, flags);
result = ((mcr & MCR_DTR) ? TIOCM_DTR : 0)
| ((mcr & MCR_RTS) ? TIOCM_RTS : 0)
| ((status & MSR_STATUS_LINE_CTS) ? TIOCM_CTS : 0)
| ((status & MSR_STATUS_LINE_DSR) ? TIOCM_DSR : 0)
| ((status & MSR_STATUS_LINE_RI) ? TIOCM_RI : 0)
| ((status & MSR_STATUS_LINE_DCD) ? TIOCM_CD : 0);
return result;
}
static struct usb_serial_driver spcp8x5_device = {
.driver = {
.owner = THIS_MODULE,
.name = "SPCP8x5",
},
.id_table = id_table,
.num_ports = 1,
.num_bulk_in = 1,
.num_bulk_out = 1,
.open = spcp8x5_open,
.dtr_rts = spcp8x5_dtr_rts,
.carrier_raised = spcp8x5_carrier_raised,
.set_termios = spcp8x5_set_termios,
.init_termios = spcp8x5_init_termios,
.tiocmget = spcp8x5_tiocmget,
.tiocmset = spcp8x5_tiocmset,
.probe = spcp8x5_probe,
.port_probe = spcp8x5_port_probe,
.port_remove = spcp8x5_port_remove,
};
static struct usb_serial_driver * const serial_drivers[] = {
&spcp8x5_device, NULL
};
module_usb_serial_driver(serial_drivers, id_table);
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL");
| linux-master | drivers/usb/serial/spcp8x5.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* Belkin USB Serial Adapter Driver
*
* Copyright (C) 2000 William Greathouse ([email protected])
* Copyright (C) 2000-2001 Greg Kroah-Hartman ([email protected])
* Copyright (C) 2010 Johan Hovold ([email protected])
*
* This program is largely derived from work by the linux-usb group
* and associated source files. Please see the usb/serial files for
* individual credits and copyrights.
*
* See Documentation/usb/usb-serial.rst for more information on using this
* driver
*
* TODO:
* -- Add true modem control line query capability. Currently we track the
* states reported by the interrupt and the states we request.
* -- Add support for flush commands
*/
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/tty.h>
#include <linux/tty_driver.h>
#include <linux/tty_flip.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/uaccess.h>
#include <linux/usb.h>
#include <linux/usb/serial.h>
#include "belkin_sa.h"
#define DRIVER_AUTHOR "William Greathouse <[email protected]>"
#define DRIVER_DESC "USB Belkin Serial converter driver"
/* function prototypes for a Belkin USB Serial Adapter F5U103 */
static int belkin_sa_port_probe(struct usb_serial_port *port);
static void belkin_sa_port_remove(struct usb_serial_port *port);
static int belkin_sa_open(struct tty_struct *tty,
struct usb_serial_port *port);
static void belkin_sa_close(struct usb_serial_port *port);
static void belkin_sa_read_int_callback(struct urb *urb);
static void belkin_sa_process_read_urb(struct urb *urb);
static void belkin_sa_set_termios(struct tty_struct *tty,
struct usb_serial_port *port,
const struct ktermios *old_termios);
static int belkin_sa_break_ctl(struct tty_struct *tty, int break_state);
static int belkin_sa_tiocmget(struct tty_struct *tty);
static int belkin_sa_tiocmset(struct tty_struct *tty,
unsigned int set, unsigned int clear);
static const struct usb_device_id id_table[] = {
{ USB_DEVICE(BELKIN_SA_VID, BELKIN_SA_PID) },
{ USB_DEVICE(BELKIN_OLD_VID, BELKIN_OLD_PID) },
{ USB_DEVICE(PERACOM_VID, PERACOM_PID) },
{ USB_DEVICE(GOHUBS_VID, GOHUBS_PID) },
{ USB_DEVICE(GOHUBS_VID, HANDYLINK_PID) },
{ USB_DEVICE(BELKIN_DOCKSTATION_VID, BELKIN_DOCKSTATION_PID) },
{ } /* Terminating entry */
};
MODULE_DEVICE_TABLE(usb, id_table);
/* All of the device info needed for the serial converters */
static struct usb_serial_driver belkin_device = {
.driver = {
.owner = THIS_MODULE,
.name = "belkin",
},
.description = "Belkin / Peracom / GoHubs USB Serial Adapter",
.id_table = id_table,
.num_ports = 1,
.open = belkin_sa_open,
.close = belkin_sa_close,
.read_int_callback = belkin_sa_read_int_callback,
.process_read_urb = belkin_sa_process_read_urb,
.set_termios = belkin_sa_set_termios,
.break_ctl = belkin_sa_break_ctl,
.tiocmget = belkin_sa_tiocmget,
.tiocmset = belkin_sa_tiocmset,
.port_probe = belkin_sa_port_probe,
.port_remove = belkin_sa_port_remove,
};
static struct usb_serial_driver * const serial_drivers[] = {
&belkin_device, NULL
};
struct belkin_sa_private {
spinlock_t lock;
unsigned long control_state;
unsigned char last_lsr;
unsigned char last_msr;
int bad_flow_control;
};
/*
* ***************************************************************************
* Belkin USB Serial Adapter F5U103 specific driver functions
* ***************************************************************************
*/
#define WDR_TIMEOUT 5000 /* default urb timeout */
/* assumes that struct usb_serial *serial is available */
#define BSA_USB_CMD(c, v) usb_control_msg(serial->dev, usb_sndctrlpipe(serial->dev, 0), \
(c), BELKIN_SA_SET_REQUEST_TYPE, \
(v), 0, NULL, 0, WDR_TIMEOUT)
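/*
* Editor's note (illustration only): a call such as
* BSA_USB_CMD(BELKIN_SA_SET_DTR_REQUEST, 1) expands to a usb_control_msg()
* on endpoint 0 with that request code, value 1, no data stage and a
* WDR_TIMEOUT timeout; this is how the set_termios and break_ctl paths
* below drive the individual control lines.
*/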
static int belkin_sa_port_probe(struct usb_serial_port *port)
{
struct usb_device *dev = port->serial->dev;
struct belkin_sa_private *priv;
priv = kmalloc(sizeof(struct belkin_sa_private), GFP_KERNEL);
if (!priv)
return -ENOMEM;
spin_lock_init(&priv->lock);
priv->control_state = 0;
priv->last_lsr = 0;
priv->last_msr = 0;
/* see comments at top of file */
priv->bad_flow_control =
(le16_to_cpu(dev->descriptor.bcdDevice) <= 0x0206) ? 1 : 0;
dev_info(&dev->dev, "bcdDevice: %04x, bfc: %d\n",
le16_to_cpu(dev->descriptor.bcdDevice),
priv->bad_flow_control);
usb_set_serial_port_data(port, priv);
return 0;
}
static void belkin_sa_port_remove(struct usb_serial_port *port)
{
struct belkin_sa_private *priv;
priv = usb_get_serial_port_data(port);
kfree(priv);
}
static int belkin_sa_open(struct tty_struct *tty,
struct usb_serial_port *port)
{
int retval;
retval = usb_submit_urb(port->interrupt_in_urb, GFP_KERNEL);
if (retval) {
dev_err(&port->dev, "usb_submit_urb(read int) failed\n");
return retval;
}
retval = usb_serial_generic_open(tty, port);
if (retval)
usb_kill_urb(port->interrupt_in_urb);
return retval;
}
static void belkin_sa_close(struct usb_serial_port *port)
{
usb_serial_generic_close(port);
usb_kill_urb(port->interrupt_in_urb);
}
static void belkin_sa_read_int_callback(struct urb *urb)
{
struct usb_serial_port *port = urb->context;
struct belkin_sa_private *priv;
unsigned char *data = urb->transfer_buffer;
int retval;
int status = urb->status;
unsigned long flags;
switch (status) {
case 0:
/* success */
break;
case -ECONNRESET:
case -ENOENT:
case -ESHUTDOWN:
/* this urb is terminated, clean up */
dev_dbg(&port->dev, "%s - urb shutting down with status: %d\n",
__func__, status);
return;
default:
dev_dbg(&port->dev, "%s - nonzero urb status received: %d\n",
__func__, status);
goto exit;
}
usb_serial_debug_data(&port->dev, __func__, urb->actual_length, data);
/* Handle known interrupt data */
/* ignore data[0] and data[1] */
priv = usb_get_serial_port_data(port);
spin_lock_irqsave(&priv->lock, flags);
priv->last_msr = data[BELKIN_SA_MSR_INDEX];
/* Record Control Line states */
if (priv->last_msr & BELKIN_SA_MSR_DSR)
priv->control_state |= TIOCM_DSR;
else
priv->control_state &= ~TIOCM_DSR;
if (priv->last_msr & BELKIN_SA_MSR_CTS)
priv->control_state |= TIOCM_CTS;
else
priv->control_state &= ~TIOCM_CTS;
if (priv->last_msr & BELKIN_SA_MSR_RI)
priv->control_state |= TIOCM_RI;
else
priv->control_state &= ~TIOCM_RI;
if (priv->last_msr & BELKIN_SA_MSR_CD)
priv->control_state |= TIOCM_CD;
else
priv->control_state &= ~TIOCM_CD;
priv->last_lsr = data[BELKIN_SA_LSR_INDEX];
spin_unlock_irqrestore(&priv->lock, flags);
exit:
retval = usb_submit_urb(urb, GFP_ATOMIC);
if (retval)
dev_err(&port->dev, "%s - usb_submit_urb failed with "
"result %d\n", __func__, retval);
}
static void belkin_sa_process_read_urb(struct urb *urb)
{
struct usb_serial_port *port = urb->context;
struct belkin_sa_private *priv = usb_get_serial_port_data(port);
unsigned char *data = urb->transfer_buffer;
unsigned long flags;
unsigned char status;
char tty_flag;
/* Update line status */
tty_flag = TTY_NORMAL;
spin_lock_irqsave(&priv->lock, flags);
status = priv->last_lsr;
priv->last_lsr &= ~BELKIN_SA_LSR_ERR;
spin_unlock_irqrestore(&priv->lock, flags);
if (!urb->actual_length)
return;
if (status & BELKIN_SA_LSR_ERR) {
/* Break takes precedence over parity, which takes precedence
* over framing errors. */
if (status & BELKIN_SA_LSR_BI)
tty_flag = TTY_BREAK;
else if (status & BELKIN_SA_LSR_PE)
tty_flag = TTY_PARITY;
else if (status & BELKIN_SA_LSR_FE)
tty_flag = TTY_FRAME;
dev_dbg(&port->dev, "tty_flag = %d\n", tty_flag);
/* Overrun is special, not associated with a char. */
if (status & BELKIN_SA_LSR_OE)
tty_insert_flip_char(&port->port, 0, TTY_OVERRUN);
}
tty_insert_flip_string_fixed_flag(&port->port, data, tty_flag,
urb->actual_length);
tty_flip_buffer_push(&port->port);
}
static void belkin_sa_set_termios(struct tty_struct *tty,
struct usb_serial_port *port,
const struct ktermios *old_termios)
{
struct usb_serial *serial = port->serial;
struct belkin_sa_private *priv = usb_get_serial_port_data(port);
unsigned int iflag;
unsigned int cflag;
unsigned int old_iflag = 0;
unsigned int old_cflag = 0;
__u16 urb_value = 0; /* Will hold the new flags */
unsigned long flags;
unsigned long control_state;
int bad_flow_control;
speed_t baud;
struct ktermios *termios = &tty->termios;
iflag = termios->c_iflag;
cflag = termios->c_cflag;
termios->c_cflag &= ~CMSPAR;
/* get a local copy of the current port settings */
spin_lock_irqsave(&priv->lock, flags);
control_state = priv->control_state;
bad_flow_control = priv->bad_flow_control;
spin_unlock_irqrestore(&priv->lock, flags);
old_iflag = old_termios->c_iflag;
old_cflag = old_termios->c_cflag;
/* Set the baud rate */
if ((cflag & CBAUD) != (old_cflag & CBAUD)) {
/* reassert DTR and (maybe) RTS on transition from B0 */
if ((old_cflag & CBAUD) == B0) {
control_state |= (TIOCM_DTR|TIOCM_RTS);
if (BSA_USB_CMD(BELKIN_SA_SET_DTR_REQUEST, 1) < 0)
dev_err(&port->dev, "Set DTR error\n");
/* don't set RTS if using hardware flow control */
if (!(old_cflag & CRTSCTS))
				if (BSA_USB_CMD(BELKIN_SA_SET_RTS_REQUEST, 1) < 0)
					dev_err(&port->dev, "Set RTS error\n");
}
}
baud = tty_get_baud_rate(tty);
if (baud) {
urb_value = BELKIN_SA_BAUD(baud);
/* Clip to maximum speed */
if (urb_value == 0)
urb_value = 1;
/* Turn it back into a resulting real baud rate */
baud = BELKIN_SA_BAUD(urb_value);
/* Report the actual baud rate back to the caller */
tty_encode_baud_rate(tty, baud, baud);
if (BSA_USB_CMD(BELKIN_SA_SET_BAUDRATE_REQUEST, urb_value) < 0)
dev_err(&port->dev, "Set baudrate error\n");
} else {
/* Disable flow control */
if (BSA_USB_CMD(BELKIN_SA_SET_FLOW_CTRL_REQUEST,
BELKIN_SA_FLOW_NONE) < 0)
dev_err(&port->dev, "Disable flowcontrol error\n");
/* Drop RTS and DTR */
control_state &= ~(TIOCM_DTR | TIOCM_RTS);
if (BSA_USB_CMD(BELKIN_SA_SET_DTR_REQUEST, 0) < 0)
dev_err(&port->dev, "DTR LOW error\n");
if (BSA_USB_CMD(BELKIN_SA_SET_RTS_REQUEST, 0) < 0)
dev_err(&port->dev, "RTS LOW error\n");
}
/* set the parity */
if ((cflag ^ old_cflag) & (PARENB | PARODD)) {
if (cflag & PARENB)
urb_value = (cflag & PARODD) ? BELKIN_SA_PARITY_ODD
: BELKIN_SA_PARITY_EVEN;
else
urb_value = BELKIN_SA_PARITY_NONE;
if (BSA_USB_CMD(BELKIN_SA_SET_PARITY_REQUEST, urb_value) < 0)
dev_err(&port->dev, "Set parity error\n");
}
/* set the number of data bits */
if ((cflag & CSIZE) != (old_cflag & CSIZE)) {
urb_value = BELKIN_SA_DATA_BITS(tty_get_char_size(cflag));
if (BSA_USB_CMD(BELKIN_SA_SET_DATA_BITS_REQUEST, urb_value) < 0)
dev_err(&port->dev, "Set data bits error\n");
}
/* set the number of stop bits */
if ((cflag & CSTOPB) != (old_cflag & CSTOPB)) {
urb_value = (cflag & CSTOPB) ? BELKIN_SA_STOP_BITS(2)
: BELKIN_SA_STOP_BITS(1);
if (BSA_USB_CMD(BELKIN_SA_SET_STOP_BITS_REQUEST,
urb_value) < 0)
dev_err(&port->dev, "Set stop bits error\n");
}
/* Set flow control */
if (((iflag ^ old_iflag) & (IXOFF | IXON)) ||
((cflag ^ old_cflag) & CRTSCTS)) {
urb_value = 0;
if ((iflag & IXOFF) || (iflag & IXON))
urb_value |= (BELKIN_SA_FLOW_OXON | BELKIN_SA_FLOW_IXON);
else
urb_value &= ~(BELKIN_SA_FLOW_OXON | BELKIN_SA_FLOW_IXON);
if (cflag & CRTSCTS)
urb_value |= (BELKIN_SA_FLOW_OCTS | BELKIN_SA_FLOW_IRTS);
else
urb_value &= ~(BELKIN_SA_FLOW_OCTS | BELKIN_SA_FLOW_IRTS);
if (bad_flow_control)
urb_value &= ~(BELKIN_SA_FLOW_IRTS);
if (BSA_USB_CMD(BELKIN_SA_SET_FLOW_CTRL_REQUEST, urb_value) < 0)
dev_err(&port->dev, "Set flow control error\n");
}
/* save off the modified port settings */
spin_lock_irqsave(&priv->lock, flags);
priv->control_state = control_state;
spin_unlock_irqrestore(&priv->lock, flags);
}
static int belkin_sa_break_ctl(struct tty_struct *tty, int break_state)
{
struct usb_serial_port *port = tty->driver_data;
struct usb_serial *serial = port->serial;
int ret;
ret = BSA_USB_CMD(BELKIN_SA_SET_BREAK_REQUEST, break_state ? 1 : 0);
if (ret < 0) {
dev_err(&port->dev, "Set break_ctl %d\n", break_state);
return ret;
}
return 0;
}
static int belkin_sa_tiocmget(struct tty_struct *tty)
{
struct usb_serial_port *port = tty->driver_data;
struct belkin_sa_private *priv = usb_get_serial_port_data(port);
unsigned long control_state;
unsigned long flags;
spin_lock_irqsave(&priv->lock, flags);
control_state = priv->control_state;
spin_unlock_irqrestore(&priv->lock, flags);
return control_state;
}
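/*
 * Update the cached modem-control state and send the requested RTS and
 * DTR values to the device with one control request each.
 */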
static int belkin_sa_tiocmset(struct tty_struct *tty,
unsigned int set, unsigned int clear)
{
struct usb_serial_port *port = tty->driver_data;
struct usb_serial *serial = port->serial;
struct belkin_sa_private *priv = usb_get_serial_port_data(port);
unsigned long control_state;
unsigned long flags;
int retval;
int rts = 0;
int dtr = 0;
spin_lock_irqsave(&priv->lock, flags);
control_state = priv->control_state;
if (set & TIOCM_RTS) {
control_state |= TIOCM_RTS;
rts = 1;
}
if (set & TIOCM_DTR) {
control_state |= TIOCM_DTR;
dtr = 1;
}
if (clear & TIOCM_RTS) {
control_state &= ~TIOCM_RTS;
rts = 0;
}
if (clear & TIOCM_DTR) {
control_state &= ~TIOCM_DTR;
dtr = 0;
}
priv->control_state = control_state;
spin_unlock_irqrestore(&priv->lock, flags);
retval = BSA_USB_CMD(BELKIN_SA_SET_RTS_REQUEST, rts);
if (retval < 0) {
dev_err(&port->dev, "Set RTS error %d\n", retval);
goto exit;
}
retval = BSA_USB_CMD(BELKIN_SA_SET_DTR_REQUEST, dtr);
if (retval < 0) {
dev_err(&port->dev, "Set DTR error %d\n", retval);
goto exit;
}
exit:
return retval;
}
module_usb_serial_driver(serial_drivers, id_table);
MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL");
| linux-master | drivers/usb/serial/belkin_sa.c |
// SPDX-License-Identifier: GPL-2.0
/*
* USB Serial Converter driver
*
* Copyright (C) 2009 - 2013 Johan Hovold ([email protected])
* Copyright (C) 1999 - 2012 Greg Kroah-Hartman ([email protected])
* Copyright (C) 2000 Peter Berger ([email protected])
* Copyright (C) 2000 Al Borchers ([email protected])
*
* This driver was originally based on the ACM driver by Armin Fuerst (which was
* based on a driver by Brad Keryan)
*
* See Documentation/usb/usb-serial.rst for more information on using this
* driver
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/tty.h>
#include <linux/tty_driver.h>
#include <linux/tty_flip.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/seq_file.h>
#include <linux/spinlock.h>
#include <linux/mutex.h>
#include <linux/list.h>
#include <linux/uaccess.h>
#include <linux/serial.h>
#include <linux/usb.h>
#include <linux/usb/serial.h>
#include <linux/kfifo.h>
#include <linux/idr.h>
#define DRIVER_AUTHOR "Greg Kroah-Hartman <[email protected]>"
#define DRIVER_DESC "USB Serial Driver core"
#define USB_SERIAL_TTY_MAJOR 188
#define USB_SERIAL_TTY_MINORS 512 /* should be enough for a while */
/*
 * There is no MODULE_DEVICE_TABLE for usbserial.c. Instead the
 * MODULE_DEVICE_TABLE declarations in each serial driver cause the
 * "hotplug" program to pull in whatever module is necessary via modprobe,
 * and modprobe will load usbserial because the serial drivers depend on
 * it.
 */
static DEFINE_IDR(serial_minors);
static DEFINE_MUTEX(table_lock);
static LIST_HEAD(usb_serial_driver_list);
/*
* Look up the serial port structure. If it is found and it hasn't been
* disconnected, return with the parent usb_serial structure's disc_mutex held
* and its refcount incremented. Otherwise return NULL.
*/
struct usb_serial_port *usb_serial_port_get_by_minor(unsigned minor)
{
struct usb_serial *serial;
struct usb_serial_port *port;
mutex_lock(&table_lock);
port = idr_find(&serial_minors, minor);
if (!port)
goto exit;
serial = port->serial;
mutex_lock(&serial->disc_mutex);
if (serial->disconnected) {
mutex_unlock(&serial->disc_mutex);
port = NULL;
} else {
kref_get(&serial->kref);
}
exit:
mutex_unlock(&table_lock);
return port;
}
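/*
 * Reserve a tty minor number for each port of @serial from the global
 * serial_minors IDR. On failure the minors allocated so far are released
 * and the idr_alloc() error is returned.
 */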
static int allocate_minors(struct usb_serial *serial, int num_ports)
{
struct usb_serial_port *port;
unsigned int i, j;
int minor;
dev_dbg(&serial->interface->dev, "%s %d\n", __func__, num_ports);
mutex_lock(&table_lock);
for (i = 0; i < num_ports; ++i) {
port = serial->port[i];
minor = idr_alloc(&serial_minors, port, 0,
USB_SERIAL_TTY_MINORS, GFP_KERNEL);
if (minor < 0)
goto error;
port->minor = minor;
port->port_number = i;
}
serial->minors_reserved = 1;
mutex_unlock(&table_lock);
return 0;
error:
/* unwind the already allocated minors */
for (j = 0; j < i; ++j)
idr_remove(&serial_minors, serial->port[j]->minor);
mutex_unlock(&table_lock);
return minor;
}
static void release_minors(struct usb_serial *serial)
{
int i;
mutex_lock(&table_lock);
for (i = 0; i < serial->num_ports; ++i)
idr_remove(&serial_minors, serial->port[i]->minor);
mutex_unlock(&table_lock);
serial->minors_reserved = 0;
}
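/**
 * usb_serial_claim_interface - claim a sibling interface
 * @serial: usb-serial device
 * @intf: interface to claim
 *
 * Claim a second (sibling) interface for @serial so that both interfaces
 * end up bound to the same usb-serial device.
 *
 * Return: 0 on success or a negative error code (-EBUSY if a sibling has
 * already been claimed).
 */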
int usb_serial_claim_interface(struct usb_serial *serial, struct usb_interface *intf)
{
struct usb_driver *driver = serial->type->usb_driver;
int ret;
if (serial->sibling)
return -EBUSY;
ret = usb_driver_claim_interface(driver, intf, serial);
if (ret) {
dev_err(&serial->interface->dev,
"failed to claim sibling interface: %d\n", ret);
return ret;
}
serial->sibling = intf;
return 0;
}
EXPORT_SYMBOL_GPL(usb_serial_claim_interface);
static void release_sibling(struct usb_serial *serial, struct usb_interface *intf)
{
struct usb_driver *driver = serial->type->usb_driver;
struct usb_interface *sibling;
if (!serial->sibling)
return;
if (intf == serial->sibling)
sibling = serial->interface;
else
sibling = serial->sibling;
usb_set_intfdata(sibling, NULL);
usb_driver_release_interface(driver, sibling);
}
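/*
 * Final kref release callback: return any reserved minor numbers, call
 * the subdriver's release() if the device was attached, drop the port
 * device references and free the usb_serial structure.
 */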
static void destroy_serial(struct kref *kref)
{
struct usb_serial *serial;
struct usb_serial_port *port;
int i;
serial = to_usb_serial(kref);
/* return the minor range that this device had */
if (serial->minors_reserved)
release_minors(serial);
if (serial->attached && serial->type->release)
serial->type->release(serial);
/* Now that nothing is using the ports, they can be freed */
for (i = 0; i < serial->num_port_pointers; ++i) {
port = serial->port[i];
if (port) {
port->serial = NULL;
put_device(&port->dev);
}
}
usb_put_intf(serial->interface);
usb_put_dev(serial->dev);
kfree(serial);
}
void usb_serial_put(struct usb_serial *serial)
{
kref_put(&serial->kref, destroy_serial);
}
/*****************************************************************************
* Driver tty interface functions
*****************************************************************************/
/**
* serial_install - install tty
* @driver: the driver (USB in our case)
* @tty: the tty being created
*
* Initialise the termios structure for this tty. We use the default
* USB serial settings but permit them to be overridden by
* serial->type->init_termios on first open.
*
* This is the first place a new tty gets used. Hence this is where we
* acquire references to the usb_serial structure and the driver module,
* where we store a pointer to the port. All these actions are reversed
* in serial_cleanup().
*/
static int serial_install(struct tty_driver *driver, struct tty_struct *tty)
{
int idx = tty->index;
struct usb_serial *serial;
struct usb_serial_port *port;
bool init_termios;
int retval = -ENODEV;
port = usb_serial_port_get_by_minor(idx);
if (!port)
return retval;
serial = port->serial;
if (!try_module_get(serial->type->driver.owner))
goto err_put_serial;
init_termios = (driver->termios[idx] == NULL);
retval = tty_standard_install(driver, tty);
if (retval)
goto err_put_module;
mutex_unlock(&serial->disc_mutex);
/* allow the driver to update the initial settings */
if (init_termios && serial->type->init_termios)
serial->type->init_termios(tty);
tty->driver_data = port;
return retval;
err_put_module:
module_put(serial->type->driver.owner);
err_put_serial:
usb_serial_put(serial);
mutex_unlock(&serial->disc_mutex);
return retval;
}
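/*
 * tty-port activate callback: bail out if the device has been
 * disconnected, take an autopm reference on the interface and call the
 * subdriver's open() routine.
 */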
static int serial_port_activate(struct tty_port *tport, struct tty_struct *tty)
{
struct usb_serial_port *port =
container_of(tport, struct usb_serial_port, port);
struct usb_serial *serial = port->serial;
int retval;
mutex_lock(&serial->disc_mutex);
if (serial->disconnected) {
retval = -ENODEV;
goto out_unlock;
}
retval = usb_autopm_get_interface(serial->interface);
if (retval)
goto out_unlock;
retval = port->serial->type->open(tty, port);
if (retval)
usb_autopm_put_interface(serial->interface);
out_unlock:
mutex_unlock(&serial->disc_mutex);
if (retval < 0)
retval = usb_translate_errors(retval);
return retval;
}
static int serial_open(struct tty_struct *tty, struct file *filp)
{
struct usb_serial_port *port = tty->driver_data;
dev_dbg(&port->dev, "%s\n", __func__);
return tty_port_open(&port->port, tty, filp);
}
/**
* serial_port_shutdown - shut down hardware
* @tport: tty port to shut down
*
* Shut down a USB serial port. Serialized against activate by the
* tport mutex and kept to matching open/close pairs
* of calls by the tty-port initialized flag.
*
* Not called if tty is console.
*/
static void serial_port_shutdown(struct tty_port *tport)
{
struct usb_serial_port *port =
container_of(tport, struct usb_serial_port, port);
struct usb_serial_driver *drv = port->serial->type;
if (drv->close)
drv->close(port);
usb_autopm_put_interface(port->serial->interface);
}
static void serial_hangup(struct tty_struct *tty)
{
struct usb_serial_port *port = tty->driver_data;
dev_dbg(&port->dev, "%s\n", __func__);
tty_port_hangup(&port->port);
}
static void serial_close(struct tty_struct *tty, struct file *filp)
{
struct usb_serial_port *port = tty->driver_data;
dev_dbg(&port->dev, "%s\n", __func__);
tty_port_close(&port->port, tty, filp);
}
/**
* serial_cleanup - free resources post close/hangup
* @tty: tty to clean up
*
* Do the resource freeing and refcount dropping for the port.
* Avoid freeing the console.
*
* Called asynchronously after the last tty kref is dropped.
*/
static void serial_cleanup(struct tty_struct *tty)
{
struct usb_serial_port *port = tty->driver_data;
struct usb_serial *serial;
struct module *owner;
dev_dbg(&port->dev, "%s\n", __func__);
/* The console is magical. Do not hang up the console hardware
* or there will be tears.
*/
if (port->port.console)
return;
tty->driver_data = NULL;
serial = port->serial;
owner = serial->type->driver.owner;
usb_serial_put(serial);
module_put(owner);
}
static ssize_t serial_write(struct tty_struct *tty, const u8 *buf, size_t count)
{
struct usb_serial_port *port = tty->driver_data;
int retval = -ENODEV;
if (port->serial->dev->state == USB_STATE_NOTATTACHED)
goto exit;
dev_dbg(&port->dev, "%s - %zu byte(s)\n", __func__, count);
retval = port->serial->type->write(tty, port, buf, count);
if (retval < 0)
retval = usb_translate_errors(retval);
exit:
return retval;
}
static unsigned int serial_write_room(struct tty_struct *tty)
{
struct usb_serial_port *port = tty->driver_data;
dev_dbg(&port->dev, "%s\n", __func__);
return port->serial->type->write_room(tty);
}
static unsigned int serial_chars_in_buffer(struct tty_struct *tty)
{
struct usb_serial_port *port = tty->driver_data;
struct usb_serial *serial = port->serial;
dev_dbg(&port->dev, "%s\n", __func__);
if (serial->disconnected)
return 0;
return serial->type->chars_in_buffer(tty);
}
static void serial_wait_until_sent(struct tty_struct *tty, int timeout)
{
struct usb_serial_port *port = tty->driver_data;
struct usb_serial *serial = port->serial;
dev_dbg(&port->dev, "%s\n", __func__);
if (!port->serial->type->wait_until_sent)
return;
mutex_lock(&serial->disc_mutex);
if (!serial->disconnected)
port->serial->type->wait_until_sent(tty, timeout);
mutex_unlock(&serial->disc_mutex);
}
static void serial_throttle(struct tty_struct *tty)
{
struct usb_serial_port *port = tty->driver_data;
dev_dbg(&port->dev, "%s\n", __func__);
if (port->serial->type->throttle)
port->serial->type->throttle(tty);
}
static void serial_unthrottle(struct tty_struct *tty)
{
struct usb_serial_port *port = tty->driver_data;
dev_dbg(&port->dev, "%s\n", __func__);
if (port->serial->type->unthrottle)
port->serial->type->unthrottle(tty);
}
static int serial_get_serial(struct tty_struct *tty, struct serial_struct *ss)
{
struct usb_serial_port *port = tty->driver_data;
struct tty_port *tport = &port->port;
unsigned int close_delay, closing_wait;
mutex_lock(&tport->mutex);
close_delay = jiffies_to_msecs(tport->close_delay) / 10;
closing_wait = tport->closing_wait;
if (closing_wait != ASYNC_CLOSING_WAIT_NONE)
closing_wait = jiffies_to_msecs(closing_wait) / 10;
ss->line = port->minor;
ss->close_delay = close_delay;
ss->closing_wait = closing_wait;
if (port->serial->type->get_serial)
port->serial->type->get_serial(tty, ss);
mutex_unlock(&tport->mutex);
return 0;
}
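/*
 * TIOCSSERIAL handler: non-privileged callers may not change close_delay
 * or closing_wait; the subdriver's set_serial() is then given a chance to
 * apply or reject the remaining settings.
 */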
static int serial_set_serial(struct tty_struct *tty, struct serial_struct *ss)
{
struct usb_serial_port *port = tty->driver_data;
struct tty_port *tport = &port->port;
unsigned int close_delay, closing_wait;
int ret = 0;
close_delay = msecs_to_jiffies(ss->close_delay * 10);
closing_wait = ss->closing_wait;
if (closing_wait != ASYNC_CLOSING_WAIT_NONE)
closing_wait = msecs_to_jiffies(closing_wait * 10);
mutex_lock(&tport->mutex);
if (!capable(CAP_SYS_ADMIN)) {
if (close_delay != tport->close_delay ||
closing_wait != tport->closing_wait) {
ret = -EPERM;
goto out_unlock;
}
}
if (port->serial->type->set_serial) {
ret = port->serial->type->set_serial(tty, ss);
if (ret)
goto out_unlock;
}
tport->close_delay = close_delay;
tport->closing_wait = closing_wait;
out_unlock:
mutex_unlock(&tport->mutex);
return ret;
}
static int serial_ioctl(struct tty_struct *tty,
unsigned int cmd, unsigned long arg)
{
struct usb_serial_port *port = tty->driver_data;
int retval = -ENOIOCTLCMD;
dev_dbg(&port->dev, "%s - cmd 0x%04x\n", __func__, cmd);
switch (cmd) {
case TIOCMIWAIT:
if (port->serial->type->tiocmiwait)
retval = port->serial->type->tiocmiwait(tty, arg);
break;
default:
if (port->serial->type->ioctl)
retval = port->serial->type->ioctl(tty, cmd, arg);
}
return retval;
}
static void serial_set_termios(struct tty_struct *tty,
const struct ktermios *old)
{
struct usb_serial_port *port = tty->driver_data;
dev_dbg(&port->dev, "%s\n", __func__);
if (port->serial->type->set_termios)
port->serial->type->set_termios(tty, port, old);
else
tty_termios_copy_hw(&tty->termios, old);
}
static int serial_break(struct tty_struct *tty, int break_state)
{
struct usb_serial_port *port = tty->driver_data;
dev_dbg(&port->dev, "%s\n", __func__);
if (port->serial->type->break_ctl)
return port->serial->type->break_ctl(tty, break_state);
return -ENOTTY;
}
static int serial_proc_show(struct seq_file *m, void *v)
{
struct usb_serial *serial;
struct usb_serial_port *port;
int i;
char tmp[40];
seq_puts(m, "usbserinfo:1.0 driver:2.0\n");
for (i = 0; i < USB_SERIAL_TTY_MINORS; ++i) {
port = usb_serial_port_get_by_minor(i);
if (port == NULL)
continue;
serial = port->serial;
seq_printf(m, "%d:", i);
if (serial->type->driver.owner)
seq_printf(m, " module:%s",
module_name(serial->type->driver.owner));
seq_printf(m, " name:\"%s\"",
serial->type->description);
seq_printf(m, " vendor:%04x product:%04x",
le16_to_cpu(serial->dev->descriptor.idVendor),
le16_to_cpu(serial->dev->descriptor.idProduct));
seq_printf(m, " num_ports:%d", serial->num_ports);
seq_printf(m, " port:%d", port->port_number);
usb_make_path(serial->dev, tmp, sizeof(tmp));
seq_printf(m, " path:%s", tmp);
seq_putc(m, '\n');
usb_serial_put(serial);
mutex_unlock(&serial->disc_mutex);
}
return 0;
}
static int serial_tiocmget(struct tty_struct *tty)
{
struct usb_serial_port *port = tty->driver_data;
dev_dbg(&port->dev, "%s\n", __func__);
if (port->serial->type->tiocmget)
return port->serial->type->tiocmget(tty);
return -ENOTTY;
}
static int serial_tiocmset(struct tty_struct *tty,
unsigned int set, unsigned int clear)
{
struct usb_serial_port *port = tty->driver_data;
dev_dbg(&port->dev, "%s\n", __func__);
if (port->serial->type->tiocmset)
return port->serial->type->tiocmset(tty, set, clear);
return -ENOTTY;
}
static int serial_get_icount(struct tty_struct *tty,
struct serial_icounter_struct *icount)
{
struct usb_serial_port *port = tty->driver_data;
dev_dbg(&port->dev, "%s\n", __func__);
if (port->serial->type->get_icount)
return port->serial->type->get_icount(tty, icount);
return -ENOTTY;
}
/*
* We would be calling tty_wakeup here, but unfortunately some line
* disciplines have an annoying habit of calling tty->write from
* the write wakeup callback (e.g. n_hdlc.c).
*/
void usb_serial_port_softint(struct usb_serial_port *port)
{
schedule_work(&port->work);
}
EXPORT_SYMBOL_GPL(usb_serial_port_softint);
static void usb_serial_port_work(struct work_struct *work)
{
struct usb_serial_port *port =
container_of(work, struct usb_serial_port, work);
tty_port_tty_wakeup(&port->port);
}
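/*
 * Cancel all read, write and interrupt URBs of a port and prevent them
 * from being resubmitted.
 */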
static void usb_serial_port_poison_urbs(struct usb_serial_port *port)
{
int i;
for (i = 0; i < ARRAY_SIZE(port->read_urbs); ++i)
usb_poison_urb(port->read_urbs[i]);
for (i = 0; i < ARRAY_SIZE(port->write_urbs); ++i)
usb_poison_urb(port->write_urbs[i]);
usb_poison_urb(port->interrupt_in_urb);
usb_poison_urb(port->interrupt_out_urb);
}
static void usb_serial_port_unpoison_urbs(struct usb_serial_port *port)
{
int i;
for (i = 0; i < ARRAY_SIZE(port->read_urbs); ++i)
usb_unpoison_urb(port->read_urbs[i]);
for (i = 0; i < ARRAY_SIZE(port->write_urbs); ++i)
usb_unpoison_urb(port->write_urbs[i]);
usb_unpoison_urb(port->interrupt_in_urb);
usb_unpoison_urb(port->interrupt_out_urb);
}
static void usb_serial_port_release(struct device *dev)
{
struct usb_serial_port *port = to_usb_serial_port(dev);
int i;
dev_dbg(dev, "%s\n", __func__);
usb_free_urb(port->interrupt_in_urb);
usb_free_urb(port->interrupt_out_urb);
for (i = 0; i < ARRAY_SIZE(port->read_urbs); ++i) {
usb_free_urb(port->read_urbs[i]);
kfree(port->bulk_in_buffers[i]);
}
for (i = 0; i < ARRAY_SIZE(port->write_urbs); ++i) {
usb_free_urb(port->write_urbs[i]);
kfree(port->bulk_out_buffers[i]);
}
kfifo_free(&port->write_fifo);
kfree(port->interrupt_in_buffer);
kfree(port->interrupt_out_buffer);
tty_port_destroy(&port->port);
kfree(port);
}
static struct usb_serial *create_serial(struct usb_device *dev,
struct usb_interface *interface,
struct usb_serial_driver *driver)
{
struct usb_serial *serial;
serial = kzalloc(sizeof(*serial), GFP_KERNEL);
if (!serial)
return NULL;
serial->dev = usb_get_dev(dev);
serial->type = driver;
serial->interface = usb_get_intf(interface);
kref_init(&serial->kref);
mutex_init(&serial->disc_mutex);
serial->minors_reserved = 0;
return serial;
}
static const struct usb_device_id *match_dynamic_id(struct usb_interface *intf,
struct usb_serial_driver *drv)
{
struct usb_dynid *dynid;
spin_lock(&drv->dynids.lock);
list_for_each_entry(dynid, &drv->dynids.list, node) {
if (usb_match_one_id(intf, &dynid->id)) {
spin_unlock(&drv->dynids.lock);
return &dynid->id;
}
}
spin_unlock(&drv->dynids.lock);
return NULL;
}
static const struct usb_device_id *get_iface_id(struct usb_serial_driver *drv,
struct usb_interface *intf)
{
const struct usb_device_id *id;
id = usb_match_id(intf, drv->id_table);
if (id) {
dev_dbg(&intf->dev, "static descriptor matches\n");
goto exit;
}
id = match_dynamic_id(intf, drv);
if (id)
dev_dbg(&intf->dev, "dynamic descriptor matches\n");
exit:
return id;
}
/* Caller must hold table_lock */
static struct usb_serial_driver *search_serial_device(
struct usb_interface *iface)
{
const struct usb_device_id *id = NULL;
struct usb_serial_driver *drv;
struct usb_driver *driver = to_usb_driver(iface->dev.driver);
/* Check if the usb id matches a known device */
list_for_each_entry(drv, &usb_serial_driver_list, driver_list) {
if (drv->usb_driver == driver)
id = get_iface_id(drv, iface);
if (id)
return drv;
}
return NULL;
}
static bool serial_port_carrier_raised(struct tty_port *port)
{
struct usb_serial_port *p = container_of(port, struct usb_serial_port, port);
struct usb_serial_driver *drv = p->serial->type;
if (drv->carrier_raised)
return drv->carrier_raised(p);
/* No carrier control - don't block */
return true;
}
static void serial_port_dtr_rts(struct tty_port *port, bool on)
{
struct usb_serial_port *p = container_of(port, struct usb_serial_port, port);
struct usb_serial_driver *drv = p->serial->type;
if (drv->dtr_rts)
drv->dtr_rts(p, on);
}
static ssize_t port_number_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct usb_serial_port *port = to_usb_serial_port(dev);
return sprintf(buf, "%u\n", port->port_number);
}
static DEVICE_ATTR_RO(port_number);
static struct attribute *usb_serial_port_attrs[] = {
&dev_attr_port_number.attr,
NULL
};
ATTRIBUTE_GROUPS(usb_serial_port);
static const struct tty_port_operations serial_port_ops = {
.carrier_raised = serial_port_carrier_raised,
.dtr_rts = serial_port_dtr_rts,
.activate = serial_port_activate,
.shutdown = serial_port_shutdown,
};
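/*
 * Sort one endpoint descriptor into the bulk-in, bulk-out, interrupt-in
 * or interrupt-out slot of @epds; endpoints beyond the fixed array sizes
 * are silently ignored.
 */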
static void store_endpoint(struct usb_serial *serial,
struct usb_serial_endpoints *epds,
struct usb_endpoint_descriptor *epd)
{
struct device *dev = &serial->interface->dev;
u8 addr = epd->bEndpointAddress;
if (usb_endpoint_is_bulk_in(epd)) {
if (epds->num_bulk_in == ARRAY_SIZE(epds->bulk_in))
return;
dev_dbg(dev, "found bulk in endpoint %02x\n", addr);
epds->bulk_in[epds->num_bulk_in++] = epd;
} else if (usb_endpoint_is_bulk_out(epd)) {
if (epds->num_bulk_out == ARRAY_SIZE(epds->bulk_out))
return;
dev_dbg(dev, "found bulk out endpoint %02x\n", addr);
epds->bulk_out[epds->num_bulk_out++] = epd;
} else if (usb_endpoint_is_int_in(epd)) {
if (epds->num_interrupt_in == ARRAY_SIZE(epds->interrupt_in))
return;
dev_dbg(dev, "found interrupt in endpoint %02x\n", addr);
epds->interrupt_in[epds->num_interrupt_in++] = epd;
} else if (usb_endpoint_is_int_out(epd)) {
if (epds->num_interrupt_out == ARRAY_SIZE(epds->interrupt_out))
return;
dev_dbg(dev, "found interrupt out endpoint %02x\n", addr);
epds->interrupt_out[epds->num_interrupt_out++] = epd;
}
}
static void find_endpoints(struct usb_serial *serial,
struct usb_serial_endpoints *epds,
struct usb_interface *intf)
{
struct usb_host_interface *iface_desc;
struct usb_endpoint_descriptor *epd;
unsigned int i;
iface_desc = intf->cur_altsetting;
for (i = 0; i < iface_desc->desc.bNumEndpoints; ++i) {
epd = &iface_desc->endpoint[i].desc;
store_endpoint(serial, epds, epd);
}
}
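/*
 * Allocate the read URBs and buffers for a bulk-in endpoint. The buffer
 * size is the larger of the subdriver's bulk_in_size and the endpoint's
 * maximum packet size.
 */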
static int setup_port_bulk_in(struct usb_serial_port *port,
struct usb_endpoint_descriptor *epd)
{
struct usb_serial_driver *type = port->serial->type;
struct usb_device *udev = port->serial->dev;
int buffer_size;
int i;
buffer_size = max_t(int, type->bulk_in_size, usb_endpoint_maxp(epd));
port->bulk_in_size = buffer_size;
port->bulk_in_endpointAddress = epd->bEndpointAddress;
for (i = 0; i < ARRAY_SIZE(port->read_urbs); ++i) {
set_bit(i, &port->read_urbs_free);
port->read_urbs[i] = usb_alloc_urb(0, GFP_KERNEL);
if (!port->read_urbs[i])
return -ENOMEM;
port->bulk_in_buffers[i] = kmalloc(buffer_size, GFP_KERNEL);
if (!port->bulk_in_buffers[i])
return -ENOMEM;
usb_fill_bulk_urb(port->read_urbs[i], udev,
usb_rcvbulkpipe(udev, epd->bEndpointAddress),
port->bulk_in_buffers[i], buffer_size,
type->read_bulk_callback, port);
}
port->read_urb = port->read_urbs[0];
port->bulk_in_buffer = port->bulk_in_buffers[0];
return 0;
}
static int setup_port_bulk_out(struct usb_serial_port *port,
struct usb_endpoint_descriptor *epd)
{
struct usb_serial_driver *type = port->serial->type;
struct usb_device *udev = port->serial->dev;
int buffer_size;
int i;
if (kfifo_alloc(&port->write_fifo, PAGE_SIZE, GFP_KERNEL))
return -ENOMEM;
if (type->bulk_out_size)
buffer_size = type->bulk_out_size;
else
buffer_size = usb_endpoint_maxp(epd);
port->bulk_out_size = buffer_size;
port->bulk_out_endpointAddress = epd->bEndpointAddress;
for (i = 0; i < ARRAY_SIZE(port->write_urbs); ++i) {
set_bit(i, &port->write_urbs_free);
port->write_urbs[i] = usb_alloc_urb(0, GFP_KERNEL);
if (!port->write_urbs[i])
return -ENOMEM;
port->bulk_out_buffers[i] = kmalloc(buffer_size, GFP_KERNEL);
if (!port->bulk_out_buffers[i])
return -ENOMEM;
usb_fill_bulk_urb(port->write_urbs[i], udev,
usb_sndbulkpipe(udev, epd->bEndpointAddress),
port->bulk_out_buffers[i], buffer_size,
type->write_bulk_callback, port);
}
port->write_urb = port->write_urbs[0];
port->bulk_out_buffer = port->bulk_out_buffers[0];
return 0;
}
static int setup_port_interrupt_in(struct usb_serial_port *port,
struct usb_endpoint_descriptor *epd)
{
struct usb_serial_driver *type = port->serial->type;
struct usb_device *udev = port->serial->dev;
int buffer_size;
port->interrupt_in_urb = usb_alloc_urb(0, GFP_KERNEL);
if (!port->interrupt_in_urb)
return -ENOMEM;
buffer_size = usb_endpoint_maxp(epd);
port->interrupt_in_endpointAddress = epd->bEndpointAddress;
port->interrupt_in_buffer = kmalloc(buffer_size, GFP_KERNEL);
if (!port->interrupt_in_buffer)
return -ENOMEM;
usb_fill_int_urb(port->interrupt_in_urb, udev,
usb_rcvintpipe(udev, epd->bEndpointAddress),
port->interrupt_in_buffer, buffer_size,
type->read_int_callback, port,
epd->bInterval);
return 0;
}
static int setup_port_interrupt_out(struct usb_serial_port *port,
struct usb_endpoint_descriptor *epd)
{
struct usb_serial_driver *type = port->serial->type;
struct usb_device *udev = port->serial->dev;
int buffer_size;
port->interrupt_out_urb = usb_alloc_urb(0, GFP_KERNEL);
if (!port->interrupt_out_urb)
return -ENOMEM;
buffer_size = usb_endpoint_maxp(epd);
port->interrupt_out_size = buffer_size;
port->interrupt_out_endpointAddress = epd->bEndpointAddress;
port->interrupt_out_buffer = kmalloc(buffer_size, GFP_KERNEL);
if (!port->interrupt_out_buffer)
return -ENOMEM;
usb_fill_int_urb(port->interrupt_out_urb, udev,
usb_sndintpipe(udev, epd->bEndpointAddress),
port->interrupt_out_buffer, buffer_size,
type->write_int_callback, port,
epd->bInterval);
return 0;
}
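/*
 * Bind a matching interface: find the owning usb_serial_driver, let it
 * probe the device and determine the number of ports, allocate the port
 * structures and per-endpoint resources, attach the subdriver and finally
 * register the individual ttyUSB port devices.
 */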
static int usb_serial_probe(struct usb_interface *interface,
const struct usb_device_id *id)
{
struct device *ddev = &interface->dev;
struct usb_device *dev = interface_to_usbdev(interface);
struct usb_serial *serial = NULL;
struct usb_serial_port *port;
struct usb_serial_endpoints *epds;
struct usb_serial_driver *type = NULL;
int retval;
int i;
int num_ports = 0;
unsigned char max_endpoints;
mutex_lock(&table_lock);
type = search_serial_device(interface);
if (!type) {
mutex_unlock(&table_lock);
dev_dbg(ddev, "none matched\n");
return -ENODEV;
}
if (!try_module_get(type->driver.owner)) {
mutex_unlock(&table_lock);
dev_err(ddev, "module get failed, exiting\n");
return -EIO;
}
mutex_unlock(&table_lock);
serial = create_serial(dev, interface, type);
if (!serial) {
retval = -ENOMEM;
goto err_put_module;
}
/* if this device type has a probe function, call it */
if (type->probe) {
const struct usb_device_id *id;
id = get_iface_id(type, interface);
retval = type->probe(serial, id);
if (retval) {
dev_dbg(ddev, "sub driver rejected device\n");
goto err_release_sibling;
}
}
/* descriptor matches, let's find the endpoints needed */
epds = kzalloc(sizeof(*epds), GFP_KERNEL);
if (!epds) {
retval = -ENOMEM;
goto err_release_sibling;
}
find_endpoints(serial, epds, interface);
if (serial->sibling)
find_endpoints(serial, epds, serial->sibling);
if (epds->num_bulk_in < type->num_bulk_in ||
epds->num_bulk_out < type->num_bulk_out ||
epds->num_interrupt_in < type->num_interrupt_in ||
epds->num_interrupt_out < type->num_interrupt_out) {
dev_err(ddev, "required endpoints missing\n");
retval = -ENODEV;
goto err_free_epds;
}
if (type->calc_num_ports) {
retval = type->calc_num_ports(serial, epds);
if (retval < 0)
goto err_free_epds;
num_ports = retval;
}
if (!num_ports)
num_ports = type->num_ports;
if (num_ports > MAX_NUM_PORTS) {
dev_warn(ddev, "too many ports requested: %d\n", num_ports);
num_ports = MAX_NUM_PORTS;
}
serial->num_ports = (unsigned char)num_ports;
serial->num_bulk_in = epds->num_bulk_in;
serial->num_bulk_out = epds->num_bulk_out;
serial->num_interrupt_in = epds->num_interrupt_in;
serial->num_interrupt_out = epds->num_interrupt_out;
/* found all that we need */
dev_info(ddev, "%s converter detected\n", type->description);
	/*
	 * Create our ports; we need as many as the max endpoints. We don't
	 * use num_ports here because some devices have more endpoint pairs
	 * than ports.
	 */
max_endpoints = max(epds->num_bulk_in, epds->num_bulk_out);
max_endpoints = max(max_endpoints, epds->num_interrupt_in);
max_endpoints = max(max_endpoints, epds->num_interrupt_out);
max_endpoints = max(max_endpoints, serial->num_ports);
serial->num_port_pointers = max_endpoints;
dev_dbg(ddev, "setting up %d port structure(s)\n", max_endpoints);
for (i = 0; i < max_endpoints; ++i) {
port = kzalloc(sizeof(struct usb_serial_port), GFP_KERNEL);
if (!port) {
retval = -ENOMEM;
goto err_free_epds;
}
tty_port_init(&port->port);
port->port.ops = &serial_port_ops;
port->serial = serial;
spin_lock_init(&port->lock);
		/*
		 * Keep this for private driver use for the moment, but it
		 * should probably go away.
		 */
INIT_WORK(&port->work, usb_serial_port_work);
serial->port[i] = port;
port->dev.parent = &interface->dev;
port->dev.driver = NULL;
port->dev.bus = &usb_serial_bus_type;
port->dev.release = &usb_serial_port_release;
port->dev.groups = usb_serial_port_groups;
device_initialize(&port->dev);
}
/* set up the endpoint information */
for (i = 0; i < epds->num_bulk_in; ++i) {
retval = setup_port_bulk_in(serial->port[i], epds->bulk_in[i]);
if (retval)
goto err_free_epds;
}
for (i = 0; i < epds->num_bulk_out; ++i) {
retval = setup_port_bulk_out(serial->port[i],
epds->bulk_out[i]);
if (retval)
goto err_free_epds;
}
if (serial->type->read_int_callback) {
for (i = 0; i < epds->num_interrupt_in; ++i) {
retval = setup_port_interrupt_in(serial->port[i],
epds->interrupt_in[i]);
if (retval)
goto err_free_epds;
}
} else if (epds->num_interrupt_in) {
dev_dbg(ddev, "The device claims to support interrupt in transfers, but read_int_callback is not defined\n");
}
if (serial->type->write_int_callback) {
for (i = 0; i < epds->num_interrupt_out; ++i) {
retval = setup_port_interrupt_out(serial->port[i],
epds->interrupt_out[i]);
if (retval)
goto err_free_epds;
}
} else if (epds->num_interrupt_out) {
dev_dbg(ddev, "The device claims to support interrupt out transfers, but write_int_callback is not defined\n");
}
usb_set_intfdata(interface, serial);
/* if this device type has an attach function, call it */
if (type->attach) {
retval = type->attach(serial);
if (retval < 0)
goto err_free_epds;
serial->attached = 1;
if (retval > 0) {
			/*
			 * Quietly accept this device, but don't bind to a
			 * serial port as it's about to disappear.
			 */
serial->num_ports = 0;
goto exit;
}
} else {
serial->attached = 1;
}
retval = allocate_minors(serial, num_ports);
if (retval) {
dev_err(ddev, "No more free serial minor numbers\n");
goto err_free_epds;
}
/* register all of the individual ports with the driver core */
for (i = 0; i < num_ports; ++i) {
port = serial->port[i];
dev_set_name(&port->dev, "ttyUSB%d", port->minor);
dev_dbg(ddev, "registering %s\n", dev_name(&port->dev));
device_enable_async_suspend(&port->dev);
retval = device_add(&port->dev);
if (retval)
dev_err(ddev, "Error registering port device, continuing\n");
}
if (num_ports > 0)
usb_serial_console_init(serial->port[0]->minor);
exit:
kfree(epds);
module_put(type->driver.owner);
return 0;
err_free_epds:
kfree(epds);
err_release_sibling:
release_sibling(serial, interface);
usb_serial_put(serial);
err_put_module:
module_put(type->driver.owner);
return retval;
}
static void usb_serial_disconnect(struct usb_interface *interface)
{
int i;
struct usb_serial *serial = usb_get_intfdata(interface);
struct device *dev = &interface->dev;
struct usb_serial_port *port;
struct tty_struct *tty;
/* sibling interface is cleaning up */
if (!serial)
return;
usb_serial_console_disconnect(serial);
mutex_lock(&serial->disc_mutex);
/* must set a flag, to signal subdrivers */
serial->disconnected = 1;
mutex_unlock(&serial->disc_mutex);
for (i = 0; i < serial->num_ports; ++i) {
port = serial->port[i];
tty = tty_port_tty_get(&port->port);
if (tty) {
tty_vhangup(tty);
tty_kref_put(tty);
}
usb_serial_port_poison_urbs(port);
wake_up_interruptible(&port->port.delta_msr_wait);
cancel_work_sync(&port->work);
if (device_is_registered(&port->dev))
device_del(&port->dev);
}
if (serial->type->disconnect)
serial->type->disconnect(serial);
release_sibling(serial, interface);
/* let the last holder of this object cause it to be cleaned up */
usb_serial_put(serial);
dev_info(dev, "device disconnected\n");
}
int usb_serial_suspend(struct usb_interface *intf, pm_message_t message)
{
struct usb_serial *serial = usb_get_intfdata(intf);
int i, r;
/* suspend when called for first sibling interface */
if (serial->suspend_count++)
return 0;
/*
* serial->type->suspend() MUST return 0 in system sleep context,
* otherwise, the resume callback has to recover device from
* previous suspend failure.
*/
if (serial->type->suspend) {
r = serial->type->suspend(serial, message);
if (r < 0) {
serial->suspend_count--;
return r;
}
}
for (i = 0; i < serial->num_ports; ++i)
usb_serial_port_poison_urbs(serial->port[i]);
return 0;
}
EXPORT_SYMBOL(usb_serial_suspend);
static void usb_serial_unpoison_port_urbs(struct usb_serial *serial)
{
int i;
for (i = 0; i < serial->num_ports; ++i)
usb_serial_port_unpoison_urbs(serial->port[i]);
}
int usb_serial_resume(struct usb_interface *intf)
{
struct usb_serial *serial = usb_get_intfdata(intf);
int rv;
/* resume when called for last sibling interface */
if (--serial->suspend_count)
return 0;
usb_serial_unpoison_port_urbs(serial);
if (serial->type->resume)
rv = serial->type->resume(serial);
else
rv = usb_serial_generic_resume(serial);
return rv;
}
EXPORT_SYMBOL(usb_serial_resume);
static int usb_serial_reset_resume(struct usb_interface *intf)
{
struct usb_serial *serial = usb_get_intfdata(intf);
int rv;
/* resume when called for last sibling interface */
if (--serial->suspend_count)
return 0;
usb_serial_unpoison_port_urbs(serial);
if (serial->type->reset_resume) {
rv = serial->type->reset_resume(serial);
} else {
rv = -EOPNOTSUPP;
intf->needs_binding = 1;
}
return rv;
}
static const struct tty_operations serial_ops = {
.open = serial_open,
.close = serial_close,
.write = serial_write,
.hangup = serial_hangup,
.write_room = serial_write_room,
.ioctl = serial_ioctl,
.set_termios = serial_set_termios,
.throttle = serial_throttle,
.unthrottle = serial_unthrottle,
.break_ctl = serial_break,
.chars_in_buffer = serial_chars_in_buffer,
.wait_until_sent = serial_wait_until_sent,
.tiocmget = serial_tiocmget,
.tiocmset = serial_tiocmset,
.get_icount = serial_get_icount,
.set_serial = serial_set_serial,
.get_serial = serial_get_serial,
.cleanup = serial_cleanup,
.install = serial_install,
.proc_show = serial_proc_show,
};
struct tty_driver *usb_serial_tty_driver;
static int __init usb_serial_init(void)
{
int result;
usb_serial_tty_driver = tty_alloc_driver(USB_SERIAL_TTY_MINORS,
TTY_DRIVER_REAL_RAW | TTY_DRIVER_DYNAMIC_DEV);
if (IS_ERR(usb_serial_tty_driver))
return PTR_ERR(usb_serial_tty_driver);
/* Initialize our global data */
result = bus_register(&usb_serial_bus_type);
if (result) {
pr_err("%s - registering bus driver failed\n", __func__);
goto err_put_driver;
}
usb_serial_tty_driver->driver_name = "usbserial";
usb_serial_tty_driver->name = "ttyUSB";
usb_serial_tty_driver->major = USB_SERIAL_TTY_MAJOR;
usb_serial_tty_driver->minor_start = 0;
usb_serial_tty_driver->type = TTY_DRIVER_TYPE_SERIAL;
usb_serial_tty_driver->subtype = SERIAL_TYPE_NORMAL;
usb_serial_tty_driver->init_termios = tty_std_termios;
usb_serial_tty_driver->init_termios.c_cflag = B9600 | CS8 | CREAD
| HUPCL | CLOCAL;
usb_serial_tty_driver->init_termios.c_ispeed = 9600;
usb_serial_tty_driver->init_termios.c_ospeed = 9600;
tty_set_operations(usb_serial_tty_driver, &serial_ops);
result = tty_register_driver(usb_serial_tty_driver);
if (result) {
pr_err("%s - tty_register_driver failed\n", __func__);
goto err_unregister_bus;
}
/* register the generic driver, if we should */
result = usb_serial_generic_register();
if (result < 0) {
pr_err("%s - registering generic driver failed\n", __func__);
goto err_unregister_driver;
}
return result;
err_unregister_driver:
tty_unregister_driver(usb_serial_tty_driver);
err_unregister_bus:
bus_unregister(&usb_serial_bus_type);
err_put_driver:
pr_err("%s - returning with error %d\n", __func__, result);
tty_driver_kref_put(usb_serial_tty_driver);
return result;
}
static void __exit usb_serial_exit(void)
{
usb_serial_console_exit();
usb_serial_generic_deregister();
tty_unregister_driver(usb_serial_tty_driver);
tty_driver_kref_put(usb_serial_tty_driver);
bus_unregister(&usb_serial_bus_type);
idr_destroy(&serial_minors);
}
module_init(usb_serial_init);
module_exit(usb_serial_exit);
#define set_to_generic_if_null(type, function) \
do { \
if (!type->function) { \
type->function = usb_serial_generic_##function; \
pr_debug("%s: using generic " #function "\n", \
type->driver.name); \
} \
} while (0)
static void usb_serial_operations_init(struct usb_serial_driver *device)
{
set_to_generic_if_null(device, open);
set_to_generic_if_null(device, write);
set_to_generic_if_null(device, close);
set_to_generic_if_null(device, write_room);
set_to_generic_if_null(device, chars_in_buffer);
if (device->tx_empty)
set_to_generic_if_null(device, wait_until_sent);
set_to_generic_if_null(device, read_bulk_callback);
set_to_generic_if_null(device, write_bulk_callback);
set_to_generic_if_null(device, process_read_urb);
set_to_generic_if_null(device, prepare_write_buffer);
}
static int usb_serial_register(struct usb_serial_driver *driver)
{
int retval;
if (usb_disabled())
return -ENODEV;
if (!driver->description)
driver->description = driver->driver.name;
if (!driver->usb_driver) {
WARN(1, "Serial driver %s has no usb_driver\n",
driver->description);
return -EINVAL;
}
/* Prevent individual ports from being unbound. */
driver->driver.suppress_bind_attrs = true;
usb_serial_operations_init(driver);
/* Add this device to our list of devices */
mutex_lock(&table_lock);
list_add(&driver->driver_list, &usb_serial_driver_list);
retval = usb_serial_bus_register(driver);
if (retval) {
pr_err("problem %d when registering driver %s\n", retval, driver->description);
list_del(&driver->driver_list);
} else {
pr_info("USB Serial support registered for %s\n", driver->description);
}
mutex_unlock(&table_lock);
return retval;
}
static void usb_serial_deregister(struct usb_serial_driver *device)
{
pr_info("USB Serial deregistering driver %s\n", device->description);
mutex_lock(&table_lock);
list_del(&device->driver_list);
mutex_unlock(&table_lock);
usb_serial_bus_deregister(device);
}
/**
* usb_serial_register_drivers - register drivers for a usb-serial module
* @serial_drivers: NULL-terminated array of pointers to drivers to be registered
* @name: name of the usb_driver for this set of @serial_drivers
* @id_table: list of all devices this @serial_drivers set binds to
*
* Registers all the drivers in the @serial_drivers array, and dynamically
* creates a struct usb_driver with the name @name and id_table of @id_table.
*/
int usb_serial_register_drivers(struct usb_serial_driver *const serial_drivers[],
const char *name,
const struct usb_device_id *id_table)
{
int rc;
struct usb_driver *udriver;
struct usb_serial_driver * const *sd;
/*
* udriver must be registered before any of the serial drivers,
* because the store_new_id() routine for the serial drivers (in
* bus.c) probes udriver.
*
* Performance hack: We don't want udriver to be probed until
* the serial drivers are registered, because the probe would
* simply fail for lack of a matching serial driver.
* So we leave udriver's id_table set to NULL until we are all set.
*
* Suspend/resume support is implemented in the usb-serial core,
* so fill in the PM-related fields in udriver.
*/
udriver = kzalloc(sizeof(*udriver), GFP_KERNEL);
if (!udriver)
return -ENOMEM;
udriver->name = name;
udriver->no_dynamic_id = 1;
udriver->supports_autosuspend = 1;
udriver->suspend = usb_serial_suspend;
udriver->resume = usb_serial_resume;
udriver->probe = usb_serial_probe;
udriver->disconnect = usb_serial_disconnect;
/* we only set the reset_resume field if the serial_driver has one */
for (sd = serial_drivers; *sd; ++sd) {
if ((*sd)->reset_resume) {
udriver->reset_resume = usb_serial_reset_resume;
break;
}
}
rc = usb_register(udriver);
if (rc)
goto err_free_driver;
for (sd = serial_drivers; *sd; ++sd) {
(*sd)->usb_driver = udriver;
rc = usb_serial_register(*sd);
if (rc)
goto err_deregister_drivers;
}
/* Now set udriver's id_table and look for matches */
udriver->id_table = id_table;
rc = driver_attach(&udriver->drvwrap.driver);
return 0;
err_deregister_drivers:
while (sd-- > serial_drivers)
usb_serial_deregister(*sd);
usb_deregister(udriver);
err_free_driver:
kfree(udriver);
return rc;
}
EXPORT_SYMBOL_GPL(usb_serial_register_drivers);
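/*
 * A minimal usage sketch (driver names below are illustrative only): a
 * subdriver defines a NULL-terminated array of usb_serial_driver pointers
 * and a matching usb_device_id table, and typically lets
 * module_usb_serial_driver() generate the module init/exit code that
 * calls usb_serial_register_drivers() and usb_serial_deregister_drivers():
 *
 *	static struct usb_serial_driver * const serial_drivers[] = {
 *		&example_device, NULL
 *	};
 *	module_usb_serial_driver(serial_drivers, id_table);
 */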
/**
* usb_serial_deregister_drivers - deregister drivers for a usb-serial module
* @serial_drivers: NULL-terminated array of pointers to drivers to be deregistered
*
* Deregisters all the drivers in the @serial_drivers array and deregisters and
* frees the struct usb_driver that was created by the call to
* usb_serial_register_drivers().
*/
void usb_serial_deregister_drivers(struct usb_serial_driver *const serial_drivers[])
{
struct usb_driver *udriver = (*serial_drivers)->usb_driver;
for (; *serial_drivers; ++serial_drivers)
usb_serial_deregister(*serial_drivers);
usb_deregister(udriver);
kfree(udriver);
}
EXPORT_SYMBOL_GPL(usb_serial_deregister_drivers);
MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL v2");
| linux-master | drivers/usb/serial/usb-serial.c |
// SPDX-License-Identifier: GPL-2.0
/*
* usb-serial driver for Quatech SSU-100
*
* based on ftdi_sio.c and the original serqt_usb.c from Quatech
*
*/
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/tty.h>
#include <linux/tty_driver.h>
#include <linux/tty_flip.h>
#include <linux/module.h>
#include <linux/serial.h>
#include <linux/usb.h>
#include <linux/usb/serial.h>
#include <linux/serial_reg.h>
#include <linux/uaccess.h>
#define QT_OPEN_CLOSE_CHANNEL 0xca
#define QT_SET_GET_DEVICE 0xc2
#define QT_SET_GET_REGISTER 0xc0
#define QT_GET_SET_PREBUF_TRIG_LVL 0xcc
#define QT_SET_ATF 0xcd
#define QT_GET_SET_UART 0xc1
#define QT_TRANSFER_IN 0xc0
#define QT_HW_FLOW_CONTROL_MASK 0xc5
#define QT_SW_FLOW_CONTROL_MASK 0xc6
#define SERIAL_MSR_MASK 0xf0
#define SERIAL_CRTSCTS ((UART_MCR_RTS << 8) | UART_MSR_CTS)
#define SERIAL_EVEN_PARITY (UART_LCR_PARITY | UART_LCR_EPAR)
#define MAX_BAUD_RATE 460800
#define ATC_DISABLED 0x00
#define DUPMODE_BITS 0xc0
#define RR_BITS 0x03
#define LOOPMODE_BITS 0x41
#define RS232_MODE 0x00
#define RTSCTS_TO_CONNECTOR 0x40
#define CLKS_X4 0x02
#define FULLPWRBIT 0x00000080
#define NEXT_BOARD_POWER_BIT 0x00000004
#define DRIVER_DESC "Quatech SSU-100 USB to Serial Driver"
#define USB_VENDOR_ID_QUATECH 0x061d /* Quatech VID */
#define QUATECH_SSU100 0xC020 /* SSU100 */
static const struct usb_device_id id_table[] = {
{USB_DEVICE(USB_VENDOR_ID_QUATECH, QUATECH_SSU100)},
{} /* Terminating entry */
};
MODULE_DEVICE_TABLE(usb, id_table);
struct ssu100_port_private {
spinlock_t status_lock;
u8 shadowLSR;
u8 shadowMSR;
};
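/* Send a vendor request without a data stage to the default control pipe. */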
static inline int ssu100_control_msg(struct usb_device *dev,
u8 request, u16 data, u16 index)
{
return usb_control_msg(dev, usb_sndctrlpipe(dev, 0),
request, 0x40, data, index,
NULL, 0, 300);
}
static inline int ssu100_setdevice(struct usb_device *dev, u8 *data)
{
u16 x = ((u16)(data[1] << 8) | (u16)(data[0]));
return ssu100_control_msg(dev, QT_SET_GET_DEVICE, x, 0);
}
static inline int ssu100_getdevice(struct usb_device *dev, u8 *data)
{
int ret;
ret = usb_control_msg(dev, usb_rcvctrlpipe(dev, 0),
QT_SET_GET_DEVICE, 0xc0, 0, 0,
data, 3, 300);
if (ret < 3) {
if (ret >= 0)
ret = -EIO;
}
return ret;
}
static inline int ssu100_getregister(struct usb_device *dev,
unsigned short uart,
unsigned short reg,
u8 *data)
{
int ret;
ret = usb_control_msg(dev, usb_rcvctrlpipe(dev, 0),
QT_SET_GET_REGISTER, 0xc0, reg,
uart, data, sizeof(*data), 300);
if (ret < (int)sizeof(*data)) {
if (ret >= 0)
ret = -EIO;
}
return ret;
}
static inline int ssu100_setregister(struct usb_device *dev,
unsigned short uart,
unsigned short reg,
u16 data)
{
u16 value = (data << 8) | reg;
return usb_control_msg(dev, usb_sndctrlpipe(dev, 0),
QT_SET_GET_REGISTER, 0x40, value, uart,
NULL, 0, 300);
}
#define set_mctrl(dev, set) update_mctrl((dev), (set), 0)
#define clear_mctrl(dev, clear) update_mctrl((dev), 0, (clear))
/* These do not deal with devices that have more than 1 port. */
static inline int update_mctrl(struct usb_device *dev, unsigned int set,
unsigned int clear)
{
unsigned urb_value;
int result;
if (((set | clear) & (TIOCM_DTR | TIOCM_RTS)) == 0) {
dev_dbg(&dev->dev, "%s - DTR|RTS not being set|cleared\n", __func__);
return 0; /* no change */
}
clear &= ~set; /* 'set' takes precedence over 'clear' */
urb_value = 0;
if (set & TIOCM_DTR)
urb_value |= UART_MCR_DTR;
if (set & TIOCM_RTS)
urb_value |= UART_MCR_RTS;
result = ssu100_setregister(dev, 0, UART_MCR, urb_value);
if (result < 0)
dev_dbg(&dev->dev, "%s Error from MODEM_CTRL urb\n", __func__);
return result;
}
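/*
 * One-time device setup: clear the full-power bit, set the prebuffer
 * trigger level, write ATC_DISABLED to the ATF control and select RS232
 * mode with the x4 clock.
 */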
static int ssu100_initdevice(struct usb_device *dev)
{
u8 *data;
int result = 0;
data = kzalloc(3, GFP_KERNEL);
if (!data)
return -ENOMEM;
result = ssu100_getdevice(dev, data);
if (result < 0) {
dev_dbg(&dev->dev, "%s - get_device failed %i\n", __func__, result);
goto out;
}
data[1] &= ~FULLPWRBIT;
result = ssu100_setdevice(dev, data);
if (result < 0) {
dev_dbg(&dev->dev, "%s - setdevice failed %i\n", __func__, result);
goto out;
}
result = ssu100_control_msg(dev, QT_GET_SET_PREBUF_TRIG_LVL, 128, 0);
if (result < 0) {
dev_dbg(&dev->dev, "%s - set prebuffer level failed %i\n", __func__, result);
goto out;
}
result = ssu100_control_msg(dev, QT_SET_ATF, ATC_DISABLED, 0);
if (result < 0) {
dev_dbg(&dev->dev, "%s - set ATFprebuffer level failed %i\n", __func__, result);
goto out;
}
result = ssu100_getdevice(dev, data);
if (result < 0) {
dev_dbg(&dev->dev, "%s - get_device failed %i\n", __func__, result);
goto out;
}
data[0] &= ~(RR_BITS | DUPMODE_BITS);
data[0] |= CLKS_X4;
data[1] &= ~(LOOPMODE_BITS);
data[1] |= RS232_MODE;
result = ssu100_setdevice(dev, data);
if (result < 0) {
dev_dbg(&dev->dev, "%s - setdevice failed %i\n", __func__, result);
goto out;
}
out: kfree(data);
return result;
}
static void ssu100_set_termios(struct tty_struct *tty,
struct usb_serial_port *port,
const struct ktermios *old_termios)
{
struct usb_device *dev = port->serial->dev;
struct ktermios *termios = &tty->termios;
u16 baud, divisor, remainder;
unsigned int cflag = termios->c_cflag;
u16 urb_value = 0; /* will hold the new flags */
int result;
if (cflag & PARENB) {
if (cflag & PARODD)
urb_value |= UART_LCR_PARITY;
else
urb_value |= SERIAL_EVEN_PARITY;
}
urb_value |= UART_LCR_WLEN(tty_get_char_size(cflag));
baud = tty_get_baud_rate(tty);
if (!baud)
baud = 9600;
dev_dbg(&port->dev, "%s - got baud = %d\n", __func__, baud);
divisor = MAX_BAUD_RATE / baud;
remainder = MAX_BAUD_RATE % baud;
if (((remainder * 2) >= baud) && (baud != 110))
divisor++;
urb_value = urb_value << 8;
result = ssu100_control_msg(dev, QT_GET_SET_UART, divisor, urb_value);
if (result < 0)
dev_dbg(&port->dev, "%s - set uart failed\n", __func__);
if (cflag & CRTSCTS)
result = ssu100_control_msg(dev, QT_HW_FLOW_CONTROL_MASK,
SERIAL_CRTSCTS, 0);
else
result = ssu100_control_msg(dev, QT_HW_FLOW_CONTROL_MASK,
0, 0);
if (result < 0)
dev_dbg(&port->dev, "%s - set HW flow control failed\n", __func__);
if (I_IXOFF(tty) || I_IXON(tty)) {
u16 x = ((u16)(START_CHAR(tty) << 8) | (u16)(STOP_CHAR(tty)));
result = ssu100_control_msg(dev, QT_SW_FLOW_CONTROL_MASK,
x, 0);
} else
result = ssu100_control_msg(dev, QT_SW_FLOW_CONTROL_MASK,
0, 0);
if (result < 0)
dev_dbg(&port->dev, "%s - set SW flow control failed\n", __func__);
}
static int ssu100_open(struct tty_struct *tty, struct usb_serial_port *port)
{
struct usb_device *dev = port->serial->dev;
struct ssu100_port_private *priv = usb_get_serial_port_data(port);
u8 *data;
int result;
unsigned long flags;
data = kzalloc(2, GFP_KERNEL);
if (!data)
return -ENOMEM;
result = usb_control_msg(dev, usb_rcvctrlpipe(dev, 0),
QT_OPEN_CLOSE_CHANNEL,
QT_TRANSFER_IN, 0x01,
0, data, 2, 300);
if (result < 2) {
dev_dbg(&port->dev, "%s - open failed %i\n", __func__, result);
if (result >= 0)
result = -EIO;
kfree(data);
return result;
}
spin_lock_irqsave(&priv->status_lock, flags);
priv->shadowLSR = data[0];
priv->shadowMSR = data[1];
spin_unlock_irqrestore(&priv->status_lock, flags);
kfree(data);
/* set to 9600 */
result = ssu100_control_msg(dev, QT_GET_SET_UART, 0x30, 0x0300);
if (result < 0)
dev_dbg(&port->dev, "%s - set uart failed\n", __func__);
if (tty)
ssu100_set_termios(tty, port, &tty->termios);
return usb_serial_generic_open(tty, port);
}
static int ssu100_attach(struct usb_serial *serial)
{
return ssu100_initdevice(serial->dev);
}
static int ssu100_port_probe(struct usb_serial_port *port)
{
struct ssu100_port_private *priv;
priv = kzalloc(sizeof(*priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
spin_lock_init(&priv->status_lock);
usb_set_serial_port_data(port, priv);
return 0;
}
static void ssu100_port_remove(struct usb_serial_port *port)
{
struct ssu100_port_private *priv;
priv = usb_get_serial_port_data(port);
kfree(priv);
}
static int ssu100_tiocmget(struct tty_struct *tty)
{
struct usb_serial_port *port = tty->driver_data;
struct usb_device *dev = port->serial->dev;
u8 *d;
int r;
d = kzalloc(2, GFP_KERNEL);
if (!d)
return -ENOMEM;
r = ssu100_getregister(dev, 0, UART_MCR, d);
if (r < 0)
goto mget_out;
r = ssu100_getregister(dev, 0, UART_MSR, d+1);
if (r < 0)
goto mget_out;
r = (d[0] & UART_MCR_DTR ? TIOCM_DTR : 0) |
(d[0] & UART_MCR_RTS ? TIOCM_RTS : 0) |
(d[1] & UART_MSR_CTS ? TIOCM_CTS : 0) |
(d[1] & UART_MSR_DCD ? TIOCM_CAR : 0) |
(d[1] & UART_MSR_RI ? TIOCM_RI : 0) |
(d[1] & UART_MSR_DSR ? TIOCM_DSR : 0);
mget_out:
kfree(d);
return r;
}
static int ssu100_tiocmset(struct tty_struct *tty,
unsigned int set, unsigned int clear)
{
struct usb_serial_port *port = tty->driver_data;
struct usb_device *dev = port->serial->dev;
return update_mctrl(dev, set, clear);
}
static void ssu100_dtr_rts(struct usb_serial_port *port, int on)
{
struct usb_device *dev = port->serial->dev;
/* Disable flow control */
if (!on) {
if (ssu100_setregister(dev, 0, UART_MCR, 0) < 0)
dev_err(&port->dev, "error from flowcontrol urb\n");
}
/* drop RTS and DTR */
if (on)
set_mctrl(dev, TIOCM_DTR | TIOCM_RTS);
else
clear_mctrl(dev, TIOCM_DTR | TIOCM_RTS);
}
static void ssu100_update_msr(struct usb_serial_port *port, u8 msr)
{
struct ssu100_port_private *priv = usb_get_serial_port_data(port);
unsigned long flags;
spin_lock_irqsave(&priv->status_lock, flags);
priv->shadowMSR = msr;
spin_unlock_irqrestore(&priv->status_lock, flags);
if (msr & UART_MSR_ANY_DELTA) {
/* update input line counters */
if (msr & UART_MSR_DCTS)
port->icount.cts++;
if (msr & UART_MSR_DDSR)
port->icount.dsr++;
if (msr & UART_MSR_DDCD)
port->icount.dcd++;
if (msr & UART_MSR_TERI)
port->icount.rng++;
wake_up_interruptible(&port->port.delta_msr_wait);
}
}
static void ssu100_update_lsr(struct usb_serial_port *port, u8 lsr,
char *tty_flag)
{
struct ssu100_port_private *priv = usb_get_serial_port_data(port);
unsigned long flags;
spin_lock_irqsave(&priv->status_lock, flags);
priv->shadowLSR = lsr;
spin_unlock_irqrestore(&priv->status_lock, flags);
*tty_flag = TTY_NORMAL;
if (lsr & UART_LSR_BRK_ERROR_BITS) {
/* we always want to update icount, but we only want to
* update tty_flag for one case */
if (lsr & UART_LSR_BI) {
port->icount.brk++;
*tty_flag = TTY_BREAK;
usb_serial_handle_break(port);
}
if (lsr & UART_LSR_PE) {
port->icount.parity++;
if (*tty_flag == TTY_NORMAL)
*tty_flag = TTY_PARITY;
}
if (lsr & UART_LSR_FE) {
port->icount.frame++;
if (*tty_flag == TTY_NORMAL)
*tty_flag = TTY_FRAME;
}
if (lsr & UART_LSR_OE) {
port->icount.overrun++;
tty_insert_flip_char(&port->port, 0, TTY_OVERRUN);
}
}
}
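/*
 * Received packets may start with a four-byte status header
 * (0x1b, 0x1b, type, value): type 0x00 carries a line-status update and
 * type 0x01 a modem-status update. Any remaining bytes are forwarded to
 * the tty layer.
 */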
static void ssu100_process_read_urb(struct urb *urb)
{
struct usb_serial_port *port = urb->context;
char *packet = urb->transfer_buffer;
char flag = TTY_NORMAL;
u32 len = urb->actual_length;
int i;
char *ch;
if ((len >= 4) &&
(packet[0] == 0x1b) && (packet[1] == 0x1b) &&
((packet[2] == 0x00) || (packet[2] == 0x01))) {
if (packet[2] == 0x00)
ssu100_update_lsr(port, packet[3], &flag);
if (packet[2] == 0x01)
ssu100_update_msr(port, packet[3]);
len -= 4;
ch = packet + 4;
} else
ch = packet;
if (!len)
return; /* status only */
if (port->sysrq) {
for (i = 0; i < len; i++, ch++) {
if (!usb_serial_handle_sysrq_char(port, *ch))
tty_insert_flip_char(&port->port, *ch, flag);
}
} else {
tty_insert_flip_string_fixed_flag(&port->port, ch, flag, len);
}
tty_flip_buffer_push(&port->port);
}
static struct usb_serial_driver ssu100_device = {
.driver = {
.owner = THIS_MODULE,
.name = "ssu100",
},
.description = DRIVER_DESC,
.id_table = id_table,
.num_ports = 1,
.open = ssu100_open,
.attach = ssu100_attach,
.port_probe = ssu100_port_probe,
.port_remove = ssu100_port_remove,
.dtr_rts = ssu100_dtr_rts,
.process_read_urb = ssu100_process_read_urb,
.tiocmget = ssu100_tiocmget,
.tiocmset = ssu100_tiocmset,
.tiocmiwait = usb_serial_generic_tiocmiwait,
.get_icount = usb_serial_generic_get_icount,
.set_termios = ssu100_set_termios,
};
static struct usb_serial_driver * const serial_drivers[] = {
&ssu100_device, NULL
};
module_usb_serial_driver(serial_drivers, id_table);
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL v2");
| linux-master | drivers/usb/serial/ssu100.c |
// SPDX-License-Identifier: GPL-2.0
/*
 * Some of this code is credited to Linux USB open source files that are
 * distributed with Linux.
 *
 * Copyright: 2007 Metrologic Instruments. All rights reserved.
 * Copyright: 2011 Azimut Ltd. <http://azimutrzn.ru/>
 */
#include <linux/kernel.h>
#include <linux/tty.h>
#include <linux/module.h>
#include <linux/usb.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/tty_driver.h>
#include <linux/tty_flip.h>
#include <linux/moduleparam.h>
#include <linux/spinlock.h>
#include <linux/uaccess.h>
#include <linux/usb/serial.h>
#define DRIVER_DESC "Metrologic Instruments Inc. - USB-POS driver"
/* Product information. */
#define FOCUS_VENDOR_ID 0x0C2E
#define FOCUS_PRODUCT_ID_BI 0x0720
#define FOCUS_PRODUCT_ID_UNI 0x0700
#define METROUSB_SET_REQUEST_TYPE 0x40
#define METROUSB_SET_MODEM_CTRL_REQUEST 10
#define METROUSB_SET_BREAK_REQUEST 0x40
#define METROUSB_MCR_NONE 0x08 /* Deactivate DTR and RTS. */
#define METROUSB_MCR_RTS 0x0a /* Activate RTS. */
#define METROUSB_MCR_DTR 0x09 /* Activate DTR. */
#define WDR_TIMEOUT 5000 /* default urb timeout. */
/* Private data structure. */
struct metrousb_private {
spinlock_t lock;
int throttled;
unsigned long control_state;
};
/* Device table list. */
static const struct usb_device_id id_table[] = {
{ USB_DEVICE(FOCUS_VENDOR_ID, FOCUS_PRODUCT_ID_BI) },
{ USB_DEVICE(FOCUS_VENDOR_ID, FOCUS_PRODUCT_ID_UNI) },
{ USB_DEVICE_INTERFACE_CLASS(0x0c2e, 0x0730, 0xff) }, /* MS7820 */
{ }, /* Terminating entry. */
};
MODULE_DEVICE_TABLE(usb, id_table);
/* UNI-Directional mode commands for device configure */
#define UNI_CMD_OPEN 0x80
#define UNI_CMD_CLOSE 0xFF
static int metrousb_is_unidirectional_mode(struct usb_serial *serial)
{
u16 product_id = le16_to_cpu(serial->dev->descriptor.idProduct);
return product_id == FOCUS_PRODUCT_ID_UNI;
}
static int metrousb_calc_num_ports(struct usb_serial *serial,
struct usb_serial_endpoints *epds)
{
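	/*
	 * Uni-directional scanners are driven through their interrupt-out
	 * endpoint (see metrousb_send_unidirectional_cmd()), so refuse to
	 * bind if it is missing.
	 */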
if (metrousb_is_unidirectional_mode(serial)) {
if (epds->num_interrupt_out == 0) {
dev_err(&serial->interface->dev, "interrupt-out endpoint missing\n");
return -ENODEV;
}
}
return 1;
}
static int metrousb_send_unidirectional_cmd(u8 cmd, struct usb_serial_port *port)
{
int ret;
int actual_len;
u8 *buffer_cmd = NULL;
if (!metrousb_is_unidirectional_mode(port->serial))
return 0;
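	/*
	 * The single command byte goes out via usb_interrupt_msg(), so it
	 * must live in a kmalloc'd (DMA-able) buffer, not on the stack.
	 */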
buffer_cmd = kzalloc(sizeof(cmd), GFP_KERNEL);
if (!buffer_cmd)
return -ENOMEM;
*buffer_cmd = cmd;
ret = usb_interrupt_msg(port->serial->dev,
usb_sndintpipe(port->serial->dev, port->interrupt_out_endpointAddress),
buffer_cmd, sizeof(cmd),
&actual_len, USB_CTRL_SET_TIMEOUT);
kfree(buffer_cmd);
if (ret < 0)
return ret;
else if (actual_len != sizeof(cmd))
return -EIO;
return 0;
}
static void metrousb_read_int_callback(struct urb *urb)
{
struct usb_serial_port *port = urb->context;
struct metrousb_private *metro_priv = usb_get_serial_port_data(port);
unsigned char *data = urb->transfer_buffer;
unsigned long flags;
int throttled = 0;
int result = 0;
dev_dbg(&port->dev, "%s\n", __func__);
switch (urb->status) {
case 0:
/* Success status, read from the port. */
break;
case -ECONNRESET:
case -ENOENT:
case -ESHUTDOWN:
/* urb has been terminated. */
dev_dbg(&port->dev,
"%s - urb shutting down, error code=%d\n",
__func__, urb->status);
return;
default:
dev_dbg(&port->dev,
"%s - non-zero urb received, error code=%d\n",
__func__, urb->status);
goto exit;
}
/* Set the data read from the usb port into the serial port buffer. */
if (urb->actual_length) {
/* Loop through the data copying each byte to the tty layer. */
tty_insert_flip_string(&port->port, data, urb->actual_length);
/* Force the data to the tty layer. */
tty_flip_buffer_push(&port->port);
}
/* Set any port variables. */
spin_lock_irqsave(&metro_priv->lock, flags);
throttled = metro_priv->throttled;
spin_unlock_irqrestore(&metro_priv->lock, flags);
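	/* When throttled, do not resubmit; unthrottle() restarts the urb. */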
if (throttled)
return;
exit:
/* Try to resubmit the urb. */
result = usb_submit_urb(urb, GFP_ATOMIC);
if (result)
dev_err(&port->dev,
"%s - failed submitting interrupt in urb, error code=%d\n",
__func__, result);
}
static void metrousb_cleanup(struct usb_serial_port *port)
{
usb_kill_urb(port->interrupt_in_urb);
metrousb_send_unidirectional_cmd(UNI_CMD_CLOSE, port);
}
static int metrousb_open(struct tty_struct *tty, struct usb_serial_port *port)
{
struct usb_serial *serial = port->serial;
struct metrousb_private *metro_priv = usb_get_serial_port_data(port);
unsigned long flags;
int result = 0;
/* Set the private data information for the port. */
spin_lock_irqsave(&metro_priv->lock, flags);
metro_priv->control_state = 0;
metro_priv->throttled = 0;
spin_unlock_irqrestore(&metro_priv->lock, flags);
	/* Clear any halt condition on the interrupt-in pipe. */
usb_clear_halt(serial->dev, port->interrupt_in_urb->pipe);
/* Start reading from the device */
usb_fill_int_urb(port->interrupt_in_urb, serial->dev,
usb_rcvintpipe(serial->dev, port->interrupt_in_endpointAddress),
port->interrupt_in_urb->transfer_buffer,
port->interrupt_in_urb->transfer_buffer_length,
metrousb_read_int_callback, port, 1);
result = usb_submit_urb(port->interrupt_in_urb, GFP_KERNEL);
if (result) {
dev_err(&port->dev,
"%s - failed submitting interrupt in urb, error code=%d\n",
__func__, result);
return result;
}
/* Send activate cmd to device */
result = metrousb_send_unidirectional_cmd(UNI_CMD_OPEN, port);
if (result) {
dev_err(&port->dev,
"%s - failed to configure device, error code=%d\n",
__func__, result);
goto err_kill_urb;
}
return 0;
err_kill_urb:
usb_kill_urb(port->interrupt_in_urb);
return result;
}
static int metrousb_set_modem_ctrl(struct usb_serial *serial, unsigned int control_state)
{
int retval = 0;
unsigned char mcr = METROUSB_MCR_NONE;
dev_dbg(&serial->dev->dev, "%s - control state = %d\n",
__func__, control_state);
/* Set the modem control value. */
if (control_state & TIOCM_DTR)
mcr |= METROUSB_MCR_DTR;
if (control_state & TIOCM_RTS)
mcr |= METROUSB_MCR_RTS;
/* Send the command to the usb port. */
retval = usb_control_msg(serial->dev, usb_sndctrlpipe(serial->dev, 0),
METROUSB_SET_REQUEST_TYPE, METROUSB_SET_MODEM_CTRL_REQUEST,
control_state, 0, NULL, 0, WDR_TIMEOUT);
if (retval < 0)
dev_err(&serial->dev->dev,
"%s - set modem ctrl=0x%x failed, error code=%d\n",
__func__, mcr, retval);
return retval;
}
static int metrousb_port_probe(struct usb_serial_port *port)
{
struct metrousb_private *metro_priv;
metro_priv = kzalloc(sizeof(*metro_priv), GFP_KERNEL);
if (!metro_priv)
return -ENOMEM;
spin_lock_init(&metro_priv->lock);
usb_set_serial_port_data(port, metro_priv);
return 0;
}
static void metrousb_port_remove(struct usb_serial_port *port)
{
struct metrousb_private *metro_priv;
metro_priv = usb_get_serial_port_data(port);
kfree(metro_priv);
}
static void metrousb_throttle(struct tty_struct *tty)
{
struct usb_serial_port *port = tty->driver_data;
struct metrousb_private *metro_priv = usb_get_serial_port_data(port);
unsigned long flags;
/* Set the private information for the port to stop reading data. */
spin_lock_irqsave(&metro_priv->lock, flags);
metro_priv->throttled = 1;
spin_unlock_irqrestore(&metro_priv->lock, flags);
}
static int metrousb_tiocmget(struct tty_struct *tty)
{
unsigned long control_state = 0;
struct usb_serial_port *port = tty->driver_data;
struct metrousb_private *metro_priv = usb_get_serial_port_data(port);
unsigned long flags;
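	/* Report the cached modem-control state; the device is not queried. */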
spin_lock_irqsave(&metro_priv->lock, flags);
control_state = metro_priv->control_state;
spin_unlock_irqrestore(&metro_priv->lock, flags);
return control_state;
}
static int metrousb_tiocmset(struct tty_struct *tty,
unsigned int set, unsigned int clear)
{
struct usb_serial_port *port = tty->driver_data;
struct usb_serial *serial = port->serial;
struct metrousb_private *metro_priv = usb_get_serial_port_data(port);
unsigned long flags;
unsigned long control_state = 0;
dev_dbg(&port->dev, "%s - set=%d, clear=%d\n", __func__, set, clear);
spin_lock_irqsave(&metro_priv->lock, flags);
control_state = metro_priv->control_state;
/* Set the RTS and DTR values. */
if (set & TIOCM_RTS)
control_state |= TIOCM_RTS;
if (set & TIOCM_DTR)
control_state |= TIOCM_DTR;
if (clear & TIOCM_RTS)
control_state &= ~TIOCM_RTS;
if (clear & TIOCM_DTR)
control_state &= ~TIOCM_DTR;
metro_priv->control_state = control_state;
spin_unlock_irqrestore(&metro_priv->lock, flags);
return metrousb_set_modem_ctrl(serial, control_state);
}
static void metrousb_unthrottle(struct tty_struct *tty)
{
struct usb_serial_port *port = tty->driver_data;
struct metrousb_private *metro_priv = usb_get_serial_port_data(port);
unsigned long flags;
int result = 0;
/* Set the private information for the port to resume reading data. */
spin_lock_irqsave(&metro_priv->lock, flags);
metro_priv->throttled = 0;
spin_unlock_irqrestore(&metro_priv->lock, flags);
/* Submit the urb to read from the port. */
result = usb_submit_urb(port->interrupt_in_urb, GFP_ATOMIC);
if (result)
dev_err(&port->dev,
"failed submitting interrupt in urb error code=%d\n",
result);
}
static struct usb_serial_driver metrousb_device = {
.driver = {
.owner = THIS_MODULE,
.name = "metro-usb",
},
.description = "Metrologic USB to Serial",
.id_table = id_table,
.num_interrupt_in = 1,
.calc_num_ports = metrousb_calc_num_ports,
.open = metrousb_open,
.close = metrousb_cleanup,
.read_int_callback = metrousb_read_int_callback,
.port_probe = metrousb_port_probe,
.port_remove = metrousb_port_remove,
.throttle = metrousb_throttle,
.unthrottle = metrousb_unthrottle,
.tiocmget = metrousb_tiocmget,
.tiocmset = metrousb_tiocmset,
};
static struct usb_serial_driver * const serial_drivers[] = {
&metrousb_device,
NULL,
};
module_usb_serial_driver(serial_drivers, id_table);
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Philip Nicastro");
MODULE_AUTHOR("Aleksey Babahin <[email protected]>");
MODULE_DESCRIPTION(DRIVER_DESC);
| linux-master | drivers/usb/serial/metro-usb.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Qualcomm USB Auxiliary Serial Port driver
*
* Copyright (C) 2008 Greg Kroah-Hartman <[email protected]>
* Copyright (C) 2010 Dan Williams <[email protected]>
*
* Devices listed here usually provide a CDC ACM port on which normal modem
 * AT commands and PPP can be used. But when that port is in use by PPP it
* cannot be used simultaneously for status or signal strength. Instead, the
* ports here can be queried for that information using the Qualcomm DM
* protocol.
*/
#include <linux/kernel.h>
#include <linux/tty.h>
#include <linux/module.h>
#include <linux/usb.h>
#include <linux/usb/serial.h>
/* NOTE: for now, only use this driver for devices that provide a CDC-ACM port
* for normal AT commands, but also provide secondary USB interfaces for the
* QCDM-capable ports. Devices that do not provide a CDC-ACM port should
* probably be driven by option.ko.
*/
/* UTStarcom/Pantech/Curitel devices */
#define UTSTARCOM_VENDOR_ID 0x106c
#define UTSTARCOM_PRODUCT_PC5740 0x3701
#define UTSTARCOM_PRODUCT_PC5750 0x3702 /* aka Pantech PX-500 */
#define UTSTARCOM_PRODUCT_UM150 0x3711
#define UTSTARCOM_PRODUCT_UM175_V1 0x3712
#define UTSTARCOM_PRODUCT_UM175_V2 0x3714
#define UTSTARCOM_PRODUCT_UM175_ALLTEL 0x3715
/* CMOTECH devices */
#define CMOTECH_VENDOR_ID 0x16d8
#define CMOTECH_PRODUCT_CDU550 0x5553
#define CMOTECH_PRODUCT_CDX650 0x6512
/* LG devices */
#define LG_VENDOR_ID 0x1004
#define LG_PRODUCT_VX4400_6000 0x6000 /* VX4400/VX6000/Rumor */
/* Sanyo devices */
#define SANYO_VENDOR_ID 0x0474
#define SANYO_PRODUCT_KATANA_LX 0x0754 /* SCP-3800 (Katana LX) */
/* Samsung devices */
#define SAMSUNG_VENDOR_ID 0x04e8
#define SAMSUNG_PRODUCT_U520 0x6640 /* SCH-U520 */
static const struct usb_device_id id_table[] = {
{ USB_DEVICE_AND_INTERFACE_INFO(UTSTARCOM_VENDOR_ID, UTSTARCOM_PRODUCT_PC5740, 0xff, 0x00, 0x00) },
{ USB_DEVICE_AND_INTERFACE_INFO(UTSTARCOM_VENDOR_ID, UTSTARCOM_PRODUCT_PC5750, 0xff, 0x00, 0x00) },
{ USB_DEVICE_AND_INTERFACE_INFO(UTSTARCOM_VENDOR_ID, UTSTARCOM_PRODUCT_UM150, 0xff, 0x00, 0x00) },
{ USB_DEVICE_AND_INTERFACE_INFO(UTSTARCOM_VENDOR_ID, UTSTARCOM_PRODUCT_UM175_V1, 0xff, 0x00, 0x00) },
{ USB_DEVICE_AND_INTERFACE_INFO(UTSTARCOM_VENDOR_ID, UTSTARCOM_PRODUCT_UM175_V2, 0xff, 0x00, 0x00) },
{ USB_DEVICE_AND_INTERFACE_INFO(UTSTARCOM_VENDOR_ID, UTSTARCOM_PRODUCT_UM175_ALLTEL, 0xff, 0x00, 0x00) },
{ USB_DEVICE_AND_INTERFACE_INFO(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CDU550, 0xff, 0xff, 0x00) },
{ USB_DEVICE_AND_INTERFACE_INFO(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CDX650, 0xff, 0xff, 0x00) },
{ USB_DEVICE_AND_INTERFACE_INFO(LG_VENDOR_ID, LG_PRODUCT_VX4400_6000, 0xff, 0xff, 0x00) },
{ USB_DEVICE_AND_INTERFACE_INFO(SANYO_VENDOR_ID, SANYO_PRODUCT_KATANA_LX, 0xff, 0xff, 0x00) },
{ USB_DEVICE_AND_INTERFACE_INFO(SAMSUNG_VENDOR_ID, SAMSUNG_PRODUCT_U520, 0xff, 0x00, 0x00) },
{ USB_VENDOR_AND_INTERFACE_INFO(UTSTARCOM_VENDOR_ID, 0xff, 0xfd, 0xff) }, /* NMEA */
{ USB_VENDOR_AND_INTERFACE_INFO(UTSTARCOM_VENDOR_ID, 0xff, 0xfe, 0xff) }, /* WMC */
{ USB_VENDOR_AND_INTERFACE_INFO(UTSTARCOM_VENDOR_ID, 0xff, 0xff, 0xff) }, /* DIAG */
{ USB_DEVICE_AND_INTERFACE_INFO(0x1fac, 0x0151, 0xff, 0xff, 0xff) },
{ },
};
MODULE_DEVICE_TABLE(usb, id_table);
static struct usb_serial_driver qcaux_device = {
.driver = {
.owner = THIS_MODULE,
.name = "qcaux",
},
.id_table = id_table,
.num_ports = 1,
};
static struct usb_serial_driver * const serial_drivers[] = {
&qcaux_device, NULL
};
module_usb_serial_driver(serial_drivers, id_table);
MODULE_LICENSE("GPL v2");
| linux-master | drivers/usb/serial/qcaux.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* IPWireless 3G UMTS TDD Modem driver (USB connected)
*
* Copyright (C) 2004 Roelf Diedericks <[email protected]>
* Copyright (C) 2004 Greg Kroah-Hartman <[email protected]>
*
* All information about the device was acquired using SnoopyPro
 * on MSFT's O/S, and examining the MSFT drivers' debug output
* (insanely left _on_ in the enduser version)
*
* It was written out of frustration with the IPWireless USB modem
* supplied by Axity3G/Sentech South Africa not supporting
* Linux whatsoever.
*
* Nobody provided any proprietary information that was not already
* available for this device.
*
* The modem adheres to the "3GPP TS 27.007 AT command set for 3G
* User Equipment (UE)" standard, available from
* http://www.3gpp.org/ftp/Specs/html-info/27007.htm
*
 * The code was only tested with the IPWireless handheld modem distributed
* in South Africa by Sentech.
*
* It may work for Woosh Inc in .nz too, as it appears they use the
* same kit.
*
* There is still some work to be done in terms of handling
* DCD, DTR, RTS, CTS which are currently faked.
 * It's good enough for PPP at this point. It's based on all kinds of
* code found in usb/serial and usb/class
*/
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/tty.h>
#include <linux/tty_flip.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/usb.h>
#include <linux/usb/serial.h>
#include <linux/uaccess.h>
#include "usb-wwan.h"
#define DRIVER_AUTHOR "Roelf Diedericks"
#define DRIVER_DESC "IPWireless tty driver"
#define IPW_TTY_MAJOR 240 /* real device node major id, experimental range */
#define IPW_TTY_MINORS 256 /* we support 256 devices, dunno why, it'd be insane :) */
#define USB_IPW_MAGIC 0x6d02 /* magic number for ipw struct */
/* Message sizes */
#define EVENT_BUFFER_SIZE 0xFF
#define CHAR2INT16(c1, c0) (((u32)((c1) & 0xff) << 8) + (u32)((c0) & 0xff))
/* vendor/product pairs that are known to work with this driver */
#define IPW_VID 0x0bc3
#define IPW_PID 0x0001
/* Vendor commands: */
/* baud rates */
enum {
ipw_sio_b256000 = 0x000e,
ipw_sio_b128000 = 0x001d,
ipw_sio_b115200 = 0x0020,
ipw_sio_b57600 = 0x0040,
ipw_sio_b56000 = 0x0042,
ipw_sio_b38400 = 0x0060,
ipw_sio_b19200 = 0x00c0,
ipw_sio_b14400 = 0x0100,
ipw_sio_b9600 = 0x0180,
ipw_sio_b4800 = 0x0300,
ipw_sio_b2400 = 0x0600,
ipw_sio_b1200 = 0x0c00,
ipw_sio_b600 = 0x1800
};
/* data bits */
#define ipw_dtb_7 0x700
#define ipw_dtb_8 0x810 /* ok so the define is misleading, I know, but forces 8,n,1 */
/* I mean, is there a point to any other setting these days? :) */
/* usb control request types : */
#define IPW_SIO_RXCTL 0x00 /* control bulk rx channel transmissions, value=1/0 (on/off) */
#define IPW_SIO_SET_BAUD 0x01 /* set baud, value=requested ipw_sio_bxxxx */
#define IPW_SIO_SET_LINE 0x03 /* set databits, parity. value=ipw_dtb_x */
#define IPW_SIO_SET_PIN 0x03 /* set/clear dtr/rts value=ipw_pin_xxx */
#define IPW_SIO_POLL 0x08 /* get serial port status byte, call with value=0 */
#define IPW_SIO_INIT 0x11 /* initializes ? value=0 (appears as first thing todo on open) */
#define IPW_SIO_PURGE 0x12 /* purge all transmissions?, call with value=numchar_to_purge */
#define IPW_SIO_HANDFLOW 0x13 /* set xon/xoff limits value=0, and a buffer of 0x10 bytes */
#define IPW_SIO_SETCHARS 0x13 /* set the flowcontrol special chars, value=0, buf=6 bytes, */
/* last 2 bytes contain flowcontrol chars e.g. 00 00 00 00 11 13 */
/* values used for request IPW_SIO_SET_PIN */
#define IPW_PIN_SETDTR 0x101
#define IPW_PIN_SETRTS 0x202
#define IPW_PIN_CLRDTR 0x100
#define IPW_PIN_CLRRTS 0x200 /* unconfirmed */
/* values used for request IPW_SIO_RXCTL */
#define IPW_RXBULK_ON 1
#define IPW_RXBULK_OFF 0
/* various 16 byte hardcoded transferbuffers used by flow control */
#define IPW_BYTES_FLOWINIT { 0x01, 0, 0, 0, 0x40, 0, 0, 0, \
0, 0, 0, 0, 0, 0, 0, 0 }
/* Interpretation of modem status lines */
/* These need sorting out by individually connecting pins and checking
* results. FIXME!
* When data is being sent we see 0x30 in the lower byte; this must
* contain DSR and CTS ...
*/
#define IPW_DSR ((1<<4) | (1<<5))
#define IPW_CTS ((1<<5) | (1<<4))
#define IPW_WANTS_TO_SEND 0x30
static const struct usb_device_id id_table[] = {
{ USB_DEVICE(IPW_VID, IPW_PID) },
{ },
};
MODULE_DEVICE_TABLE(usb, id_table);
static int ipw_open(struct tty_struct *tty, struct usb_serial_port *port)
{
struct usb_device *udev = port->serial->dev;
struct device *dev = &port->dev;
u8 buf_flow_static[16] = IPW_BYTES_FLOWINIT;
u8 *buf_flow_init;
int result;
buf_flow_init = kmemdup(buf_flow_static, 16, GFP_KERNEL);
if (!buf_flow_init)
return -ENOMEM;
	/* --1: Tell the modem to initialize (we think). From sniffs this is
* always the first thing that gets sent to the modem during
* opening of the device */
dev_dbg(dev, "%s: Sending SIO_INIT (we guess)\n", __func__);
result = usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
IPW_SIO_INIT,
USB_TYPE_VENDOR | USB_RECIP_INTERFACE | USB_DIR_OUT,
0,
0, /* index */
NULL,
0,
100000);
if (result < 0)
dev_err(dev, "Init of modem failed (error = %d)\n", result);
/* reset the bulk pipes */
usb_clear_halt(udev, usb_rcvbulkpipe(udev, port->bulk_in_endpointAddress));
usb_clear_halt(udev, usb_sndbulkpipe(udev, port->bulk_out_endpointAddress));
/*--2: Start reading from the device */
dev_dbg(dev, "%s: setting up bulk read callback\n", __func__);
usb_wwan_open(tty, port);
/*--3: Tell the modem to open the floodgates on the rx bulk channel */
dev_dbg(dev, "%s:asking modem for RxRead (RXBULK_ON)\n", __func__);
result = usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
IPW_SIO_RXCTL,
USB_TYPE_VENDOR | USB_RECIP_INTERFACE | USB_DIR_OUT,
IPW_RXBULK_ON,
0, /* index */
NULL,
0,
100000);
if (result < 0)
dev_err(dev, "Enabling bulk RxRead failed (error = %d)\n", result);
/*--4: setup the initial flowcontrol */
dev_dbg(dev, "%s:setting init flowcontrol (%s)\n", __func__, buf_flow_init);
result = usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
IPW_SIO_HANDFLOW,
USB_TYPE_VENDOR | USB_RECIP_INTERFACE | USB_DIR_OUT,
0,
0,
buf_flow_init,
0x10,
200000);
if (result < 0)
dev_err(dev, "initial flowcontrol failed (error = %d)\n", result);
kfree(buf_flow_init);
return 0;
}
static int ipw_attach(struct usb_serial *serial)
{
struct usb_wwan_intf_private *data;
data = kzalloc(sizeof(struct usb_wwan_intf_private), GFP_KERNEL);
if (!data)
return -ENOMEM;
spin_lock_init(&data->susp_lock);
usb_set_serial_data(serial, data);
return 0;
}
static void ipw_release(struct usb_serial *serial)
{
struct usb_wwan_intf_private *data = usb_get_serial_data(serial);
usb_set_serial_data(serial, NULL);
kfree(data);
}
static void ipw_dtr_rts(struct usb_serial_port *port, int on)
{
struct usb_device *udev = port->serial->dev;
struct device *dev = &port->dev;
int result;
dev_dbg(dev, "%s: on = %d\n", __func__, on);
result = usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
IPW_SIO_SET_PIN,
USB_TYPE_VENDOR | USB_RECIP_INTERFACE | USB_DIR_OUT,
on ? IPW_PIN_SETDTR : IPW_PIN_CLRDTR,
0,
NULL,
0,
200000);
if (result < 0)
dev_err(dev, "setting dtr failed (error = %d)\n", result);
result = usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
IPW_SIO_SET_PIN, USB_TYPE_VENDOR |
USB_RECIP_INTERFACE | USB_DIR_OUT,
on ? IPW_PIN_SETRTS : IPW_PIN_CLRRTS,
0,
NULL,
0,
200000);
if (result < 0)
dev_err(dev, "setting rts failed (error = %d)\n", result);
}
static void ipw_close(struct usb_serial_port *port)
{
struct usb_device *udev = port->serial->dev;
struct device *dev = &port->dev;
int result;
/*--3: purge */
dev_dbg(dev, "%s:sending purge\n", __func__);
result = usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
IPW_SIO_PURGE, USB_TYPE_VENDOR |
USB_RECIP_INTERFACE | USB_DIR_OUT,
0x03,
0,
NULL,
0,
200000);
if (result < 0)
dev_err(dev, "purge failed (error = %d)\n", result);
/* send RXBULK_off (tell modem to stop transmitting bulk data on
rx chan) */
result = usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
IPW_SIO_RXCTL,
USB_TYPE_VENDOR | USB_RECIP_INTERFACE | USB_DIR_OUT,
IPW_RXBULK_OFF,
0, /* index */
NULL,
0,
100000);
if (result < 0)
dev_err(dev, "Disabling bulk RxRead failed (error = %d)\n", result);
usb_wwan_close(port);
}
static struct usb_serial_driver ipw_device = {
.driver = {
.owner = THIS_MODULE,
.name = "ipw",
},
.description = "IPWireless converter",
.id_table = id_table,
.num_ports = 1,
.open = ipw_open,
.close = ipw_close,
.attach = ipw_attach,
.release = ipw_release,
.port_probe = usb_wwan_port_probe,
.port_remove = usb_wwan_port_remove,
.dtr_rts = ipw_dtr_rts,
.write = usb_wwan_write,
};
static struct usb_serial_driver * const serial_drivers[] = {
&ipw_device, NULL
};
module_usb_serial_driver(serial_drivers, id_table);
/* Module information */
MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL");
| linux-master | drivers/usb/serial/ipw.c |
// SPDX-License-Identifier: GPL-2.0
/*
* USB ZyXEL omni.net driver
*
* Copyright (C) 2013,2017 Johan Hovold <[email protected]>
*
* See Documentation/usb/usb-serial.rst for more information on using this
* driver
*
* Please report both successes and troubles to the author at [email protected]
*/
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/tty.h>
#include <linux/tty_driver.h>
#include <linux/tty_flip.h>
#include <linux/module.h>
#include <linux/uaccess.h>
#include <linux/usb.h>
#include <linux/usb/serial.h>
#define DRIVER_AUTHOR "Alessandro Zummo"
#define DRIVER_DESC "USB ZyXEL omni.net Driver"
#define ZYXEL_VENDOR_ID 0x0586
#define ZYXEL_OMNINET_ID 0x1000
#define ZYXEL_OMNI_56K_PLUS_ID 0x1500
/* This one seems to be a re-branded ZyXEL device */
#define BT_IGNITIONPRO_ID 0x2000
/* function prototypes */
static void omninet_process_read_urb(struct urb *urb);
static int omninet_prepare_write_buffer(struct usb_serial_port *port,
void *buf, size_t count);
static int omninet_calc_num_ports(struct usb_serial *serial,
struct usb_serial_endpoints *epds);
static int omninet_port_probe(struct usb_serial_port *port);
static void omninet_port_remove(struct usb_serial_port *port);
static const struct usb_device_id id_table[] = {
{ USB_DEVICE(ZYXEL_VENDOR_ID, ZYXEL_OMNINET_ID) },
{ USB_DEVICE(ZYXEL_VENDOR_ID, ZYXEL_OMNI_56K_PLUS_ID) },
{ USB_DEVICE(ZYXEL_VENDOR_ID, BT_IGNITIONPRO_ID) },
{ } /* Terminating entry */
};
MODULE_DEVICE_TABLE(usb, id_table);
static struct usb_serial_driver zyxel_omninet_device = {
.driver = {
.owner = THIS_MODULE,
.name = "omninet",
},
.description = "ZyXEL - omni.net usb",
.id_table = id_table,
.num_bulk_out = 2,
.calc_num_ports = omninet_calc_num_ports,
.port_probe = omninet_port_probe,
.port_remove = omninet_port_remove,
.process_read_urb = omninet_process_read_urb,
.prepare_write_buffer = omninet_prepare_write_buffer,
};
static struct usb_serial_driver * const serial_drivers[] = {
&zyxel_omninet_device, NULL
};
/*
* The protocol.
*
 * The omni.net always exchanges 64 bytes of data with the host. The first
* four bytes are the control header.
*
* oh_seq is a sequence number. Don't know if/how it's used.
* oh_len is the length of the data bytes in the packet.
* oh_xxx Bit-mapped, related to handshaking and status info.
* I normally set it to 0x03 in transmitted frames.
* 7: Active when the TA is in a CONNECTed state.
* 6: unknown
* 5: handshaking, unknown
* 4: handshaking, unknown
* 3: unknown, usually 0
* 2: unknown, usually 0
* 1: handshaking, unknown, usually set to 1 in transmitted frames
* 0: handshaking, unknown, usually set to 1 in transmitted frames
* oh_pad Probably a pad byte.
*
* After the header you will find data bytes if oh_len was greater than zero.
*/
struct omninet_header {
__u8 oh_seq;
__u8 oh_len;
__u8 oh_xxx;
__u8 oh_pad;
};
struct omninet_data {
__u8 od_outseq; /* Sequence number for bulk_out URBs */
};
static int omninet_calc_num_ports(struct usb_serial *serial,
struct usb_serial_endpoints *epds)
{
/* We need only the second bulk-out for our single-port device. */
epds->bulk_out[0] = epds->bulk_out[1];
epds->num_bulk_out = 1;
return 1;
}
static int omninet_port_probe(struct usb_serial_port *port)
{
struct omninet_data *od;
od = kzalloc(sizeof(*od), GFP_KERNEL);
if (!od)
return -ENOMEM;
usb_set_serial_port_data(port, od);
return 0;
}
static void omninet_port_remove(struct usb_serial_port *port)
{
struct omninet_data *od;
od = usb_get_serial_port_data(port);
kfree(od);
}
#define OMNINET_HEADERLEN 4
#define OMNINET_BULKOUTSIZE 64
#define OMNINET_PAYLOADSIZE (OMNINET_BULKOUTSIZE - OMNINET_HEADERLEN)
static void omninet_process_read_urb(struct urb *urb)
{
struct usb_serial_port *port = urb->context;
const struct omninet_header *hdr = urb->transfer_buffer;
const unsigned char *data;
size_t data_len;
if (urb->actual_length <= OMNINET_HEADERLEN || !hdr->oh_len)
return;
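	/* Clamp oh_len so a bogus header cannot make us read past the urb. */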
data = (char *)urb->transfer_buffer + OMNINET_HEADERLEN;
data_len = min_t(size_t, urb->actual_length - OMNINET_HEADERLEN,
hdr->oh_len);
tty_insert_flip_string(&port->port, data, data_len);
tty_flip_buffer_push(&port->port);
}
static int omninet_prepare_write_buffer(struct usb_serial_port *port,
void *buf, size_t count)
{
struct omninet_data *od = usb_get_serial_port_data(port);
struct omninet_header *header = buf;
count = min_t(size_t, count, OMNINET_PAYLOADSIZE);
count = kfifo_out_locked(&port->write_fifo, buf + OMNINET_HEADERLEN,
count, &port->lock);
header->oh_seq = od->od_outseq++;
header->oh_len = count;
header->oh_xxx = 0x03;
header->oh_pad = 0x00;
/* always 64 bytes */
return OMNINET_BULKOUTSIZE;
}
module_usb_serial_driver(serial_drivers, id_table);
MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL v2");
| linux-master | drivers/usb/serial/omninet.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* MCT (Magic Control Technology Corp.) USB RS232 Converter Driver
*
* Copyright (C) 2000 Wolfgang Grandegger ([email protected])
*
* This program is largely derived from the Belkin USB Serial Adapter Driver
* (see belkin_sa.[ch]). All of the information about the device was acquired
* by using SniffUSB on Windows98. For technical details see mct_u232.h.
*
* William G. Greathouse and Greg Kroah-Hartman provided great help on how to
* do the reverse engineering and how to write a USB serial device driver.
*
* TO BE DONE, TO BE CHECKED:
* DTR/RTS signal handling may be incomplete or incorrect. I have mainly
* implemented what I have seen with SniffUSB or found in belkin_sa.c.
* For further TODOs check also belkin_sa.c.
*/
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/tty.h>
#include <linux/tty_driver.h>
#include <linux/tty_flip.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/uaccess.h>
#include <asm/unaligned.h>
#include <linux/usb.h>
#include <linux/usb/serial.h>
#include <linux/serial.h>
#include "mct_u232.h"
#define DRIVER_AUTHOR "Wolfgang Grandegger <[email protected]>"
#define DRIVER_DESC "Magic Control Technology USB-RS232 converter driver"
/*
* Function prototypes
*/
static int mct_u232_port_probe(struct usb_serial_port *port);
static void mct_u232_port_remove(struct usb_serial_port *port);
static int mct_u232_open(struct tty_struct *tty, struct usb_serial_port *port);
static void mct_u232_close(struct usb_serial_port *port);
static void mct_u232_dtr_rts(struct usb_serial_port *port, int on);
static void mct_u232_read_int_callback(struct urb *urb);
static void mct_u232_set_termios(struct tty_struct *tty,
struct usb_serial_port *port,
const struct ktermios *old_termios);
static int mct_u232_break_ctl(struct tty_struct *tty, int break_state);
static int mct_u232_tiocmget(struct tty_struct *tty);
static int mct_u232_tiocmset(struct tty_struct *tty,
unsigned int set, unsigned int clear);
static void mct_u232_throttle(struct tty_struct *tty);
static void mct_u232_unthrottle(struct tty_struct *tty);
/*
* All of the device info needed for the MCT USB-RS232 converter.
*/
static const struct usb_device_id id_table[] = {
{ USB_DEVICE(MCT_U232_VID, MCT_U232_PID) },
{ USB_DEVICE(MCT_U232_VID, MCT_U232_SITECOM_PID) },
{ USB_DEVICE(MCT_U232_VID, MCT_U232_DU_H3SP_PID) },
{ USB_DEVICE(MCT_U232_BELKIN_F5U109_VID, MCT_U232_BELKIN_F5U109_PID) },
{ } /* Terminating entry */
};
MODULE_DEVICE_TABLE(usb, id_table);
static struct usb_serial_driver mct_u232_device = {
.driver = {
.owner = THIS_MODULE,
.name = "mct_u232",
},
.description = "MCT U232",
.id_table = id_table,
.num_ports = 1,
.open = mct_u232_open,
.close = mct_u232_close,
.dtr_rts = mct_u232_dtr_rts,
.throttle = mct_u232_throttle,
.unthrottle = mct_u232_unthrottle,
.read_int_callback = mct_u232_read_int_callback,
.set_termios = mct_u232_set_termios,
.break_ctl = mct_u232_break_ctl,
.tiocmget = mct_u232_tiocmget,
.tiocmset = mct_u232_tiocmset,
.tiocmiwait = usb_serial_generic_tiocmiwait,
.port_probe = mct_u232_port_probe,
.port_remove = mct_u232_port_remove,
.get_icount = usb_serial_generic_get_icount,
};
static struct usb_serial_driver * const serial_drivers[] = {
&mct_u232_device, NULL
};
struct mct_u232_private {
struct urb *read_urb;
spinlock_t lock;
unsigned int control_state; /* Modem Line Setting (TIOCM) */
unsigned char last_lcr; /* Line Control Register */
unsigned char last_lsr; /* Line Status Register */
unsigned char last_msr; /* Modem Status Register */
unsigned int rx_flags; /* Throttling flags */
};
#define THROTTLED 0x01
/*
* Handle vendor specific USB requests
*/
#define WDR_TIMEOUT 5000 /* default urb timeout */
/*
 * Later 2.6.0-test kernels have new baud rates like B230400 which
* we do not know how to support. We ignore them for the moment.
*/
static int mct_u232_calculate_baud_rate(struct usb_serial *serial,
speed_t value, speed_t *result)
{
*result = value;
if (le16_to_cpu(serial->dev->descriptor.idProduct) == MCT_U232_SITECOM_PID
|| le16_to_cpu(serial->dev->descriptor.idProduct) == MCT_U232_BELKIN_F5U109_PID) {
switch (value) {
case 300:
return 0x01;
case 600:
return 0x02; /* this one not tested */
case 1200:
return 0x03;
case 2400:
return 0x04;
case 4800:
return 0x06;
case 9600:
return 0x08;
case 19200:
return 0x09;
case 38400:
return 0x0a;
case 57600:
return 0x0b;
case 115200:
return 0x0c;
default:
*result = 9600;
return 0x08;
}
} else {
/* FIXME: Can we use any divider - should we do
divider = 115200/value;
real baud = 115200/divider */
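		/* e.g. 9600 baud gives a divisor of 115200 / 9600 = 12 */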
switch (value) {
case 300: break;
case 600: break;
case 1200: break;
case 2400: break;
case 4800: break;
case 9600: break;
case 19200: break;
case 38400: break;
case 57600: break;
case 115200: break;
default:
value = 9600;
*result = 9600;
}
return 115200/value;
}
}
static int mct_u232_set_baud_rate(struct tty_struct *tty,
struct usb_serial *serial, struct usb_serial_port *port, speed_t value)
{
unsigned int divisor;
int rc;
unsigned char *buf;
unsigned char cts_enable_byte = 0;
speed_t speed;
buf = kmalloc(MCT_U232_MAX_SIZE, GFP_KERNEL);
if (buf == NULL)
return -ENOMEM;
divisor = mct_u232_calculate_baud_rate(serial, value, &speed);
put_unaligned_le32(divisor, buf);
rc = usb_control_msg(serial->dev, usb_sndctrlpipe(serial->dev, 0),
MCT_U232_SET_BAUD_RATE_REQUEST,
MCT_U232_SET_REQUEST_TYPE,
0, 0, buf, MCT_U232_SET_BAUD_RATE_SIZE,
WDR_TIMEOUT);
	if (rc < 0) /* FIXME: what speed actually results in this case? */
dev_err(&port->dev, "Set BAUD RATE %d failed (error = %d)\n",
value, rc);
else
tty_encode_baud_rate(tty, speed, speed);
dev_dbg(&port->dev, "set_baud_rate: value: 0x%x, divisor: 0x%x\n", value, divisor);
/* Mimic the MCT-supplied Windows driver (version 1.21P.0104), which
always sends two extra USB 'device request' messages after the
'baud rate change' message. The actual functionality of the
request codes in these messages is not fully understood but these
particular codes are never seen in any operation besides a baud
rate change. Both of these messages send a single byte of data.
In the first message, the value of this byte is always zero.
The second message has been determined experimentally to control
whether data will be transmitted to a device which is not asserting
the 'CTS' signal. If the second message's data byte is zero, data
will be transmitted even if 'CTS' is not asserted (i.e. no hardware
   flow control). If the second message's data byte is nonzero (a
value of 1 is used by this driver), data will not be transmitted to
a device which is not asserting 'CTS'.
*/
buf[0] = 0;
rc = usb_control_msg(serial->dev, usb_sndctrlpipe(serial->dev, 0),
MCT_U232_SET_UNKNOWN1_REQUEST,
MCT_U232_SET_REQUEST_TYPE,
0, 0, buf, MCT_U232_SET_UNKNOWN1_SIZE,
WDR_TIMEOUT);
if (rc < 0)
dev_err(&port->dev, "Sending USB device request code %d "
"failed (error = %d)\n", MCT_U232_SET_UNKNOWN1_REQUEST,
rc);
if (port && C_CRTSCTS(tty))
cts_enable_byte = 1;
dev_dbg(&port->dev, "set_baud_rate: send second control message, data = %02X\n",
cts_enable_byte);
buf[0] = cts_enable_byte;
rc = usb_control_msg(serial->dev, usb_sndctrlpipe(serial->dev, 0),
MCT_U232_SET_CTS_REQUEST,
MCT_U232_SET_REQUEST_TYPE,
0, 0, buf, MCT_U232_SET_CTS_SIZE,
WDR_TIMEOUT);
if (rc < 0)
dev_err(&port->dev, "Sending USB device request code %d "
"failed (error = %d)\n", MCT_U232_SET_CTS_REQUEST, rc);
kfree(buf);
return rc;
} /* mct_u232_set_baud_rate */
static int mct_u232_set_line_ctrl(struct usb_serial_port *port,
unsigned char lcr)
{
int rc;
unsigned char *buf;
buf = kmalloc(MCT_U232_MAX_SIZE, GFP_KERNEL);
if (buf == NULL)
return -ENOMEM;
buf[0] = lcr;
rc = usb_control_msg(port->serial->dev, usb_sndctrlpipe(port->serial->dev, 0),
MCT_U232_SET_LINE_CTRL_REQUEST,
MCT_U232_SET_REQUEST_TYPE,
0, 0, buf, MCT_U232_SET_LINE_CTRL_SIZE,
WDR_TIMEOUT);
if (rc < 0)
dev_err(&port->dev, "Set LINE CTRL 0x%x failed (error = %d)\n", lcr, rc);
dev_dbg(&port->dev, "set_line_ctrl: 0x%x\n", lcr);
kfree(buf);
return rc;
} /* mct_u232_set_line_ctrl */
static int mct_u232_set_modem_ctrl(struct usb_serial_port *port,
unsigned int control_state)
{
int rc;
unsigned char mcr;
unsigned char *buf;
buf = kmalloc(MCT_U232_MAX_SIZE, GFP_KERNEL);
if (buf == NULL)
return -ENOMEM;
mcr = MCT_U232_MCR_NONE;
if (control_state & TIOCM_DTR)
mcr |= MCT_U232_MCR_DTR;
if (control_state & TIOCM_RTS)
mcr |= MCT_U232_MCR_RTS;
buf[0] = mcr;
rc = usb_control_msg(port->serial->dev, usb_sndctrlpipe(port->serial->dev, 0),
MCT_U232_SET_MODEM_CTRL_REQUEST,
MCT_U232_SET_REQUEST_TYPE,
0, 0, buf, MCT_U232_SET_MODEM_CTRL_SIZE,
WDR_TIMEOUT);
kfree(buf);
dev_dbg(&port->dev, "set_modem_ctrl: state=0x%x ==> mcr=0x%x\n", control_state, mcr);
if (rc < 0) {
dev_err(&port->dev, "Set MODEM CTRL 0x%x failed (error = %d)\n", mcr, rc);
return rc;
}
return 0;
} /* mct_u232_set_modem_ctrl */
static int mct_u232_get_modem_stat(struct usb_serial_port *port,
unsigned char *msr)
{
int rc;
unsigned char *buf;
buf = kmalloc(MCT_U232_MAX_SIZE, GFP_KERNEL);
if (buf == NULL) {
*msr = 0;
return -ENOMEM;
}
rc = usb_control_msg(port->serial->dev, usb_rcvctrlpipe(port->serial->dev, 0),
MCT_U232_GET_MODEM_STAT_REQUEST,
MCT_U232_GET_REQUEST_TYPE,
0, 0, buf, MCT_U232_GET_MODEM_STAT_SIZE,
WDR_TIMEOUT);
if (rc < MCT_U232_GET_MODEM_STAT_SIZE) {
dev_err(&port->dev, "Get MODEM STATus failed (error = %d)\n", rc);
if (rc >= 0)
rc = -EIO;
*msr = 0;
} else {
*msr = buf[0];
}
dev_dbg(&port->dev, "get_modem_stat: 0x%x\n", *msr);
kfree(buf);
return rc;
} /* mct_u232_get_modem_stat */
static void mct_u232_msr_to_icount(struct async_icount *icount,
unsigned char msr)
{
/* Translate Control Line states */
if (msr & MCT_U232_MSR_DDSR)
icount->dsr++;
if (msr & MCT_U232_MSR_DCTS)
icount->cts++;
if (msr & MCT_U232_MSR_DRI)
icount->rng++;
if (msr & MCT_U232_MSR_DCD)
icount->dcd++;
} /* mct_u232_msr_to_icount */
static void mct_u232_msr_to_state(struct usb_serial_port *port,
unsigned int *control_state, unsigned char msr)
{
/* Translate Control Line states */
if (msr & MCT_U232_MSR_DSR)
*control_state |= TIOCM_DSR;
else
*control_state &= ~TIOCM_DSR;
if (msr & MCT_U232_MSR_CTS)
*control_state |= TIOCM_CTS;
else
*control_state &= ~TIOCM_CTS;
if (msr & MCT_U232_MSR_RI)
*control_state |= TIOCM_RI;
else
*control_state &= ~TIOCM_RI;
if (msr & MCT_U232_MSR_CD)
*control_state |= TIOCM_CD;
else
*control_state &= ~TIOCM_CD;
dev_dbg(&port->dev, "msr_to_state: msr=0x%x ==> state=0x%x\n", msr, *control_state);
} /* mct_u232_msr_to_state */
/*
* Driver's tty interface functions
*/
static int mct_u232_port_probe(struct usb_serial_port *port)
{
struct usb_serial *serial = port->serial;
struct mct_u232_private *priv;
/* check first to simplify error handling */
if (!serial->port[1] || !serial->port[1]->interrupt_in_urb) {
dev_err(&port->dev, "expected endpoint missing\n");
return -ENODEV;
}
priv = kzalloc(sizeof(*priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
/* Use second interrupt-in endpoint for reading. */
priv->read_urb = serial->port[1]->interrupt_in_urb;
priv->read_urb->context = port;
spin_lock_init(&priv->lock);
usb_set_serial_port_data(port, priv);
return 0;
}
static void mct_u232_port_remove(struct usb_serial_port *port)
{
struct mct_u232_private *priv;
priv = usb_get_serial_port_data(port);
kfree(priv);
}
static int mct_u232_open(struct tty_struct *tty, struct usb_serial_port *port)
{
struct usb_serial *serial = port->serial;
struct mct_u232_private *priv = usb_get_serial_port_data(port);
int retval = 0;
unsigned int control_state;
unsigned long flags;
unsigned char last_lcr;
unsigned char last_msr;
/* Compensate for a hardware bug: although the Sitecom U232-P25
* device reports a maximum output packet size of 32 bytes,
* it seems to be able to accept only 16 bytes (and that's what
* SniffUSB says too...)
*/
if (le16_to_cpu(serial->dev->descriptor.idProduct)
== MCT_U232_SITECOM_PID)
port->bulk_out_size = 16;
/* Do a defined restart: the normal serial device seems to
* always turn on DTR and RTS here, so do the same. I'm not
* sure if this is really necessary. But it should not harm
* either.
*/
spin_lock_irqsave(&priv->lock, flags);
if (tty && C_BAUD(tty))
priv->control_state = TIOCM_DTR | TIOCM_RTS;
else
priv->control_state = 0;
priv->last_lcr = (MCT_U232_DATA_BITS_8 |
MCT_U232_PARITY_NONE |
MCT_U232_STOP_BITS_1);
control_state = priv->control_state;
last_lcr = priv->last_lcr;
spin_unlock_irqrestore(&priv->lock, flags);
mct_u232_set_modem_ctrl(port, control_state);
mct_u232_set_line_ctrl(port, last_lcr);
/* Read modem status and update control state */
mct_u232_get_modem_stat(port, &last_msr);
spin_lock_irqsave(&priv->lock, flags);
priv->last_msr = last_msr;
mct_u232_msr_to_state(port, &priv->control_state, priv->last_msr);
spin_unlock_irqrestore(&priv->lock, flags);
retval = usb_submit_urb(priv->read_urb, GFP_KERNEL);
if (retval) {
dev_err(&port->dev,
"usb_submit_urb(read) failed pipe 0x%x err %d\n",
port->read_urb->pipe, retval);
goto error;
}
retval = usb_submit_urb(port->interrupt_in_urb, GFP_KERNEL);
if (retval) {
usb_kill_urb(priv->read_urb);
dev_err(&port->dev,
"usb_submit_urb(read int) failed pipe 0x%x err %d",
port->interrupt_in_urb->pipe, retval);
goto error;
}
return 0;
error:
return retval;
} /* mct_u232_open */
static void mct_u232_dtr_rts(struct usb_serial_port *port, int on)
{
unsigned int control_state;
struct mct_u232_private *priv = usb_get_serial_port_data(port);
spin_lock_irq(&priv->lock);
if (on)
priv->control_state |= TIOCM_DTR | TIOCM_RTS;
else
priv->control_state &= ~(TIOCM_DTR | TIOCM_RTS);
control_state = priv->control_state;
spin_unlock_irq(&priv->lock);
mct_u232_set_modem_ctrl(port, control_state);
}
static void mct_u232_close(struct usb_serial_port *port)
{
struct mct_u232_private *priv = usb_get_serial_port_data(port);
usb_kill_urb(priv->read_urb);
usb_kill_urb(port->interrupt_in_urb);
usb_serial_generic_close(port);
} /* mct_u232_close */
static void mct_u232_read_int_callback(struct urb *urb)
{
struct usb_serial_port *port = urb->context;
struct mct_u232_private *priv = usb_get_serial_port_data(port);
unsigned char *data = urb->transfer_buffer;
int retval;
int status = urb->status;
unsigned long flags;
switch (status) {
case 0:
/* success */
break;
case -ECONNRESET:
case -ENOENT:
case -ESHUTDOWN:
/* this urb is terminated, clean up */
dev_dbg(&port->dev, "%s - urb shutting down with status: %d\n",
__func__, status);
return;
default:
dev_dbg(&port->dev, "%s - nonzero urb status received: %d\n",
__func__, status);
goto exit;
}
usb_serial_debug_data(&port->dev, __func__, urb->actual_length, data);
/*
 * Workaround: handle the 'usual' bulk-in pipe here
*/
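	/*
	 * Data arrives on the second port's interrupt-in endpoint, whose
	 * transfer buffer is larger than the two status bytes delivered by
	 * the real status endpoint handled further below.
	 */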
if (urb->transfer_buffer_length > 2) {
if (urb->actual_length) {
tty_insert_flip_string(&port->port, data,
urb->actual_length);
tty_flip_buffer_push(&port->port);
}
goto exit;
}
/*
* The interrupt-in pipe signals exceptional conditions (modem line
* signal changes and errors). data[0] holds MSR, data[1] holds LSR.
*/
spin_lock_irqsave(&priv->lock, flags);
priv->last_msr = data[MCT_U232_MSR_INDEX];
/* Record Control Line states */
mct_u232_msr_to_state(port, &priv->control_state, priv->last_msr);
mct_u232_msr_to_icount(&port->icount, priv->last_msr);
#if 0
/* Not yet handled. See belkin_sa.c for further information */
/* Now to report any errors */
priv->last_lsr = data[MCT_U232_LSR_INDEX];
/*
* fill in the flip buffer here, but I do not know the relation
* to the current/next receive buffer or characters. I need
 * to look into this before committing any code.
*/
if (priv->last_lsr & MCT_U232_LSR_ERR) {
tty = tty_port_tty_get(&port->port);
/* Overrun Error */
if (priv->last_lsr & MCT_U232_LSR_OE) {
}
/* Parity Error */
if (priv->last_lsr & MCT_U232_LSR_PE) {
}
/* Framing Error */
if (priv->last_lsr & MCT_U232_LSR_FE) {
}
/* Break Indicator */
if (priv->last_lsr & MCT_U232_LSR_BI) {
}
tty_kref_put(tty);
}
#endif
wake_up_interruptible(&port->port.delta_msr_wait);
spin_unlock_irqrestore(&priv->lock, flags);
exit:
retval = usb_submit_urb(urb, GFP_ATOMIC);
if (retval)
dev_err(&port->dev,
"%s - usb_submit_urb failed with result %d\n",
__func__, retval);
} /* mct_u232_read_int_callback */
static void mct_u232_set_termios(struct tty_struct *tty,
struct usb_serial_port *port,
const struct ktermios *old_termios)
{
struct usb_serial *serial = port->serial;
struct mct_u232_private *priv = usb_get_serial_port_data(port);
struct ktermios *termios = &tty->termios;
unsigned int cflag = termios->c_cflag;
unsigned int old_cflag = old_termios->c_cflag;
unsigned long flags;
unsigned int control_state;
unsigned char last_lcr;
/* get a local copy of the current port settings */
spin_lock_irqsave(&priv->lock, flags);
control_state = priv->control_state;
spin_unlock_irqrestore(&priv->lock, flags);
last_lcr = 0;
/*
* Update baud rate.
* Do not attempt to cache old rates and skip settings,
* disconnects screw such tricks up completely.
* Premature optimization is the root of all evil.
*/
/* reassert DTR and RTS on transition from B0 */
if ((old_cflag & CBAUD) == B0) {
dev_dbg(&port->dev, "%s: baud was B0\n", __func__);
control_state |= TIOCM_DTR | TIOCM_RTS;
mct_u232_set_modem_ctrl(port, control_state);
}
mct_u232_set_baud_rate(tty, serial, port, tty_get_baud_rate(tty));
if ((cflag & CBAUD) == B0) {
dev_dbg(&port->dev, "%s: baud is B0\n", __func__);
/* Drop RTS and DTR */
control_state &= ~(TIOCM_DTR | TIOCM_RTS);
mct_u232_set_modem_ctrl(port, control_state);
}
/*
* Update line control register (LCR)
*/
/* set the parity */
if (cflag & PARENB)
last_lcr |= (cflag & PARODD) ?
MCT_U232_PARITY_ODD : MCT_U232_PARITY_EVEN;
else
last_lcr |= MCT_U232_PARITY_NONE;
/* set the number of data bits */
switch (cflag & CSIZE) {
case CS5:
last_lcr |= MCT_U232_DATA_BITS_5; break;
case CS6:
last_lcr |= MCT_U232_DATA_BITS_6; break;
case CS7:
last_lcr |= MCT_U232_DATA_BITS_7; break;
case CS8:
last_lcr |= MCT_U232_DATA_BITS_8; break;
default:
dev_err(&port->dev,
"CSIZE was not CS5-CS8, using default of 8\n");
last_lcr |= MCT_U232_DATA_BITS_8;
break;
}
termios->c_cflag &= ~CMSPAR;
/* set the number of stop bits */
last_lcr |= (cflag & CSTOPB) ?
MCT_U232_STOP_BITS_2 : MCT_U232_STOP_BITS_1;
mct_u232_set_line_ctrl(port, last_lcr);
/* save off the modified port settings */
spin_lock_irqsave(&priv->lock, flags);
priv->control_state = control_state;
priv->last_lcr = last_lcr;
spin_unlock_irqrestore(&priv->lock, flags);
} /* mct_u232_set_termios */
static int mct_u232_break_ctl(struct tty_struct *tty, int break_state)
{
struct usb_serial_port *port = tty->driver_data;
struct mct_u232_private *priv = usb_get_serial_port_data(port);
unsigned char lcr;
unsigned long flags;
spin_lock_irqsave(&priv->lock, flags);
lcr = priv->last_lcr;
if (break_state)
lcr |= MCT_U232_SET_BREAK;
spin_unlock_irqrestore(&priv->lock, flags);
return mct_u232_set_line_ctrl(port, lcr);
} /* mct_u232_break_ctl */
static int mct_u232_tiocmget(struct tty_struct *tty)
{
struct usb_serial_port *port = tty->driver_data;
struct mct_u232_private *priv = usb_get_serial_port_data(port);
unsigned int control_state;
unsigned long flags;
spin_lock_irqsave(&priv->lock, flags);
control_state = priv->control_state;
spin_unlock_irqrestore(&priv->lock, flags);
return control_state;
}
static int mct_u232_tiocmset(struct tty_struct *tty,
unsigned int set, unsigned int clear)
{
struct usb_serial_port *port = tty->driver_data;
struct mct_u232_private *priv = usb_get_serial_port_data(port);
unsigned int control_state;
unsigned long flags;
spin_lock_irqsave(&priv->lock, flags);
control_state = priv->control_state;
if (set & TIOCM_RTS)
control_state |= TIOCM_RTS;
if (set & TIOCM_DTR)
control_state |= TIOCM_DTR;
if (clear & TIOCM_RTS)
control_state &= ~TIOCM_RTS;
if (clear & TIOCM_DTR)
control_state &= ~TIOCM_DTR;
priv->control_state = control_state;
spin_unlock_irqrestore(&priv->lock, flags);
return mct_u232_set_modem_ctrl(port, control_state);
}
static void mct_u232_throttle(struct tty_struct *tty)
{
struct usb_serial_port *port = tty->driver_data;
struct mct_u232_private *priv = usb_get_serial_port_data(port);
unsigned int control_state;
spin_lock_irq(&priv->lock);
priv->rx_flags |= THROTTLED;
if (C_CRTSCTS(tty)) {
priv->control_state &= ~TIOCM_RTS;
control_state = priv->control_state;
spin_unlock_irq(&priv->lock);
mct_u232_set_modem_ctrl(port, control_state);
} else {
spin_unlock_irq(&priv->lock);
}
}
static void mct_u232_unthrottle(struct tty_struct *tty)
{
struct usb_serial_port *port = tty->driver_data;
struct mct_u232_private *priv = usb_get_serial_port_data(port);
unsigned int control_state;
spin_lock_irq(&priv->lock);
if ((priv->rx_flags & THROTTLED) && C_CRTSCTS(tty)) {
priv->rx_flags &= ~THROTTLED;
priv->control_state |= TIOCM_RTS;
control_state = priv->control_state;
spin_unlock_irq(&priv->lock);
mct_u232_set_modem_ctrl(port, control_state);
} else {
spin_unlock_irq(&priv->lock);
}
}
module_usb_serial_driver(serial_drivers, id_table);
MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL");
| linux-master | drivers/usb/serial/mct_u232.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* Digi AccelePort USB-4 and USB-2 Serial Converters
*
* Copyright 2000 by Digi International
*
* Shamelessly based on Brian Warner's keyspan_pda.c and Greg Kroah-Hartman's
* usb-serial driver.
*
* Peter Berger ([email protected])
* Al Borchers ([email protected])
*/
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/tty.h>
#include <linux/tty_driver.h>
#include <linux/tty_flip.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/uaccess.h>
#include <linux/usb.h>
#include <linux/wait.h>
#include <linux/sched/signal.h>
#include <linux/usb/serial.h>
/* Defines */
#define DRIVER_AUTHOR "Peter Berger <[email protected]>, Al Borchers <[email protected]>"
#define DRIVER_DESC "Digi AccelePort USB-2/USB-4 Serial Converter driver"
/* port output buffer length -- must be <= transfer buffer length - 2 */
/* so we can be sure to send the full buffer in one urb */
#define DIGI_OUT_BUF_SIZE 8
/* port input buffer length -- must be >= transfer buffer length - 3 */
/* so we can be sure to hold at least one full buffer from one urb */
#define DIGI_IN_BUF_SIZE 64
/* retry timeout while sleeping */
#define DIGI_RETRY_TIMEOUT (HZ/10)
/* timeout while waiting for tty output to drain in close */
/* this delay is used twice in close, so the total delay could */
/* be twice this value */
#define DIGI_CLOSE_TIMEOUT (5*HZ)
/* AccelePort USB Defines */
/* ids */
#define DIGI_VENDOR_ID 0x05c5
#define DIGI_2_ID 0x0002 /* USB-2 */
#define DIGI_4_ID 0x0004 /* USB-4 */
/* commands
* "INB": can be used on the in-band endpoint
* "OOB": can be used on the out-of-band endpoint
*/
#define DIGI_CMD_SET_BAUD_RATE 0 /* INB, OOB */
#define DIGI_CMD_SET_WORD_SIZE 1 /* INB, OOB */
#define DIGI_CMD_SET_PARITY 2 /* INB, OOB */
#define DIGI_CMD_SET_STOP_BITS 3 /* INB, OOB */
#define DIGI_CMD_SET_INPUT_FLOW_CONTROL 4 /* INB, OOB */
#define DIGI_CMD_SET_OUTPUT_FLOW_CONTROL 5 /* INB, OOB */
#define DIGI_CMD_SET_DTR_SIGNAL 6 /* INB, OOB */
#define DIGI_CMD_SET_RTS_SIGNAL 7 /* INB, OOB */
#define DIGI_CMD_READ_INPUT_SIGNALS 8 /* OOB */
#define DIGI_CMD_IFLUSH_FIFO 9 /* OOB */
#define DIGI_CMD_RECEIVE_ENABLE 10 /* INB, OOB */
#define DIGI_CMD_BREAK_CONTROL 11 /* INB, OOB */
#define DIGI_CMD_LOCAL_LOOPBACK 12 /* INB, OOB */
#define DIGI_CMD_TRANSMIT_IDLE 13 /* INB, OOB */
#define DIGI_CMD_READ_UART_REGISTER 14 /* OOB */
#define DIGI_CMD_WRITE_UART_REGISTER 15 /* INB, OOB */
#define DIGI_CMD_AND_UART_REGISTER 16 /* INB, OOB */
#define DIGI_CMD_OR_UART_REGISTER 17 /* INB, OOB */
#define DIGI_CMD_SEND_DATA 18 /* INB */
#define DIGI_CMD_RECEIVE_DATA 19 /* INB */
#define DIGI_CMD_RECEIVE_DISABLE 20 /* INB */
#define DIGI_CMD_GET_PORT_TYPE 21 /* OOB */
/* baud rates */
#define DIGI_BAUD_50 0
#define DIGI_BAUD_75 1
#define DIGI_BAUD_110 2
#define DIGI_BAUD_150 3
#define DIGI_BAUD_200 4
#define DIGI_BAUD_300 5
#define DIGI_BAUD_600 6
#define DIGI_BAUD_1200 7
#define DIGI_BAUD_1800 8
#define DIGI_BAUD_2400 9
#define DIGI_BAUD_4800 10
#define DIGI_BAUD_7200 11
#define DIGI_BAUD_9600 12
#define DIGI_BAUD_14400 13
#define DIGI_BAUD_19200 14
#define DIGI_BAUD_28800 15
#define DIGI_BAUD_38400 16
#define DIGI_BAUD_57600 17
#define DIGI_BAUD_76800 18
#define DIGI_BAUD_115200 19
#define DIGI_BAUD_153600 20
#define DIGI_BAUD_230400 21
#define DIGI_BAUD_460800 22
/* arguments */
#define DIGI_WORD_SIZE_5 0
#define DIGI_WORD_SIZE_6 1
#define DIGI_WORD_SIZE_7 2
#define DIGI_WORD_SIZE_8 3
#define DIGI_PARITY_NONE 0
#define DIGI_PARITY_ODD 1
#define DIGI_PARITY_EVEN 2
#define DIGI_PARITY_MARK 3
#define DIGI_PARITY_SPACE 4
#define DIGI_STOP_BITS_1 0
#define DIGI_STOP_BITS_2 1
#define DIGI_INPUT_FLOW_CONTROL_XON_XOFF 1
#define DIGI_INPUT_FLOW_CONTROL_RTS 2
#define DIGI_INPUT_FLOW_CONTROL_DTR 4
#define DIGI_OUTPUT_FLOW_CONTROL_XON_XOFF 1
#define DIGI_OUTPUT_FLOW_CONTROL_CTS 2
#define DIGI_OUTPUT_FLOW_CONTROL_DSR 4
#define DIGI_DTR_INACTIVE 0
#define DIGI_DTR_ACTIVE 1
#define DIGI_DTR_INPUT_FLOW_CONTROL 2
#define DIGI_RTS_INACTIVE 0
#define DIGI_RTS_ACTIVE 1
#define DIGI_RTS_INPUT_FLOW_CONTROL 2
#define DIGI_RTS_TOGGLE 3
#define DIGI_FLUSH_TX 1
#define DIGI_FLUSH_RX 2
#define DIGI_RESUME_TX 4 /* clears xoff condition */
#define DIGI_TRANSMIT_NOT_IDLE 0
#define DIGI_TRANSMIT_IDLE 1
#define DIGI_DISABLE 0
#define DIGI_ENABLE 1
#define DIGI_DEASSERT 0
#define DIGI_ASSERT 1
/* in band status codes */
#define DIGI_OVERRUN_ERROR 4
#define DIGI_PARITY_ERROR 8
#define DIGI_FRAMING_ERROR 16
#define DIGI_BREAK_ERROR 32
/* out of band status */
#define DIGI_NO_ERROR 0
#define DIGI_BAD_FIRST_PARAMETER 1
#define DIGI_BAD_SECOND_PARAMETER 2
#define DIGI_INVALID_LINE 3
#define DIGI_INVALID_OPCODE 4
/* input signals */
#define DIGI_READ_INPUT_SIGNALS_SLOT 1
#define DIGI_READ_INPUT_SIGNALS_ERR 2
#define DIGI_READ_INPUT_SIGNALS_BUSY 4
#define DIGI_READ_INPUT_SIGNALS_PE 8
#define DIGI_READ_INPUT_SIGNALS_CTS 16
#define DIGI_READ_INPUT_SIGNALS_DSR 32
#define DIGI_READ_INPUT_SIGNALS_RI 64
#define DIGI_READ_INPUT_SIGNALS_DCD 128
/* Structures */
struct digi_serial {
spinlock_t ds_serial_lock;
struct usb_serial_port *ds_oob_port; /* out-of-band port */
int ds_oob_port_num; /* index of out-of-band port */
int ds_device_started;
};
struct digi_port {
spinlock_t dp_port_lock;
int dp_port_num;
int dp_out_buf_len;
unsigned char dp_out_buf[DIGI_OUT_BUF_SIZE];
int dp_write_urb_in_use;
unsigned int dp_modem_signals;
int dp_transmit_idle;
wait_queue_head_t dp_transmit_idle_wait;
int dp_throttled;
int dp_throttle_restart;
wait_queue_head_t dp_flush_wait;
wait_queue_head_t dp_close_wait; /* wait queue for close */
wait_queue_head_t write_wait;
struct usb_serial_port *dp_port;
};
/* Local Function Declarations */
static int digi_write_oob_command(struct usb_serial_port *port,
unsigned char *buf, int count, int interruptible);
static int digi_write_inb_command(struct usb_serial_port *port,
unsigned char *buf, int count, unsigned long timeout);
static int digi_set_modem_signals(struct usb_serial_port *port,
unsigned int modem_signals, int interruptible);
static int digi_transmit_idle(struct usb_serial_port *port,
unsigned long timeout);
static void digi_rx_throttle(struct tty_struct *tty);
static void digi_rx_unthrottle(struct tty_struct *tty);
static void digi_set_termios(struct tty_struct *tty,
struct usb_serial_port *port,
const struct ktermios *old_termios);
static int digi_break_ctl(struct tty_struct *tty, int break_state);
static int digi_tiocmget(struct tty_struct *tty);
static int digi_tiocmset(struct tty_struct *tty, unsigned int set,
unsigned int clear);
static int digi_write(struct tty_struct *tty, struct usb_serial_port *port,
const unsigned char *buf, int count);
static void digi_write_bulk_callback(struct urb *urb);
static unsigned int digi_write_room(struct tty_struct *tty);
static unsigned int digi_chars_in_buffer(struct tty_struct *tty);
static int digi_open(struct tty_struct *tty, struct usb_serial_port *port);
static void digi_close(struct usb_serial_port *port);
static void digi_dtr_rts(struct usb_serial_port *port, int on);
static int digi_startup_device(struct usb_serial *serial);
static int digi_startup(struct usb_serial *serial);
static void digi_disconnect(struct usb_serial *serial);
static void digi_release(struct usb_serial *serial);
static int digi_port_probe(struct usb_serial_port *port);
static void digi_port_remove(struct usb_serial_port *port);
static void digi_read_bulk_callback(struct urb *urb);
static int digi_read_inb_callback(struct urb *urb);
static int digi_read_oob_callback(struct urb *urb);
static const struct usb_device_id id_table_combined[] = {
{ USB_DEVICE(DIGI_VENDOR_ID, DIGI_2_ID) },
{ USB_DEVICE(DIGI_VENDOR_ID, DIGI_4_ID) },
{ } /* Terminating entry */
};
static const struct usb_device_id id_table_2[] = {
{ USB_DEVICE(DIGI_VENDOR_ID, DIGI_2_ID) },
{ } /* Terminating entry */
};
static const struct usb_device_id id_table_4[] = {
{ USB_DEVICE(DIGI_VENDOR_ID, DIGI_4_ID) },
{ } /* Terminating entry */
};
MODULE_DEVICE_TABLE(usb, id_table_combined);
/* device info needed for the Digi serial converter */
static struct usb_serial_driver digi_acceleport_2_device = {
.driver = {
.owner = THIS_MODULE,
.name = "digi_2",
},
.description = "Digi 2 port USB adapter",
.id_table = id_table_2,
.num_ports = 3,
.num_bulk_in = 4,
.num_bulk_out = 4,
.open = digi_open,
.close = digi_close,
.dtr_rts = digi_dtr_rts,
.write = digi_write,
.write_room = digi_write_room,
.write_bulk_callback = digi_write_bulk_callback,
.read_bulk_callback = digi_read_bulk_callback,
.chars_in_buffer = digi_chars_in_buffer,
.throttle = digi_rx_throttle,
.unthrottle = digi_rx_unthrottle,
.set_termios = digi_set_termios,
.break_ctl = digi_break_ctl,
.tiocmget = digi_tiocmget,
.tiocmset = digi_tiocmset,
.attach = digi_startup,
.disconnect = digi_disconnect,
.release = digi_release,
.port_probe = digi_port_probe,
.port_remove = digi_port_remove,
};
static struct usb_serial_driver digi_acceleport_4_device = {
.driver = {
.owner = THIS_MODULE,
.name = "digi_4",
},
.description = "Digi 4 port USB adapter",
.id_table = id_table_4,
.num_ports = 4,
.num_bulk_in = 5,
.num_bulk_out = 5,
.open = digi_open,
.close = digi_close,
.write = digi_write,
.write_room = digi_write_room,
.write_bulk_callback = digi_write_bulk_callback,
.read_bulk_callback = digi_read_bulk_callback,
.chars_in_buffer = digi_chars_in_buffer,
.throttle = digi_rx_throttle,
.unthrottle = digi_rx_unthrottle,
.set_termios = digi_set_termios,
.break_ctl = digi_break_ctl,
.tiocmget = digi_tiocmget,
.tiocmset = digi_tiocmset,
.attach = digi_startup,
.disconnect = digi_disconnect,
.release = digi_release,
.port_probe = digi_port_probe,
.port_remove = digi_port_remove,
};
static struct usb_serial_driver * const serial_drivers[] = {
&digi_acceleport_2_device, &digi_acceleport_4_device, NULL
};
/* Functions */
/*
* Cond Wait Interruptible Timeout Irqrestore
*
* Do spin_unlock_irqrestore and interruptible_sleep_on_timeout
* so that wake ups are not lost if they occur between the unlock
* and the sleep. In other words, spin_unlock_irqrestore and
* interruptible_sleep_on_timeout are "atomic" with respect to
* wake ups. This is used to implement condition variables.
*
* interruptible_sleep_on_timeout is deprecated and has been replaced
* with the equivalent code.
*/
static long cond_wait_interruptible_timeout_irqrestore(
wait_queue_head_t *q, long timeout,
spinlock_t *lock, unsigned long flags)
__releases(lock)
{
DEFINE_WAIT(wait);
prepare_to_wait(q, &wait, TASK_INTERRUPTIBLE);
spin_unlock_irqrestore(lock, flags);
timeout = schedule_timeout(timeout);
finish_wait(q, &wait);
return timeout;
}
/*
* Digi Write OOB Command
*
* Write commands on the out of band port. Commands are 4
* bytes each, multiple commands can be sent at once, and
* no command will be split across USB packets. Returns 0
* if successful, -EINTR if interrupted while sleeping and
* the interruptible flag is true, or a negative error
* returned by usb_submit_urb.
*/
static int digi_write_oob_command(struct usb_serial_port *port,
unsigned char *buf, int count, int interruptible)
{
int ret = 0;
int len;
struct usb_serial_port *oob_port = (struct usb_serial_port *)((struct digi_serial *)(usb_get_serial_data(port->serial)))->ds_oob_port;
struct digi_port *oob_priv = usb_get_serial_port_data(oob_port);
unsigned long flags;
dev_dbg(&port->dev,
"digi_write_oob_command: TOP: port=%d, count=%d\n",
oob_priv->dp_port_num, count);
spin_lock_irqsave(&oob_priv->dp_port_lock, flags);
while (count > 0) {
while (oob_priv->dp_write_urb_in_use) {
cond_wait_interruptible_timeout_irqrestore(
&oob_priv->write_wait, DIGI_RETRY_TIMEOUT,
&oob_priv->dp_port_lock, flags);
if (interruptible && signal_pending(current))
return -EINTR;
spin_lock_irqsave(&oob_priv->dp_port_lock, flags);
}
/* len must be a multiple of 4, so commands are not split */
len = min(count, oob_port->bulk_out_size);
if (len > 4)
len &= ~3;
memcpy(oob_port->write_urb->transfer_buffer, buf, len);
oob_port->write_urb->transfer_buffer_length = len;
ret = usb_submit_urb(oob_port->write_urb, GFP_ATOMIC);
if (ret == 0) {
oob_priv->dp_write_urb_in_use = 1;
count -= len;
buf += len;
}
}
spin_unlock_irqrestore(&oob_priv->dp_port_lock, flags);
if (ret)
dev_err(&port->dev, "%s: usb_submit_urb failed, ret=%d\n",
__func__, ret);
return ret;
}
/*
* Digi Write In Band Command
*
* Write commands on the given port. Commands are 4
* bytes each, multiple commands can be sent at once, and
* no command will be split across USB packets. If timeout
* is non-zero, write in band command will return after
* waiting unsuccessfully for the URB status to clear for
* timeout ticks. Returns 0 if successful, -EINTR if interrupted
* while sleeping, or a negative error returned by usb_submit_urb.
*/
static int digi_write_inb_command(struct usb_serial_port *port,
unsigned char *buf, int count, unsigned long timeout)
{
int ret = 0;
int len;
struct digi_port *priv = usb_get_serial_port_data(port);
unsigned char *data = port->write_urb->transfer_buffer;
unsigned long flags;
dev_dbg(&port->dev, "digi_write_inb_command: TOP: port=%d, count=%d\n",
priv->dp_port_num, count);
if (timeout)
timeout += jiffies;
else
timeout = ULONG_MAX;
spin_lock_irqsave(&priv->dp_port_lock, flags);
while (count > 0 && ret == 0) {
while (priv->dp_write_urb_in_use &&
time_before(jiffies, timeout)) {
cond_wait_interruptible_timeout_irqrestore(
&priv->write_wait, DIGI_RETRY_TIMEOUT,
&priv->dp_port_lock, flags);
if (signal_pending(current))
return -EINTR;
spin_lock_irqsave(&priv->dp_port_lock, flags);
}
/* len must be a multiple of 4 and small enough to */
/* guarantee the write will send buffered data first, */
/* so commands are in order with data and not split */
len = min(count, port->bulk_out_size-2-priv->dp_out_buf_len);
if (len > 4)
len &= ~3;
/* write any buffered data first */
if (priv->dp_out_buf_len > 0) {
data[0] = DIGI_CMD_SEND_DATA;
data[1] = priv->dp_out_buf_len;
memcpy(data + 2, priv->dp_out_buf,
priv->dp_out_buf_len);
memcpy(data + 2 + priv->dp_out_buf_len, buf, len);
port->write_urb->transfer_buffer_length
= priv->dp_out_buf_len + 2 + len;
} else {
memcpy(data, buf, len);
port->write_urb->transfer_buffer_length = len;
}
ret = usb_submit_urb(port->write_urb, GFP_ATOMIC);
if (ret == 0) {
priv->dp_write_urb_in_use = 1;
priv->dp_out_buf_len = 0;
count -= len;
buf += len;
}
}
spin_unlock_irqrestore(&priv->dp_port_lock, flags);
if (ret)
dev_err(&port->dev,
"%s: usb_submit_urb failed, ret=%d, port=%d\n",
__func__, ret, priv->dp_port_num);
return ret;
}
/*
* Digi Set Modem Signals
*
* Sets or clears DTR and RTS on the port, according to the
* modem_signals argument. Use TIOCM_DTR and TIOCM_RTS flags
* for the modem_signals argument. Returns 0 if successful,
* -EINTR if interrupted while sleeping, or a non-zero error
* returned by usb_submit_urb.
*/
static int digi_set_modem_signals(struct usb_serial_port *port,
unsigned int modem_signals, int interruptible)
{
int ret;
struct digi_port *port_priv = usb_get_serial_port_data(port);
struct usb_serial_port *oob_port = (struct usb_serial_port *) ((struct digi_serial *)(usb_get_serial_data(port->serial)))->ds_oob_port;
struct digi_port *oob_priv = usb_get_serial_port_data(oob_port);
unsigned char *data = oob_port->write_urb->transfer_buffer;
unsigned long flags;
dev_dbg(&port->dev,
"digi_set_modem_signals: TOP: port=%d, modem_signals=0x%x\n",
port_priv->dp_port_num, modem_signals);
spin_lock_irqsave(&oob_priv->dp_port_lock, flags);
spin_lock(&port_priv->dp_port_lock);
while (oob_priv->dp_write_urb_in_use) {
spin_unlock(&port_priv->dp_port_lock);
cond_wait_interruptible_timeout_irqrestore(
&oob_priv->write_wait, DIGI_RETRY_TIMEOUT,
&oob_priv->dp_port_lock, flags);
if (interruptible && signal_pending(current))
return -EINTR;
spin_lock_irqsave(&oob_priv->dp_port_lock, flags);
spin_lock(&port_priv->dp_port_lock);
}
data[0] = DIGI_CMD_SET_DTR_SIGNAL;
data[1] = port_priv->dp_port_num;
data[2] = (modem_signals & TIOCM_DTR) ?
DIGI_DTR_ACTIVE : DIGI_DTR_INACTIVE;
data[3] = 0;
data[4] = DIGI_CMD_SET_RTS_SIGNAL;
data[5] = port_priv->dp_port_num;
data[6] = (modem_signals & TIOCM_RTS) ?
DIGI_RTS_ACTIVE : DIGI_RTS_INACTIVE;
data[7] = 0;
oob_port->write_urb->transfer_buffer_length = 8;
ret = usb_submit_urb(oob_port->write_urb, GFP_ATOMIC);
if (ret == 0) {
oob_priv->dp_write_urb_in_use = 1;
port_priv->dp_modem_signals &= ~(TIOCM_DTR | TIOCM_RTS);
port_priv->dp_modem_signals |=
modem_signals & (TIOCM_DTR | TIOCM_RTS);
}
spin_unlock(&port_priv->dp_port_lock);
spin_unlock_irqrestore(&oob_priv->dp_port_lock, flags);
if (ret)
dev_err(&port->dev, "%s: usb_submit_urb failed, ret=%d\n",
__func__, ret);
return ret;
}
/*
* Digi Transmit Idle
*
* Digi transmit idle waits, up to timeout ticks, for the transmitter
* to go idle. It returns 0 if successful or a negative error.
*
* There are race conditions here if more than one process is calling
* digi_transmit_idle on the same port at the same time. However, this
* is only called from close, and only one process can be in close on a
* port at a time, so it's OK.
*/
static int digi_transmit_idle(struct usb_serial_port *port,
unsigned long timeout)
{
int ret;
unsigned char buf[2];
struct digi_port *priv = usb_get_serial_port_data(port);
unsigned long flags;
spin_lock_irqsave(&priv->dp_port_lock, flags);
priv->dp_transmit_idle = 0;
spin_unlock_irqrestore(&priv->dp_port_lock, flags);
buf[0] = DIGI_CMD_TRANSMIT_IDLE;
buf[1] = 0;
timeout += jiffies;
ret = digi_write_inb_command(port, buf, 2, timeout - jiffies);
if (ret != 0)
return ret;
spin_lock_irqsave(&priv->dp_port_lock, flags);
while (time_before(jiffies, timeout) && !priv->dp_transmit_idle) {
cond_wait_interruptible_timeout_irqrestore(
&priv->dp_transmit_idle_wait, DIGI_RETRY_TIMEOUT,
&priv->dp_port_lock, flags);
if (signal_pending(current))
return -EINTR;
spin_lock_irqsave(&priv->dp_port_lock, flags);
}
priv->dp_transmit_idle = 0;
spin_unlock_irqrestore(&priv->dp_port_lock, flags);
return 0;
}
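/*
* Digi Rx Throttle
*
* Mark the port throttled so that digi_read_inb_callback stops
* resubmitting the read urb and records that the read chain must
* be restarted when the port is unthrottled.
*/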
static void digi_rx_throttle(struct tty_struct *tty)
{
unsigned long flags;
struct usb_serial_port *port = tty->driver_data;
struct digi_port *priv = usb_get_serial_port_data(port);
/* stop receiving characters by not resubmitting the read urb */
spin_lock_irqsave(&priv->dp_port_lock, flags);
priv->dp_throttled = 1;
priv->dp_throttle_restart = 0;
spin_unlock_irqrestore(&priv->dp_port_lock, flags);
}
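/*
* Digi Rx Unthrottle
*
* If a read was deferred while the port was throttled, resubmit the
* read urb to restart the read chain, then clear the throttle flags.
*/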
static void digi_rx_unthrottle(struct tty_struct *tty)
{
int ret = 0;
unsigned long flags;
struct usb_serial_port *port = tty->driver_data;
struct digi_port *priv = usb_get_serial_port_data(port);
spin_lock_irqsave(&priv->dp_port_lock, flags);
/* restart read chain */
if (priv->dp_throttle_restart)
ret = usb_submit_urb(port->read_urb, GFP_ATOMIC);
/* turn throttle off */
priv->dp_throttled = 0;
priv->dp_throttle_restart = 0;
spin_unlock_irqrestore(&priv->dp_port_lock, flags);
if (ret)
dev_err(&port->dev,
"%s: usb_submit_urb failed, ret=%d, port=%d\n",
__func__, ret, priv->dp_port_num);
}
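/*
* Digi Set Termios
*
* Translate changed termios settings (baud rate, parity, word size,
* stop bits, flow control and receive enable) into out of band
* commands and send them to the device in a single oob write.
*/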
static void digi_set_termios(struct tty_struct *tty,
struct usb_serial_port *port,
const struct ktermios *old_termios)
{
struct digi_port *priv = usb_get_serial_port_data(port);
struct device *dev = &port->dev;
unsigned int iflag = tty->termios.c_iflag;
unsigned int cflag = tty->termios.c_cflag;
unsigned int old_iflag = old_termios->c_iflag;
unsigned int old_cflag = old_termios->c_cflag;
unsigned char buf[32];
unsigned int modem_signals;
int arg, ret;
int i = 0;
speed_t baud;
dev_dbg(dev,
"digi_set_termios: TOP: port=%d, iflag=0x%x, old_iflag=0x%x, cflag=0x%x, old_cflag=0x%x\n",
priv->dp_port_num, iflag, old_iflag, cflag, old_cflag);
/* set baud rate */
baud = tty_get_baud_rate(tty);
if (baud != tty_termios_baud_rate(old_termios)) {
arg = -1;
/* reassert DTR and (maybe) RTS on transition from B0 */
if ((old_cflag & CBAUD) == B0) {
/* don't set RTS if using hardware flow control */
/* and throttling input */
modem_signals = TIOCM_DTR;
if (!C_CRTSCTS(tty) || !tty_throttled(tty))
modem_signals |= TIOCM_RTS;
digi_set_modem_signals(port, modem_signals, 1);
}
switch (baud) {
/* drop DTR and RTS on transition to B0 */
case 0: digi_set_modem_signals(port, 0, 1); break;
case 50: arg = DIGI_BAUD_50; break;
case 75: arg = DIGI_BAUD_75; break;
case 110: arg = DIGI_BAUD_110; break;
case 150: arg = DIGI_BAUD_150; break;
case 200: arg = DIGI_BAUD_200; break;
case 300: arg = DIGI_BAUD_300; break;
case 600: arg = DIGI_BAUD_600; break;
case 1200: arg = DIGI_BAUD_1200; break;
case 1800: arg = DIGI_BAUD_1800; break;
case 2400: arg = DIGI_BAUD_2400; break;
case 4800: arg = DIGI_BAUD_4800; break;
case 9600: arg = DIGI_BAUD_9600; break;
case 19200: arg = DIGI_BAUD_19200; break;
case 38400: arg = DIGI_BAUD_38400; break;
case 57600: arg = DIGI_BAUD_57600; break;
case 115200: arg = DIGI_BAUD_115200; break;
case 230400: arg = DIGI_BAUD_230400; break;
case 460800: arg = DIGI_BAUD_460800; break;
default:
arg = DIGI_BAUD_9600;
baud = 9600;
break;
}
if (arg != -1) {
buf[i++] = DIGI_CMD_SET_BAUD_RATE;
buf[i++] = priv->dp_port_num;
buf[i++] = arg;
buf[i++] = 0;
}
}
/* set parity */
tty->termios.c_cflag &= ~CMSPAR;
if ((cflag & (PARENB | PARODD)) != (old_cflag & (PARENB | PARODD))) {
if (cflag & PARENB) {
if (cflag & PARODD)
arg = DIGI_PARITY_ODD;
else
arg = DIGI_PARITY_EVEN;
} else {
arg = DIGI_PARITY_NONE;
}
buf[i++] = DIGI_CMD_SET_PARITY;
buf[i++] = priv->dp_port_num;
buf[i++] = arg;
buf[i++] = 0;
}
/* set word size */
if ((cflag & CSIZE) != (old_cflag & CSIZE)) {
arg = -1;
switch (cflag & CSIZE) {
case CS5: arg = DIGI_WORD_SIZE_5; break;
case CS6: arg = DIGI_WORD_SIZE_6; break;
case CS7: arg = DIGI_WORD_SIZE_7; break;
case CS8: arg = DIGI_WORD_SIZE_8; break;
default:
dev_dbg(dev,
"digi_set_termios: can't handle word size %d\n",
cflag & CSIZE);
break;
}
if (arg != -1) {
buf[i++] = DIGI_CMD_SET_WORD_SIZE;
buf[i++] = priv->dp_port_num;
buf[i++] = arg;
buf[i++] = 0;
}
}
/* set stop bits */
if ((cflag & CSTOPB) != (old_cflag & CSTOPB)) {
if ((cflag & CSTOPB))
arg = DIGI_STOP_BITS_2;
else
arg = DIGI_STOP_BITS_1;
buf[i++] = DIGI_CMD_SET_STOP_BITS;
buf[i++] = priv->dp_port_num;
buf[i++] = arg;
buf[i++] = 0;
}
/* set input flow control */
if ((iflag & IXOFF) != (old_iflag & IXOFF) ||
(cflag & CRTSCTS) != (old_cflag & CRTSCTS)) {
arg = 0;
if (iflag & IXOFF)
arg |= DIGI_INPUT_FLOW_CONTROL_XON_XOFF;
else
arg &= ~DIGI_INPUT_FLOW_CONTROL_XON_XOFF;
if (cflag & CRTSCTS) {
arg |= DIGI_INPUT_FLOW_CONTROL_RTS;
/* On USB-4 it is necessary to assert RTS prior */
/* to selecting RTS input flow control. */
buf[i++] = DIGI_CMD_SET_RTS_SIGNAL;
buf[i++] = priv->dp_port_num;
buf[i++] = DIGI_RTS_ACTIVE;
buf[i++] = 0;
} else {
arg &= ~DIGI_INPUT_FLOW_CONTROL_RTS;
}
buf[i++] = DIGI_CMD_SET_INPUT_FLOW_CONTROL;
buf[i++] = priv->dp_port_num;
buf[i++] = arg;
buf[i++] = 0;
}
/* set output flow control */
if ((iflag & IXON) != (old_iflag & IXON) ||
(cflag & CRTSCTS) != (old_cflag & CRTSCTS)) {
arg = 0;
if (iflag & IXON)
arg |= DIGI_OUTPUT_FLOW_CONTROL_XON_XOFF;
else
arg &= ~DIGI_OUTPUT_FLOW_CONTROL_XON_XOFF;
if (cflag & CRTSCTS)
arg |= DIGI_OUTPUT_FLOW_CONTROL_CTS;
else
arg &= ~DIGI_OUTPUT_FLOW_CONTROL_CTS;
buf[i++] = DIGI_CMD_SET_OUTPUT_FLOW_CONTROL;
buf[i++] = priv->dp_port_num;
buf[i++] = arg;
buf[i++] = 0;
}
/* set receive enable/disable */
if ((cflag & CREAD) != (old_cflag & CREAD)) {
if (cflag & CREAD)
arg = DIGI_ENABLE;
else
arg = DIGI_DISABLE;
buf[i++] = DIGI_CMD_RECEIVE_ENABLE;
buf[i++] = priv->dp_port_num;
buf[i++] = arg;
buf[i++] = 0;
}
ret = digi_write_oob_command(port, buf, i, 1);
if (ret != 0)
dev_dbg(dev, "digi_set_termios: write oob failed, ret=%d\n", ret);
tty_encode_baud_rate(tty, baud, baud);
}
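/*
* Digi Break Ctl
*
* Set or clear the break condition on the port by sending an in
* band break control command; returns the result of
* digi_write_inb_command.
*/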
static int digi_break_ctl(struct tty_struct *tty, int break_state)
{
struct usb_serial_port *port = tty->driver_data;
unsigned char buf[4];
buf[0] = DIGI_CMD_BREAK_CONTROL;
buf[1] = 2; /* length */
buf[2] = break_state ? 1 : 0;
buf[3] = 0; /* pad */
return digi_write_inb_command(port, buf, 4, 0);
}
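/*
* Digi Tiocmget
*
* Return the cached modem signal state for the port, which is kept
* up to date by digi_set_modem_signals and the oob status callback.
*/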
static int digi_tiocmget(struct tty_struct *tty)
{
struct usb_serial_port *port = tty->driver_data;
struct digi_port *priv = usb_get_serial_port_data(port);
unsigned int val;
unsigned long flags;
spin_lock_irqsave(&priv->dp_port_lock, flags);
val = priv->dp_modem_signals;
spin_unlock_irqrestore(&priv->dp_port_lock, flags);
return val;
}
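/*
* Digi Tiocmset
*
* Apply the set and clear masks to the cached modem signals and
* send the resulting state to the device.
*/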
static int digi_tiocmset(struct tty_struct *tty,
unsigned int set, unsigned int clear)
{
struct usb_serial_port *port = tty->driver_data;
struct digi_port *priv = usb_get_serial_port_data(port);
unsigned int val;
unsigned long flags;
spin_lock_irqsave(&priv->dp_port_lock, flags);
val = (priv->dp_modem_signals & ~clear) | set;
spin_unlock_irqrestore(&priv->dp_port_lock, flags);
return digi_set_modem_signals(port, val, 1);
}
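/*
* Digi Write
*
* Send up to 64 bytes of data, prefixed by a send-data command and
* length byte. If the write urb is busy, a single byte (the
* put_char case) may be buffered locally instead. Returns the
* number of new bytes accepted or a negative error.
*/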
static int digi_write(struct tty_struct *tty, struct usb_serial_port *port,
const unsigned char *buf, int count)
{
int ret, data_len, new_len;
struct digi_port *priv = usb_get_serial_port_data(port);
unsigned char *data = port->write_urb->transfer_buffer;
unsigned long flags;
dev_dbg(&port->dev, "digi_write: TOP: port=%d, count=%d\n",
priv->dp_port_num, count);
/* copy user data (which can sleep) before getting spin lock */
count = min(count, port->bulk_out_size-2);
count = min(64, count);
/* be sure only one write proceeds at a time */
/* there are races on the port private buffer */
spin_lock_irqsave(&priv->dp_port_lock, flags);
/* wait for urb status clear to submit another urb */
if (priv->dp_write_urb_in_use) {
/* buffer data if count is 1 (probably put_char), if possible */
if (count == 1 && priv->dp_out_buf_len < DIGI_OUT_BUF_SIZE) {
priv->dp_out_buf[priv->dp_out_buf_len++] = *buf;
new_len = 1;
} else {
new_len = 0;
}
spin_unlock_irqrestore(&priv->dp_port_lock, flags);
return new_len;
}
/* allow space for any buffered data and for new data, up to */
/* transfer buffer size - 2 (for command and length bytes) */
new_len = min(count, port->bulk_out_size-2-priv->dp_out_buf_len);
data_len = new_len + priv->dp_out_buf_len;
if (data_len == 0) {
spin_unlock_irqrestore(&priv->dp_port_lock, flags);
return 0;
}
port->write_urb->transfer_buffer_length = data_len+2;
*data++ = DIGI_CMD_SEND_DATA;
*data++ = data_len;
/* copy in buffered data first */
memcpy(data, priv->dp_out_buf, priv->dp_out_buf_len);
data += priv->dp_out_buf_len;
/* copy in new data */
memcpy(data, buf, new_len);
ret = usb_submit_urb(port->write_urb, GFP_ATOMIC);
if (ret == 0) {
priv->dp_write_urb_in_use = 1;
ret = new_len;
priv->dp_out_buf_len = 0;
}
/* return length of new data written, or error */
spin_unlock_irqrestore(&priv->dp_port_lock, flags);
if (ret < 0)
dev_err_console(port,
"%s: usb_submit_urb failed, ret=%d, port=%d\n",
__func__, ret, priv->dp_port_num);
dev_dbg(&port->dev, "digi_write: returning %d\n", ret);
return ret;
}
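/*
* Digi Write Bulk Callback
*
* Completion handler for write urbs. For the oob port it clears
* the in-use flag and wakes any waiters. For in band ports it
* sends any data buffered while the urb was busy and wakes up the
* tty when there is room for more data.
*/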
static void digi_write_bulk_callback(struct urb *urb)
{
struct usb_serial_port *port = urb->context;
struct usb_serial *serial;
struct digi_port *priv;
struct digi_serial *serial_priv;
unsigned long flags;
int ret = 0;
int status = urb->status;
bool wakeup;
/* port and serial sanity check */
if (port == NULL || (priv = usb_get_serial_port_data(port)) == NULL) {
pr_err("%s: port or port->private is NULL, status=%d\n",
__func__, status);
return;
}
serial = port->serial;
if (serial == NULL || (serial_priv = usb_get_serial_data(serial)) == NULL) {
dev_err(&port->dev,
"%s: serial or serial->private is NULL, status=%d\n",
__func__, status);
return;
}
/* handle oob callback */
if (priv->dp_port_num == serial_priv->ds_oob_port_num) {
dev_dbg(&port->dev, "digi_write_bulk_callback: oob callback\n");
spin_lock_irqsave(&priv->dp_port_lock, flags);
priv->dp_write_urb_in_use = 0;
wake_up_interruptible(&priv->write_wait);
spin_unlock_irqrestore(&priv->dp_port_lock, flags);
return;
}
/* try to send any buffered data on this port */
wakeup = true;
spin_lock_irqsave(&priv->dp_port_lock, flags);
priv->dp_write_urb_in_use = 0;
if (priv->dp_out_buf_len > 0) {
*((unsigned char *)(port->write_urb->transfer_buffer))
= (unsigned char)DIGI_CMD_SEND_DATA;
*((unsigned char *)(port->write_urb->transfer_buffer) + 1)
= (unsigned char)priv->dp_out_buf_len;
port->write_urb->transfer_buffer_length =
priv->dp_out_buf_len + 2;
memcpy(port->write_urb->transfer_buffer + 2, priv->dp_out_buf,
priv->dp_out_buf_len);
ret = usb_submit_urb(port->write_urb, GFP_ATOMIC);
if (ret == 0) {
priv->dp_write_urb_in_use = 1;
priv->dp_out_buf_len = 0;
wakeup = false;
}
}
spin_unlock_irqrestore(&priv->dp_port_lock, flags);
if (ret && ret != -EPERM)
dev_err_console(port,
"%s: usb_submit_urb failed, ret=%d, port=%d\n",
__func__, ret, priv->dp_port_num);
if (wakeup)
tty_port_tty_wakeup(&port->port);
}
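/*
* Digi Write Room
*
* Report how much data digi_write can currently accept: zero while
* the write urb is in use, otherwise the transfer buffer size less
* the two command bytes and any locally buffered data.
*/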
static unsigned int digi_write_room(struct tty_struct *tty)
{
struct usb_serial_port *port = tty->driver_data;
struct digi_port *priv = usb_get_serial_port_data(port);
unsigned long flags;
unsigned int room;
spin_lock_irqsave(&priv->dp_port_lock, flags);
if (priv->dp_write_urb_in_use)
room = 0;
else
room = port->bulk_out_size - 2 - priv->dp_out_buf_len;
spin_unlock_irqrestore(&priv->dp_port_lock, flags);
dev_dbg(&port->dev, "digi_write_room: port=%d, room=%u\n", priv->dp_port_num, room);
return room;
}
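/*
* Digi Chars In Buffer
*
* Report the number of bytes still to be sent: a full transfer
* (bulk_out_size - 2) while the write urb is in use, otherwise the
* number of bytes held in the local buffer.
*/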
static unsigned int digi_chars_in_buffer(struct tty_struct *tty)
{
struct usb_serial_port *port = tty->driver_data;
struct digi_port *priv = usb_get_serial_port_data(port);
unsigned long flags;
unsigned int chars;
spin_lock_irqsave(&priv->dp_port_lock, flags);
if (priv->dp_write_urb_in_use)
chars = port->bulk_out_size - 2;
else
chars = priv->dp_out_buf_len;
spin_unlock_irqrestore(&priv->dp_port_lock, flags);
dev_dbg(&port->dev, "%s: port=%d, chars=%d\n", __func__,
priv->dp_port_num, chars);
return chars;
}
static void digi_dtr_rts(struct usb_serial_port *port, int on)
{
/* Adjust DTR and RTS */
digi_set_modem_signals(port, on * (TIOCM_DTR | TIOCM_RTS), 1);
}
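/*
* Digi Open
*
* Open the port: make sure the device is started, enable automatic
* modem signal reporting, flush the fifos, and force the current
* termios settings to be sent by passing inverted old settings.
*/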
static int digi_open(struct tty_struct *tty, struct usb_serial_port *port)
{
int ret;
unsigned char buf[32];
struct digi_port *priv = usb_get_serial_port_data(port);
struct ktermios not_termios;
/* be sure the device is started up */
if (digi_startup_device(port->serial) != 0)
return -ENXIO;
/* read modem signals automatically whenever they change */
buf[0] = DIGI_CMD_READ_INPUT_SIGNALS;
buf[1] = priv->dp_port_num;
buf[2] = DIGI_ENABLE;
buf[3] = 0;
/* flush fifos */
buf[4] = DIGI_CMD_IFLUSH_FIFO;
buf[5] = priv->dp_port_num;
buf[6] = DIGI_FLUSH_TX | DIGI_FLUSH_RX;
buf[7] = 0;
ret = digi_write_oob_command(port, buf, 8, 1);
if (ret != 0)
dev_dbg(&port->dev, "digi_open: write oob failed, ret=%d\n", ret);
/* set termios settings */
if (tty) {
not_termios.c_cflag = ~tty->termios.c_cflag;
not_termios.c_iflag = ~tty->termios.c_iflag;
digi_set_termios(tty, port, &not_termios);
}
return 0;
}
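/*
* Digi Close
*
* Close the port: wait for the transmitter to go idle, disable flow
* control, modem signal reporting and receive, flush the fifos,
* wait briefly for the final oob commands, and kill any outstanding
* write urb. If the device is disconnected only the flags are
* cleared.
*/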
static void digi_close(struct usb_serial_port *port)
{
DEFINE_WAIT(wait);
int ret;
unsigned char buf[32];
struct digi_port *priv = usb_get_serial_port_data(port);
mutex_lock(&port->serial->disc_mutex);
/* if disconnected, just clear flags */
if (port->serial->disconnected)
goto exit;
/* FIXME: Transmit idle belongs in the wait_until_sent path */
digi_transmit_idle(port, DIGI_CLOSE_TIMEOUT);
/* disable input flow control */
buf[0] = DIGI_CMD_SET_INPUT_FLOW_CONTROL;
buf[1] = priv->dp_port_num;
buf[2] = DIGI_DISABLE;
buf[3] = 0;
/* disable output flow control */
buf[4] = DIGI_CMD_SET_OUTPUT_FLOW_CONTROL;
buf[5] = priv->dp_port_num;
buf[6] = DIGI_DISABLE;
buf[7] = 0;
/* disable reading modem signals automatically */
buf[8] = DIGI_CMD_READ_INPUT_SIGNALS;
buf[9] = priv->dp_port_num;
buf[10] = DIGI_DISABLE;
buf[11] = 0;
/* disable receive */
buf[12] = DIGI_CMD_RECEIVE_ENABLE;
buf[13] = priv->dp_port_num;
buf[14] = DIGI_DISABLE;
buf[15] = 0;
/* flush fifos */
buf[16] = DIGI_CMD_IFLUSH_FIFO;
buf[17] = priv->dp_port_num;
buf[18] = DIGI_FLUSH_TX | DIGI_FLUSH_RX;
buf[19] = 0;
ret = digi_write_oob_command(port, buf, 20, 0);
if (ret != 0)
dev_dbg(&port->dev, "digi_close: write oob failed, ret=%d\n",
ret);
/* wait for final commands on oob port to complete */
prepare_to_wait(&priv->dp_flush_wait, &wait,
TASK_INTERRUPTIBLE);
schedule_timeout(DIGI_CLOSE_TIMEOUT);
finish_wait(&priv->dp_flush_wait, &wait);
/* shutdown any outstanding bulk writes */
usb_kill_urb(port->write_urb);
exit:
spin_lock_irq(&priv->dp_port_lock);
priv->dp_write_urb_in_use = 0;
wake_up_interruptible(&priv->dp_close_wait);
spin_unlock_irq(&priv->dp_port_lock);
mutex_unlock(&port->serial->disc_mutex);
}
/*
* Digi Startup Device
*
* Starts reads on all ports. Must be called AFTER startup, with
* urbs initialized. Returns 0 if successful, non-zero error otherwise.
*/
static int digi_startup_device(struct usb_serial *serial)
{
int i, ret = 0;
struct digi_serial *serial_priv = usb_get_serial_data(serial);
struct usb_serial_port *port;
/* be sure this happens exactly once */
spin_lock(&serial_priv->ds_serial_lock);
if (serial_priv->ds_device_started) {
spin_unlock(&serial_priv->ds_serial_lock);
return 0;
}
serial_priv->ds_device_started = 1;
spin_unlock(&serial_priv->ds_serial_lock);
/* start reading from each bulk in endpoint for the device */
/* set USB_DISABLE_SPD flag for write bulk urbs */
for (i = 0; i < serial->type->num_ports + 1; i++) {
port = serial->port[i];
ret = usb_submit_urb(port->read_urb, GFP_KERNEL);
if (ret != 0) {
dev_err(&port->dev,
"%s: usb_submit_urb failed, ret=%d, port=%d\n",
__func__, ret, i);
break;
}
}
return ret;
}
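/*
* Digi Port Init
*
* Allocate and initialize the private data for one port (lock, port
* number and wait queues) and attach it to the usb_serial_port.
*/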
static int digi_port_init(struct usb_serial_port *port, unsigned port_num)
{
struct digi_port *priv;
priv = kzalloc(sizeof(*priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
spin_lock_init(&priv->dp_port_lock);
priv->dp_port_num = port_num;
init_waitqueue_head(&priv->dp_transmit_idle_wait);
init_waitqueue_head(&priv->dp_flush_wait);
init_waitqueue_head(&priv->dp_close_wait);
init_waitqueue_head(&priv->write_wait);
priv->dp_port = port;
usb_set_serial_port_data(port, priv);
return 0;
}
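/*
* Digi Startup
*
* Allocate the per-device private data and initialize the out of
* band port, which follows the last in band port.
*/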
static int digi_startup(struct usb_serial *serial)
{
struct digi_serial *serial_priv;
int ret;
serial_priv = kzalloc(sizeof(*serial_priv), GFP_KERNEL);
if (!serial_priv)
return -ENOMEM;
spin_lock_init(&serial_priv->ds_serial_lock);
serial_priv->ds_oob_port_num = serial->type->num_ports;
serial_priv->ds_oob_port = serial->port[serial_priv->ds_oob_port_num];
ret = digi_port_init(serial_priv->ds_oob_port,
serial_priv->ds_oob_port_num);
if (ret) {
kfree(serial_priv);
return ret;
}
usb_set_serial_data(serial, serial_priv);
return 0;
}
static void digi_disconnect(struct usb_serial *serial)
{
int i;
/* stop reads and writes on all ports */
for (i = 0; i < serial->type->num_ports + 1; i++) {
usb_kill_urb(serial->port[i]->read_urb);
usb_kill_urb(serial->port[i]->write_urb);
}
}
static void digi_release(struct usb_serial *serial)
{
struct digi_serial *serial_priv;
struct digi_port *priv;
serial_priv = usb_get_serial_data(serial);
priv = usb_get_serial_port_data(serial_priv->ds_oob_port);
kfree(priv);
kfree(serial_priv);
}
static int digi_port_probe(struct usb_serial_port *port)
{
return digi_port_init(port, port->port_number);
}
static void digi_port_remove(struct usb_serial_port *port)
{
struct digi_port *priv;
priv = usb_get_serial_port_data(port);
kfree(priv);
}
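/*
* Digi Read Bulk Callback
*
* Completion handler for read urbs. After sanity checks, hand the
* data to the oob or in band handler and resubmit the urb unless an
* error occurred or the port is throttled.
*/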
static void digi_read_bulk_callback(struct urb *urb)
{
struct usb_serial_port *port = urb->context;
struct digi_port *priv;
struct digi_serial *serial_priv;
int ret;
int status = urb->status;
/* port sanity check, do not resubmit if port is not valid */
if (port == NULL)
return;
priv = usb_get_serial_port_data(port);
if (priv == NULL) {
dev_err(&port->dev, "%s: port->private is NULL, status=%d\n",
__func__, status);
return;
}
if (port->serial == NULL ||
(serial_priv = usb_get_serial_data(port->serial)) == NULL) {
dev_err(&port->dev, "%s: serial is bad or serial->private "
"is NULL, status=%d\n", __func__, status);
return;
}
/* do not resubmit urb if it has any status error */
if (status) {
dev_err(&port->dev,
"%s: nonzero read bulk status: status=%d, port=%d\n",
__func__, status, priv->dp_port_num);
return;
}
/* handle oob or inb callback, do not resubmit if error */
if (priv->dp_port_num == serial_priv->ds_oob_port_num) {
if (digi_read_oob_callback(urb) != 0)
return;
} else {
if (digi_read_inb_callback(urb) != 0)
return;
}
/* continue read */
ret = usb_submit_urb(urb, GFP_ATOMIC);
if (ret != 0 && ret != -EPERM) {
dev_err(&port->dev,
"%s: failed resubmitting urb, ret=%d, port=%d\n",
__func__, ret, priv->dp_port_num);
}
}
/*
* Digi Read INB Callback
*
* Digi Read INB Callback handles reads on the in band ports, sending
* the data on to the tty subsystem. When called we know port and
* port->private are not NULL and port->serial has been validated.
* It returns 0 if successful, 1 if successful but the port is
* throttled, and -1 if the sanity checks failed.
*/
static int digi_read_inb_callback(struct urb *urb)
{
struct usb_serial_port *port = urb->context;
struct digi_port *priv = usb_get_serial_port_data(port);
unsigned char *buf = urb->transfer_buffer;
unsigned long flags;
int opcode;
int len;
int port_status;
unsigned char *data;
int tty_flag, throttled;
/* short/multiple packet check */
if (urb->actual_length < 2) {
dev_warn(&port->dev, "short packet received\n");
return -1;
}
opcode = buf[0];
len = buf[1];
if (urb->actual_length != len + 2) {
dev_err(&port->dev, "malformed packet received: port=%d, opcode=%d, len=%d, actual_length=%u\n",
priv->dp_port_num, opcode, len, urb->actual_length);
return -1;
}
if (opcode == DIGI_CMD_RECEIVE_DATA && len < 1) {
dev_err(&port->dev, "malformed data packet received\n");
return -1;
}
spin_lock_irqsave(&priv->dp_port_lock, flags);
/* check for throttle; if set, do not resubmit read urb */
/* indicate the read chain needs to be restarted on unthrottle */
throttled = priv->dp_throttled;
if (throttled)
priv->dp_throttle_restart = 1;
/* receive data */
if (opcode == DIGI_CMD_RECEIVE_DATA) {
port_status = buf[2];
data = &buf[3];
/* get flag from port_status */
tty_flag = 0;
/* overrun is special, not associated with a char */
if (port_status & DIGI_OVERRUN_ERROR)
tty_insert_flip_char(&port->port, 0, TTY_OVERRUN);
/* break takes precedence over parity, */
/* which takes precedence over framing errors */
if (port_status & DIGI_BREAK_ERROR)
tty_flag = TTY_BREAK;
else if (port_status & DIGI_PARITY_ERROR)
tty_flag = TTY_PARITY;
else if (port_status & DIGI_FRAMING_ERROR)
tty_flag = TTY_FRAME;
/* data length is len-1 (one byte of len is port_status) */
--len;
if (len > 0) {
tty_insert_flip_string_fixed_flag(&port->port, data,
tty_flag, len);
tty_flip_buffer_push(&port->port);
}
}
spin_unlock_irqrestore(&priv->dp_port_lock, flags);
if (opcode == DIGI_CMD_RECEIVE_DISABLE)
dev_dbg(&port->dev, "%s: got RECEIVE_DISABLE\n", __func__);
else if (opcode != DIGI_CMD_RECEIVE_DATA)
dev_dbg(&port->dev, "%s: unknown opcode: %d\n", __func__, opcode);
return throttled ? 1 : 0;
}
/*
* Digi Read OOB Callback
*
* Digi Read OOB Callback handles reads on the out of band port.
* When called we know port and port->private are not NULL and
* the port->serial is valid. It returns 0 if successful, and
* -1 if the sanity checks failed.
*/
static int digi_read_oob_callback(struct urb *urb)
{
struct usb_serial_port *port = urb->context;
struct usb_serial *serial = port->serial;
struct tty_struct *tty;
struct digi_port *priv;
unsigned char *buf = urb->transfer_buffer;
int opcode, line, status, val;
unsigned long flags;
int i;
unsigned int rts;
if (urb->actual_length < 4)
return -1;
/* handle each oob command */
for (i = 0; i < urb->actual_length - 3; i += 4) {
opcode = buf[i];
line = buf[i + 1];
status = buf[i + 2];
val = buf[i + 3];
dev_dbg(&port->dev, "digi_read_oob_callback: opcode=%d, line=%d, status=%d, val=%d\n",
opcode, line, status, val);
if (status != 0 || line >= serial->type->num_ports)
continue;
port = serial->port[line];
priv = usb_get_serial_port_data(port);
if (priv == NULL)
return -1;
tty = tty_port_tty_get(&port->port);
rts = 0;
if (tty)
rts = C_CRTSCTS(tty);
if (tty && opcode == DIGI_CMD_READ_INPUT_SIGNALS) {
bool wakeup = false;
spin_lock_irqsave(&priv->dp_port_lock, flags);
/* convert from digi flags to termiox flags */
if (val & DIGI_READ_INPUT_SIGNALS_CTS) {
priv->dp_modem_signals |= TIOCM_CTS;
if (rts)
wakeup = true;
} else {
priv->dp_modem_signals &= ~TIOCM_CTS;
/* port must be open to use tty struct */
}
if (val & DIGI_READ_INPUT_SIGNALS_DSR)
priv->dp_modem_signals |= TIOCM_DSR;
else
priv->dp_modem_signals &= ~TIOCM_DSR;
if (val & DIGI_READ_INPUT_SIGNALS_RI)
priv->dp_modem_signals |= TIOCM_RI;
else
priv->dp_modem_signals &= ~TIOCM_RI;
if (val & DIGI_READ_INPUT_SIGNALS_DCD)
priv->dp_modem_signals |= TIOCM_CD;
else
priv->dp_modem_signals &= ~TIOCM_CD;
spin_unlock_irqrestore(&priv->dp_port_lock, flags);
if (wakeup)
tty_port_tty_wakeup(&port->port);
} else if (opcode == DIGI_CMD_TRANSMIT_IDLE) {
spin_lock_irqsave(&priv->dp_port_lock, flags);
priv->dp_transmit_idle = 1;
wake_up_interruptible(&priv->dp_transmit_idle_wait);
spin_unlock_irqrestore(&priv->dp_port_lock, flags);
} else if (opcode == DIGI_CMD_IFLUSH_FIFO) {
wake_up_interruptible(&priv->dp_flush_wait);
}
tty_kref_put(tty);
}
return 0;
}
module_usb_serial_driver(serial_drivers, id_table_combined);
MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL");
| linux-master | drivers/usb/serial/digi_acceleport.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* Edgeport USB Serial Converter driver
*
* Copyright (C) 2000 Inside Out Networks, All rights reserved.
* Copyright (C) 2001-2002 Greg Kroah-Hartman <[email protected]>
*
* Supports the following devices:
* Edgeport/4
* Edgeport/4t
* Edgeport/2
* Edgeport/4i
* Edgeport/2i
* Edgeport/421
* Edgeport/21
* Rapidport/4
* Edgeport/8
* Edgeport/2D8
* Edgeport/4D8
* Edgeport/8i
*
* For questions or problems with this driver, contact Inside Out
* Networks technical support, or Peter Berger <[email protected]>,
* or Al Borchers <[email protected]>.
*
*/
#include <linux/kernel.h>
#include <linux/jiffies.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/tty.h>
#include <linux/tty_driver.h>
#include <linux/tty_flip.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/serial.h>
#include <linux/ioctl.h>
#include <linux/wait.h>
#include <linux/firmware.h>
#include <linux/ihex.h>
#include <linux/uaccess.h>
#include <linux/usb.h>
#include <linux/usb/serial.h>
#include "io_edgeport.h"
#include "io_ionsp.h" /* info for the iosp messages */
#include "io_16654.h" /* 16654 UART defines */
#define DRIVER_AUTHOR "Greg Kroah-Hartman <[email protected]> and David Iacovelli"
#define DRIVER_DESC "Edgeport USB Serial Driver"
#define MAX_NAME_LEN 64
#define OPEN_TIMEOUT (5*HZ) /* 5 seconds */
static const struct usb_device_id edgeport_2port_id_table[] = {
{ USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_EDGEPORT_2) },
{ USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_EDGEPORT_2I) },
{ USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_EDGEPORT_421) },
{ USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_EDGEPORT_21) },
{ USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_EDGEPORT_2_DIN) },
{ }
};
static const struct usb_device_id edgeport_4port_id_table[] = {
{ USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_EDGEPORT_4) },
{ USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_RAPIDPORT_4) },
{ USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_EDGEPORT_4T) },
{ USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_MT4X56USB) },
{ USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_EDGEPORT_4I) },
{ USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_EDGEPORT_8_DUAL_CPU) },
{ USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_EDGEPORT_4_DIN) },
{ USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_EDGEPORT_22I) },
{ USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_EDGEPORT_412_4) },
{ USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_EDGEPORT_COMPATIBLE) },
{ }
};
static const struct usb_device_id edgeport_8port_id_table[] = {
{ USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_EDGEPORT_8) },
{ USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_EDGEPORT_16_DUAL_CPU) },
{ USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_EDGEPORT_8I) },
{ USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_EDGEPORT_8R) },
{ USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_EDGEPORT_8RR) },
{ USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_EDGEPORT_412_8) },
{ }
};
static const struct usb_device_id Epic_port_id_table[] = {
{ USB_DEVICE(USB_VENDOR_ID_NCR, NCR_DEVICE_ID_EPIC_0202) },
{ USB_DEVICE(USB_VENDOR_ID_NCR, NCR_DEVICE_ID_EPIC_0203) },
{ USB_DEVICE(USB_VENDOR_ID_NCR, NCR_DEVICE_ID_EPIC_0310) },
{ USB_DEVICE(USB_VENDOR_ID_NCR, NCR_DEVICE_ID_EPIC_0311) },
{ USB_DEVICE(USB_VENDOR_ID_NCR, NCR_DEVICE_ID_EPIC_0312) },
{ USB_DEVICE(USB_VENDOR_ID_AXIOHM, AXIOHM_DEVICE_ID_EPIC_A758) },
{ USB_DEVICE(USB_VENDOR_ID_AXIOHM, AXIOHM_DEVICE_ID_EPIC_A794) },
{ USB_DEVICE(USB_VENDOR_ID_AXIOHM, AXIOHM_DEVICE_ID_EPIC_A225) },
{ }
};
/* Devices that this driver supports */
static const struct usb_device_id id_table_combined[] = {
{ USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_EDGEPORT_4) },
{ USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_RAPIDPORT_4) },
{ USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_EDGEPORT_4T) },
{ USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_MT4X56USB) },
{ USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_EDGEPORT_2) },
{ USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_EDGEPORT_4I) },
{ USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_EDGEPORT_2I) },
{ USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_EDGEPORT_421) },
{ USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_EDGEPORT_21) },
{ USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_EDGEPORT_8_DUAL_CPU) },
{ USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_EDGEPORT_8) },
{ USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_EDGEPORT_2_DIN) },
{ USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_EDGEPORT_4_DIN) },
{ USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_EDGEPORT_16_DUAL_CPU) },
{ USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_EDGEPORT_22I) },
{ USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_EDGEPORT_412_4) },
{ USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_EDGEPORT_COMPATIBLE) },
{ USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_EDGEPORT_8I) },
{ USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_EDGEPORT_8R) },
{ USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_EDGEPORT_8RR) },
{ USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_EDGEPORT_412_8) },
{ USB_DEVICE(USB_VENDOR_ID_NCR, NCR_DEVICE_ID_EPIC_0202) },
{ USB_DEVICE(USB_VENDOR_ID_NCR, NCR_DEVICE_ID_EPIC_0203) },
{ USB_DEVICE(USB_VENDOR_ID_NCR, NCR_DEVICE_ID_EPIC_0310) },
{ USB_DEVICE(USB_VENDOR_ID_NCR, NCR_DEVICE_ID_EPIC_0311) },
{ USB_DEVICE(USB_VENDOR_ID_NCR, NCR_DEVICE_ID_EPIC_0312) },
{ USB_DEVICE(USB_VENDOR_ID_AXIOHM, AXIOHM_DEVICE_ID_EPIC_A758) },
{ USB_DEVICE(USB_VENDOR_ID_AXIOHM, AXIOHM_DEVICE_ID_EPIC_A794) },
{ USB_DEVICE(USB_VENDOR_ID_AXIOHM, AXIOHM_DEVICE_ID_EPIC_A225) },
{ } /* Terminating entry */
};
MODULE_DEVICE_TABLE(usb, id_table_combined);
/* receive port state */
enum RXSTATE {
EXPECT_HDR1 = 0, /* Expect header byte 1 */
EXPECT_HDR2 = 1, /* Expect header byte 2 */
EXPECT_DATA = 2, /* Expect 'RxBytesRemaining' data */
EXPECT_HDR3 = 3, /* Expect header byte 3 (for status hdrs only) */
};
/* Transmit Fifo
* This Transmit queue is an extension of the edgeport Rx buffer.
* The maximum amount of data buffered in both the edgeport
* Rx buffer (maxTxCredits) and this buffer will never exceed maxTxCredits.
*/
struct TxFifo {
unsigned int head; /* index to head pointer (write) */
unsigned int tail; /* index to tail pointer (read) */
unsigned int count; /* Bytes in queue */
unsigned int size; /* Max size of queue (equal to Max number of TxCredits) */
unsigned char *fifo; /* allocated Buffer */
};
/* This structure holds all of the local port information */
struct edgeport_port {
__u16 txCredits; /* our current credits for this port */
__u16 maxTxCredits; /* the max size of the port */
struct TxFifo txfifo; /* transmit fifo -- size will be maxTxCredits */
struct urb *write_urb; /* write URB for this port */
bool write_in_progress; /* 'true' while a write URB is outstanding */
spinlock_t ep_lock;
__u8 shadowLCR; /* last LCR value received */
__u8 shadowMCR; /* last MCR value received */
__u8 shadowMSR; /* last MSR value received */
__u8 shadowLSR; /* last LSR value received */
__u8 shadowXonChar; /* last value set as XON char in Edgeport */
__u8 shadowXoffChar; /* last value set as XOFF char in Edgeport */
__u8 validDataMask;
__u32 baudRate;
bool open;
bool openPending;
bool commandPending;
bool closePending;
bool chaseResponsePending;
wait_queue_head_t wait_chase; /* for handling sleeping while waiting for chase to finish */
wait_queue_head_t wait_open; /* for handling sleeping while waiting for open to finish */
wait_queue_head_t wait_command; /* for handling sleeping while waiting for command to finish */
struct usb_serial_port *port; /* loop back to the owner of this object */
};
/* This structure holds all of the individual device information */
struct edgeport_serial {
char name[MAX_NAME_LEN+2]; /* string name of this device */
struct edge_manuf_descriptor manuf_descriptor; /* the manufacturer descriptor */
struct edge_boot_descriptor boot_descriptor; /* the boot firmware descriptor */
struct edgeport_product_info product_info; /* Product Info */
struct edge_compatibility_descriptor epic_descriptor; /* Edgeport compatible descriptor */
int is_epic; /* flag if EPiC device or not */
__u8 interrupt_in_endpoint; /* the interrupt endpoint handle */
unsigned char *interrupt_in_buffer; /* the buffer we use for the interrupt endpoint */
struct urb *interrupt_read_urb; /* our interrupt urb */
__u8 bulk_in_endpoint; /* the bulk in endpoint handle */
unsigned char *bulk_in_buffer; /* the buffer we use for the bulk in endpoint */
struct urb *read_urb; /* our bulk read urb */
bool read_in_progress;
spinlock_t es_lock;
__u8 bulk_out_endpoint; /* the bulk out endpoint handle */
__s16 rxBytesAvail; /* the number of bytes that we need to read from this device */
enum RXSTATE rxState; /* the current state of the bulk receive processor */
__u8 rxHeader1; /* receive header byte 1 */
__u8 rxHeader2; /* receive header byte 2 */
__u8 rxHeader3; /* receive header byte 3 */
__u8 rxPort; /* the port that we are currently receiving data for */
__u8 rxStatusCode; /* the receive status code */
__u8 rxStatusParam; /* the receive status parameter */
__s16 rxBytesRemaining; /* the number of port bytes left to read */
struct usb_serial *serial; /* loop back to the owner of this object */
};
/* baud rate information */
struct divisor_table_entry {
__u32 BaudRate;
__u16 Divisor;
};
/*
* Define table of divisors for Rev A EdgePort/4 hardware
* These assume a 3.6864MHz crystal, the standard /16, and
* MCR.7 = 0.
*/
static const struct divisor_table_entry divisor_table[] = {
{ 50, 4608},
{ 75, 3072},
{ 110, 2095}, /* 2094.545455 => 230450 => .0217 % over */
{ 134, 1713}, /* 1713.011152 => 230398.5 => .00065% under */
{ 150, 1536},
{ 300, 768},
{ 600, 384},
{ 1200, 192},
{ 1800, 128},
{ 2400, 96},
{ 4800, 48},
{ 7200, 32},
{ 9600, 24},
{ 14400, 16},
{ 19200, 12},
{ 38400, 6},
{ 57600, 4},
{ 115200, 2},
{ 230400, 1},
};
/* Number of outstanding Command Write Urbs */
static atomic_t CmdUrbs = ATOMIC_INIT(0);
/* function prototypes */
static void edge_close(struct usb_serial_port *port);
static void process_rcvd_data(struct edgeport_serial *edge_serial,
unsigned char *buffer, __u16 bufferLength);
static void process_rcvd_status(struct edgeport_serial *edge_serial,
__u8 byte2, __u8 byte3);
static void edge_tty_recv(struct usb_serial_port *port, unsigned char *data,
int length);
static void handle_new_msr(struct edgeport_port *edge_port, __u8 newMsr);
static void handle_new_lsr(struct edgeport_port *edge_port, __u8 lsrData,
__u8 lsr, __u8 data);
static int send_iosp_ext_cmd(struct edgeport_port *edge_port, __u8 command,
__u8 param);
static int calc_baud_rate_divisor(struct device *dev, int baud_rate, int *divisor);
static void change_port_settings(struct tty_struct *tty,
struct edgeport_port *edge_port,
const struct ktermios *old_termios);
static int send_cmd_write_uart_register(struct edgeport_port *edge_port,
__u8 regNum, __u8 regValue);
static int write_cmd_usb(struct edgeport_port *edge_port,
unsigned char *buffer, int writeLength);
static void send_more_port_data(struct edgeport_serial *edge_serial,
struct edgeport_port *edge_port);
static int rom_write(struct usb_serial *serial, __u16 extAddr, __u16 addr,
__u16 length, const __u8 *data);
/* ************************************************************************ */
/* ************************************************************************ */
/* ************************************************************************ */
/* ************************************************************************ */
/************************************************************************
* *
* update_edgeport_E2PROM() Compare current versions of *
* Boot ROM and Manufacture *
* Descriptors with versions *
* embedded in this driver *
* *
************************************************************************/
static void update_edgeport_E2PROM(struct edgeport_serial *edge_serial)
{
struct device *dev = &edge_serial->serial->dev->dev;
__u32 BootCurVer;
__u32 BootNewVer;
__u8 BootMajorVersion;
__u8 BootMinorVersion;
__u16 BootBuildNumber;
__u32 Bootaddr;
const struct ihex_binrec *rec;
const struct firmware *fw;
const char *fw_name;
int response;
switch (edge_serial->product_info.iDownloadFile) {
case EDGE_DOWNLOAD_FILE_I930:
fw_name = "edgeport/boot.fw";
break;
case EDGE_DOWNLOAD_FILE_80251:
fw_name = "edgeport/boot2.fw";
break;
default:
return;
}
response = request_ihex_firmware(&fw, fw_name,
&edge_serial->serial->dev->dev);
if (response) {
dev_err(dev, "Failed to load image \"%s\" err %d\n",
fw_name, response);
return;
}
rec = (const struct ihex_binrec *)fw->data;
BootMajorVersion = rec->data[0];
BootMinorVersion = rec->data[1];
BootBuildNumber = (rec->data[2] << 8) | rec->data[3];
/* Check Boot Image Version */
BootCurVer = (edge_serial->boot_descriptor.MajorVersion << 24) +
(edge_serial->boot_descriptor.MinorVersion << 16) +
le16_to_cpu(edge_serial->boot_descriptor.BuildNumber);
BootNewVer = (BootMajorVersion << 24) +
(BootMinorVersion << 16) +
BootBuildNumber;
dev_dbg(dev, "Current Boot Image version %d.%d.%d\n",
edge_serial->boot_descriptor.MajorVersion,
edge_serial->boot_descriptor.MinorVersion,
le16_to_cpu(edge_serial->boot_descriptor.BuildNumber));
if (BootNewVer > BootCurVer) {
dev_dbg(dev, "**Update Boot Image from %d.%d.%d to %d.%d.%d\n",
edge_serial->boot_descriptor.MajorVersion,
edge_serial->boot_descriptor.MinorVersion,
le16_to_cpu(edge_serial->boot_descriptor.BuildNumber),
BootMajorVersion, BootMinorVersion, BootBuildNumber);
dev_dbg(dev, "Downloading new Boot Image\n");
for (rec = ihex_next_binrec(rec); rec;
rec = ihex_next_binrec(rec)) {
Bootaddr = be32_to_cpu(rec->addr);
response = rom_write(edge_serial->serial,
Bootaddr >> 16,
Bootaddr & 0xFFFF,
be16_to_cpu(rec->len),
&rec->data[0]);
if (response < 0) {
dev_err(&edge_serial->serial->dev->dev,
"rom_write failed (%x, %x, %d)\n",
Bootaddr >> 16, Bootaddr & 0xFFFF,
be16_to_cpu(rec->len));
break;
}
}
} else {
dev_dbg(dev, "Boot Image -- already up to date\n");
}
release_firmware(fw);
}
static void dump_product_info(struct edgeport_serial *edge_serial,
struct edgeport_product_info *product_info)
{
struct device *dev = &edge_serial->serial->dev->dev;
/* Dump Product Info structure */
dev_dbg(dev, "**Product Information:\n");
dev_dbg(dev, " ProductId %x\n", product_info->ProductId);
dev_dbg(dev, " NumPorts %d\n", product_info->NumPorts);
dev_dbg(dev, " ProdInfoVer %d\n", product_info->ProdInfoVer);
dev_dbg(dev, " IsServer %d\n", product_info->IsServer);
dev_dbg(dev, " IsRS232 %d\n", product_info->IsRS232);
dev_dbg(dev, " IsRS422 %d\n", product_info->IsRS422);
dev_dbg(dev, " IsRS485 %d\n", product_info->IsRS485);
dev_dbg(dev, " RomSize %d\n", product_info->RomSize);
dev_dbg(dev, " RamSize %d\n", product_info->RamSize);
dev_dbg(dev, " CpuRev %x\n", product_info->CpuRev);
dev_dbg(dev, " BoardRev %x\n", product_info->BoardRev);
dev_dbg(dev, " BootMajorVersion %d.%d.%d\n",
product_info->BootMajorVersion,
product_info->BootMinorVersion,
le16_to_cpu(product_info->BootBuildNumber));
dev_dbg(dev, " FirmwareMajorVersion %d.%d.%d\n",
product_info->FirmwareMajorVersion,
product_info->FirmwareMinorVersion,
le16_to_cpu(product_info->FirmwareBuildNumber));
dev_dbg(dev, " ManufactureDescDate %d/%d/%d\n",
product_info->ManufactureDescDate[0],
product_info->ManufactureDescDate[1],
product_info->ManufactureDescDate[2]+1900);
dev_dbg(dev, " iDownloadFile 0x%x\n",
product_info->iDownloadFile);
dev_dbg(dev, " EpicVer %d\n", product_info->EpicVer);
}
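/*****************************************************************************
* get_product_info
*	fill in the product information structure from the manufacturer and
*	boot descriptors, pick the firmware download type, set the
*	RS232/422/485 flags, and log the result.
*****************************************************************************/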
static void get_product_info(struct edgeport_serial *edge_serial)
{
struct edgeport_product_info *product_info = &edge_serial->product_info;
memset(product_info, 0, sizeof(struct edgeport_product_info));
product_info->ProductId = (__u16)(le16_to_cpu(edge_serial->serial->dev->descriptor.idProduct) & ~ION_DEVICE_ID_80251_NETCHIP);
product_info->NumPorts = edge_serial->manuf_descriptor.NumPorts;
product_info->ProdInfoVer = 0;
product_info->RomSize = edge_serial->manuf_descriptor.RomSize;
product_info->RamSize = edge_serial->manuf_descriptor.RamSize;
product_info->CpuRev = edge_serial->manuf_descriptor.CpuRev;
product_info->BoardRev = edge_serial->manuf_descriptor.BoardRev;
product_info->BootMajorVersion =
edge_serial->boot_descriptor.MajorVersion;
product_info->BootMinorVersion =
edge_serial->boot_descriptor.MinorVersion;
product_info->BootBuildNumber =
edge_serial->boot_descriptor.BuildNumber;
memcpy(product_info->ManufactureDescDate,
edge_serial->manuf_descriptor.DescDate,
sizeof(edge_serial->manuf_descriptor.DescDate));
/* check if this is 2nd generation hardware */
if (le16_to_cpu(edge_serial->serial->dev->descriptor.idProduct)
& ION_DEVICE_ID_80251_NETCHIP)
product_info->iDownloadFile = EDGE_DOWNLOAD_FILE_80251;
else
product_info->iDownloadFile = EDGE_DOWNLOAD_FILE_I930;
/* Determine Product type and set appropriate flags */
switch (DEVICE_ID_FROM_USB_PRODUCT_ID(product_info->ProductId)) {
case ION_DEVICE_ID_EDGEPORT_COMPATIBLE:
case ION_DEVICE_ID_EDGEPORT_4T:
case ION_DEVICE_ID_EDGEPORT_4:
case ION_DEVICE_ID_EDGEPORT_2:
case ION_DEVICE_ID_EDGEPORT_8_DUAL_CPU:
case ION_DEVICE_ID_EDGEPORT_8:
case ION_DEVICE_ID_EDGEPORT_421:
case ION_DEVICE_ID_EDGEPORT_21:
case ION_DEVICE_ID_EDGEPORT_2_DIN:
case ION_DEVICE_ID_EDGEPORT_4_DIN:
case ION_DEVICE_ID_EDGEPORT_16_DUAL_CPU:
product_info->IsRS232 = 1;
break;
case ION_DEVICE_ID_EDGEPORT_2I: /* Edgeport/2 RS422/RS485 */
product_info->IsRS422 = 1;
product_info->IsRS485 = 1;
break;
case ION_DEVICE_ID_EDGEPORT_8I: /* Edgeport/8 RS422 */
case ION_DEVICE_ID_EDGEPORT_4I: /* Edgeport/4 RS422 */
product_info->IsRS422 = 1;
break;
}
dump_product_info(edge_serial, product_info);
}
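/*****************************************************************************
* get_epic_descriptor
*	request the EPiC compatibility descriptor from the device; if a full
*	descriptor is returned, mark the device as EPiC and fill in the
*	product information from it. Returns 0 on success or a negative error.
*****************************************************************************/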
static int get_epic_descriptor(struct edgeport_serial *ep)
{
int result;
struct usb_serial *serial = ep->serial;
struct edgeport_product_info *product_info = &ep->product_info;
struct edge_compatibility_descriptor *epic;
struct edge_compatibility_bits *bits;
struct device *dev = &serial->dev->dev;
ep->is_epic = 0;
epic = kmalloc(sizeof(*epic), GFP_KERNEL);
if (!epic)
return -ENOMEM;
result = usb_control_msg(serial->dev, usb_rcvctrlpipe(serial->dev, 0),
USB_REQUEST_ION_GET_EPIC_DESC,
0xC0, 0x00, 0x00,
epic, sizeof(*epic),
300);
if (result == sizeof(*epic)) {
ep->is_epic = 1;
memcpy(&ep->epic_descriptor, epic, sizeof(*epic));
memset(product_info, 0, sizeof(struct edgeport_product_info));
product_info->NumPorts = epic->NumPorts;
product_info->ProdInfoVer = 0;
product_info->FirmwareMajorVersion = epic->MajorVersion;
product_info->FirmwareMinorVersion = epic->MinorVersion;
product_info->FirmwareBuildNumber = epic->BuildNumber;
product_info->iDownloadFile = epic->iDownloadFile;
product_info->EpicVer = epic->EpicVer;
product_info->Epic = epic->Supports;
product_info->ProductId = ION_DEVICE_ID_EDGEPORT_COMPATIBLE;
dump_product_info(ep, product_info);
bits = &ep->epic_descriptor.Supports;
dev_dbg(dev, "**EPIC descriptor:\n");
dev_dbg(dev, " VendEnableSuspend: %s\n", bits->VendEnableSuspend ? "TRUE": "FALSE");
dev_dbg(dev, " IOSPOpen : %s\n", bits->IOSPOpen ? "TRUE": "FALSE");
dev_dbg(dev, " IOSPClose : %s\n", bits->IOSPClose ? "TRUE": "FALSE");
dev_dbg(dev, " IOSPChase : %s\n", bits->IOSPChase ? "TRUE": "FALSE");
dev_dbg(dev, " IOSPSetRxFlow : %s\n", bits->IOSPSetRxFlow ? "TRUE": "FALSE");
dev_dbg(dev, " IOSPSetTxFlow : %s\n", bits->IOSPSetTxFlow ? "TRUE": "FALSE");
dev_dbg(dev, " IOSPSetXChar : %s\n", bits->IOSPSetXChar ? "TRUE": "FALSE");
dev_dbg(dev, " IOSPRxCheck : %s\n", bits->IOSPRxCheck ? "TRUE": "FALSE");
dev_dbg(dev, " IOSPSetClrBreak : %s\n", bits->IOSPSetClrBreak ? "TRUE": "FALSE");
dev_dbg(dev, " IOSPWriteMCR : %s\n", bits->IOSPWriteMCR ? "TRUE": "FALSE");
dev_dbg(dev, " IOSPWriteLCR : %s\n", bits->IOSPWriteLCR ? "TRUE": "FALSE");
dev_dbg(dev, " IOSPSetBaudRate : %s\n", bits->IOSPSetBaudRate ? "TRUE": "FALSE");
dev_dbg(dev, " TrueEdgeport : %s\n", bits->TrueEdgeport ? "TRUE": "FALSE");
result = 0;
} else if (result >= 0) {
dev_warn(&serial->interface->dev, "short epic descriptor received: %d\n",
result);
result = -EIO;
}
kfree(epic);
return result;
}
/************************************************************************/
/************************************************************************/
/* U S B C A L L B A C K F U N C T I O N S */
/* U S B C A L L B A C K F U N C T I O N S */
/************************************************************************/
/************************************************************************/
/*****************************************************************************
* edge_interrupt_callback
* this is the callback function for when we have received data on the
* interrupt endpoint.
*****************************************************************************/
static void edge_interrupt_callback(struct urb *urb)
{
struct edgeport_serial *edge_serial = urb->context;
struct device *dev;
struct edgeport_port *edge_port;
struct usb_serial_port *port;
unsigned char *data = urb->transfer_buffer;
int length = urb->actual_length;
unsigned long flags;
int bytes_avail;
int position;
int txCredits;
int portNumber;
int result;
int status = urb->status;
switch (status) {
case 0:
/* success */
break;
case -ECONNRESET:
case -ENOENT:
case -ESHUTDOWN:
/* this urb is terminated, clean up */
dev_dbg(&urb->dev->dev, "%s - urb shutting down with status: %d\n", __func__, status);
return;
default:
dev_dbg(&urb->dev->dev, "%s - nonzero urb status received: %d\n", __func__, status);
goto exit;
}
dev = &edge_serial->serial->dev->dev;
/* process this interrupt-read even if there are no ports open */
if (length) {
usb_serial_debug_data(dev, __func__, length, data);
if (length > 1) {
bytes_avail = data[0] | (data[1] << 8);
if (bytes_avail) {
spin_lock_irqsave(&edge_serial->es_lock, flags);
edge_serial->rxBytesAvail += bytes_avail;
dev_dbg(dev,
"%s - bytes_avail=%d, rxBytesAvail=%d, read_in_progress=%d\n",
__func__, bytes_avail,
edge_serial->rxBytesAvail,
edge_serial->read_in_progress);
if (edge_serial->rxBytesAvail > 0 &&
!edge_serial->read_in_progress) {
dev_dbg(dev, "%s - posting a read\n", __func__);
edge_serial->read_in_progress = true;
/* we have pending bytes on the
bulk in pipe, send a request */
result = usb_submit_urb(edge_serial->read_urb, GFP_ATOMIC);
if (result) {
dev_err(dev,
"%s - usb_submit_urb(read bulk) failed with result = %d\n",
__func__, result);
edge_serial->read_in_progress = false;
}
}
spin_unlock_irqrestore(&edge_serial->es_lock,
flags);
}
}
/* grab the txcredits for the ports if available */
position = 2;
portNumber = 0;
while ((position < length - 1) &&
(portNumber < edge_serial->serial->num_ports)) {
txCredits = data[position] | (data[position+1] << 8);
if (txCredits) {
port = edge_serial->serial->port[portNumber];
edge_port = usb_get_serial_port_data(port);
if (edge_port && edge_port->open) {
spin_lock_irqsave(&edge_port->ep_lock,
flags);
edge_port->txCredits += txCredits;
spin_unlock_irqrestore(&edge_port->ep_lock,
flags);
dev_dbg(dev, "%s - txcredits for port%d = %d\n",
__func__, portNumber,
edge_port->txCredits);
/* tell the tty driver that something
has changed */
tty_port_tty_wakeup(&edge_port->port->port);
/* Since we have more credit, check
if more data can be sent */
send_more_port_data(edge_serial,
edge_port);
}
}
position += 2;
++portNumber;
}
}
exit:
result = usb_submit_urb(urb, GFP_ATOMIC);
if (result)
dev_err(&urb->dev->dev,
"%s - Error %d submitting control urb\n",
__func__, result);
}
/*****************************************************************************
* edge_bulk_in_callback
* this is the callback function for when we have received data on the
* bulk in endpoint.
*****************************************************************************/
static void edge_bulk_in_callback(struct urb *urb)
{
struct edgeport_serial *edge_serial = urb->context;
struct device *dev;
unsigned char *data = urb->transfer_buffer;
int retval;
__u16 raw_data_length;
int status = urb->status;
unsigned long flags;
if (status) {
dev_dbg(&urb->dev->dev, "%s - nonzero read bulk status received: %d\n",
__func__, status);
edge_serial->read_in_progress = false;
return;
}
if (urb->actual_length == 0) {
dev_dbg(&urb->dev->dev, "%s - read bulk callback with no data\n", __func__);
edge_serial->read_in_progress = false;
return;
}
dev = &edge_serial->serial->dev->dev;
raw_data_length = urb->actual_length;
usb_serial_debug_data(dev, __func__, raw_data_length, data);
spin_lock_irqsave(&edge_serial->es_lock, flags);
/* decrement our rxBytes available by the number that we just got */
edge_serial->rxBytesAvail -= raw_data_length;
dev_dbg(dev, "%s - Received = %d, rxBytesAvail %d\n", __func__,
raw_data_length, edge_serial->rxBytesAvail);
process_rcvd_data(edge_serial, data, urb->actual_length);
/* check to see if there's any more data for us to read */
if (edge_serial->rxBytesAvail > 0) {
dev_dbg(dev, "%s - posting a read\n", __func__);
retval = usb_submit_urb(edge_serial->read_urb, GFP_ATOMIC);
if (retval) {
dev_err(dev,
"%s - usb_submit_urb(read bulk) failed, retval = %d\n",
__func__, retval);
edge_serial->read_in_progress = false;
}
} else {
edge_serial->read_in_progress = false;
}
spin_unlock_irqrestore(&edge_serial->es_lock, flags);
}
/*****************************************************************************
* edge_bulk_out_data_callback
* this is the callback function for when we have finished sending
* serial data on the bulk out endpoint.
*****************************************************************************/
static void edge_bulk_out_data_callback(struct urb *urb)
{
struct edgeport_port *edge_port = urb->context;
int status = urb->status;
if (status) {
dev_dbg(&urb->dev->dev,
"%s - nonzero write bulk status received: %d\n",
__func__, status);
}
if (edge_port->open)
tty_port_tty_wakeup(&edge_port->port->port);
/* Release the Write URB */
edge_port->write_in_progress = false;
/* Check if more data needs to be sent */
send_more_port_data((struct edgeport_serial *)
(usb_get_serial_data(edge_port->port->serial)), edge_port);
}
/*****************************************************************************
* edge_bulk_out_cmd_callback
* this is the callback function for when we have finished sending a
* command on the bulk out endpoint.
*****************************************************************************/
static void edge_bulk_out_cmd_callback(struct urb *urb)
{
struct edgeport_port *edge_port = urb->context;
int status = urb->status;
atomic_dec(&CmdUrbs);
dev_dbg(&urb->dev->dev, "%s - FREE URB %p (outstanding %d)\n",
__func__, urb, atomic_read(&CmdUrbs));
/* clean up the transfer buffer */
kfree(urb->transfer_buffer);
/* Free the command urb */
usb_free_urb(urb);
if (status) {
dev_dbg(&urb->dev->dev,
"%s - nonzero write bulk status received: %d\n",
__func__, status);
return;
}
/* tell the tty driver that something has changed */
if (edge_port->open)
tty_port_tty_wakeup(&edge_port->port->port);
/* we have completed the command */
edge_port->commandPending = false;
wake_up(&edge_port->wait_command);
}
/*****************************************************************************
* Driver tty interface functions
*****************************************************************************/
/*****************************************************************************
* SerialOpen
* this function is called by the tty driver when a port is opened
* If successful, we return 0
* Otherwise we return a negative error number.
*****************************************************************************/
static int edge_open(struct tty_struct *tty, struct usb_serial_port *port)
{
struct edgeport_port *edge_port = usb_get_serial_port_data(port);
struct device *dev = &port->dev;
struct usb_serial *serial;
struct edgeport_serial *edge_serial;
int response;
if (edge_port == NULL)
return -ENODEV;
/* see if we've set up our endpoint info yet (can't set it up
in edge_startup as the structures were not set up at that time.) */
serial = port->serial;
edge_serial = usb_get_serial_data(serial);
if (edge_serial == NULL)
return -ENODEV;
if (edge_serial->interrupt_in_buffer == NULL) {
struct usb_serial_port *port0 = serial->port[0];
/* not set up yet, so do it now */
edge_serial->interrupt_in_buffer =
port0->interrupt_in_buffer;
edge_serial->interrupt_in_endpoint =
port0->interrupt_in_endpointAddress;
edge_serial->interrupt_read_urb = port0->interrupt_in_urb;
edge_serial->bulk_in_buffer = port0->bulk_in_buffer;
edge_serial->bulk_in_endpoint =
port0->bulk_in_endpointAddress;
edge_serial->read_urb = port0->read_urb;
edge_serial->bulk_out_endpoint =
port0->bulk_out_endpointAddress;
/* set up our interrupt urb */
usb_fill_int_urb(edge_serial->interrupt_read_urb,
serial->dev,
usb_rcvintpipe(serial->dev,
port0->interrupt_in_endpointAddress),
port0->interrupt_in_buffer,
edge_serial->interrupt_read_urb->transfer_buffer_length,
edge_interrupt_callback, edge_serial,
edge_serial->interrupt_read_urb->interval);
/* set up our bulk in urb */
usb_fill_bulk_urb(edge_serial->read_urb, serial->dev,
usb_rcvbulkpipe(serial->dev,
port0->bulk_in_endpointAddress),
port0->bulk_in_buffer,
edge_serial->read_urb->transfer_buffer_length,
edge_bulk_in_callback, edge_serial);
edge_serial->read_in_progress = false;
/* start interrupt read for this edgeport
* this interrupt will continue as long
* as the edgeport is connected */
response = usb_submit_urb(edge_serial->interrupt_read_urb,
GFP_KERNEL);
if (response) {
dev_err(dev, "%s - Error %d submitting control urb\n",
__func__, response);
}
}
/* initialize our wait queues */
init_waitqueue_head(&edge_port->wait_open);
init_waitqueue_head(&edge_port->wait_chase);
init_waitqueue_head(&edge_port->wait_command);
/* initialize our port settings */
edge_port->txCredits = 0; /* Can't send any data yet */
/* Must always set this bit to enable ints! */
edge_port->shadowMCR = MCR_MASTER_IE;
edge_port->chaseResponsePending = false;
/* send a open port command */
edge_port->openPending = true;
edge_port->open = false;
response = send_iosp_ext_cmd(edge_port, IOSP_CMD_OPEN_PORT, 0);
if (response < 0) {
dev_err(dev, "%s - error sending open port command\n", __func__);
edge_port->openPending = false;
return -ENODEV;
}
/* now wait for the port to be completely opened */
wait_event_timeout(edge_port->wait_open, !edge_port->openPending,
OPEN_TIMEOUT);
if (!edge_port->open) {
/* open timed out */
dev_dbg(dev, "%s - open timeout\n", __func__);
edge_port->openPending = false;
return -ENODEV;
}
/* create the txfifo */
edge_port->txfifo.head = 0;
edge_port->txfifo.tail = 0;
edge_port->txfifo.count = 0;
edge_port->txfifo.size = edge_port->maxTxCredits;
edge_port->txfifo.fifo = kmalloc(edge_port->maxTxCredits, GFP_KERNEL);
if (!edge_port->txfifo.fifo) {
edge_close(port);
return -ENOMEM;
}
/* Allocate a URB for the write */
edge_port->write_urb = usb_alloc_urb(0, GFP_KERNEL);
edge_port->write_in_progress = false;
if (!edge_port->write_urb) {
edge_close(port);
return -ENOMEM;
}
dev_dbg(dev, "%s - Initialize TX fifo to %d bytes\n",
__func__, edge_port->maxTxCredits);
return 0;
}
/************************************************************************
*
* block_until_chase_response
*
* This function will block the close until one of the following:
* 1. Response to our Chase comes from Edgeport
* 2. A timeout of 10 seconds without activity has expired
* (1K of Edgeport data @ 2400 baud ==> 4 sec to empty)
*
************************************************************************/
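/*
 * Timing note (derived from the constants below): each wait is
 * schedule_timeout(1 * HZ), i.e. roughly one second, and the loop counter
 * starts at 10 and is only decremented when txCredits did not change, so
 * the close blocks for at most about 10 seconds of complete inactivity
 * before giving up on the chase response.
 */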
static void block_until_chase_response(struct edgeport_port *edge_port)
{
struct device *dev = &edge_port->port->dev;
DEFINE_WAIT(wait);
__u16 lastCredits;
int timeout = 1*HZ;
int loop = 10;
while (1) {
/* Save Last credits */
lastCredits = edge_port->txCredits;
/* Did we get our Chase response */
if (!edge_port->chaseResponsePending) {
dev_dbg(dev, "%s - Got Chase Response\n", __func__);
/* did we get all of our credit back? */
if (edge_port->txCredits == edge_port->maxTxCredits) {
dev_dbg(dev, "%s - Got all credits\n", __func__);
return;
}
}
/* Block the thread for a while */
prepare_to_wait(&edge_port->wait_chase, &wait,
TASK_UNINTERRUPTIBLE);
schedule_timeout(timeout);
finish_wait(&edge_port->wait_chase, &wait);
if (lastCredits == edge_port->txCredits) {
/* No activity.. count down. */
loop--;
if (loop == 0) {
edge_port->chaseResponsePending = false;
dev_dbg(dev, "%s - Chase TIMEOUT\n", __func__);
return;
}
} else {
/* Reset timeout value back to 10 seconds */
dev_dbg(dev, "%s - Last %d, Current %d\n", __func__,
lastCredits, edge_port->txCredits);
loop = 10;
}
}
}
/************************************************************************
*
* block_until_tx_empty
*
* This function will block the close until one of the following:
* 1. TX count is 0
* 2. The edgeport has stopped
* 3. A timeout of 3 seconds without activity has expired
*
************************************************************************/
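/*
 * Timing note (derived from the constants below): each wait is
 * schedule_timeout(HZ / 10), i.e. roughly 100 ms, and the loop counter
 * starts at 30, giving the "3 seconds without activity" timeout mentioned
 * above (30 * HZ/10).
 */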
static void block_until_tx_empty(struct edgeport_port *edge_port)
{
struct device *dev = &edge_port->port->dev;
DEFINE_WAIT(wait);
struct TxFifo *fifo = &edge_port->txfifo;
__u32 lastCount;
int timeout = HZ/10;
int loop = 30;
while (1) {
/* Save Last count */
lastCount = fifo->count;
/* Is the Edgeport Buffer empty? */
if (lastCount == 0) {
dev_dbg(dev, "%s - TX Buffer Empty\n", __func__);
return;
}
/* Block the thread for a while */
prepare_to_wait(&edge_port->wait_chase, &wait,
TASK_UNINTERRUPTIBLE);
schedule_timeout(timeout);
finish_wait(&edge_port->wait_chase, &wait);
dev_dbg(dev, "%s wait\n", __func__);
if (lastCount == fifo->count) {
/* No activity.. count down. */
loop--;
if (loop == 0) {
dev_dbg(dev, "%s - TIMEOUT\n", __func__);
return;
}
} else {
/* Reset timeout value back to 3 seconds */
loop = 30;
}
}
}
/*****************************************************************************
* edge_close
* this function is called by the tty driver when a port is closed
*****************************************************************************/
static void edge_close(struct usb_serial_port *port)
{
struct edgeport_serial *edge_serial;
struct edgeport_port *edge_port;
int status;
edge_serial = usb_get_serial_data(port->serial);
edge_port = usb_get_serial_port_data(port);
if (edge_serial == NULL || edge_port == NULL)
return;
/* block until tx is empty */
block_until_tx_empty(edge_port);
edge_port->closePending = true;
if (!edge_serial->is_epic ||
edge_serial->epic_descriptor.Supports.IOSPChase) {
/* flush and chase */
edge_port->chaseResponsePending = true;
dev_dbg(&port->dev, "%s - Sending IOSP_CMD_CHASE_PORT\n", __func__);
status = send_iosp_ext_cmd(edge_port, IOSP_CMD_CHASE_PORT, 0);
if (status == 0)
/* block until chase finished */
block_until_chase_response(edge_port);
else
edge_port->chaseResponsePending = false;
}
if (!edge_serial->is_epic ||
edge_serial->epic_descriptor.Supports.IOSPClose) {
/* close the port */
dev_dbg(&port->dev, "%s - Sending IOSP_CMD_CLOSE_PORT\n", __func__);
send_iosp_ext_cmd(edge_port, IOSP_CMD_CLOSE_PORT, 0);
}
/* port->close = true; */
edge_port->closePending = false;
edge_port->open = false;
edge_port->openPending = false;
usb_kill_urb(edge_port->write_urb);
if (edge_port->write_urb) {
/* if this urb had a transfer buffer already
(old transfer) free it */
kfree(edge_port->write_urb->transfer_buffer);
usb_free_urb(edge_port->write_urb);
edge_port->write_urb = NULL;
}
kfree(edge_port->txfifo.fifo);
edge_port->txfifo.fifo = NULL;
}
/*****************************************************************************
* SerialWrite
* this function is called by the tty driver when data should be written
* to the port.
* If successful, we return the number of bytes written, otherwise we
* return a negative error number.
*****************************************************************************/
static int edge_write(struct tty_struct *tty, struct usb_serial_port *port,
const unsigned char *data, int count)
{
struct edgeport_port *edge_port = usb_get_serial_port_data(port);
struct TxFifo *fifo;
int copySize;
int bytesleft;
int firsthalf;
int secondhalf;
unsigned long flags;
if (edge_port == NULL)
return -ENODEV;
/* get a pointer to the Tx fifo */
fifo = &edge_port->txfifo;
spin_lock_irqsave(&edge_port->ep_lock, flags);
/* calculate number of bytes to put in fifo */
copySize = min((unsigned int)count,
(edge_port->txCredits - fifo->count));
dev_dbg(&port->dev, "%s of %d byte(s) Fifo room %d -- will copy %d bytes\n",
__func__, count, edge_port->txCredits - fifo->count, copySize);
/* catch writes of 0 bytes which the tty driver likes to give us,
and when txCredits is empty */
if (copySize == 0) {
dev_dbg(&port->dev, "%s - copySize = Zero\n", __func__);
goto finish_write;
}
/* queue the data
* since we can never overflow the buffer we do not have to check for a
* full condition
*
* the copy is done in two parts -- first fill to the end of the buffer,
* then copy the rest from the start of the buffer
*/
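/*
 * Illustrative example of the wrap-around copy (values chosen purely for
 * illustration): with fifo->size = 64, fifo->head = 60 and copySize = 10,
 * the first memcpy() fills the 4 bytes up to the end of the buffer, head
 * wraps to 0, and the remaining 6 bytes land at the start of the buffer.
 */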
bytesleft = fifo->size - fifo->head;
firsthalf = min(bytesleft, copySize);
dev_dbg(&port->dev, "%s - copy %d bytes of %d into fifo \n", __func__,
firsthalf, bytesleft);
/* now copy our data */
memcpy(&fifo->fifo[fifo->head], data, firsthalf);
usb_serial_debug_data(&port->dev, __func__, firsthalf, &fifo->fifo[fifo->head]);
/* update the index and size */
fifo->head += firsthalf;
fifo->count += firsthalf;
/* wrap the index */
if (fifo->head == fifo->size)
fifo->head = 0;
secondhalf = copySize-firsthalf;
if (secondhalf) {
dev_dbg(&port->dev, "%s - copy rest of data %d\n", __func__, secondhalf);
memcpy(&fifo->fifo[fifo->head], &data[firsthalf], secondhalf);
usb_serial_debug_data(&port->dev, __func__, secondhalf, &fifo->fifo[fifo->head]);
/* update the index and size */
fifo->count += secondhalf;
fifo->head += secondhalf;
/* No need to check for wrap since we can not get to end of
* the fifo in this part
*/
}
finish_write:
spin_unlock_irqrestore(&edge_port->ep_lock, flags);
send_more_port_data((struct edgeport_serial *)
usb_get_serial_data(port->serial), edge_port);
dev_dbg(&port->dev, "%s wrote %d byte(s) TxCredits %d, Fifo %d\n",
__func__, copySize, edge_port->txCredits, fifo->count);
return copySize;
}
/************************************************************************
*
* send_more_port_data()
*
* This routine attempts to write additional UART transmit data
* to a port over the USB bulk pipe. It is called (1) when new
* data has been written to a port's TxBuffer from higher layers
* (2) when the peripheral sends us additional TxCredits indicating
* that it can accept more Tx data for a given port; and (3) when
* a bulk write completes successfully and we want to see if we
* can transmit more.
*
************************************************************************/
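/*
 * Wire-format note (as used in the body below): the transfer buffer sent
 * on the bulk out pipe is the fifo payload prefixed by a two byte IOSP
 * data header built with IOSP_BUILD_DATA_HDR1/IOSP_BUILD_DATA_HDR2 from
 * the port number and the byte count, so the urb length is count + 2.
 */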
static void send_more_port_data(struct edgeport_serial *edge_serial,
struct edgeport_port *edge_port)
{
struct TxFifo *fifo = &edge_port->txfifo;
struct device *dev = &edge_port->port->dev;
struct urb *urb;
unsigned char *buffer;
int status;
int count;
int bytesleft;
int firsthalf;
int secondhalf;
unsigned long flags;
spin_lock_irqsave(&edge_port->ep_lock, flags);
if (edge_port->write_in_progress ||
!edge_port->open ||
(fifo->count == 0)) {
dev_dbg(dev, "%s EXIT - fifo %d, PendingWrite = %d\n",
__func__, fifo->count, edge_port->write_in_progress);
goto exit_send;
}
/* since the amount of data in the fifo will always fit into the
* edgeport buffer we do not need to check the write length
*
* Do we have enough credits for this port to make it worthwhile
* to bother queueing a write. If it's too small, say a few bytes,
* it's better to wait for more credits so we can do a larger write.
*/
if (edge_port->txCredits < EDGE_FW_GET_TX_CREDITS_SEND_THRESHOLD(edge_port->maxTxCredits, EDGE_FW_BULK_MAX_PACKET_SIZE)) {
dev_dbg(dev, "%s Not enough credit - fifo %d TxCredit %d\n",
__func__, fifo->count, edge_port->txCredits);
goto exit_send;
}
/* lock this write */
edge_port->write_in_progress = true;
/* get a pointer to the write_urb */
urb = edge_port->write_urb;
/* make sure transfer buffer is freed */
kfree(urb->transfer_buffer);
urb->transfer_buffer = NULL;
/* build the data header for the buffer and port that we are about
to send out */
count = fifo->count;
buffer = kmalloc(count+2, GFP_ATOMIC);
if (!buffer) {
edge_port->write_in_progress = false;
goto exit_send;
}
buffer[0] = IOSP_BUILD_DATA_HDR1(edge_port->port->port_number, count);
buffer[1] = IOSP_BUILD_DATA_HDR2(edge_port->port->port_number, count);
/* now copy our data */
bytesleft = fifo->size - fifo->tail;
firsthalf = min(bytesleft, count);
memcpy(&buffer[2], &fifo->fifo[fifo->tail], firsthalf);
fifo->tail += firsthalf;
fifo->count -= firsthalf;
if (fifo->tail == fifo->size)
fifo->tail = 0;
secondhalf = count-firsthalf;
if (secondhalf) {
memcpy(&buffer[2+firsthalf], &fifo->fifo[fifo->tail],
secondhalf);
fifo->tail += secondhalf;
fifo->count -= secondhalf;
}
if (count)
usb_serial_debug_data(&edge_port->port->dev, __func__, count, &buffer[2]);
/* fill up the urb with all of our data and submit it */
usb_fill_bulk_urb(urb, edge_serial->serial->dev,
usb_sndbulkpipe(edge_serial->serial->dev,
edge_serial->bulk_out_endpoint),
buffer, count+2,
edge_bulk_out_data_callback, edge_port);
/* decrement the number of credits we have by the number we just sent */
edge_port->txCredits -= count;
edge_port->port->icount.tx += count;
status = usb_submit_urb(urb, GFP_ATOMIC);
if (status) {
/* something went wrong */
dev_err_console(edge_port->port,
"%s - usb_submit_urb(write bulk) failed, status = %d, data lost\n",
__func__, status);
edge_port->write_in_progress = false;
/* revert the credits as something bad happened. */
edge_port->txCredits += count;
edge_port->port->icount.tx -= count;
}
dev_dbg(dev, "%s wrote %d byte(s) TxCredit %d, Fifo %d\n",
__func__, count, edge_port->txCredits, fifo->count);
exit_send:
spin_unlock_irqrestore(&edge_port->ep_lock, flags);
}
/*****************************************************************************
* edge_write_room
* this function is called by the tty driver when it wants to know how
* many bytes of data we can accept for a specific port.
*****************************************************************************/
static unsigned int edge_write_room(struct tty_struct *tty)
{
struct usb_serial_port *port = tty->driver_data;
struct edgeport_port *edge_port = usb_get_serial_port_data(port);
unsigned int room;
unsigned long flags;
/* total of both buffers is still txCredit */
spin_lock_irqsave(&edge_port->ep_lock, flags);
room = edge_port->txCredits - edge_port->txfifo.count;
spin_unlock_irqrestore(&edge_port->ep_lock, flags);
dev_dbg(&port->dev, "%s - returns %u\n", __func__, room);
return room;
}
/*****************************************************************************
* edge_chars_in_buffer
* this function is called by the tty driver when it wants to know how
* many bytes of data we currently have outstanding in the port (data that
* has been written, but hasn't made it out the port yet)
*****************************************************************************/
static unsigned int edge_chars_in_buffer(struct tty_struct *tty)
{
struct usb_serial_port *port = tty->driver_data;
struct edgeport_port *edge_port = usb_get_serial_port_data(port);
unsigned int num_chars;
unsigned long flags;
spin_lock_irqsave(&edge_port->ep_lock, flags);
num_chars = edge_port->maxTxCredits - edge_port->txCredits +
edge_port->txfifo.count;
spin_unlock_irqrestore(&edge_port->ep_lock, flags);
if (num_chars) {
dev_dbg(&port->dev, "%s - returns %u\n", __func__, num_chars);
}
return num_chars;
}
/*****************************************************************************
* SerialThrottle
* this function is called by the tty driver when it wants to stop the data
* being read from the port.
*****************************************************************************/
static void edge_throttle(struct tty_struct *tty)
{
struct usb_serial_port *port = tty->driver_data;
struct edgeport_port *edge_port = usb_get_serial_port_data(port);
int status;
if (edge_port == NULL)
return;
if (!edge_port->open) {
dev_dbg(&port->dev, "%s - port not opened\n", __func__);
return;
}
/* if we are implementing XON/XOFF, send the stop character */
if (I_IXOFF(tty)) {
unsigned char stop_char = STOP_CHAR(tty);
status = edge_write(tty, port, &stop_char, 1);
if (status <= 0)
return;
}
/* if we are implementing RTS/CTS, toggle that line */
if (C_CRTSCTS(tty)) {
edge_port->shadowMCR &= ~MCR_RTS;
status = send_cmd_write_uart_register(edge_port, MCR,
edge_port->shadowMCR);
if (status != 0)
return;
}
}
/*****************************************************************************
* edge_unthrottle
* this function is called by the tty driver when it wants to resume the
* data being read from the port (called after SerialThrottle is called)
*****************************************************************************/
static void edge_unthrottle(struct tty_struct *tty)
{
struct usb_serial_port *port = tty->driver_data;
struct edgeport_port *edge_port = usb_get_serial_port_data(port);
int status;
if (edge_port == NULL)
return;
if (!edge_port->open) {
dev_dbg(&port->dev, "%s - port not opened\n", __func__);
return;
}
/* if we are implementing XON/XOFF, send the start character */
if (I_IXOFF(tty)) {
unsigned char start_char = START_CHAR(tty);
status = edge_write(tty, port, &start_char, 1);
if (status <= 0)
return;
}
/* if we are implementing RTS/CTS, toggle that line */
if (C_CRTSCTS(tty)) {
edge_port->shadowMCR |= MCR_RTS;
send_cmd_write_uart_register(edge_port, MCR,
edge_port->shadowMCR);
}
}
/*****************************************************************************
* SerialSetTermios
* this function is called by the tty driver when it wants to change
* the termios structure
*****************************************************************************/
static void edge_set_termios(struct tty_struct *tty,
struct usb_serial_port *port,
const struct ktermios *old_termios)
{
struct edgeport_port *edge_port = usb_get_serial_port_data(port);
if (edge_port == NULL)
return;
if (!edge_port->open) {
dev_dbg(&port->dev, "%s - port not opened\n", __func__);
return;
}
/* change the port settings to the new ones specified */
change_port_settings(tty, edge_port, old_termios);
}
/*****************************************************************************
* get_lsr_info - get line status register info
*
* Purpose: Let user call ioctl() to get info when the UART physically
* is emptied. On bus types like RS485, the transmitter must
* release the bus after transmitting. This must be done when
* the transmit shift register is empty, not when the
* transmit holding register is empty. This functionality
* allows an RS485 driver to be written in user space.
*****************************************************************************/
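/*
 * Illustrative (untested) user-space sketch of how this is reached, via
 * the TIOCSERGETLSR ioctl handled in edge_ioctl() further below; the
 * helper name is hypothetical:
 *
 *	unsigned int lsr;
 *	if (ioctl(fd, TIOCSERGETLSR, &lsr) == 0 && (lsr & TIOCSER_TEMT))
 *		on_transmitter_empty();
 *
 * TIOCSER_TEMT is reported only when all tx credits have been returned
 * and the local tx fifo is empty.
 */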
static int get_lsr_info(struct edgeport_port *edge_port,
unsigned int __user *value)
{
unsigned int result = 0;
unsigned long flags;
spin_lock_irqsave(&edge_port->ep_lock, flags);
if (edge_port->maxTxCredits == edge_port->txCredits &&
edge_port->txfifo.count == 0) {
dev_dbg(&edge_port->port->dev, "%s -- Empty\n", __func__);
result = TIOCSER_TEMT;
}
spin_unlock_irqrestore(&edge_port->ep_lock, flags);
if (copy_to_user(value, &result, sizeof(int)))
return -EFAULT;
return 0;
}
static int edge_tiocmset(struct tty_struct *tty,
unsigned int set, unsigned int clear)
{
struct usb_serial_port *port = tty->driver_data;
struct edgeport_port *edge_port = usb_get_serial_port_data(port);
unsigned int mcr;
mcr = edge_port->shadowMCR;
if (set & TIOCM_RTS)
mcr |= MCR_RTS;
if (set & TIOCM_DTR)
mcr |= MCR_DTR;
if (set & TIOCM_LOOP)
mcr |= MCR_LOOPBACK;
if (clear & TIOCM_RTS)
mcr &= ~MCR_RTS;
if (clear & TIOCM_DTR)
mcr &= ~MCR_DTR;
if (clear & TIOCM_LOOP)
mcr &= ~MCR_LOOPBACK;
edge_port->shadowMCR = mcr;
send_cmd_write_uart_register(edge_port, MCR, edge_port->shadowMCR);
return 0;
}
static int edge_tiocmget(struct tty_struct *tty)
{
struct usb_serial_port *port = tty->driver_data;
struct edgeport_port *edge_port = usb_get_serial_port_data(port);
unsigned int result = 0;
unsigned int msr;
unsigned int mcr;
msr = edge_port->shadowMSR;
mcr = edge_port->shadowMCR;
result = ((mcr & MCR_DTR) ? TIOCM_DTR: 0) /* 0x002 */
| ((mcr & MCR_RTS) ? TIOCM_RTS: 0) /* 0x004 */
| ((msr & EDGEPORT_MSR_CTS) ? TIOCM_CTS: 0) /* 0x020 */
| ((msr & EDGEPORT_MSR_CD) ? TIOCM_CAR: 0) /* 0x040 */
| ((msr & EDGEPORT_MSR_RI) ? TIOCM_RI: 0) /* 0x080 */
| ((msr & EDGEPORT_MSR_DSR) ? TIOCM_DSR: 0); /* 0x100 */
return result;
}
/*****************************************************************************
* SerialIoctl
* this function handles any ioctl calls to the driver
*****************************************************************************/
static int edge_ioctl(struct tty_struct *tty,
unsigned int cmd, unsigned long arg)
{
struct usb_serial_port *port = tty->driver_data;
struct edgeport_port *edge_port = usb_get_serial_port_data(port);
switch (cmd) {
case TIOCSERGETLSR:
dev_dbg(&port->dev, "%s TIOCSERGETLSR\n", __func__);
return get_lsr_info(edge_port, (unsigned int __user *) arg);
}
return -ENOIOCTLCMD;
}
/*****************************************************************************
* SerialBreak
* this function sends a break to the port
*****************************************************************************/
static int edge_break(struct tty_struct *tty, int break_state)
{
struct usb_serial_port *port = tty->driver_data;
struct edgeport_port *edge_port = usb_get_serial_port_data(port);
struct edgeport_serial *edge_serial = usb_get_serial_data(port->serial);
int status = 0;
if (!edge_serial->is_epic ||
edge_serial->epic_descriptor.Supports.IOSPChase) {
/* flush and chase */
edge_port->chaseResponsePending = true;
dev_dbg(&port->dev, "%s - Sending IOSP_CMD_CHASE_PORT\n", __func__);
status = send_iosp_ext_cmd(edge_port, IOSP_CMD_CHASE_PORT, 0);
if (status == 0) {
/* block until chase finished */
block_until_chase_response(edge_port);
} else {
edge_port->chaseResponsePending = false;
}
}
if (!edge_serial->is_epic ||
edge_serial->epic_descriptor.Supports.IOSPSetClrBreak) {
if (break_state == -1) {
dev_dbg(&port->dev, "%s - Sending IOSP_CMD_SET_BREAK\n", __func__);
status = send_iosp_ext_cmd(edge_port,
IOSP_CMD_SET_BREAK, 0);
} else {
dev_dbg(&port->dev, "%s - Sending IOSP_CMD_CLEAR_BREAK\n", __func__);
status = send_iosp_ext_cmd(edge_port,
IOSP_CMD_CLEAR_BREAK, 0);
}
if (status)
dev_dbg(&port->dev, "%s - error sending break set/clear command.\n",
__func__);
}
return status;
}
/*****************************************************************************
* process_rcvd_data
* this function handles the data received on the bulk in pipe.
*****************************************************************************/
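/*
 * State machine overview (for orientation; matches the switch below):
 * EXPECT_HDR1 -> EXPECT_HDR2 reads the two IOSP header bytes.  A status
 * header is either handled immediately or, when it needs a third byte,
 * goes through EXPECT_HDR3.  A data header records the port and length
 * and moves to EXPECT_DATA, which may span several bulk-in buffers before
 * returning to EXPECT_HDR1.
 */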
static void process_rcvd_data(struct edgeport_serial *edge_serial,
unsigned char *buffer, __u16 bufferLength)
{
struct usb_serial *serial = edge_serial->serial;
struct device *dev = &serial->dev->dev;
struct usb_serial_port *port;
struct edgeport_port *edge_port;
__u16 lastBufferLength;
__u16 rxLen;
lastBufferLength = bufferLength + 1;
while (bufferLength > 0) {
/* failsafe in case we get a message that we don't understand */
if (lastBufferLength == bufferLength) {
dev_dbg(dev, "%s - stuck in loop, exiting it.\n", __func__);
break;
}
lastBufferLength = bufferLength;
switch (edge_serial->rxState) {
case EXPECT_HDR1:
edge_serial->rxHeader1 = *buffer;
++buffer;
--bufferLength;
if (bufferLength == 0) {
edge_serial->rxState = EXPECT_HDR2;
break;
}
fallthrough;
case EXPECT_HDR2:
edge_serial->rxHeader2 = *buffer;
++buffer;
--bufferLength;
dev_dbg(dev, "%s - Hdr1=%02X Hdr2=%02X\n", __func__,
edge_serial->rxHeader1, edge_serial->rxHeader2);
/* Process depending on whether this header is
* data or status */
if (IS_CMD_STAT_HDR(edge_serial->rxHeader1)) {
/* Decode this status header and go to
* EXPECT_HDR1 (if we can process the status
* with only 2 bytes), or go to EXPECT_HDR3 to
* get the third byte. */
edge_serial->rxPort =
IOSP_GET_HDR_PORT(edge_serial->rxHeader1);
edge_serial->rxStatusCode =
IOSP_GET_STATUS_CODE(
edge_serial->rxHeader1);
if (!IOSP_STATUS_IS_2BYTE(
edge_serial->rxStatusCode)) {
/* This status needs additional bytes.
* Save what we have and then wait for
* more data.
*/
edge_serial->rxStatusParam
= edge_serial->rxHeader2;
edge_serial->rxState = EXPECT_HDR3;
break;
}
/* We have all the header bytes, process the
status now */
process_rcvd_status(edge_serial,
edge_serial->rxHeader2, 0);
edge_serial->rxState = EXPECT_HDR1;
break;
}
edge_serial->rxPort = IOSP_GET_HDR_PORT(edge_serial->rxHeader1);
edge_serial->rxBytesRemaining = IOSP_GET_HDR_DATA_LEN(edge_serial->rxHeader1,
edge_serial->rxHeader2);
dev_dbg(dev, "%s - Data for Port %u Len %u\n", __func__,
edge_serial->rxPort,
edge_serial->rxBytesRemaining);
if (bufferLength == 0) {
edge_serial->rxState = EXPECT_DATA;
break;
}
fallthrough;
case EXPECT_DATA: /* Expect data */
if (bufferLength < edge_serial->rxBytesRemaining) {
rxLen = bufferLength;
/* Expect data to start next buffer */
edge_serial->rxState = EXPECT_DATA;
} else {
/* BufLen >= RxBytesRemaining */
rxLen = edge_serial->rxBytesRemaining;
/* Start another header next time */
edge_serial->rxState = EXPECT_HDR1;
}
bufferLength -= rxLen;
edge_serial->rxBytesRemaining -= rxLen;
/* spit this data back into the tty driver if this
port is open */
if (rxLen && edge_serial->rxPort < serial->num_ports) {
port = serial->port[edge_serial->rxPort];
edge_port = usb_get_serial_port_data(port);
if (edge_port && edge_port->open) {
dev_dbg(dev, "%s - Sending %d bytes to TTY for port %d\n",
__func__, rxLen,
edge_serial->rxPort);
edge_tty_recv(edge_port->port, buffer,
rxLen);
edge_port->port->icount.rx += rxLen;
}
}
buffer += rxLen;
break;
case EXPECT_HDR3: /* Expect 3rd byte of status header */
edge_serial->rxHeader3 = *buffer;
++buffer;
--bufferLength;
/* We have all the header bytes, process the
status now */
process_rcvd_status(edge_serial,
edge_serial->rxStatusParam,
edge_serial->rxHeader3);
edge_serial->rxState = EXPECT_HDR1;
break;
}
}
}
/*****************************************************************************
* process_rcvd_status
* this function handles any status messages received on the
* bulk in pipe.
*****************************************************************************/
static void process_rcvd_status(struct edgeport_serial *edge_serial,
__u8 byte2, __u8 byte3)
{
struct usb_serial_port *port;
struct edgeport_port *edge_port;
struct tty_struct *tty;
struct device *dev;
__u8 code = edge_serial->rxStatusCode;
/* switch the port pointer to the one being currently talked about */
if (edge_serial->rxPort >= edge_serial->serial->num_ports)
return;
port = edge_serial->serial->port[edge_serial->rxPort];
edge_port = usb_get_serial_port_data(port);
if (edge_port == NULL) {
dev_err(&edge_serial->serial->dev->dev,
"%s - edge_port == NULL for port %d\n",
__func__, edge_serial->rxPort);
return;
}
dev = &port->dev;
if (code == IOSP_EXT_STATUS) {
switch (byte2) {
case IOSP_EXT_STATUS_CHASE_RSP:
/* we want to do EXT status regardless of port
* open/closed */
dev_dbg(dev, "%s - Port %u EXT CHASE_RSP Data = %02x\n",
__func__, edge_serial->rxPort, byte3);
/* Currently, the only EXT_STATUS is Chase, so process
* here instead of one more call to one more subroutine
* If/when more EXT_STATUS, there'll be more work to do
* Also, we currently clear flag and close the port
* regardless of content of above's Byte3.
* We could choose to do something else when Byte3 says
* Timeout on Chase from Edgeport, like wait longer in
* block_until_chase_response, but for now we don't.
*/
edge_port->chaseResponsePending = false;
wake_up(&edge_port->wait_chase);
return;
case IOSP_EXT_STATUS_RX_CHECK_RSP:
dev_dbg(dev, "%s ========== Port %u CHECK_RSP Sequence = %02x =============\n",
__func__, edge_serial->rxPort, byte3);
/* Port->RxCheckRsp = true; */
return;
}
}
if (code == IOSP_STATUS_OPEN_RSP) {
edge_port->txCredits = GET_TX_BUFFER_SIZE(byte3);
edge_port->maxTxCredits = edge_port->txCredits;
dev_dbg(dev, "%s - Port %u Open Response Initial MSR = %02x TxBufferSize = %d\n",
__func__, edge_serial->rxPort, byte2, edge_port->txCredits);
handle_new_msr(edge_port, byte2);
/* send the current line settings to the port so we are
in sync with any further termios calls */
tty = tty_port_tty_get(&edge_port->port->port);
if (tty) {
change_port_settings(tty,
edge_port, &tty->termios);
tty_kref_put(tty);
}
/* we have completed the open */
edge_port->openPending = false;
edge_port->open = true;
wake_up(&edge_port->wait_open);
return;
}
/* If port is closed, silently discard all rcvd status. We can
* have cases where buffered status is received AFTER the close
* port command is sent to the Edgeport.
*/
if (!edge_port->open || edge_port->closePending)
return;
switch (code) {
/* Not currently sent by Edgeport */
case IOSP_STATUS_LSR:
dev_dbg(dev, "%s - Port %u LSR Status = %02x\n",
__func__, edge_serial->rxPort, byte2);
handle_new_lsr(edge_port, false, byte2, 0);
break;
case IOSP_STATUS_LSR_DATA:
dev_dbg(dev, "%s - Port %u LSR Status = %02x, Data = %02x\n",
__func__, edge_serial->rxPort, byte2, byte3);
/* byte2 is LSR Register */
/* byte3 is broken data byte */
handle_new_lsr(edge_port, true, byte2, byte3);
break;
/*
* case IOSP_EXT_4_STATUS:
* dev_dbg(dev, "%s - Port %u LSR Status = %02x Data = %02x\n",
* __func__, edge_serial->rxPort, byte2, byte3);
* break;
*/
case IOSP_STATUS_MSR:
dev_dbg(dev, "%s - Port %u MSR Status = %02x\n",
__func__, edge_serial->rxPort, byte2);
/*
* Process this new modem status and generate appropriate
* events, etc, based on the new status. This routine
* also saves the MSR in Port->ShadowMsr.
*/
handle_new_msr(edge_port, byte2);
break;
default:
dev_dbg(dev, "%s - Unrecognized IOSP status code %u\n", __func__, code);
break;
}
}
/*****************************************************************************
* edge_tty_recv
* this function passes data on to the tty flip buffer
*****************************************************************************/
static void edge_tty_recv(struct usb_serial_port *port, unsigned char *data,
int length)
{
int cnt;
cnt = tty_insert_flip_string(&port->port, data, length);
if (cnt < length) {
dev_err(&port->dev, "%s - dropping data, %d bytes lost\n",
__func__, length - cnt);
}
data += cnt;
length -= cnt;
tty_flip_buffer_push(&port->port);
}
/*****************************************************************************
* handle_new_msr
* this function handles any change to the msr register for a port.
*****************************************************************************/
static void handle_new_msr(struct edgeport_port *edge_port, __u8 newMsr)
{
struct async_icount *icount;
if (newMsr & (EDGEPORT_MSR_DELTA_CTS | EDGEPORT_MSR_DELTA_DSR |
EDGEPORT_MSR_DELTA_RI | EDGEPORT_MSR_DELTA_CD)) {
icount = &edge_port->port->icount;
/* update input line counters */
if (newMsr & EDGEPORT_MSR_DELTA_CTS)
icount->cts++;
if (newMsr & EDGEPORT_MSR_DELTA_DSR)
icount->dsr++;
if (newMsr & EDGEPORT_MSR_DELTA_CD)
icount->dcd++;
if (newMsr & EDGEPORT_MSR_DELTA_RI)
icount->rng++;
wake_up_interruptible(&edge_port->port->port.delta_msr_wait);
}
/* Save the new modem status */
edge_port->shadowMSR = newMsr & 0xf0;
}
/*****************************************************************************
* handle_new_lsr
* this function handles any change to the lsr register for a port.
*****************************************************************************/
static void handle_new_lsr(struct edgeport_port *edge_port, __u8 lsrData,
__u8 lsr, __u8 data)
{
__u8 newLsr = (__u8) (lsr & (__u8)
(LSR_OVER_ERR | LSR_PAR_ERR | LSR_FRM_ERR | LSR_BREAK));
struct async_icount *icount;
edge_port->shadowLSR = lsr;
if (newLsr & LSR_BREAK) {
/*
* Parity and Framing errors only count if they
* occur exclusive of a break being
* received.
*/
newLsr &= (__u8)(LSR_OVER_ERR | LSR_BREAK);
}
/* Place LSR data byte into Rx buffer */
if (lsrData)
edge_tty_recv(edge_port->port, &data, 1);
/* update input line counters */
icount = &edge_port->port->icount;
if (newLsr & LSR_BREAK)
icount->brk++;
if (newLsr & LSR_OVER_ERR)
icount->overrun++;
if (newLsr & LSR_PAR_ERR)
icount->parity++;
if (newLsr & LSR_FRM_ERR)
icount->frame++;
}
/****************************************************************************
* sram_write
* writes a number of bytes to the Edgeport device's sram starting at the
* given address.
* If successful returns the number of bytes written, otherwise it returns
* a negative error number describing the problem.
****************************************************************************/
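/*
 * Transfer note (matches the loop below): each chunk of at most 64 bytes
 * is sent as a vendor control message (USB_REQUEST_ION_WRITE_RAM), with
 * the 16-bit address advanced after every chunk; the extended address
 * stays fixed for the whole write.
 */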
static int sram_write(struct usb_serial *serial, __u16 extAddr, __u16 addr,
__u16 length, const __u8 *data)
{
int result;
__u16 current_length;
unsigned char *transfer_buffer;
dev_dbg(&serial->dev->dev, "%s - %x, %x, %d\n", __func__, extAddr, addr, length);
transfer_buffer = kmalloc(64, GFP_KERNEL);
if (!transfer_buffer)
return -ENOMEM;
/* need to split these writes up into 64 byte chunks */
result = 0;
while (length > 0) {
if (length > 64)
current_length = 64;
else
current_length = length;
/* dev_dbg(&serial->dev->dev, "%s - writing %x, %x, %d\n", __func__, extAddr, addr, current_length); */
memcpy(transfer_buffer, data, current_length);
result = usb_control_msg(serial->dev,
usb_sndctrlpipe(serial->dev, 0),
USB_REQUEST_ION_WRITE_RAM,
0x40, addr, extAddr, transfer_buffer,
current_length, 300);
if (result < 0)
break;
length -= current_length;
addr += current_length;
data += current_length;
}
kfree(transfer_buffer);
return result;
}
/****************************************************************************
* rom_write
* writes a number of bytes to the Edgeport device's ROM starting at the
* given address.
* If successful returns the number of bytes written, otherwise it returns
* a negative error number describing the problem.
****************************************************************************/
static int rom_write(struct usb_serial *serial, __u16 extAddr, __u16 addr,
__u16 length, const __u8 *data)
{
int result;
__u16 current_length;
unsigned char *transfer_buffer;
transfer_buffer = kmalloc(64, GFP_KERNEL);
if (!transfer_buffer)
return -ENOMEM;
/* need to split these writes up into 64 byte chunks */
result = 0;
while (length > 0) {
if (length > 64)
current_length = 64;
else
current_length = length;
memcpy(transfer_buffer, data, current_length);
result = usb_control_msg(serial->dev,
usb_sndctrlpipe(serial->dev, 0),
USB_REQUEST_ION_WRITE_ROM, 0x40,
addr, extAddr,
transfer_buffer, current_length, 300);
if (result < 0)
break;
length -= current_length;
addr += current_length;
data += current_length;
}
kfree(transfer_buffer);
return result;
}
/****************************************************************************
* rom_read
* reads a number of bytes from the Edgeport device starting at the given
* address.
* Returns zero on success or a negative error number.
****************************************************************************/
static int rom_read(struct usb_serial *serial, __u16 extAddr,
__u16 addr, __u16 length, __u8 *data)
{
int result;
__u16 current_length;
unsigned char *transfer_buffer;
transfer_buffer = kmalloc(64, GFP_KERNEL);
if (!transfer_buffer)
return -ENOMEM;
/* need to split these reads up into 64 byte chunks */
result = 0;
while (length > 0) {
if (length > 64)
current_length = 64;
else
current_length = length;
result = usb_control_msg(serial->dev,
usb_rcvctrlpipe(serial->dev, 0),
USB_REQUEST_ION_READ_ROM,
0xC0, addr, extAddr, transfer_buffer,
current_length, 300);
if (result < current_length) {
if (result >= 0)
result = -EIO;
break;
}
memcpy(data, transfer_buffer, current_length);
length -= current_length;
addr += current_length;
data += current_length;
result = 0;
}
kfree(transfer_buffer);
return result;
}
/****************************************************************************
* send_iosp_ext_cmd
* Is used to send a IOSP message to the Edgeport device
****************************************************************************/
static int send_iosp_ext_cmd(struct edgeport_port *edge_port,
__u8 command, __u8 param)
{
unsigned char *buffer;
unsigned char *currentCommand;
int length = 0;
int status = 0;
buffer = kmalloc(10, GFP_ATOMIC);
if (!buffer)
return -ENOMEM;
currentCommand = buffer;
MAKE_CMD_EXT_CMD(¤tCommand, &length, edge_port->port->port_number,
command, param);
status = write_cmd_usb(edge_port, buffer, length);
if (status) {
/* something bad happened, let's free up the memory */
kfree(buffer);
}
return status;
}
/*****************************************************************************
* write_cmd_usb
* this function writes the given buffer out to the bulk write endpoint.
*****************************************************************************/
static int write_cmd_usb(struct edgeport_port *edge_port,
unsigned char *buffer, int length)
{
struct edgeport_serial *edge_serial =
usb_get_serial_data(edge_port->port->serial);
struct device *dev = &edge_port->port->dev;
int status = 0;
struct urb *urb;
usb_serial_debug_data(dev, __func__, length, buffer);
/* Allocate our next urb */
urb = usb_alloc_urb(0, GFP_ATOMIC);
if (!urb)
return -ENOMEM;
atomic_inc(&CmdUrbs);
dev_dbg(dev, "%s - ALLOCATE URB %p (outstanding %d)\n",
__func__, urb, atomic_read(&CmdUrbs));
usb_fill_bulk_urb(urb, edge_serial->serial->dev,
usb_sndbulkpipe(edge_serial->serial->dev,
edge_serial->bulk_out_endpoint),
buffer, length, edge_bulk_out_cmd_callback, edge_port);
edge_port->commandPending = true;
status = usb_submit_urb(urb, GFP_ATOMIC);
if (status) {
/* something went wrong */
dev_err(dev, "%s - usb_submit_urb(write command) failed, status = %d\n",
__func__, status);
usb_free_urb(urb);
atomic_dec(&CmdUrbs);
return status;
}
#if 0
wait_event(&edge_port->wait_command, !edge_port->commandPending);
if (edge_port->commandPending) {
/* command timed out */
dev_dbg(dev, "%s - command timed out\n", __func__);
status = -EINVAL;
}
#endif
return status;
}
/*****************************************************************************
* send_cmd_write_baud_rate
* this function sends the proper command to change the baud rate of the
* specified port.
*****************************************************************************/
static int send_cmd_write_baud_rate(struct edgeport_port *edge_port,
int baudRate)
{
struct edgeport_serial *edge_serial =
usb_get_serial_data(edge_port->port->serial);
struct device *dev = &edge_port->port->dev;
unsigned char *cmdBuffer;
unsigned char *currCmd;
int cmdLen = 0;
int divisor;
int status;
u32 number = edge_port->port->port_number;
if (edge_serial->is_epic &&
!edge_serial->epic_descriptor.Supports.IOSPSetBaudRate) {
dev_dbg(dev, "SendCmdWriteBaudRate - NOT Setting baud rate for port, baud = %d\n",
baudRate);
return 0;
}
dev_dbg(dev, "%s - baud = %d\n", __func__, baudRate);
status = calc_baud_rate_divisor(dev, baudRate, &divisor);
if (status) {
dev_err(dev, "%s - bad baud rate\n", __func__);
return status;
}
/* Alloc memory for the string of commands. */
cmdBuffer = kmalloc(0x100, GFP_ATOMIC);
if (!cmdBuffer)
return -ENOMEM;
currCmd = cmdBuffer;
/* Enable access to divisor latch */
MAKE_CMD_WRITE_REG(&currCmd, &cmdLen, number, LCR, LCR_DL_ENABLE);
/* Write the divisor itself */
MAKE_CMD_WRITE_REG(&currCmd, &cmdLen, number, DLL, LOW8(divisor));
MAKE_CMD_WRITE_REG(&currCmd, &cmdLen, number, DLM, HIGH8(divisor));
/* Restore original value to disable access to divisor latch */
MAKE_CMD_WRITE_REG(&currCmd, &cmdLen, number, LCR,
edge_port->shadowLCR);
status = write_cmd_usb(edge_port, cmdBuffer, cmdLen);
if (status) {
/* something bad happened, let's free up the memory */
kfree(cmdBuffer);
}
return status;
}
/*****************************************************************************
* calc_baud_rate_divisor
* this function calculates the proper baud rate divisor for the specified
* baud rate.
*****************************************************************************/
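/*
 * Worked example for the fallback formula below (rate chosen purely for
 * illustration): divisor = (230400 + baudrate/2) / baudrate is 230400/baud
 * rounded to the nearest integer, so a non-table request for 7000 baud
 * would yield (230400 + 3500) / 7000 = 33.
 */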
static int calc_baud_rate_divisor(struct device *dev, int baudrate, int *divisor)
{
int i;
__u16 custom;
for (i = 0; i < ARRAY_SIZE(divisor_table); i++) {
if (divisor_table[i].BaudRate == baudrate) {
*divisor = divisor_table[i].Divisor;
return 0;
}
}
/* We have tried all of the standard baud rates,
* so let's try to calculate the divisor for this baud rate.
* Make sure the baud rate is reasonable. */
if (baudrate > 50 && baudrate < 230400) {
/* get divisor */
custom = (__u16)((230400L + baudrate/2) / baudrate);
*divisor = custom;
dev_dbg(dev, "%s - Baud %d = %d\n", __func__, baudrate, custom);
return 0;
}
return -1;
}
/*****************************************************************************
* send_cmd_write_uart_register
* this function builds up a uart register message and sends to the device.
*****************************************************************************/
static int send_cmd_write_uart_register(struct edgeport_port *edge_port,
__u8 regNum, __u8 regValue)
{
struct edgeport_serial *edge_serial =
usb_get_serial_data(edge_port->port->serial);
struct device *dev = &edge_port->port->dev;
unsigned char *cmdBuffer;
unsigned char *currCmd;
unsigned long cmdLen = 0;
int status;
dev_dbg(dev, "%s - write to %s register 0x%02x\n",
(regNum == MCR) ? "MCR" : "LCR", __func__, regValue);
if (edge_serial->is_epic &&
!edge_serial->epic_descriptor.Supports.IOSPWriteMCR &&
regNum == MCR) {
dev_dbg(dev, "SendCmdWriteUartReg - Not writing to MCR Register\n");
return 0;
}
if (edge_serial->is_epic &&
!edge_serial->epic_descriptor.Supports.IOSPWriteLCR &&
regNum == LCR) {
dev_dbg(dev, "SendCmdWriteUartReg - Not writing to LCR Register\n");
return 0;
}
/* Alloc memory for the string of commands. */
cmdBuffer = kmalloc(0x10, GFP_ATOMIC);
if (cmdBuffer == NULL)
return -ENOMEM;
currCmd = cmdBuffer;
/* Build a cmd in the buffer to write the given register */
MAKE_CMD_WRITE_REG(&currCmd, &cmdLen, edge_port->port->port_number,
regNum, regValue);
status = write_cmd_usb(edge_port, cmdBuffer, cmdLen);
if (status) {
/* something bad happened, let's free up the memory */
kfree(cmdBuffer);
}
return status;
}
/*****************************************************************************
* change_port_settings
* This routine is called to set the UART on the device to match the
* specified new settings.
*****************************************************************************/
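/*
 * Sequence overview (for orientation; follows the body below): build the
 * LCR data/parity/stop selections and the rx/tx flow control settings
 * from termios, optionally program the XON/XOFF characters, send the flow
 * control commands and the LCR/MCR register writes, then program the
 * baud rate.
 */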
static void change_port_settings(struct tty_struct *tty,
struct edgeport_port *edge_port, const struct ktermios *old_termios)
{
struct device *dev = &edge_port->port->dev;
struct edgeport_serial *edge_serial =
usb_get_serial_data(edge_port->port->serial);
int baud;
unsigned cflag;
__u8 mask = 0xff;
__u8 lData;
__u8 lParity;
__u8 lStop;
__u8 rxFlow;
__u8 txFlow;
int status;
if (!edge_port->open &&
!edge_port->openPending) {
dev_dbg(dev, "%s - port not opened\n", __func__);
return;
}
cflag = tty->termios.c_cflag;
switch (cflag & CSIZE) {
case CS5:
lData = LCR_BITS_5; mask = 0x1f;
dev_dbg(dev, "%s - data bits = 5\n", __func__);
break;
case CS6:
lData = LCR_BITS_6; mask = 0x3f;
dev_dbg(dev, "%s - data bits = 6\n", __func__);
break;
case CS7:
lData = LCR_BITS_7; mask = 0x7f;
dev_dbg(dev, "%s - data bits = 7\n", __func__);
break;
default:
case CS8:
lData = LCR_BITS_8;
dev_dbg(dev, "%s - data bits = 8\n", __func__);
break;
}
lParity = LCR_PAR_NONE;
if (cflag & PARENB) {
if (cflag & CMSPAR) {
if (cflag & PARODD) {
lParity = LCR_PAR_MARK;
dev_dbg(dev, "%s - parity = mark\n", __func__);
} else {
lParity = LCR_PAR_SPACE;
dev_dbg(dev, "%s - parity = space\n", __func__);
}
} else if (cflag & PARODD) {
lParity = LCR_PAR_ODD;
dev_dbg(dev, "%s - parity = odd\n", __func__);
} else {
lParity = LCR_PAR_EVEN;
dev_dbg(dev, "%s - parity = even\n", __func__);
}
} else {
dev_dbg(dev, "%s - parity = none\n", __func__);
}
if (cflag & CSTOPB) {
lStop = LCR_STOP_2;
dev_dbg(dev, "%s - stop bits = 2\n", __func__);
} else {
lStop = LCR_STOP_1;
dev_dbg(dev, "%s - stop bits = 1\n", __func__);
}
/* figure out the flow control settings */
rxFlow = txFlow = 0x00;
if (cflag & CRTSCTS) {
rxFlow |= IOSP_RX_FLOW_RTS;
txFlow |= IOSP_TX_FLOW_CTS;
dev_dbg(dev, "%s - RTS/CTS is enabled\n", __func__);
} else {
dev_dbg(dev, "%s - RTS/CTS is disabled\n", __func__);
}
/* if we are implementing XON/XOFF, set the start and stop character
in the device */
if (I_IXOFF(tty) || I_IXON(tty)) {
unsigned char stop_char = STOP_CHAR(tty);
unsigned char start_char = START_CHAR(tty);
if (!edge_serial->is_epic ||
edge_serial->epic_descriptor.Supports.IOSPSetXChar) {
send_iosp_ext_cmd(edge_port,
IOSP_CMD_SET_XON_CHAR, start_char);
send_iosp_ext_cmd(edge_port,
IOSP_CMD_SET_XOFF_CHAR, stop_char);
}
/* if we are implementing INBOUND XON/XOFF */
if (I_IXOFF(tty)) {
rxFlow |= IOSP_RX_FLOW_XON_XOFF;
dev_dbg(dev, "%s - INBOUND XON/XOFF is enabled, XON = %2x, XOFF = %2x\n",
__func__, start_char, stop_char);
} else {
dev_dbg(dev, "%s - INBOUND XON/XOFF is disabled\n", __func__);
}
/* if we are implementing OUTBOUND XON/XOFF */
if (I_IXON(tty)) {
txFlow |= IOSP_TX_FLOW_XON_XOFF;
dev_dbg(dev, "%s - OUTBOUND XON/XOFF is enabled, XON = %2x, XOFF = %2x\n",
__func__, start_char, stop_char);
} else {
dev_dbg(dev, "%s - OUTBOUND XON/XOFF is disabled\n", __func__);
}
}
/* Set flow control to the configured value */
if (!edge_serial->is_epic ||
edge_serial->epic_descriptor.Supports.IOSPSetRxFlow)
send_iosp_ext_cmd(edge_port, IOSP_CMD_SET_RX_FLOW, rxFlow);
if (!edge_serial->is_epic ||
edge_serial->epic_descriptor.Supports.IOSPSetTxFlow)
send_iosp_ext_cmd(edge_port, IOSP_CMD_SET_TX_FLOW, txFlow);
edge_port->shadowLCR &= ~(LCR_BITS_MASK | LCR_STOP_MASK | LCR_PAR_MASK);
edge_port->shadowLCR |= (lData | lParity | lStop);
edge_port->validDataMask = mask;
/* Send the updated LCR value to the EdgePort */
status = send_cmd_write_uart_register(edge_port, LCR,
edge_port->shadowLCR);
if (status != 0)
return;
/* set up the MCR register and send it to the EdgePort */
edge_port->shadowMCR = MCR_MASTER_IE;
if (cflag & CBAUD)
edge_port->shadowMCR |= (MCR_DTR | MCR_RTS);
status = send_cmd_write_uart_register(edge_port, MCR,
edge_port->shadowMCR);
if (status != 0)
return;
/* Determine divisor based on baud rate */
baud = tty_get_baud_rate(tty);
if (!baud) {
/* pick a default, any default... */
baud = 9600;
}
dev_dbg(dev, "%s - baud rate = %d\n", __func__, baud);
status = send_cmd_write_baud_rate(edge_port, baud);
if (status == -1) {
/* Speed change was not possible - put back the old speed */
baud = tty_termios_baud_rate(old_termios);
tty_encode_baud_rate(tty, baud, baud);
}
}
/****************************************************************************
* unicode_to_ascii
* Turns a string from Unicode into ASCII.
* Doesn't do a good job with any characters that are outside the normal
* ASCII range, but it's only for debugging...
* NOTE: expects the unicode in LE format
****************************************************************************/
static void unicode_to_ascii(char *string, int buflen,
__le16 *unicode, int unicode_size)
{
int i;
if (buflen <= 0) /* never happens, but... */
return;
--buflen; /* space for nul */
for (i = 0; i < unicode_size; i++) {
if (i >= buflen)
break;
string[i] = (char)(le16_to_cpu(unicode[i]));
}
string[i] = 0x00;
}
/****************************************************************************
* get_manufacturing_desc
* reads in the manufacturing descriptor and stores it into the serial
* structure.
****************************************************************************/
static void get_manufacturing_desc(struct edgeport_serial *edge_serial)
{
struct device *dev = &edge_serial->serial->dev->dev;
int response;
dev_dbg(dev, "getting manufacturer descriptor\n");
response = rom_read(edge_serial->serial,
(EDGE_MANUF_DESC_ADDR & 0xffff0000) >> 16,
(__u16)(EDGE_MANUF_DESC_ADDR & 0x0000ffff),
EDGE_MANUF_DESC_LEN,
(__u8 *)(&edge_serial->manuf_descriptor));
if (response < 0) {
dev_err(dev, "error in getting manufacturer descriptor: %d\n",
response);
} else {
char string[30];
dev_dbg(dev, "**Manufacturer Descriptor\n");
dev_dbg(dev, " RomSize: %dK\n",
edge_serial->manuf_descriptor.RomSize);
dev_dbg(dev, " RamSize: %dK\n",
edge_serial->manuf_descriptor.RamSize);
dev_dbg(dev, " CpuRev: %d\n",
edge_serial->manuf_descriptor.CpuRev);
dev_dbg(dev, " BoardRev: %d\n",
edge_serial->manuf_descriptor.BoardRev);
dev_dbg(dev, " NumPorts: %d\n",
edge_serial->manuf_descriptor.NumPorts);
dev_dbg(dev, " DescDate: %d/%d/%d\n",
edge_serial->manuf_descriptor.DescDate[0],
edge_serial->manuf_descriptor.DescDate[1],
edge_serial->manuf_descriptor.DescDate[2]+1900);
unicode_to_ascii(string, sizeof(string),
edge_serial->manuf_descriptor.SerialNumber,
edge_serial->manuf_descriptor.SerNumLength/2);
dev_dbg(dev, " SerialNumber: %s\n", string);
unicode_to_ascii(string, sizeof(string),
edge_serial->manuf_descriptor.AssemblyNumber,
edge_serial->manuf_descriptor.AssemblyNumLength/2);
dev_dbg(dev, " AssemblyNumber: %s\n", string);
unicode_to_ascii(string, sizeof(string),
edge_serial->manuf_descriptor.OemAssyNumber,
edge_serial->manuf_descriptor.OemAssyNumLength/2);
dev_dbg(dev, " OemAssyNumber: %s\n", string);
dev_dbg(dev, " UartType: %d\n",
edge_serial->manuf_descriptor.UartType);
dev_dbg(dev, " IonPid: %d\n",
edge_serial->manuf_descriptor.IonPid);
dev_dbg(dev, " IonConfig: %d\n",
edge_serial->manuf_descriptor.IonConfig);
}
}
/****************************************************************************
* get_boot_desc
* reads in the bootloader descriptor and stores it into the serial
* structure.
****************************************************************************/
static void get_boot_desc(struct edgeport_serial *edge_serial)
{
struct device *dev = &edge_serial->serial->dev->dev;
int response;
dev_dbg(dev, "getting boot descriptor\n");
response = rom_read(edge_serial->serial,
(EDGE_BOOT_DESC_ADDR & 0xffff0000) >> 16,
(__u16)(EDGE_BOOT_DESC_ADDR & 0x0000ffff),
EDGE_BOOT_DESC_LEN,
(__u8 *)(&edge_serial->boot_descriptor));
if (response < 0) {
dev_err(dev, "error in getting boot descriptor: %d\n",
response);
} else {
dev_dbg(dev, "**Boot Descriptor:\n");
dev_dbg(dev, " BootCodeLength: %d\n",
le16_to_cpu(edge_serial->boot_descriptor.BootCodeLength));
dev_dbg(dev, " MajorVersion: %d\n",
edge_serial->boot_descriptor.MajorVersion);
dev_dbg(dev, " MinorVersion: %d\n",
edge_serial->boot_descriptor.MinorVersion);
dev_dbg(dev, " BuildNumber: %d\n",
le16_to_cpu(edge_serial->boot_descriptor.BuildNumber));
dev_dbg(dev, " Capabilities: 0x%x\n",
le16_to_cpu(edge_serial->boot_descriptor.Capabilities));
dev_dbg(dev, " UConfig0: %d\n",
edge_serial->boot_descriptor.UConfig0);
dev_dbg(dev, " UConfig1: %d\n",
edge_serial->boot_descriptor.UConfig1);
}
}
/****************************************************************************
* load_application_firmware
* This is called to load the application firmware to the device
****************************************************************************/
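/*
 * Flow note (matches the body below): the ihex image's first record
 * carries the firmware version bytes; every following record is written
 * into device SRAM at its record address via sram_write(), and finally a
 * vendor control message (USB_REQUEST_ION_EXEC_DL_CODE) starts execution.
 */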
static void load_application_firmware(struct edgeport_serial *edge_serial)
{
struct device *dev = &edge_serial->serial->dev->dev;
const struct ihex_binrec *rec;
const struct firmware *fw;
const char *fw_name;
const char *fw_info;
int response;
__u32 Operaddr;
__u16 build;
switch (edge_serial->product_info.iDownloadFile) {
case EDGE_DOWNLOAD_FILE_I930:
fw_info = "downloading firmware version (930)";
fw_name = "edgeport/down.fw";
break;
case EDGE_DOWNLOAD_FILE_80251:
fw_info = "downloading firmware version (80251)";
fw_name = "edgeport/down2.fw";
break;
case EDGE_DOWNLOAD_FILE_NONE:
dev_dbg(dev, "No download file specified, skipping download\n");
return;
default:
return;
}
response = request_ihex_firmware(&fw, fw_name,
&edge_serial->serial->dev->dev);
if (response) {
dev_err(dev, "Failed to load image \"%s\" err %d\n",
fw_name, response);
return;
}
rec = (const struct ihex_binrec *)fw->data;
build = (rec->data[2] << 8) | rec->data[3];
dev_dbg(dev, "%s %d.%d.%d\n", fw_info, rec->data[0], rec->data[1], build);
edge_serial->product_info.FirmwareMajorVersion = rec->data[0];
edge_serial->product_info.FirmwareMinorVersion = rec->data[1];
edge_serial->product_info.FirmwareBuildNumber = cpu_to_le16(build);
for (rec = ihex_next_binrec(rec); rec;
rec = ihex_next_binrec(rec)) {
Operaddr = be32_to_cpu(rec->addr);
response = sram_write(edge_serial->serial,
Operaddr >> 16,
Operaddr & 0xFFFF,
be16_to_cpu(rec->len),
&rec->data[0]);
if (response < 0) {
dev_err(&edge_serial->serial->dev->dev,
"sram_write failed (%x, %x, %d)\n",
Operaddr >> 16, Operaddr & 0xFFFF,
be16_to_cpu(rec->len));
break;
}
}
dev_dbg(dev, "sending exec_dl_code\n");
response = usb_control_msg(edge_serial->serial->dev,
usb_sndctrlpipe(edge_serial->serial->dev, 0),
USB_REQUEST_ION_EXEC_DL_CODE,
0x40, 0x4000, 0x0001, NULL, 0, 3000);
release_firmware(fw);
}
/****************************************************************************
* edge_startup
****************************************************************************/
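/*
 * Probe-time overview (for orientation; follows the body below): allocate
 * the per-device edgeport_serial structure, build a name from the USB
 * manufacturer/product strings, then either read the EPiC descriptor or
 * fall back to the manufacturing/boot descriptors, firmware download and
 * EEPROM update.  For EPiC devices the interrupt-in and bulk endpoints
 * are also discovered and their urbs set up here.
 */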
static int edge_startup(struct usb_serial *serial)
{
struct edgeport_serial *edge_serial;
struct usb_device *dev;
struct device *ddev = &serial->dev->dev;
int i;
int response;
bool interrupt_in_found;
bool bulk_in_found;
bool bulk_out_found;
static const __u32 descriptor[3] = { EDGE_COMPATIBILITY_MASK0,
EDGE_COMPATIBILITY_MASK1,
EDGE_COMPATIBILITY_MASK2 };
dev = serial->dev;
/* create our private serial structure */
edge_serial = kzalloc(sizeof(struct edgeport_serial), GFP_KERNEL);
if (!edge_serial)
return -ENOMEM;
spin_lock_init(&edge_serial->es_lock);
edge_serial->serial = serial;
usb_set_serial_data(serial, edge_serial);
/* get the name for the device from the device */
i = usb_string(dev, dev->descriptor.iManufacturer,
&edge_serial->name[0], MAX_NAME_LEN+1);
if (i < 0)
i = 0;
edge_serial->name[i++] = ' ';
usb_string(dev, dev->descriptor.iProduct,
&edge_serial->name[i], MAX_NAME_LEN+2 - i);
dev_info(&serial->dev->dev, "%s detected\n", edge_serial->name);
/* Read the epic descriptor */
if (get_epic_descriptor(edge_serial) < 0) {
/* memcpy descriptor to Supports structures */
memcpy(&edge_serial->epic_descriptor.Supports, descriptor,
sizeof(struct edge_compatibility_bits));
/* get the manufacturing descriptor for this device */
get_manufacturing_desc(edge_serial);
/* get the boot descriptor */
get_boot_desc(edge_serial);
get_product_info(edge_serial);
}
/* set the number of ports from the manufacturing description */
/* serial->num_ports = serial->product_info.NumPorts; */
if ((!edge_serial->is_epic) &&
(edge_serial->product_info.NumPorts != serial->num_ports)) {
dev_warn(ddev,
"Device Reported %d serial ports vs. core thinking we have %d ports, email [email protected] this information.\n",
edge_serial->product_info.NumPorts,
serial->num_ports);
}
dev_dbg(ddev, "%s - time 1 %ld\n", __func__, jiffies);
/* If not an EPiC device */
if (!edge_serial->is_epic) {
/* now load the application firmware into this device */
load_application_firmware(edge_serial);
dev_dbg(ddev, "%s - time 2 %ld\n", __func__, jiffies);
/* Check current Edgeport EEPROM and update if necessary */
update_edgeport_E2PROM(edge_serial);
dev_dbg(ddev, "%s - time 3 %ld\n", __func__, jiffies);
/* set the configuration to use #1 */
/* dev_dbg(ddev, "set_configuration 1\n"); */
/* usb_set_configuration (dev, 1); */
}
dev_dbg(ddev, " FirmwareMajorVersion %d.%d.%d\n",
edge_serial->product_info.FirmwareMajorVersion,
edge_serial->product_info.FirmwareMinorVersion,
le16_to_cpu(edge_serial->product_info.FirmwareBuildNumber));
/* we set up the pointers to the endpoints in the edge_open function,
* as the structures aren't created yet. */
response = 0;
if (edge_serial->is_epic) {
struct usb_host_interface *alt;
alt = serial->interface->cur_altsetting;
/* EPIC thing, set up our interrupt polling now and our read
* urb, so that the device knows it really is connected. */
interrupt_in_found = bulk_in_found = bulk_out_found = false;
for (i = 0; i < alt->desc.bNumEndpoints; ++i) {
struct usb_endpoint_descriptor *endpoint;
int buffer_size;
endpoint = &alt->endpoint[i].desc;
buffer_size = usb_endpoint_maxp(endpoint);
if (!interrupt_in_found &&
(usb_endpoint_is_int_in(endpoint))) {
/* we found an interrupt-in endpoint */
dev_dbg(ddev, "found interrupt in\n");
/* not set up yet, so do it now */
edge_serial->interrupt_read_urb =
usb_alloc_urb(0, GFP_KERNEL);
if (!edge_serial->interrupt_read_urb) {
response = -ENOMEM;
break;
}
edge_serial->interrupt_in_buffer =
kmalloc(buffer_size, GFP_KERNEL);
if (!edge_serial->interrupt_in_buffer) {
response = -ENOMEM;
break;
}
edge_serial->interrupt_in_endpoint =
endpoint->bEndpointAddress;
/* set up our interrupt urb */
usb_fill_int_urb(
edge_serial->interrupt_read_urb,
dev,
usb_rcvintpipe(dev,
endpoint->bEndpointAddress),
edge_serial->interrupt_in_buffer,
buffer_size,
edge_interrupt_callback,
edge_serial,
endpoint->bInterval);
interrupt_in_found = true;
}
if (!bulk_in_found &&
(usb_endpoint_is_bulk_in(endpoint))) {
/* we found a bulk in endpoint */
dev_dbg(ddev, "found bulk in\n");
/* not set up yet, so do it now */
edge_serial->read_urb =
usb_alloc_urb(0, GFP_KERNEL);
if (!edge_serial->read_urb) {
response = -ENOMEM;
break;
}
edge_serial->bulk_in_buffer =
kmalloc(buffer_size, GFP_KERNEL);
if (!edge_serial->bulk_in_buffer) {
response = -ENOMEM;
break;
}
edge_serial->bulk_in_endpoint =
endpoint->bEndpointAddress;
/* set up our bulk in urb */
usb_fill_bulk_urb(edge_serial->read_urb, dev,
usb_rcvbulkpipe(dev,
endpoint->bEndpointAddress),
edge_serial->bulk_in_buffer,
usb_endpoint_maxp(endpoint),
edge_bulk_in_callback,
edge_serial);
bulk_in_found = true;
}
if (!bulk_out_found &&
(usb_endpoint_is_bulk_out(endpoint))) {
/* we found a bulk out endpoint */
dev_dbg(ddev, "found bulk out\n");
edge_serial->bulk_out_endpoint =
endpoint->bEndpointAddress;
bulk_out_found = true;
}
}
if (response || !interrupt_in_found || !bulk_in_found ||
!bulk_out_found) {
if (!response) {
dev_err(ddev, "expected endpoints not found\n");
response = -ENODEV;
}
goto error;
}
/* start interrupt read for this edgeport; this interrupt will
* continue as long as the edgeport is connected */
response = usb_submit_urb(edge_serial->interrupt_read_urb,
GFP_KERNEL);
if (response) {
dev_err(ddev, "%s - Error %d submitting control urb\n",
__func__, response);
goto error;
}
}
return response;
error:
usb_free_urb(edge_serial->interrupt_read_urb);
kfree(edge_serial->interrupt_in_buffer);
usb_free_urb(edge_serial->read_urb);
kfree(edge_serial->bulk_in_buffer);
kfree(edge_serial);
return response;
}
/****************************************************************************
* edge_disconnect
* This function is called whenever the device is removed from the usb bus.
****************************************************************************/
static void edge_disconnect(struct usb_serial *serial)
{
struct edgeport_serial *edge_serial = usb_get_serial_data(serial);
if (edge_serial->is_epic) {
usb_kill_urb(edge_serial->interrupt_read_urb);
usb_kill_urb(edge_serial->read_urb);
}
}
/****************************************************************************
* edge_release
* This function is called when the device structure is deallocated.
****************************************************************************/
static void edge_release(struct usb_serial *serial)
{
struct edgeport_serial *edge_serial = usb_get_serial_data(serial);
if (edge_serial->is_epic) {
usb_kill_urb(edge_serial->interrupt_read_urb);
usb_free_urb(edge_serial->interrupt_read_urb);
kfree(edge_serial->interrupt_in_buffer);
usb_kill_urb(edge_serial->read_urb);
usb_free_urb(edge_serial->read_urb);
kfree(edge_serial->bulk_in_buffer);
}
kfree(edge_serial);
}
static int edge_port_probe(struct usb_serial_port *port)
{
struct edgeport_port *edge_port;
edge_port = kzalloc(sizeof(*edge_port), GFP_KERNEL);
if (!edge_port)
return -ENOMEM;
spin_lock_init(&edge_port->ep_lock);
edge_port->port = port;
usb_set_serial_port_data(port, edge_port);
return 0;
}
static void edge_port_remove(struct usb_serial_port *port)
{
struct edgeport_port *edge_port;
edge_port = usb_get_serial_port_data(port);
kfree(edge_port);
}
static struct usb_serial_driver edgeport_2port_device = {
.driver = {
.owner = THIS_MODULE,
.name = "edgeport_2",
},
.description = "Edgeport 2 port adapter",
.id_table = edgeport_2port_id_table,
.num_ports = 2,
.num_bulk_in = 1,
.num_bulk_out = 1,
.num_interrupt_in = 1,
.open = edge_open,
.close = edge_close,
.throttle = edge_throttle,
.unthrottle = edge_unthrottle,
.attach = edge_startup,
.disconnect = edge_disconnect,
.release = edge_release,
.port_probe = edge_port_probe,
.port_remove = edge_port_remove,
.ioctl = edge_ioctl,
.set_termios = edge_set_termios,
.tiocmget = edge_tiocmget,
.tiocmset = edge_tiocmset,
.tiocmiwait = usb_serial_generic_tiocmiwait,
.get_icount = usb_serial_generic_get_icount,
.write = edge_write,
.write_room = edge_write_room,
.chars_in_buffer = edge_chars_in_buffer,
.break_ctl = edge_break,
.read_int_callback = edge_interrupt_callback,
.read_bulk_callback = edge_bulk_in_callback,
.write_bulk_callback = edge_bulk_out_data_callback,
};
static struct usb_serial_driver edgeport_4port_device = {
.driver = {
.owner = THIS_MODULE,
.name = "edgeport_4",
},
.description = "Edgeport 4 port adapter",
.id_table = edgeport_4port_id_table,
.num_ports = 4,
.num_bulk_in = 1,
.num_bulk_out = 1,
.num_interrupt_in = 1,
.open = edge_open,
.close = edge_close,
.throttle = edge_throttle,
.unthrottle = edge_unthrottle,
.attach = edge_startup,
.disconnect = edge_disconnect,
.release = edge_release,
.port_probe = edge_port_probe,
.port_remove = edge_port_remove,
.ioctl = edge_ioctl,
.set_termios = edge_set_termios,
.tiocmget = edge_tiocmget,
.tiocmset = edge_tiocmset,
.tiocmiwait = usb_serial_generic_tiocmiwait,
.get_icount = usb_serial_generic_get_icount,
.write = edge_write,
.write_room = edge_write_room,
.chars_in_buffer = edge_chars_in_buffer,
.break_ctl = edge_break,
.read_int_callback = edge_interrupt_callback,
.read_bulk_callback = edge_bulk_in_callback,
.write_bulk_callback = edge_bulk_out_data_callback,
};
static struct usb_serial_driver edgeport_8port_device = {
.driver = {
.owner = THIS_MODULE,
.name = "edgeport_8",
},
.description = "Edgeport 8 port adapter",
.id_table = edgeport_8port_id_table,
.num_ports = 8,
.num_bulk_in = 1,
.num_bulk_out = 1,
.num_interrupt_in = 1,
.open = edge_open,
.close = edge_close,
.throttle = edge_throttle,
.unthrottle = edge_unthrottle,
.attach = edge_startup,
.disconnect = edge_disconnect,
.release = edge_release,
.port_probe = edge_port_probe,
.port_remove = edge_port_remove,
.ioctl = edge_ioctl,
.set_termios = edge_set_termios,
.tiocmget = edge_tiocmget,
.tiocmset = edge_tiocmset,
.tiocmiwait = usb_serial_generic_tiocmiwait,
.get_icount = usb_serial_generic_get_icount,
.write = edge_write,
.write_room = edge_write_room,
.chars_in_buffer = edge_chars_in_buffer,
.break_ctl = edge_break,
.read_int_callback = edge_interrupt_callback,
.read_bulk_callback = edge_bulk_in_callback,
.write_bulk_callback = edge_bulk_out_data_callback,
};
static struct usb_serial_driver epic_device = {
.driver = {
.owner = THIS_MODULE,
.name = "epic",
},
.description = "EPiC device",
.id_table = Epic_port_id_table,
.num_ports = 1,
.num_bulk_in = 1,
.num_bulk_out = 1,
.num_interrupt_in = 1,
.open = edge_open,
.close = edge_close,
.throttle = edge_throttle,
.unthrottle = edge_unthrottle,
.attach = edge_startup,
.disconnect = edge_disconnect,
.release = edge_release,
.port_probe = edge_port_probe,
.port_remove = edge_port_remove,
.ioctl = edge_ioctl,
.set_termios = edge_set_termios,
.tiocmget = edge_tiocmget,
.tiocmset = edge_tiocmset,
.tiocmiwait = usb_serial_generic_tiocmiwait,
.get_icount = usb_serial_generic_get_icount,
.write = edge_write,
.write_room = edge_write_room,
.chars_in_buffer = edge_chars_in_buffer,
.break_ctl = edge_break,
.read_int_callback = edge_interrupt_callback,
.read_bulk_callback = edge_bulk_in_callback,
.write_bulk_callback = edge_bulk_out_data_callback,
};
static struct usb_serial_driver * const serial_drivers[] = {
&edgeport_2port_device, &edgeport_4port_device,
&edgeport_8port_device, &epic_device, NULL
};
module_usb_serial_driver(serial_drivers, id_table_combined);
MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL");
MODULE_FIRMWARE("edgeport/boot.fw");
MODULE_FIRMWARE("edgeport/boot2.fw");
MODULE_FIRMWARE("edgeport/down.fw");
MODULE_FIRMWARE("edgeport/down2.fw");
| linux-master | drivers/usb/serial/io_edgeport.c |
// SPDX-License-Identifier: GPL-2.0
/*
* USB HandSpring Visor, Palm m50x, and Sony Clie driver
* (supports all of the Palm OS USB devices)
*
* Copyright (C) 1999 - 2004
* Greg Kroah-Hartman ([email protected])
*
* See Documentation/usb/usb-serial.rst for more information on using this
* driver
*
*/
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/tty.h>
#include <linux/tty_driver.h>
#include <linux/tty_flip.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/spinlock.h>
#include <linux/uaccess.h>
#include <linux/usb.h>
#include <linux/usb/serial.h>
#include <linux/usb/cdc.h>
#include "visor.h"
/*
* Version Information
*/
#define DRIVER_AUTHOR "Greg Kroah-Hartman <[email protected]>"
#define DRIVER_DESC "USB HandSpring Visor / Palm OS driver"
/* function prototypes for a handspring visor */
static int visor_open(struct tty_struct *tty, struct usb_serial_port *port);
static void visor_close(struct usb_serial_port *port);
static int visor_probe(struct usb_serial *serial,
const struct usb_device_id *id);
static int visor_calc_num_ports(struct usb_serial *serial,
struct usb_serial_endpoints *epds);
static int clie_5_calc_num_ports(struct usb_serial *serial,
struct usb_serial_endpoints *epds);
static void visor_read_int_callback(struct urb *urb);
static int clie_3_5_startup(struct usb_serial *serial);
static int palm_os_3_probe(struct usb_serial *serial,
const struct usb_device_id *id);
static int palm_os_4_probe(struct usb_serial *serial,
const struct usb_device_id *id);
static const struct usb_device_id id_table[] = {
{ USB_DEVICE(HANDSPRING_VENDOR_ID, HANDSPRING_VISOR_ID),
.driver_info = (kernel_ulong_t)&palm_os_3_probe },
{ USB_DEVICE(HANDSPRING_VENDOR_ID, HANDSPRING_TREO_ID),
.driver_info = (kernel_ulong_t)&palm_os_4_probe },
{ USB_DEVICE(HANDSPRING_VENDOR_ID, HANDSPRING_TREO600_ID),
.driver_info = (kernel_ulong_t)&palm_os_4_probe },
{ USB_DEVICE(GSPDA_VENDOR_ID, GSPDA_XPLORE_M68_ID),
.driver_info = (kernel_ulong_t)&palm_os_4_probe },
{ USB_DEVICE(PALM_VENDOR_ID, PALM_M500_ID),
.driver_info = (kernel_ulong_t)&palm_os_4_probe },
{ USB_DEVICE(PALM_VENDOR_ID, PALM_M505_ID),
.driver_info = (kernel_ulong_t)&palm_os_4_probe },
{ USB_DEVICE(PALM_VENDOR_ID, PALM_M515_ID),
.driver_info = (kernel_ulong_t)&palm_os_4_probe },
{ USB_DEVICE(PALM_VENDOR_ID, PALM_I705_ID),
.driver_info = (kernel_ulong_t)&palm_os_4_probe },
{ USB_DEVICE(PALM_VENDOR_ID, PALM_M100_ID),
.driver_info = (kernel_ulong_t)&palm_os_4_probe },
{ USB_DEVICE(PALM_VENDOR_ID, PALM_M125_ID),
.driver_info = (kernel_ulong_t)&palm_os_4_probe },
{ USB_DEVICE(PALM_VENDOR_ID, PALM_M130_ID),
.driver_info = (kernel_ulong_t)&palm_os_4_probe },
{ USB_DEVICE(PALM_VENDOR_ID, PALM_TUNGSTEN_T_ID),
.driver_info = (kernel_ulong_t)&palm_os_4_probe },
{ USB_DEVICE(PALM_VENDOR_ID, PALM_TREO_650),
.driver_info = (kernel_ulong_t)&palm_os_4_probe },
{ USB_DEVICE(PALM_VENDOR_ID, PALM_TUNGSTEN_Z_ID),
.driver_info = (kernel_ulong_t)&palm_os_4_probe },
{ USB_DEVICE(PALM_VENDOR_ID, PALM_ZIRE_ID),
.driver_info = (kernel_ulong_t)&palm_os_4_probe },
{ USB_DEVICE(SONY_VENDOR_ID, SONY_CLIE_4_0_ID),
.driver_info = (kernel_ulong_t)&palm_os_4_probe },
{ USB_DEVICE(SONY_VENDOR_ID, SONY_CLIE_S360_ID),
.driver_info = (kernel_ulong_t)&palm_os_4_probe },
{ USB_DEVICE(SONY_VENDOR_ID, SONY_CLIE_4_1_ID),
.driver_info = (kernel_ulong_t)&palm_os_4_probe },
{ USB_DEVICE(SONY_VENDOR_ID, SONY_CLIE_NX60_ID),
.driver_info = (kernel_ulong_t)&palm_os_4_probe },
{ USB_DEVICE(SONY_VENDOR_ID, SONY_CLIE_NZ90V_ID),
.driver_info = (kernel_ulong_t)&palm_os_4_probe },
{ USB_DEVICE(SONY_VENDOR_ID, SONY_CLIE_TJ25_ID),
.driver_info = (kernel_ulong_t)&palm_os_4_probe },
{ USB_DEVICE(ACER_VENDOR_ID, ACER_S10_ID),
.driver_info = (kernel_ulong_t)&palm_os_4_probe },
{ USB_DEVICE_INTERFACE_CLASS(SAMSUNG_VENDOR_ID, SAMSUNG_SCH_I330_ID, 0xff),
.driver_info = (kernel_ulong_t)&palm_os_4_probe },
{ USB_DEVICE(SAMSUNG_VENDOR_ID, SAMSUNG_SPH_I500_ID),
.driver_info = (kernel_ulong_t)&palm_os_4_probe },
{ USB_DEVICE(TAPWAVE_VENDOR_ID, TAPWAVE_ZODIAC_ID),
.driver_info = (kernel_ulong_t)&palm_os_4_probe },
{ USB_DEVICE(GARMIN_VENDOR_ID, GARMIN_IQUE_3600_ID),
.driver_info = (kernel_ulong_t)&palm_os_4_probe },
{ USB_DEVICE(ACEECA_VENDOR_ID, ACEECA_MEZ1000_ID),
.driver_info = (kernel_ulong_t)&palm_os_4_probe },
{ USB_DEVICE(KYOCERA_VENDOR_ID, KYOCERA_7135_ID),
.driver_info = (kernel_ulong_t)&palm_os_4_probe },
{ USB_DEVICE(FOSSIL_VENDOR_ID, FOSSIL_ABACUS_ID),
.driver_info = (kernel_ulong_t)&palm_os_4_probe },
{ } /* Terminating entry */
};
static const struct usb_device_id clie_id_5_table[] = {
{ USB_DEVICE(SONY_VENDOR_ID, SONY_CLIE_UX50_ID),
.driver_info = (kernel_ulong_t)&palm_os_4_probe },
{ } /* Terminating entry */
};
static const struct usb_device_id clie_id_3_5_table[] = {
{ USB_DEVICE(SONY_VENDOR_ID, SONY_CLIE_3_5_ID) },
{ } /* Terminating entry */
};
static const struct usb_device_id id_table_combined[] = {
{ USB_DEVICE(HANDSPRING_VENDOR_ID, HANDSPRING_VISOR_ID) },
{ USB_DEVICE(HANDSPRING_VENDOR_ID, HANDSPRING_TREO_ID) },
{ USB_DEVICE(HANDSPRING_VENDOR_ID, HANDSPRING_TREO600_ID) },
{ USB_DEVICE(GSPDA_VENDOR_ID, GSPDA_XPLORE_M68_ID) },
{ USB_DEVICE(PALM_VENDOR_ID, PALM_M500_ID) },
{ USB_DEVICE(PALM_VENDOR_ID, PALM_M505_ID) },
{ USB_DEVICE(PALM_VENDOR_ID, PALM_M515_ID) },
{ USB_DEVICE(PALM_VENDOR_ID, PALM_I705_ID) },
{ USB_DEVICE(PALM_VENDOR_ID, PALM_M100_ID) },
{ USB_DEVICE(PALM_VENDOR_ID, PALM_M125_ID) },
{ USB_DEVICE(PALM_VENDOR_ID, PALM_M130_ID) },
{ USB_DEVICE(PALM_VENDOR_ID, PALM_TUNGSTEN_T_ID) },
{ USB_DEVICE(PALM_VENDOR_ID, PALM_TREO_650) },
{ USB_DEVICE(PALM_VENDOR_ID, PALM_TUNGSTEN_Z_ID) },
{ USB_DEVICE(PALM_VENDOR_ID, PALM_ZIRE_ID) },
{ USB_DEVICE(SONY_VENDOR_ID, SONY_CLIE_3_5_ID) },
{ USB_DEVICE(SONY_VENDOR_ID, SONY_CLIE_4_0_ID) },
{ USB_DEVICE(SONY_VENDOR_ID, SONY_CLIE_S360_ID) },
{ USB_DEVICE(SONY_VENDOR_ID, SONY_CLIE_4_1_ID) },
{ USB_DEVICE(SONY_VENDOR_ID, SONY_CLIE_NX60_ID) },
{ USB_DEVICE(SONY_VENDOR_ID, SONY_CLIE_NZ90V_ID) },
{ USB_DEVICE(SONY_VENDOR_ID, SONY_CLIE_UX50_ID) },
{ USB_DEVICE(SONY_VENDOR_ID, SONY_CLIE_TJ25_ID) },
{ USB_DEVICE(SAMSUNG_VENDOR_ID, SAMSUNG_SCH_I330_ID) },
{ USB_DEVICE(SAMSUNG_VENDOR_ID, SAMSUNG_SPH_I500_ID) },
{ USB_DEVICE(TAPWAVE_VENDOR_ID, TAPWAVE_ZODIAC_ID) },
{ USB_DEVICE(GARMIN_VENDOR_ID, GARMIN_IQUE_3600_ID) },
{ USB_DEVICE(ACEECA_VENDOR_ID, ACEECA_MEZ1000_ID) },
{ USB_DEVICE(KYOCERA_VENDOR_ID, KYOCERA_7135_ID) },
{ USB_DEVICE(FOSSIL_VENDOR_ID, FOSSIL_ABACUS_ID) },
{ } /* Terminating entry */
};
MODULE_DEVICE_TABLE(usb, id_table_combined);
/* All of the device info needed for the Handspring Visor,
and Palm 4.0 devices */
static struct usb_serial_driver handspring_device = {
.driver = {
.owner = THIS_MODULE,
.name = "visor",
},
.description = "Handspring Visor / Palm OS",
.id_table = id_table,
.num_ports = 2,
.bulk_out_size = 256,
.open = visor_open,
.close = visor_close,
.throttle = usb_serial_generic_throttle,
.unthrottle = usb_serial_generic_unthrottle,
.probe = visor_probe,
.calc_num_ports = visor_calc_num_ports,
.read_int_callback = visor_read_int_callback,
};
/* All of the device info needed for the Clie UX50, TH55 Palm 5.0 devices */
static struct usb_serial_driver clie_5_device = {
.driver = {
.owner = THIS_MODULE,
.name = "clie_5",
},
.description = "Sony Clie 5.0",
.id_table = clie_id_5_table,
.num_ports = 2,
.num_bulk_out = 2,
.bulk_out_size = 256,
.open = visor_open,
.close = visor_close,
.throttle = usb_serial_generic_throttle,
.unthrottle = usb_serial_generic_unthrottle,
.probe = visor_probe,
.calc_num_ports = clie_5_calc_num_ports,
.read_int_callback = visor_read_int_callback,
};
/* device info for the Sony Clie OS version 3.5 */
static struct usb_serial_driver clie_3_5_device = {
.driver = {
.owner = THIS_MODULE,
.name = "clie_3.5",
},
.description = "Sony Clie 3.5",
.id_table = clie_id_3_5_table,
.num_ports = 1,
.bulk_out_size = 256,
.open = visor_open,
.close = visor_close,
.throttle = usb_serial_generic_throttle,
.unthrottle = usb_serial_generic_unthrottle,
.attach = clie_3_5_startup,
};
static struct usb_serial_driver * const serial_drivers[] = {
&handspring_device, &clie_5_device, &clie_3_5_device, NULL
};
/******************************************************************************
* Handspring Visor specific driver functions
******************************************************************************/
static int visor_open(struct tty_struct *tty, struct usb_serial_port *port)
{
int result = 0;
if (!port->read_urb) {
/* this is needed for some brain dead Sony devices */
dev_err(&port->dev, "Device lied about number of ports, please use a lower one.\n");
return -ENODEV;
}
/* Start reading from the device */
result = usb_serial_generic_open(tty, port);
if (result)
goto exit;
if (port->interrupt_in_urb) {
dev_dbg(&port->dev, "adding interrupt input for treo\n");
result = usb_submit_urb(port->interrupt_in_urb, GFP_KERNEL);
if (result)
dev_err(&port->dev,
"%s - failed submitting interrupt urb, error %d\n",
__func__, result);
}
exit:
return result;
}
static void visor_close(struct usb_serial_port *port)
{
unsigned char *transfer_buffer;
usb_serial_generic_close(port);
usb_kill_urb(port->interrupt_in_urb);
transfer_buffer = kmalloc(0x12, GFP_KERNEL);
if (!transfer_buffer)
return;
usb_control_msg(port->serial->dev,
usb_rcvctrlpipe(port->serial->dev, 0),
VISOR_CLOSE_NOTIFICATION, 0xc2,
0x0000, 0x0000,
transfer_buffer, 0x12, 300);
kfree(transfer_buffer);
}
static void visor_read_int_callback(struct urb *urb)
{
struct usb_serial_port *port = urb->context;
int status = urb->status;
int result;
switch (status) {
case 0:
/* success */
break;
case -ECONNRESET:
case -ENOENT:
case -ESHUTDOWN:
/* this urb is terminated, clean up */
dev_dbg(&port->dev, "%s - urb shutting down with status: %d\n",
__func__, status);
return;
default:
dev_dbg(&port->dev, "%s - nonzero urb status received: %d\n",
__func__, status);
goto exit;
}
/*
* It is still not known what this information can be used for.
* If anyone has an idea, please let the author know...
*
* Rumor has it this endpoint is used to notify when data
* is ready to be read from the bulk ones.
*/
usb_serial_debug_data(&port->dev, __func__, urb->actual_length,
urb->transfer_buffer);
exit:
result = usb_submit_urb(urb, GFP_ATOMIC);
if (result)
dev_err(&urb->dev->dev,
"%s - Error %d submitting interrupt urb\n",
__func__, result);
}
static int palm_os_3_probe(struct usb_serial *serial,
const struct usb_device_id *id)
{
struct device *dev = &serial->dev->dev;
struct visor_connection_info *connection_info;
unsigned char *transfer_buffer;
char *string;
int retval = 0;
int i;
int num_ports = 0;
transfer_buffer = kmalloc(sizeof(*connection_info), GFP_KERNEL);
if (!transfer_buffer)
return -ENOMEM;
/* send a get connection info request */
retval = usb_control_msg(serial->dev,
usb_rcvctrlpipe(serial->dev, 0),
VISOR_GET_CONNECTION_INFORMATION,
0xc2, 0x0000, 0x0000, transfer_buffer,
sizeof(*connection_info), 300);
if (retval < 0) {
dev_err(dev, "%s - error %d getting connection information\n",
__func__, retval);
goto exit;
}
if (retval != sizeof(*connection_info)) {
dev_err(dev, "Invalid connection information received from device\n");
retval = -ENODEV;
goto exit;
}
connection_info = (struct visor_connection_info *)transfer_buffer;
num_ports = le16_to_cpu(connection_info->num_ports);
/* Handle devices that report invalid stuff here. */
if (num_ports == 0 || num_ports > 2) {
dev_warn(dev, "%s: No valid connect info available\n",
serial->type->description);
num_ports = 2;
}
for (i = 0; i < num_ports; ++i) {
switch (connection_info->connections[i].port_function_id) {
case VISOR_FUNCTION_GENERIC:
string = "Generic";
break;
case VISOR_FUNCTION_DEBUGGER:
string = "Debugger";
break;
case VISOR_FUNCTION_HOTSYNC:
string = "HotSync";
break;
case VISOR_FUNCTION_CONSOLE:
string = "Console";
break;
case VISOR_FUNCTION_REMOTE_FILE_SYS:
string = "Remote File System";
break;
default:
string = "unknown";
break;
}
dev_info(dev, "%s: port %d, is for %s use\n",
serial->type->description,
connection_info->connections[i].port, string);
}
dev_info(dev, "%s: Number of ports: %d\n", serial->type->description,
num_ports);
/*
* save off our num_ports info so that we can use it in the
* calc_num_ports callback
*/
usb_set_serial_data(serial, (void *)(long)num_ports);
/* ask for the number of bytes available, but ignore the
response as it is broken */
retval = usb_control_msg(serial->dev,
usb_rcvctrlpipe(serial->dev, 0),
VISOR_REQUEST_BYTES_AVAILABLE,
0xc2, 0x0000, 0x0005, transfer_buffer,
0x02, 300);
if (retval < 0)
dev_err(dev, "%s - error %d getting bytes available request\n",
__func__, retval);
retval = 0;
exit:
kfree(transfer_buffer);
return retval;
}
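/*
 * Editorial note (illustration, not part of the original driver): a typical
 * (hypothetical) reply to VISOR_GET_CONNECTION_INFORMATION might report
 * num_ports = 2 with connection 0 being VISOR_FUNCTION_GENERIC on port 0 and
 * connection 1 being VISOR_FUNCTION_HOTSYNC on port 1, which the loop above
 * would log along the lines of:
 *
 *	Handspring Visor / Palm OS: port 0, is for Generic use
 *	Handspring Visor / Palm OS: port 1, is for HotSync use
 *	Handspring Visor / Palm OS: Number of ports: 2
 */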
static int palm_os_4_probe(struct usb_serial *serial,
const struct usb_device_id *id)
{
struct device *dev = &serial->dev->dev;
struct palm_ext_connection_info *connection_info;
unsigned char *transfer_buffer;
int retval;
transfer_buffer = kmalloc(sizeof(*connection_info), GFP_KERNEL);
if (!transfer_buffer)
return -ENOMEM;
retval = usb_control_msg(serial->dev,
usb_rcvctrlpipe(serial->dev, 0),
PALM_GET_EXT_CONNECTION_INFORMATION,
0xc2, 0x0000, 0x0000, transfer_buffer,
sizeof(*connection_info), 300);
if (retval < 0)
dev_err(dev, "%s - error %d getting connection info\n",
__func__, retval);
else
usb_serial_debug_data(dev, __func__, retval, transfer_buffer);
kfree(transfer_buffer);
return 0;
}
static int visor_probe(struct usb_serial *serial,
const struct usb_device_id *id)
{
int retval = 0;
int (*startup)(struct usb_serial *serial,
const struct usb_device_id *id);
/*
* some Samsung Android phones in modem mode have the same ID
* as SPH-I500, but they are ACM devices, so don't bind to them
*/
if (id->idVendor == SAMSUNG_VENDOR_ID &&
id->idProduct == SAMSUNG_SPH_I500_ID &&
serial->dev->descriptor.bDeviceClass == USB_CLASS_COMM &&
serial->dev->descriptor.bDeviceSubClass ==
USB_CDC_SUBCLASS_ACM)
return -ENODEV;
if (serial->dev->actconfig->desc.bConfigurationValue != 1) {
dev_err(&serial->dev->dev, "active config #%d != 1 ??\n",
serial->dev->actconfig->desc.bConfigurationValue);
return -ENODEV;
}
if (id->driver_info) {
startup = (void *)id->driver_info;
retval = startup(serial, id);
}
return retval;
}
static int visor_calc_num_ports(struct usb_serial *serial,
struct usb_serial_endpoints *epds)
{
unsigned int vid = le16_to_cpu(serial->dev->descriptor.idVendor);
int num_ports = (int)(long)(usb_get_serial_data(serial));
if (num_ports)
usb_set_serial_data(serial, NULL);
/*
* Only swap the bulk endpoints for the Handspring devices with
* interrupt in endpoints, which for now are the Treo devices.
*/
if (!(vid == HANDSPRING_VENDOR_ID || vid == KYOCERA_VENDOR_ID) ||
epds->num_interrupt_in == 0)
goto out;
if (epds->num_bulk_in < 2 || epds->num_interrupt_in < 2) {
dev_err(&serial->interface->dev, "missing endpoints\n");
return -ENODEV;
}
/*
* It appears that Treos and Kyoceras want to use the
* 1st bulk in endpoint to communicate with the 2nd bulk out endpoint,
* so let's swap the 1st and 2nd bulk in and interrupt endpoints.
* Note that swapping the bulk out endpoints would break lots of
* apps that want to communicate on the second port.
*/
swap(epds->bulk_in[0], epds->bulk_in[1]);
swap(epds->interrupt_in[0], epds->interrupt_in[1]);
out:
return num_ports;
}
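/*
 * Editorial note (illustration, not part of the original driver): with the
 * swap above, a (hypothetical) Treo that enumerates bulk-in endpoints
 * 0x81/0x82 and interrupt-in endpoints 0x83/0x84 ends up with port 0 using
 * bulk-in 0x82 and interrupt-in 0x84, while the bulk-out assignment is left
 * untouched.
 */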
static int clie_5_calc_num_ports(struct usb_serial *serial,
struct usb_serial_endpoints *epds)
{
/*
* TH55 registers 2 ports.
* Communication in from the UX50/TH55 uses the first bulk-in
* endpoint, while communication out to the UX50/TH55 uses the second
* bulk-out endpoint.
*/
/*
* FIXME: Should we swap the descriptors instead of using the same
* bulk-out endpoint for both ports?
*/
epds->bulk_out[0] = epds->bulk_out[1];
return serial->type->num_ports;
}
static int clie_3_5_startup(struct usb_serial *serial)
{
struct device *dev = &serial->dev->dev;
int result;
u8 *data;
data = kmalloc(1, GFP_KERNEL);
if (!data)
return -ENOMEM;
/*
* Note that PEG-300 series devices expect the following two calls.
*/
/* get the config number */
result = usb_control_msg(serial->dev, usb_rcvctrlpipe(serial->dev, 0),
USB_REQ_GET_CONFIGURATION, USB_DIR_IN,
0, 0, data, 1, 3000);
if (result < 0) {
dev_err(dev, "%s: get config number failed: %d\n",
__func__, result);
goto out;
}
if (result != 1) {
dev_err(dev, "%s: get config number bad return length: %d\n",
__func__, result);
result = -EIO;
goto out;
}
/* get the interface number */
result = usb_control_msg(serial->dev, usb_rcvctrlpipe(serial->dev, 0),
USB_REQ_GET_INTERFACE,
USB_DIR_IN | USB_RECIP_INTERFACE,
0, 0, data, 1, 3000);
if (result < 0) {
dev_err(dev, "%s: get interface number failed: %d\n",
__func__, result);
goto out;
}
if (result != 1) {
dev_err(dev,
"%s: get interface number bad return length: %d\n",
__func__, result);
result = -EIO;
goto out;
}
result = 0;
out:
kfree(data);
return result;
}
module_usb_serial_driver(serial_drivers, id_table_combined);
MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL v2");
| linux-master | drivers/usb/serial/visor.c |
// SPDX-License-Identifier: GPL-2.0
/*
* USB Serial Converter Bus specific functions
*
* Copyright (C) 2002 Greg Kroah-Hartman ([email protected])
*/
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/tty.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/usb.h>
#include <linux/usb/serial.h>
static int usb_serial_device_match(struct device *dev,
struct device_driver *drv)
{
const struct usb_serial_port *port = to_usb_serial_port(dev);
struct usb_serial_driver *driver = to_usb_serial_driver(drv);
/*
* drivers are already assigned to ports in serial_probe so it's
* a simple check here.
*/
if (driver == port->serial->type)
return 1;
return 0;
}
static int usb_serial_device_probe(struct device *dev)
{
struct usb_serial_port *port = to_usb_serial_port(dev);
struct usb_serial_driver *driver;
struct device *tty_dev;
int retval = 0;
int minor;
/* make sure suspend/resume doesn't race against port_probe */
retval = usb_autopm_get_interface(port->serial->interface);
if (retval)
return retval;
driver = port->serial->type;
if (driver->port_probe) {
retval = driver->port_probe(port);
if (retval)
goto err_autopm_put;
}
minor = port->minor;
tty_dev = tty_port_register_device(&port->port, usb_serial_tty_driver,
minor, dev);
if (IS_ERR(tty_dev)) {
retval = PTR_ERR(tty_dev);
goto err_port_remove;
}
usb_autopm_put_interface(port->serial->interface);
dev_info(&port->serial->dev->dev,
"%s converter now attached to ttyUSB%d\n",
driver->description, minor);
return 0;
err_port_remove:
if (driver->port_remove)
driver->port_remove(port);
err_autopm_put:
usb_autopm_put_interface(port->serial->interface);
return retval;
}
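/*
 * Editorial note (illustration, not part of the original driver): a
 * successful probe logs driver->description plus the minor chosen above;
 * for the visor driver elsewhere in this file set that would read, for a
 * (hypothetical) first port:
 *
 *	Handspring Visor / Palm OS converter now attached to ttyUSB0
 */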
static void usb_serial_device_remove(struct device *dev)
{
struct usb_serial_port *port = to_usb_serial_port(dev);
struct usb_serial_driver *driver;
int minor;
int autopm_err;
/*
* Make sure suspend/resume doesn't race against port_remove.
*
* Note that no further runtime PM callbacks will be made if
* autopm_get fails.
*/
autopm_err = usb_autopm_get_interface(port->serial->interface);
minor = port->minor;
tty_unregister_device(usb_serial_tty_driver, minor);
driver = port->serial->type;
if (driver->port_remove)
driver->port_remove(port);
dev_info(dev, "%s converter now disconnected from ttyUSB%d\n",
driver->description, minor);
if (!autopm_err)
usb_autopm_put_interface(port->serial->interface);
}
static ssize_t new_id_store(struct device_driver *driver,
const char *buf, size_t count)
{
struct usb_serial_driver *usb_drv = to_usb_serial_driver(driver);
ssize_t retval = usb_store_new_id(&usb_drv->dynids, usb_drv->id_table,
driver, buf, count);
if (retval >= 0 && usb_drv->usb_driver != NULL)
retval = usb_store_new_id(&usb_drv->usb_driver->dynids,
usb_drv->usb_driver->id_table,
&usb_drv->usb_driver->drvwrap.driver,
buf, count);
return retval;
}
static ssize_t new_id_show(struct device_driver *driver, char *buf)
{
struct usb_serial_driver *usb_drv = to_usb_serial_driver(driver);
return usb_show_dynids(&usb_drv->dynids, buf);
}
static DRIVER_ATTR_RW(new_id);
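/*
 * Editorial note (illustration, not part of the original driver): the new_id
 * attribute declared above typically appears under
 * /sys/bus/usb-serial/drivers/<name>/new_id, so a device with (hypothetical)
 * IDs 0x1234/0x5678 could be bound to the generic driver at run time with
 * something like:
 *
 *	echo 1234 5678 > /sys/bus/usb-serial/drivers/generic/new_id
 */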
static struct attribute *usb_serial_drv_attrs[] = {
&driver_attr_new_id.attr,
NULL,
};
ATTRIBUTE_GROUPS(usb_serial_drv);
static void free_dynids(struct usb_serial_driver *drv)
{
struct usb_dynid *dynid, *n;
spin_lock(&drv->dynids.lock);
list_for_each_entry_safe(dynid, n, &drv->dynids.list, node) {
list_del(&dynid->node);
kfree(dynid);
}
spin_unlock(&drv->dynids.lock);
}
const struct bus_type usb_serial_bus_type = {
.name = "usb-serial",
.match = usb_serial_device_match,
.probe = usb_serial_device_probe,
.remove = usb_serial_device_remove,
.drv_groups = usb_serial_drv_groups,
};
int usb_serial_bus_register(struct usb_serial_driver *driver)
{
int retval;
driver->driver.bus = &usb_serial_bus_type;
spin_lock_init(&driver->dynids.lock);
INIT_LIST_HEAD(&driver->dynids.list);
retval = driver_register(&driver->driver);
return retval;
}
void usb_serial_bus_deregister(struct usb_serial_driver *driver)
{
free_dynids(driver);
driver_unregister(&driver->driver);
}
| linux-master | drivers/usb/serial/bus.c |
// SPDX-License-Identifier: GPL-2.0
/*
* USB Serial Console driver
*
* Copyright (C) 2001 - 2002 Greg Kroah-Hartman ([email protected])
*
* Thanks to Randy Dunlap for the original version of this code.
*
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/tty.h>
#include <linux/console.h>
#include <linux/serial.h>
#include <linux/usb.h>
#include <linux/usb/serial.h>
struct usbcons_info {
int magic;
int break_flag;
struct usb_serial_port *port;
};
static struct usbcons_info usbcons_info;
static struct console usbcons;
/*
* ------------------------------------------------------------
* USB Serial console driver
*
* Much of the code here is copied from drivers/char/serial.c
* and implements a phony serial console in the same way that
* serial.c does so that in case some software queries it,
* it will get the same results.
*
* What differs from the way the serial port code does things is
* that we call the lower level usb-serial driver code to
* initialize the device, and we set the initial console speeds
* based on the command line arguments.
* ------------------------------------------------------------
*/
static const struct tty_operations usb_console_fake_tty_ops = {
};
/*
* The parsing of the command line works exactly like the
* serial.c code, except that the specifier is "ttyUSB" instead
* of "ttyS".
*/
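/*
 * Editorial note (illustration, not part of the original driver): booting
 * with, for example, "console=ttyUSB0,115200n8" on the kernel command line
 * would reach usb_console_setup() with options "115200n8", i.e. 115200 baud,
 * no parity, 8 data bits; appending 'r' would additionally enable RTS/CTS
 * flow control.
 */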
static int usb_console_setup(struct console *co, char *options)
{
struct usbcons_info *info = &usbcons_info;
int baud = 9600;
int bits = 8;
int parity = 'n';
int doflow = 0;
int cflag = CREAD | HUPCL | CLOCAL;
char *s;
struct usb_serial *serial;
struct usb_serial_port *port;
int retval;
struct tty_struct *tty = NULL;
struct ktermios dummy;
if (options) {
baud = simple_strtoul(options, NULL, 10);
s = options;
while (*s >= '0' && *s <= '9')
s++;
if (*s)
parity = *s++;
if (*s)
bits = *s++ - '0';
if (*s)
doflow = (*s++ == 'r');
}
/* Sane default */
if (baud == 0)
baud = 9600;
switch (bits) {
case 7:
cflag |= CS7;
break;
default:
case 8:
cflag |= CS8;
break;
}
switch (parity) {
case 'o': case 'O':
cflag |= PARODD;
break;
case 'e': case 'E':
cflag |= PARENB;
break;
}
if (doflow)
cflag |= CRTSCTS;
/*
* no need to check the index here: if the index is wrong, console
* code won't call us
*/
port = usb_serial_port_get_by_minor(co->index);
if (port == NULL) {
/* no device is connected yet, sorry :( */
pr_err("No USB device connected to ttyUSB%i\n", co->index);
return -ENODEV;
}
serial = port->serial;
retval = usb_autopm_get_interface(serial->interface);
if (retval)
goto error_get_interface;
tty_port_tty_set(&port->port, NULL);
info->port = port;
++port->port.count;
if (!tty_port_initialized(&port->port)) {
if (serial->type->set_termios) {
/*
* allocate a fake tty so the driver can initialize
* the termios structure, then later call set_termios to
* configure according to command line arguments
*/
tty = kzalloc(sizeof(*tty), GFP_KERNEL);
if (!tty) {
retval = -ENOMEM;
goto reset_open_count;
}
kref_init(&tty->kref);
tty->driver = usb_serial_tty_driver;
tty->index = co->index;
init_ldsem(&tty->ldisc_sem);
spin_lock_init(&tty->files_lock);
INIT_LIST_HEAD(&tty->tty_files);
kref_get(&tty->driver->kref);
__module_get(tty->driver->owner);
tty->ops = &usb_console_fake_tty_ops;
tty_init_termios(tty);
tty_port_tty_set(&port->port, tty);
}
/* only call the device specific open if this
* is the first time the port is opened */
retval = serial->type->open(NULL, port);
if (retval) {
dev_err(&port->dev, "could not open USB console port\n");
goto fail;
}
if (serial->type->set_termios) {
tty->termios.c_cflag = cflag;
tty_termios_encode_baud_rate(&tty->termios, baud, baud);
memset(&dummy, 0, sizeof(struct ktermios));
serial->type->set_termios(tty, port, &dummy);
tty_port_tty_set(&port->port, NULL);
tty_save_termios(tty);
tty_kref_put(tty);
}
tty_port_set_initialized(&port->port, true);
}
/* Now that any required fake tty operations are completed, restore
* the tty port count */
--port->port.count;
/* The console is special in terms of closing the device, so
* indicate this port is now acting as a system console. */
port->port.console = 1;
mutex_unlock(&serial->disc_mutex);
return retval;
fail:
tty_port_tty_set(&port->port, NULL);
tty_kref_put(tty);
reset_open_count:
port->port.count = 0;
info->port = NULL;
usb_autopm_put_interface(serial->interface);
error_get_interface:
mutex_unlock(&serial->disc_mutex);
usb_serial_put(serial);
return retval;
}
static void usb_console_write(struct console *co,
const char *buf, unsigned count)
{
static struct usbcons_info *info = &usbcons_info;
struct usb_serial_port *port = info->port;
struct usb_serial *serial;
int retval = -ENODEV;
if (!port || port->serial->dev->state == USB_STATE_NOTATTACHED)
return;
serial = port->serial;
if (count == 0)
return;
dev_dbg(&port->dev, "%s - %d byte(s)\n", __func__, count);
if (!port->port.console) {
dev_dbg(&port->dev, "%s - port not opened\n", __func__);
return;
}
while (count) {
unsigned int i;
unsigned int lf;
/* search for LF so we can insert CR if necessary */
for (i = 0, lf = 0 ; i < count ; i++) {
if (*(buf + i) == 10) {
lf = 1;
i++;
break;
}
}
/* pass on to the driver specific version of this function if
it is available */
retval = serial->type->write(NULL, port, buf, i);
dev_dbg(&port->dev, "%s - write: %d\n", __func__, retval);
if (lf) {
/* append CR after LF */
unsigned char cr = 13;
retval = serial->type->write(NULL, port, &cr, 1);
dev_dbg(&port->dev, "%s - write cr: %d\n",
__func__, retval);
}
buf += i;
count -= i;
}
}
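/*
 * Editorial note (illustration, not part of the original driver): the loop
 * above splits the buffer at each LF and emits a CR after it, so a console
 * message "ok\n" is sent to the device as 'o' 'k' 0x0a followed by a
 * separate one-byte write of 0x0d.
 */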
static struct tty_driver *usb_console_device(struct console *co, int *index)
{
struct tty_driver **p = (struct tty_driver **)co->data;
if (!*p)
return NULL;
*index = co->index;
return *p;
}
static struct console usbcons = {
.name = "ttyUSB",
.write = usb_console_write,
.device = usb_console_device,
.setup = usb_console_setup,
.flags = CON_PRINTBUFFER,
.index = -1,
.data = &usb_serial_tty_driver,
};
void usb_serial_console_disconnect(struct usb_serial *serial)
{
if (serial->port[0] && serial->port[0] == usbcons_info.port) {
usb_serial_console_exit();
usb_serial_put(serial);
}
}
void usb_serial_console_init(int minor)
{
if (minor == 0) {
/*
* Call register_console() if this is the first device plugged
* in. If we call it earlier, then the callback to
* console_setup() will fail, as there is not a device seen by
* the USB subsystem yet.
*/
/*
* Register console.
* NOTES:
* console_setup() is called (back) immediately (from
* register_console). console_write() is called immediately
* from register_console iff CON_PRINTBUFFER is set in flags.
*/
pr_debug("registering the USB serial console.\n");
register_console(&usbcons);
}
}
void usb_serial_console_exit(void)
{
if (usbcons_info.port) {
unregister_console(&usbcons);
usbcons_info.port->port.console = 0;
usbcons_info.port = NULL;
}
}
| linux-master | drivers/usb/serial/console.c |
// SPDX-License-Identifier: GPL-2.0
/*
* AIRcable USB Bluetooth Dongle Driver.
*
* Copyright (C) 2010 Johan Hovold <[email protected]>
* Copyright (C) 2006 Manuel Francisco Naranjo ([email protected])
*
* The device works as a standard CDC device; it has 2 interfaces, the first
* one for firmware access and the second one for the serial port.
* The protocol is very simple: there are two possibilities, reading or writing.
* When writing, the first urb must have a header that starts with 0x20 0x29;
* the next two bytes must say how much data will be sent.
* When reading, the process is almost the same, except that the header starts
* with 0x00 0x20.
*
* The device needs a little framing to understand the data coming from the usb
* buffer: the first and second bytes are used for a header, and the third and
* fourth tell the device the amount of information the packet holds.
* A full packet is 60 bytes of data plus the 4-byte header.
* When writing to the device the first two bytes of the header are 0x20 0x29.
* When reading, the bytes are 0x00 0x20 or 0x00 0x10. There is a strange
* situation when too much data arrives at the device, because it then sends
* the data but without the header. I use a simple hack to handle this
* situation: if data arrives that does not contain any header, then it is
* data that must go directly to the tty. As there is no documentation about
* whether there is any other control code, I simply check for the first
* one.
*
* I have taken some info from a Greg Kroah-Hartman article:
* http://www.linuxjournal.com/article/6573
* And from the Linux Device Driver Kit CD, which is a great work; the authors
* took the trouble to compile lots of information and knowledge about driver
* development and made it all available on a CD.
* URL: http://kernel.org/pub/linux/kernel/people/gregkh/ddk/
*
*/
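/*
 * Editorial note (illustration, not part of the original driver): given the
 * framing described above, a write of the three bytes "AT\r" would go out on
 * the bulk endpoint as
 *
 *	0x20 0x29 0x03 0x00 'A' 'T' 0x0d
 *
 * i.e. TX_HEADER_0, TX_HEADER_1, the payload length as a little-endian
 * 16-bit value, then the payload (see aircable_prepare_write_buffer() below).
 */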
#include <asm/unaligned.h>
#include <linux/tty.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/tty_flip.h>
#include <linux/usb.h>
#include <linux/usb/serial.h>
/* Vendor and Product ID */
#define AIRCABLE_VID 0x16CA
#define AIRCABLE_USB_PID 0x1502
/* Protocol Stuff */
#define HCI_HEADER_LENGTH 0x4
#define TX_HEADER_0 0x20
#define TX_HEADER_1 0x29
#define RX_HEADER_0 0x00
#define RX_HEADER_1 0x20
#define HCI_COMPLETE_FRAME 64
/* rx_flags */
#define THROTTLED 0x01
#define ACTUALLY_THROTTLED 0x02
#define DRIVER_AUTHOR "Naranjo, Manuel Francisco <[email protected]>, Johan Hovold <[email protected]>"
#define DRIVER_DESC "AIRcable USB Driver"
/* ID table that will be registered with USB core */
static const struct usb_device_id id_table[] = {
{ USB_DEVICE(AIRCABLE_VID, AIRCABLE_USB_PID) },
{ },
};
MODULE_DEVICE_TABLE(usb, id_table);
static int aircable_prepare_write_buffer(struct usb_serial_port *port,
void *dest, size_t size)
{
int count;
unsigned char *buf = dest;
count = kfifo_out_locked(&port->write_fifo, buf + HCI_HEADER_LENGTH,
size - HCI_HEADER_LENGTH, &port->lock);
buf[0] = TX_HEADER_0;
buf[1] = TX_HEADER_1;
put_unaligned_le16(count, &buf[2]);
return count + HCI_HEADER_LENGTH;
}
static int aircable_calc_num_ports(struct usb_serial *serial,
struct usb_serial_endpoints *epds)
{
/* Ignore the first interface, which has no bulk endpoints. */
if (epds->num_bulk_out == 0) {
dev_dbg(&serial->interface->dev,
"ignoring interface with no bulk-out endpoints\n");
return -ENODEV;
}
return 1;
}
static int aircable_process_packet(struct usb_serial_port *port,
int has_headers, char *packet, int len)
{
if (has_headers) {
len -= HCI_HEADER_LENGTH;
packet += HCI_HEADER_LENGTH;
}
if (len <= 0) {
dev_dbg(&port->dev, "%s - malformed packet\n", __func__);
return 0;
}
tty_insert_flip_string(&port->port, packet, len);
return len;
}
static void aircable_process_read_urb(struct urb *urb)
{
struct usb_serial_port *port = urb->context;
char *data = urb->transfer_buffer;
int has_headers;
int count;
int len;
int i;
has_headers = (urb->actual_length > 2 && data[0] == RX_HEADER_0);
count = 0;
for (i = 0; i < urb->actual_length; i += HCI_COMPLETE_FRAME) {
len = min_t(int, urb->actual_length - i, HCI_COMPLETE_FRAME);
count += aircable_process_packet(port, has_headers,
&data[i], len);
}
if (count)
tty_flip_buffer_push(&port->port);
}
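/*
 * Editorial note (illustration, not part of the original driver): the loop
 * above walks the urb in HCI_COMPLETE_FRAME (64-byte) steps.  A
 * (hypothetical) 128-byte transfer whose first byte is RX_HEADER_0 would be
 * split into two 64-byte frames, each of which loses its 4-byte header in
 * aircable_process_packet(), so 120 bytes would be pushed to the tty.
 */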
static struct usb_serial_driver aircable_device = {
.driver = {
.owner = THIS_MODULE,
.name = "aircable",
},
.id_table = id_table,
.bulk_out_size = HCI_COMPLETE_FRAME,
.calc_num_ports = aircable_calc_num_ports,
.process_read_urb = aircable_process_read_urb,
.prepare_write_buffer = aircable_prepare_write_buffer,
.throttle = usb_serial_generic_throttle,
.unthrottle = usb_serial_generic_unthrottle,
};
static struct usb_serial_driver * const serial_drivers[] = {
&aircable_device, NULL
};
module_usb_serial_driver(serial_drivers, id_table);
MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL v2");
| linux-master | drivers/usb/serial/aircable.c |
// SPDX-License-Identifier: GPL-2.0
/*
* USB Serial Converter Generic functions
*
* Copyright (C) 2010 - 2013 Johan Hovold ([email protected])
* Copyright (C) 1999 - 2002 Greg Kroah-Hartman ([email protected])
*/
#include <linux/kernel.h>
#include <linux/sched/signal.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/sysrq.h>
#include <linux/tty.h>
#include <linux/tty_flip.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/usb.h>
#include <linux/usb/serial.h>
#include <linux/uaccess.h>
#include <linux/kfifo.h>
#include <linux/serial.h>
#ifdef CONFIG_USB_SERIAL_GENERIC
static __u16 vendor = 0x05f9;
static __u16 product = 0xffff;
module_param(vendor, ushort, 0);
MODULE_PARM_DESC(vendor, "User specified USB idVendor");
module_param(product, ushort, 0);
MODULE_PARM_DESC(product, "User specified USB idProduct");
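/*
 * Editorial note (illustration, not part of the original driver): these
 * parameters let the generic driver be bound to an otherwise unsupported
 * device at module load time, for example (hypothetical IDs):
 *
 *	modprobe usbserial vendor=0x1234 product=0x5678
 */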
static struct usb_device_id generic_device_ids[2]; /* Initially all zeroes. */
static int usb_serial_generic_probe(struct usb_serial *serial,
const struct usb_device_id *id)
{
struct device *dev = &serial->interface->dev;
dev_info(dev, "The \"generic\" usb-serial driver is only for testing and one-off prototypes.\n");
dev_info(dev, "Tell [email protected] to add your device to a proper driver.\n");
return 0;
}
static int usb_serial_generic_calc_num_ports(struct usb_serial *serial,
struct usb_serial_endpoints *epds)
{
struct device *dev = &serial->interface->dev;
int num_ports;
num_ports = max(epds->num_bulk_in, epds->num_bulk_out);
if (num_ports == 0) {
dev_err(dev, "device has no bulk endpoints\n");
return -ENODEV;
}
return num_ports;
}
static struct usb_serial_driver usb_serial_generic_device = {
.driver = {
.owner = THIS_MODULE,
.name = "generic",
},
.id_table = generic_device_ids,
.probe = usb_serial_generic_probe,
.calc_num_ports = usb_serial_generic_calc_num_ports,
.throttle = usb_serial_generic_throttle,
.unthrottle = usb_serial_generic_unthrottle,
.resume = usb_serial_generic_resume,
};
static struct usb_serial_driver * const serial_drivers[] = {
&usb_serial_generic_device, NULL
};
#endif
int usb_serial_generic_register(void)
{
int retval = 0;
#ifdef CONFIG_USB_SERIAL_GENERIC
generic_device_ids[0].idVendor = vendor;
generic_device_ids[0].idProduct = product;
generic_device_ids[0].match_flags =
USB_DEVICE_ID_MATCH_VENDOR | USB_DEVICE_ID_MATCH_PRODUCT;
retval = usb_serial_register_drivers(serial_drivers,
"usbserial_generic", generic_device_ids);
#endif
return retval;
}
void usb_serial_generic_deregister(void)
{
#ifdef CONFIG_USB_SERIAL_GENERIC
usb_serial_deregister_drivers(serial_drivers);
#endif
}
int usb_serial_generic_open(struct tty_struct *tty, struct usb_serial_port *port)
{
int result = 0;
clear_bit(USB_SERIAL_THROTTLED, &port->flags);
if (port->bulk_in_size)
result = usb_serial_generic_submit_read_urbs(port, GFP_KERNEL);
return result;
}
EXPORT_SYMBOL_GPL(usb_serial_generic_open);
void usb_serial_generic_close(struct usb_serial_port *port)
{
unsigned long flags;
int i;
if (port->bulk_out_size) {
for (i = 0; i < ARRAY_SIZE(port->write_urbs); ++i)
usb_kill_urb(port->write_urbs[i]);
spin_lock_irqsave(&port->lock, flags);
kfifo_reset_out(&port->write_fifo);
spin_unlock_irqrestore(&port->lock, flags);
}
if (port->bulk_in_size) {
for (i = 0; i < ARRAY_SIZE(port->read_urbs); ++i)
usb_kill_urb(port->read_urbs[i]);
}
}
EXPORT_SYMBOL_GPL(usb_serial_generic_close);
int usb_serial_generic_prepare_write_buffer(struct usb_serial_port *port,
void *dest, size_t size)
{
return kfifo_out_locked(&port->write_fifo, dest, size, &port->lock);
}
/**
* usb_serial_generic_write_start - start writing buffered data
* @port: usb-serial port
* @mem_flags: flags to use for memory allocations
*
* Serialised using USB_SERIAL_WRITE_BUSY flag.
*
* Return: Zero on success or if busy, otherwise a negative errno value.
*/
int usb_serial_generic_write_start(struct usb_serial_port *port,
gfp_t mem_flags)
{
struct urb *urb;
int count, result;
unsigned long flags;
int i;
if (test_and_set_bit_lock(USB_SERIAL_WRITE_BUSY, &port->flags))
return 0;
retry:
spin_lock_irqsave(&port->lock, flags);
if (!port->write_urbs_free || !kfifo_len(&port->write_fifo)) {
clear_bit_unlock(USB_SERIAL_WRITE_BUSY, &port->flags);
spin_unlock_irqrestore(&port->lock, flags);
return 0;
}
i = (int)find_first_bit(&port->write_urbs_free,
ARRAY_SIZE(port->write_urbs));
spin_unlock_irqrestore(&port->lock, flags);
urb = port->write_urbs[i];
count = port->serial->type->prepare_write_buffer(port,
urb->transfer_buffer,
port->bulk_out_size);
urb->transfer_buffer_length = count;
usb_serial_debug_data(&port->dev, __func__, count, urb->transfer_buffer);
spin_lock_irqsave(&port->lock, flags);
port->tx_bytes += count;
spin_unlock_irqrestore(&port->lock, flags);
clear_bit(i, &port->write_urbs_free);
result = usb_submit_urb(urb, mem_flags);
if (result) {
dev_err_console(port, "%s - error submitting urb: %d\n",
__func__, result);
set_bit(i, &port->write_urbs_free);
spin_lock_irqsave(&port->lock, flags);
port->tx_bytes -= count;
spin_unlock_irqrestore(&port->lock, flags);
clear_bit_unlock(USB_SERIAL_WRITE_BUSY, &port->flags);
return result;
}
goto retry; /* try sending off another urb */
}
EXPORT_SYMBOL_GPL(usb_serial_generic_write_start);
/**
* usb_serial_generic_write - generic write function
* @tty: tty for the port
* @port: usb-serial port
* @buf: data to write
* @count: number of bytes to write
*
* Return: The number of characters buffered, which may be anything from
* zero to @count, or a negative errno value.
*/
int usb_serial_generic_write(struct tty_struct *tty,
struct usb_serial_port *port, const unsigned char *buf, int count)
{
int result;
if (!port->bulk_out_size)
return -ENODEV;
if (!count)
return 0;
count = kfifo_in_locked(&port->write_fifo, buf, count, &port->lock);
result = usb_serial_generic_write_start(port, GFP_ATOMIC);
if (result)
return result;
return count;
}
EXPORT_SYMBOL_GPL(usb_serial_generic_write);
unsigned int usb_serial_generic_write_room(struct tty_struct *tty)
{
struct usb_serial_port *port = tty->driver_data;
unsigned long flags;
unsigned int room;
if (!port->bulk_out_size)
return 0;
spin_lock_irqsave(&port->lock, flags);
room = kfifo_avail(&port->write_fifo);
spin_unlock_irqrestore(&port->lock, flags);
dev_dbg(&port->dev, "%s - returns %u\n", __func__, room);
return room;
}
unsigned int usb_serial_generic_chars_in_buffer(struct tty_struct *tty)
{
struct usb_serial_port *port = tty->driver_data;
unsigned long flags;
unsigned int chars;
if (!port->bulk_out_size)
return 0;
spin_lock_irqsave(&port->lock, flags);
chars = kfifo_len(&port->write_fifo) + port->tx_bytes;
spin_unlock_irqrestore(&port->lock, flags);
dev_dbg(&port->dev, "%s - returns %u\n", __func__, chars);
return chars;
}
EXPORT_SYMBOL_GPL(usb_serial_generic_chars_in_buffer);
void usb_serial_generic_wait_until_sent(struct tty_struct *tty, long timeout)
{
struct usb_serial_port *port = tty->driver_data;
unsigned int bps;
unsigned long period;
unsigned long expire;
bps = tty_get_baud_rate(tty);
if (!bps)
bps = 9600; /* B0 */
/*
* Use a poll-period of roughly the time it takes to send one
* character or at least one jiffy.
*/
period = max_t(unsigned long, (10 * HZ / bps), 1);
if (timeout)
period = min_t(unsigned long, period, timeout);
dev_dbg(&port->dev, "%s - timeout = %u ms, period = %u ms\n",
__func__, jiffies_to_msecs(timeout),
jiffies_to_msecs(period));
expire = jiffies + timeout;
while (!port->serial->type->tx_empty(port)) {
schedule_timeout_interruptible(period);
if (signal_pending(current))
break;
if (timeout && time_after(jiffies, expire))
break;
}
}
EXPORT_SYMBOL_GPL(usb_serial_generic_wait_until_sent);
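/*
 * Editorial note (illustration, not part of the original driver): the poll
 * period above is max(10 * HZ / bps, 1) jiffies, 10 being roughly the bits
 * per character.  At 300 baud with HZ=1000 that is 33 jiffies (~33 ms, about
 * one character time); at 115200 baud the division truncates to 0 and the
 * max() clamps the period to a single jiffy.
 */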
static int usb_serial_generic_submit_read_urb(struct usb_serial_port *port,
int index, gfp_t mem_flags)
{
int res;
if (!test_and_clear_bit(index, &port->read_urbs_free))
return 0;
dev_dbg(&port->dev, "%s - urb %d\n", __func__, index);
res = usb_submit_urb(port->read_urbs[index], mem_flags);
if (res) {
if (res != -EPERM && res != -ENODEV) {
dev_err(&port->dev,
"%s - usb_submit_urb failed: %d\n",
__func__, res);
}
set_bit(index, &port->read_urbs_free);
return res;
}
return 0;
}
int usb_serial_generic_submit_read_urbs(struct usb_serial_port *port,
gfp_t mem_flags)
{
int res;
int i;
for (i = 0; i < ARRAY_SIZE(port->read_urbs); ++i) {
res = usb_serial_generic_submit_read_urb(port, i, mem_flags);
if (res)
goto err;
}
return 0;
err:
for (; i >= 0; --i)
usb_kill_urb(port->read_urbs[i]);
return res;
}
EXPORT_SYMBOL_GPL(usb_serial_generic_submit_read_urbs);
void usb_serial_generic_process_read_urb(struct urb *urb)
{
struct usb_serial_port *port = urb->context;
char *ch = urb->transfer_buffer;
int i;
if (!urb->actual_length)
return;
/*
* The per-character mucking around with the sysrq path is too slow for
* stuff like 3G modems, so short-circuit it in the 99.9999999% of
* cases where the USB serial is not a console anyway.
*/
if (port->sysrq) {
for (i = 0; i < urb->actual_length; i++, ch++) {
if (!usb_serial_handle_sysrq_char(port, *ch))
tty_insert_flip_char(&port->port, *ch, TTY_NORMAL);
}
} else {
tty_insert_flip_string(&port->port, ch, urb->actual_length);
}
tty_flip_buffer_push(&port->port);
}
EXPORT_SYMBOL_GPL(usb_serial_generic_process_read_urb);
void usb_serial_generic_read_bulk_callback(struct urb *urb)
{
struct usb_serial_port *port = urb->context;
unsigned char *data = urb->transfer_buffer;
bool stopped = false;
int status = urb->status;
int i;
for (i = 0; i < ARRAY_SIZE(port->read_urbs); ++i) {
if (urb == port->read_urbs[i])
break;
}
dev_dbg(&port->dev, "%s - urb %d, len %d\n", __func__, i,
urb->actual_length);
switch (status) {
case 0:
usb_serial_debug_data(&port->dev, __func__, urb->actual_length,
data);
port->serial->type->process_read_urb(urb);
break;
case -ENOENT:
case -ECONNRESET:
case -ESHUTDOWN:
dev_dbg(&port->dev, "%s - urb stopped: %d\n",
__func__, status);
stopped = true;
break;
case -EPIPE:
dev_err(&port->dev, "%s - urb stopped: %d\n",
__func__, status);
stopped = true;
break;
default:
dev_dbg(&port->dev, "%s - nonzero urb status: %d\n",
__func__, status);
break;
}
/*
* Make sure URB processing is done before marking as free to avoid
* racing with unthrottle() on another CPU. Matches the barriers
* implied by the test_and_clear_bit() in
* usb_serial_generic_submit_read_urb().
*/
smp_mb__before_atomic();
set_bit(i, &port->read_urbs_free);
/*
* Make sure URB is marked as free before checking the throttled flag
* to avoid racing with unthrottle() on another CPU. Matches the
* smp_mb__after_atomic() in unthrottle().
*/
smp_mb__after_atomic();
if (stopped)
return;
if (test_bit(USB_SERIAL_THROTTLED, &port->flags))
return;
usb_serial_generic_submit_read_urb(port, i, GFP_ATOMIC);
}
EXPORT_SYMBOL_GPL(usb_serial_generic_read_bulk_callback);
void usb_serial_generic_write_bulk_callback(struct urb *urb)
{
unsigned long flags;
struct usb_serial_port *port = urb->context;
int status = urb->status;
int i;
for (i = 0; i < ARRAY_SIZE(port->write_urbs); ++i) {
if (port->write_urbs[i] == urb)
break;
}
spin_lock_irqsave(&port->lock, flags);
port->tx_bytes -= urb->transfer_buffer_length;
set_bit(i, &port->write_urbs_free);
spin_unlock_irqrestore(&port->lock, flags);
switch (status) {
case 0:
break;
case -ENOENT:
case -ECONNRESET:
case -ESHUTDOWN:
dev_dbg(&port->dev, "%s - urb stopped: %d\n",
__func__, status);
return;
case -EPIPE:
dev_err_console(port, "%s - urb stopped: %d\n",
__func__, status);
return;
default:
dev_err_console(port, "%s - nonzero urb status: %d\n",
__func__, status);
break;
}
usb_serial_generic_write_start(port, GFP_ATOMIC);
usb_serial_port_softint(port);
}
EXPORT_SYMBOL_GPL(usb_serial_generic_write_bulk_callback);
void usb_serial_generic_throttle(struct tty_struct *tty)
{
struct usb_serial_port *port = tty->driver_data;
set_bit(USB_SERIAL_THROTTLED, &port->flags);
}
EXPORT_SYMBOL_GPL(usb_serial_generic_throttle);
void usb_serial_generic_unthrottle(struct tty_struct *tty)
{
struct usb_serial_port *port = tty->driver_data;
clear_bit(USB_SERIAL_THROTTLED, &port->flags);
/*
* Matches the smp_mb__after_atomic() in
* usb_serial_generic_read_bulk_callback().
*/
smp_mb__after_atomic();
usb_serial_generic_submit_read_urbs(port, GFP_KERNEL);
}
EXPORT_SYMBOL_GPL(usb_serial_generic_unthrottle);
static bool usb_serial_generic_msr_changed(struct tty_struct *tty,
unsigned long arg, struct async_icount *cprev)
{
struct usb_serial_port *port = tty->driver_data;
struct async_icount cnow;
unsigned long flags;
bool ret;
/*
* Use tty-port initialised flag to detect all hangups including the
* one generated at USB-device disconnect.
*/
if (!tty_port_initialized(&port->port))
return true;
spin_lock_irqsave(&port->lock, flags);
cnow = port->icount; /* atomic copy*/
spin_unlock_irqrestore(&port->lock, flags);
ret = ((arg & TIOCM_RNG) && (cnow.rng != cprev->rng)) ||
((arg & TIOCM_DSR) && (cnow.dsr != cprev->dsr)) ||
((arg & TIOCM_CD) && (cnow.dcd != cprev->dcd)) ||
((arg & TIOCM_CTS) && (cnow.cts != cprev->cts));
*cprev = cnow;
return ret;
}
int usb_serial_generic_tiocmiwait(struct tty_struct *tty, unsigned long arg)
{
struct usb_serial_port *port = tty->driver_data;
struct async_icount cnow;
unsigned long flags;
int ret;
spin_lock_irqsave(&port->lock, flags);
cnow = port->icount; /* atomic copy */
spin_unlock_irqrestore(&port->lock, flags);
ret = wait_event_interruptible(port->port.delta_msr_wait,
usb_serial_generic_msr_changed(tty, arg, &cnow));
if (!ret && !tty_port_initialized(&port->port))
ret = -EIO;
return ret;
}
EXPORT_SYMBOL_GPL(usb_serial_generic_tiocmiwait);
int usb_serial_generic_get_icount(struct tty_struct *tty,
struct serial_icounter_struct *icount)
{
struct usb_serial_port *port = tty->driver_data;
struct async_icount cnow;
unsigned long flags;
spin_lock_irqsave(&port->lock, flags);
cnow = port->icount; /* atomic copy */
spin_unlock_irqrestore(&port->lock, flags);
icount->cts = cnow.cts;
icount->dsr = cnow.dsr;
icount->rng = cnow.rng;
icount->dcd = cnow.dcd;
icount->tx = cnow.tx;
icount->rx = cnow.rx;
icount->frame = cnow.frame;
icount->parity = cnow.parity;
icount->overrun = cnow.overrun;
icount->brk = cnow.brk;
icount->buf_overrun = cnow.buf_overrun;
return 0;
}
EXPORT_SYMBOL_GPL(usb_serial_generic_get_icount);
#if defined(CONFIG_USB_SERIAL_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ)
int usb_serial_handle_sysrq_char(struct usb_serial_port *port, unsigned int ch)
{
if (port->sysrq) {
if (ch && time_before(jiffies, port->sysrq)) {
handle_sysrq(ch);
port->sysrq = 0;
return 1;
}
port->sysrq = 0;
}
return 0;
}
EXPORT_SYMBOL_GPL(usb_serial_handle_sysrq_char);
int usb_serial_handle_break(struct usb_serial_port *port)
{
if (!port->port.console)
return 0;
if (!port->sysrq) {
port->sysrq = jiffies + HZ*5;
return 1;
}
port->sysrq = 0;
return 0;
}
EXPORT_SYMBOL_GPL(usb_serial_handle_break);
#endif
/**
* usb_serial_handle_dcd_change - handle a change of carrier detect state
* @port: usb-serial port
* @tty: tty for the port
* @status: new carrier detect status, nonzero if active
*/
void usb_serial_handle_dcd_change(struct usb_serial_port *port,
struct tty_struct *tty, unsigned int status)
{
dev_dbg(&port->dev, "%s - status %d\n", __func__, status);
if (tty) {
struct tty_ldisc *ld = tty_ldisc_ref(tty);
if (ld) {
if (ld->ops->dcd_change)
ld->ops->dcd_change(tty, status);
tty_ldisc_deref(ld);
}
}
if (status)
wake_up_interruptible(&port->port.open_wait);
else if (tty && !C_CLOCAL(tty))
tty_hangup(tty);
}
EXPORT_SYMBOL_GPL(usb_serial_handle_dcd_change);
int usb_serial_generic_resume(struct usb_serial *serial)
{
struct usb_serial_port *port;
int i, c = 0, r;
for (i = 0; i < serial->num_ports; i++) {
port = serial->port[i];
if (!tty_port_initialized(&port->port))
continue;
if (port->bulk_in_size) {
r = usb_serial_generic_submit_read_urbs(port,
GFP_NOIO);
if (r < 0)
c++;
}
if (port->bulk_out_size) {
r = usb_serial_generic_write_start(port, GFP_NOIO);
if (r < 0)
c++;
}
}
return c ? -EIO : 0;
}
EXPORT_SYMBOL_GPL(usb_serial_generic_resume);
| linux-master | drivers/usb/serial/generic.c |
// SPDX-License-Identifier: GPL-2.0
/*
USB Driver layer for GSM modems
Copyright (C) 2005 Matthias Urlichs <[email protected]>
Portions copied from the Keyspan driver by Hugh Blemings <[email protected]>
History: see the git log.
Work sponsored by: Sigos GmbH, Germany <[email protected]>
This driver exists because the "normal" serial driver doesn't work too well
with GSM modems. Issues:
- data loss -- one single Receive URB is not nearly enough
- controlling the baud rate doesn't make sense
*/
#define DRIVER_AUTHOR "Matthias Urlichs <[email protected]>"
#define DRIVER_DESC "USB Driver for GSM modems"
#include <linux/kernel.h>
#include <linux/jiffies.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/tty.h>
#include <linux/tty_flip.h>
#include <linux/module.h>
#include <linux/bitops.h>
#include <linux/uaccess.h>
#include <linux/usb.h>
#include <linux/usb/cdc.h>
#include <linux/usb/serial.h>
#include <linux/serial.h>
#include "usb-wwan.h"
/*
* Generate DTR/RTS signals on the port using the SET_CONTROL_LINE_STATE request
* in CDC ACM.
*/
static int usb_wwan_send_setup(struct usb_serial_port *port)
{
struct usb_serial *serial = port->serial;
struct usb_wwan_port_private *portdata;
int val = 0;
int ifnum;
int res;
portdata = usb_get_serial_port_data(port);
if (portdata->dtr_state)
val |= USB_CDC_CTRL_DTR;
if (portdata->rts_state)
val |= USB_CDC_CTRL_RTS;
ifnum = serial->interface->cur_altsetting->desc.bInterfaceNumber;
res = usb_autopm_get_interface(serial->interface);
if (res)
return res;
res = usb_control_msg(serial->dev, usb_sndctrlpipe(serial->dev, 0),
USB_CDC_REQ_SET_CONTROL_LINE_STATE,
USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE,
val, ifnum, NULL, 0, USB_CTRL_SET_TIMEOUT);
usb_autopm_put_interface(port->serial->interface);
return res;
}
void usb_wwan_dtr_rts(struct usb_serial_port *port, int on)
{
struct usb_wwan_port_private *portdata;
struct usb_wwan_intf_private *intfdata;
intfdata = usb_get_serial_data(port->serial);
if (!intfdata->use_send_setup)
return;
portdata = usb_get_serial_port_data(port);
/* FIXME: locking */
portdata->rts_state = on;
portdata->dtr_state = on;
usb_wwan_send_setup(port);
}
EXPORT_SYMBOL(usb_wwan_dtr_rts);
int usb_wwan_tiocmget(struct tty_struct *tty)
{
struct usb_serial_port *port = tty->driver_data;
unsigned int value;
struct usb_wwan_port_private *portdata;
portdata = usb_get_serial_port_data(port);
value = ((portdata->rts_state) ? TIOCM_RTS : 0) |
((portdata->dtr_state) ? TIOCM_DTR : 0) |
((portdata->cts_state) ? TIOCM_CTS : 0) |
((portdata->dsr_state) ? TIOCM_DSR : 0) |
((portdata->dcd_state) ? TIOCM_CAR : 0) |
((portdata->ri_state) ? TIOCM_RNG : 0);
return value;
}
EXPORT_SYMBOL(usb_wwan_tiocmget);
int usb_wwan_tiocmset(struct tty_struct *tty,
unsigned int set, unsigned int clear)
{
struct usb_serial_port *port = tty->driver_data;
struct usb_wwan_port_private *portdata;
struct usb_wwan_intf_private *intfdata;
portdata = usb_get_serial_port_data(port);
intfdata = usb_get_serial_data(port->serial);
if (!intfdata->use_send_setup)
return -EINVAL;
/* FIXME: what locks portdata fields ? */
if (set & TIOCM_RTS)
portdata->rts_state = 1;
if (set & TIOCM_DTR)
portdata->dtr_state = 1;
if (clear & TIOCM_RTS)
portdata->rts_state = 0;
if (clear & TIOCM_DTR)
portdata->dtr_state = 0;
return usb_wwan_send_setup(port);
}
EXPORT_SYMBOL(usb_wwan_tiocmset);
int usb_wwan_write(struct tty_struct *tty, struct usb_serial_port *port,
const unsigned char *buf, int count)
{
struct usb_wwan_port_private *portdata;
struct usb_wwan_intf_private *intfdata;
int i;
int left, todo;
struct urb *this_urb = NULL; /* initialised only to silence a spurious warning */
int err;
unsigned long flags;
portdata = usb_get_serial_port_data(port);
intfdata = usb_get_serial_data(port->serial);
dev_dbg(&port->dev, "%s: write (%d chars)\n", __func__, count);
left = count;
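/*
 * Spread the data across the bulk-out URBs: a slot is claimed via its
 * out_busy bit, and a URB that has been pending for more than ten
 * seconds is unlinked so the slot can be reused on a later write.
 */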
for (i = 0; left > 0 && i < N_OUT_URB; i++) {
todo = left;
if (todo > OUT_BUFLEN)
todo = OUT_BUFLEN;
this_urb = portdata->out_urbs[i];
if (test_and_set_bit(i, &portdata->out_busy)) {
if (time_before(jiffies,
portdata->tx_start_time[i] + 10 * HZ))
continue;
usb_unlink_urb(this_urb);
continue;
}
dev_dbg(&port->dev, "%s: endpoint %d buf %d\n", __func__,
usb_pipeendpoint(this_urb->pipe), i);
err = usb_autopm_get_interface_async(port->serial->interface);
if (err < 0) {
clear_bit(i, &portdata->out_busy);
break;
}
/* send the data */
memcpy(this_urb->transfer_buffer, buf, todo);
this_urb->transfer_buffer_length = todo;
spin_lock_irqsave(&intfdata->susp_lock, flags);
if (intfdata->suspended) {
usb_anchor_urb(this_urb, &portdata->delayed);
spin_unlock_irqrestore(&intfdata->susp_lock, flags);
} else {
intfdata->in_flight++;
spin_unlock_irqrestore(&intfdata->susp_lock, flags);
err = usb_submit_urb(this_urb, GFP_ATOMIC);
if (err) {
dev_err(&port->dev,
"%s: submit urb %d failed: %d\n",
__func__, i, err);
clear_bit(i, &portdata->out_busy);
spin_lock_irqsave(&intfdata->susp_lock, flags);
intfdata->in_flight--;
spin_unlock_irqrestore(&intfdata->susp_lock,
flags);
usb_autopm_put_interface_async(port->serial->interface);
break;
}
}
portdata->tx_start_time[i] = jiffies;
buf += todo;
left -= todo;
}
count -= left;
dev_dbg(&port->dev, "%s: wrote (did %d)\n", __func__, count);
return count;
}
EXPORT_SYMBOL(usb_wwan_write);
static void usb_wwan_indat_callback(struct urb *urb)
{
int err;
int endpoint;
struct usb_serial_port *port;
struct device *dev;
unsigned char *data = urb->transfer_buffer;
int status = urb->status;
endpoint = usb_pipeendpoint(urb->pipe);
port = urb->context;
dev = &port->dev;
if (status) {
dev_dbg(dev, "%s: nonzero status: %d on endpoint %02x.\n",
__func__, status, endpoint);
/* don't resubmit on fatal errors */
if (status == -ESHUTDOWN || status == -ENOENT)
return;
} else {
if (urb->actual_length) {
tty_insert_flip_string(&port->port, data,
urb->actual_length);
tty_flip_buffer_push(&port->port);
} else
dev_dbg(dev, "%s: empty read urb received\n", __func__);
}
/* Resubmit urb so we continue receiving */
err = usb_submit_urb(urb, GFP_ATOMIC);
if (err) {
if (err != -EPERM && err != -ENODEV) {
dev_err(dev, "%s: resubmit read urb failed. (%d)\n",
__func__, err);
/* busy also in error unless we are killed */
usb_mark_last_busy(port->serial->dev);
}
} else {
usb_mark_last_busy(port->serial->dev);
}
}
static void usb_wwan_outdat_callback(struct urb *urb)
{
struct usb_serial_port *port;
struct usb_wwan_port_private *portdata;
struct usb_wwan_intf_private *intfdata;
unsigned long flags;
int i;
port = urb->context;
intfdata = usb_get_serial_data(port->serial);
usb_serial_port_softint(port);
usb_autopm_put_interface_async(port->serial->interface);
portdata = usb_get_serial_port_data(port);
spin_lock_irqsave(&intfdata->susp_lock, flags);
intfdata->in_flight--;
spin_unlock_irqrestore(&intfdata->susp_lock, flags);
for (i = 0; i < N_OUT_URB; ++i) {
if (portdata->out_urbs[i] == urb) {
smp_mb__before_atomic();
clear_bit(i, &portdata->out_busy);
break;
}
}
}
unsigned int usb_wwan_write_room(struct tty_struct *tty)
{
struct usb_serial_port *port = tty->driver_data;
struct usb_wwan_port_private *portdata;
int i;
unsigned int data_len = 0;
struct urb *this_urb;
portdata = usb_get_serial_port_data(port);
for (i = 0; i < N_OUT_URB; i++) {
this_urb = portdata->out_urbs[i];
if (this_urb && !test_bit(i, &portdata->out_busy))
data_len += OUT_BUFLEN;
}
dev_dbg(&port->dev, "%s: %u\n", __func__, data_len);
return data_len;
}
EXPORT_SYMBOL(usb_wwan_write_room);
unsigned int usb_wwan_chars_in_buffer(struct tty_struct *tty)
{
struct usb_serial_port *port = tty->driver_data;
struct usb_wwan_port_private *portdata;
int i;
unsigned int data_len = 0;
struct urb *this_urb;
portdata = usb_get_serial_port_data(port);
for (i = 0; i < N_OUT_URB; i++) {
this_urb = portdata->out_urbs[i];
/*
 * FIXME: This locking is insufficient as this_urb may
 * go unused during the test.
 */
if (this_urb && test_bit(i, &portdata->out_busy))
data_len += this_urb->transfer_buffer_length;
}
dev_dbg(&port->dev, "%s: %u\n", __func__, data_len);
return data_len;
}
EXPORT_SYMBOL(usb_wwan_chars_in_buffer);
int usb_wwan_open(struct tty_struct *tty, struct usb_serial_port *port)
{
struct usb_wwan_port_private *portdata;
struct usb_wwan_intf_private *intfdata;
struct usb_serial *serial = port->serial;
int i, err;
struct urb *urb;
portdata = usb_get_serial_port_data(port);
intfdata = usb_get_serial_data(serial);
if (port->interrupt_in_urb) {
err = usb_submit_urb(port->interrupt_in_urb, GFP_KERNEL);
if (err) {
dev_err(&port->dev, "%s: submit int urb failed: %d\n",
__func__, err);
}
}
/* Start reading from the IN endpoint */
for (i = 0; i < N_IN_URB; i++) {
urb = portdata->in_urbs[i];
if (!urb)
continue;
err = usb_submit_urb(urb, GFP_KERNEL);
if (err) {
dev_err(&port->dev,
"%s: submit read urb %d failed: %d\n",
__func__, i, err);
}
}
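/* Count open ports so remote wakeup is requested only while at least one port is open. */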
spin_lock_irq(&intfdata->susp_lock);
if (++intfdata->open_ports == 1)
serial->interface->needs_remote_wakeup = 1;
spin_unlock_irq(&intfdata->susp_lock);
/* this balances a get in the generic USB serial code */
usb_autopm_put_interface(serial->interface);
return 0;
}
EXPORT_SYMBOL(usb_wwan_open);
static void unbusy_queued_urb(struct urb *urb,
struct usb_wwan_port_private *portdata)
{
int i;
for (i = 0; i < N_OUT_URB; i++) {
if (urb == portdata->out_urbs[i]) {
clear_bit(i, &portdata->out_busy);
break;
}
}
}
void usb_wwan_close(struct usb_serial_port *port)
{
int i;
struct usb_serial *serial = port->serial;
struct usb_wwan_port_private *portdata;
struct usb_wwan_intf_private *intfdata = usb_get_serial_data(serial);
struct urb *urb;
portdata = usb_get_serial_port_data(port);
/*
* Need to take susp_lock to make sure port is not already being
* resumed, but no need to hold it due to the tty-port initialized
* flag.
*/
spin_lock_irq(&intfdata->susp_lock);
if (--intfdata->open_ports == 0)
serial->interface->needs_remote_wakeup = 0;
spin_unlock_irq(&intfdata->susp_lock);
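/* Drop any writes queued while suspended, releasing the PM reference taken for each. */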
for (;;) {
urb = usb_get_from_anchor(&portdata->delayed);
if (!urb)
break;
unbusy_queued_urb(urb, portdata);
usb_autopm_put_interface_async(serial->interface);
}
for (i = 0; i < N_IN_URB; i++)
usb_kill_urb(portdata->in_urbs[i]);
for (i = 0; i < N_OUT_URB; i++)
usb_kill_urb(portdata->out_urbs[i]);
usb_kill_urb(port->interrupt_in_urb);
usb_autopm_get_interface_no_resume(serial->interface);
}
EXPORT_SYMBOL(usb_wwan_close);
static struct urb *usb_wwan_setup_urb(struct usb_serial_port *port,
int endpoint,
int dir, void *ctx, char *buf, int len,
void (*callback) (struct urb *))
{
struct usb_serial *serial = port->serial;
struct usb_wwan_intf_private *intfdata = usb_get_serial_data(serial);
struct urb *urb;
urb = usb_alloc_urb(0, GFP_KERNEL); /* No ISO */
if (!urb)
return NULL;
usb_fill_bulk_urb(urb, serial->dev,
usb_sndbulkpipe(serial->dev, endpoint) | dir,
buf, len, callback, ctx);
if (intfdata->use_zlp && dir == USB_DIR_OUT)
urb->transfer_flags |= URB_ZERO_PACKET;
return urb;
}
int usb_wwan_port_probe(struct usb_serial_port *port)
{
struct usb_wwan_port_private *portdata;
struct urb *urb;
u8 *buffer;
int i;
if (!port->bulk_in_size || !port->bulk_out_size)
return -ENODEV;
portdata = kzalloc(sizeof(*portdata), GFP_KERNEL);
if (!portdata)
return -ENOMEM;
init_usb_anchor(&portdata->delayed);
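/* Bulk-in buffers are allocated a page at a time (used with IN_BUFLEN-byte URBs); bulk-out buffers are OUT_BUFLEN-byte kmalloc allocations. */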
for (i = 0; i < N_IN_URB; i++) {
buffer = (u8 *)__get_free_page(GFP_KERNEL);
if (!buffer)
goto bail_out_error;
portdata->in_buffer[i] = buffer;
urb = usb_wwan_setup_urb(port, port->bulk_in_endpointAddress,
USB_DIR_IN, port,
buffer, IN_BUFLEN,
usb_wwan_indat_callback);
portdata->in_urbs[i] = urb;
}
for (i = 0; i < N_OUT_URB; i++) {
buffer = kmalloc(OUT_BUFLEN, GFP_KERNEL);
if (!buffer)
goto bail_out_error2;
portdata->out_buffer[i] = buffer;
urb = usb_wwan_setup_urb(port, port->bulk_out_endpointAddress,
USB_DIR_OUT, port,
buffer, OUT_BUFLEN,
usb_wwan_outdat_callback);
portdata->out_urbs[i] = urb;
}
usb_set_serial_port_data(port, portdata);
return 0;
bail_out_error2:
for (i = 0; i < N_OUT_URB; i++) {
usb_free_urb(portdata->out_urbs[i]);
kfree(portdata->out_buffer[i]);
}
bail_out_error:
for (i = 0; i < N_IN_URB; i++) {
usb_free_urb(portdata->in_urbs[i]);
free_page((unsigned long)portdata->in_buffer[i]);
}
kfree(portdata);
return -ENOMEM;
}
EXPORT_SYMBOL_GPL(usb_wwan_port_probe);
void usb_wwan_port_remove(struct usb_serial_port *port)
{
int i;
struct usb_wwan_port_private *portdata;
portdata = usb_get_serial_port_data(port);
usb_set_serial_port_data(port, NULL);
for (i = 0; i < N_IN_URB; i++) {
usb_free_urb(portdata->in_urbs[i]);
free_page((unsigned long)portdata->in_buffer[i]);
}
for (i = 0; i < N_OUT_URB; i++) {
usb_free_urb(portdata->out_urbs[i]);
kfree(portdata->out_buffer[i]);
}
kfree(portdata);
}
EXPORT_SYMBOL(usb_wwan_port_remove);
#ifdef CONFIG_PM
static void stop_urbs(struct usb_serial *serial)
{
int i, j;
struct usb_serial_port *port;
struct usb_wwan_port_private *portdata;
for (i = 0; i < serial->num_ports; ++i) {
port = serial->port[i];
portdata = usb_get_serial_port_data(port);
if (!portdata)
continue;
for (j = 0; j < N_IN_URB; j++)
usb_kill_urb(portdata->in_urbs[j]);
for (j = 0; j < N_OUT_URB; j++)
usb_kill_urb(portdata->out_urbs[j]);
usb_kill_urb(port->interrupt_in_urb);
}
}
int usb_wwan_suspend(struct usb_serial *serial, pm_message_t message)
{
struct usb_wwan_intf_private *intfdata = usb_get_serial_data(serial);
spin_lock_irq(&intfdata->susp_lock);
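/* Refuse runtime (auto)suspend while bulk-out URBs are still in flight; system suspend always proceeds. */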
if (PMSG_IS_AUTO(message)) {
if (intfdata->in_flight) {
spin_unlock_irq(&intfdata->susp_lock);
return -EBUSY;
}
}
intfdata->suspended = 1;
spin_unlock_irq(&intfdata->susp_lock);
stop_urbs(serial);
return 0;
}
EXPORT_SYMBOL(usb_wwan_suspend);
/* Caller must hold susp_lock. */
static int usb_wwan_submit_delayed_urbs(struct usb_serial_port *port)
{
struct usb_serial *serial = port->serial;
struct usb_wwan_intf_private *data = usb_get_serial_data(serial);
struct usb_wwan_port_private *portdata;
struct urb *urb;
int err_count = 0;
int err;
portdata = usb_get_serial_port_data(port);
for (;;) {
urb = usb_get_from_anchor(&portdata->delayed);
if (!urb)
break;
err = usb_submit_urb(urb, GFP_ATOMIC);
if (err) {
dev_err(&port->dev, "%s: submit urb failed: %d\n",
__func__, err);
err_count++;
unbusy_queued_urb(urb, portdata);
usb_autopm_put_interface_async(serial->interface);
continue;
}
data->in_flight++;
}
if (err_count)
return -EIO;
return 0;
}
int usb_wwan_resume(struct usb_serial *serial)
{
int i, j;
struct usb_serial_port *port;
struct usb_wwan_intf_private *intfdata = usb_get_serial_data(serial);
struct usb_wwan_port_private *portdata;
struct urb *urb;
int err;
int err_count = 0;
spin_lock_irq(&intfdata->susp_lock);
for (i = 0; i < serial->num_ports; i++) {
port = serial->port[i];
if (!tty_port_initialized(&port->port))
continue;
portdata = usb_get_serial_port_data(port);
if (port->interrupt_in_urb) {
err = usb_submit_urb(port->interrupt_in_urb,
GFP_ATOMIC);
if (err) {
dev_err(&port->dev,
"%s: submit int urb failed: %d\n",
__func__, err);
err_count++;
}
}
err = usb_wwan_submit_delayed_urbs(port);
if (err)
err_count++;
for (j = 0; j < N_IN_URB; j++) {
urb = portdata->in_urbs[j];
err = usb_submit_urb(urb, GFP_ATOMIC);
if (err < 0) {
dev_err(&port->dev,
"%s: submit read urb %d failed: %d\n",
__func__, i, err);
err_count++;
}
}
}
intfdata->suspended = 0;
spin_unlock_irq(&intfdata->susp_lock);
if (err_count)
return -EIO;
return 0;
}
EXPORT_SYMBOL(usb_wwan_resume);
#endif
MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL v2");
| linux-master | drivers/usb/serial/usb_wwan.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* REINER SCT cyberJack pinpad/e-com USB Chipcard Reader Driver
*
* Copyright (C) 2001 REINER SCT
* Author: Matthias Bruestle
*
* Contact: [email protected] (see MAINTAINERS)
*
* This program is largely derived from work by the linux-usb group
* and associated source files. Please see the usb/serial files for
* individual credits and copyrights.
*
* Thanks to Greg Kroah-Hartman ([email protected]) for his help and
* patience.
*
* In case of problems, please write to the contact e-mail address
* mentioned above.
*
* Please note that later models of the cyberjack reader family are
* supported by a libusb-based userspace device driver.
*
* Homepage: http://www.reiner-sct.de/support/treiber_cyberjack.php#linux
*/
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/tty.h>
#include <linux/tty_driver.h>
#include <linux/tty_flip.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/uaccess.h>
#include <linux/usb.h>
#include <linux/usb/serial.h>
#define CYBERJACK_LOCAL_BUF_SIZE 32
#define DRIVER_AUTHOR "Matthias Bruestle"
#define DRIVER_DESC "REINER SCT cyberJack pinpad/e-com USB Chipcard Reader Driver"
#define CYBERJACK_VENDOR_ID 0x0C4B
#define CYBERJACK_PRODUCT_ID 0x0100
/* Function prototypes */
static int cyberjack_port_probe(struct usb_serial_port *port);
static void cyberjack_port_remove(struct usb_serial_port *port);
static int cyberjack_open(struct tty_struct *tty,
struct usb_serial_port *port);
static void cyberjack_close(struct usb_serial_port *port);
static int cyberjack_write(struct tty_struct *tty,
struct usb_serial_port *port, const unsigned char *buf, int count);
static unsigned int cyberjack_write_room(struct tty_struct *tty);
static void cyberjack_read_int_callback(struct urb *urb);
static void cyberjack_read_bulk_callback(struct urb *urb);
static void cyberjack_write_bulk_callback(struct urb *urb);
static const struct usb_device_id id_table[] = {
{ USB_DEVICE(CYBERJACK_VENDOR_ID, CYBERJACK_PRODUCT_ID) },
{ } /* Terminating entry */
};
MODULE_DEVICE_TABLE(usb, id_table);
static struct usb_serial_driver cyberjack_device = {
.driver = {
.owner = THIS_MODULE,
.name = "cyberjack",
},
.description = "Reiner SCT Cyberjack USB card reader",
.id_table = id_table,
.num_ports = 1,
.num_bulk_out = 1,
.port_probe = cyberjack_port_probe,
.port_remove = cyberjack_port_remove,
.open = cyberjack_open,
.close = cyberjack_close,
.write = cyberjack_write,
.write_room = cyberjack_write_room,
.read_int_callback = cyberjack_read_int_callback,
.read_bulk_callback = cyberjack_read_bulk_callback,
.write_bulk_callback = cyberjack_write_bulk_callback,
};
static struct usb_serial_driver * const serial_drivers[] = {
&cyberjack_device, NULL
};
struct cyberjack_private {
spinlock_t lock; /* Lock for SMP */
short rdtodo; /* Bytes still to read */
unsigned char wrbuf[5*64]; /* Buffer for collecting data to write */
short wrfilled; /* Overall data size we already got */
short wrsent; /* Data already sent */
};
static int cyberjack_port_probe(struct usb_serial_port *port)
{
struct cyberjack_private *priv;
int result;
priv = kmalloc(sizeof(struct cyberjack_private), GFP_KERNEL);
if (!priv)
return -ENOMEM;
spin_lock_init(&priv->lock);
priv->rdtodo = 0;
priv->wrfilled = 0;
priv->wrsent = 0;
usb_set_serial_port_data(port, priv);
result = usb_submit_urb(port->interrupt_in_urb, GFP_KERNEL);
if (result)
dev_err(&port->dev, "usb_submit_urb(read int) failed\n");
return 0;
}
static void cyberjack_port_remove(struct usb_serial_port *port)
{
struct cyberjack_private *priv;
usb_kill_urb(port->interrupt_in_urb);
priv = usb_get_serial_port_data(port);
kfree(priv);
}
static int cyberjack_open(struct tty_struct *tty,
struct usb_serial_port *port)
{
struct cyberjack_private *priv;
unsigned long flags;
dev_dbg(&port->dev, "%s - usb_clear_halt\n", __func__);
usb_clear_halt(port->serial->dev, port->write_urb->pipe);
priv = usb_get_serial_port_data(port);
spin_lock_irqsave(&priv->lock, flags);
priv->rdtodo = 0;
priv->wrfilled = 0;
priv->wrsent = 0;
spin_unlock_irqrestore(&priv->lock, flags);
return 0;
}
static void cyberjack_close(struct usb_serial_port *port)
{
usb_kill_urb(port->write_urb);
usb_kill_urb(port->read_urb);
}
static int cyberjack_write(struct tty_struct *tty,
struct usb_serial_port *port, const unsigned char *buf, int count)
{
struct device *dev = &port->dev;
struct cyberjack_private *priv = usb_get_serial_port_data(port);
unsigned long flags;
int result;
int wrexpected;
if (count == 0) {
dev_dbg(dev, "%s - write request of 0 bytes\n", __func__);
return 0;
}
if (!test_and_clear_bit(0, &port->write_urbs_free)) {
dev_dbg(dev, "%s - already writing\n", __func__);
return 0;
}
spin_lock_irqsave(&priv->lock, flags);
if (count + priv->wrfilled > sizeof(priv->wrbuf)) {
/* Too much data for buffer. Reset buffer. */
priv->wrfilled = 0;
spin_unlock_irqrestore(&priv->lock, flags);
set_bit(0, &port->write_urbs_free);
return 0;
}
/* Copy data */
memcpy(priv->wrbuf + priv->wrfilled, buf, count);
usb_serial_debug_data(dev, __func__, count, priv->wrbuf + priv->wrfilled);
priv->wrfilled += count;
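/*
 * Bytes 1 (low) and 2 (high) of the buffered frame encode the payload
 * length; the complete transfer is that length plus a three-byte header,
 * so transmission only starts once a full frame has been collected.
 */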
if (priv->wrfilled >= 3) {
wrexpected = ((int)priv->wrbuf[2]<<8)+priv->wrbuf[1]+3;
dev_dbg(dev, "%s - expected data: %d\n", __func__, wrexpected);
} else
wrexpected = sizeof(priv->wrbuf);
if (priv->wrfilled >= wrexpected) {
/* We have enough data to begin transmission */
int length;
dev_dbg(dev, "%s - transmitting data (frame 1)\n", __func__);
length = (wrexpected > port->bulk_out_size) ?
port->bulk_out_size : wrexpected;
memcpy(port->write_urb->transfer_buffer, priv->wrbuf, length);
priv->wrsent = length;
/* set up our urb */
port->write_urb->transfer_buffer_length = length;
/* send the data out the bulk port */
result = usb_submit_urb(port->write_urb, GFP_ATOMIC);
if (result) {
dev_err(&port->dev,
"%s - failed submitting write urb, error %d\n",
__func__, result);
/* Throw away data. No better idea what to do with it. */
priv->wrfilled = 0;
priv->wrsent = 0;
spin_unlock_irqrestore(&priv->lock, flags);
set_bit(0, &port->write_urbs_free);
return 0;
}
dev_dbg(dev, "%s - priv->wrsent=%d\n", __func__, priv->wrsent);
dev_dbg(dev, "%s - priv->wrfilled=%d\n", __func__, priv->wrfilled);
if (priv->wrsent >= priv->wrfilled) {
dev_dbg(dev, "%s - buffer cleaned\n", __func__);
memset(priv->wrbuf, 0, sizeof(priv->wrbuf));
priv->wrfilled = 0;
priv->wrsent = 0;
}
}
spin_unlock_irqrestore(&priv->lock, flags);
return count;
}
static unsigned int cyberjack_write_room(struct tty_struct *tty)
{
/* FIXME: .... */
return CYBERJACK_LOCAL_BUF_SIZE;
}
static void cyberjack_read_int_callback(struct urb *urb)
{
struct usb_serial_port *port = urb->context;
struct cyberjack_private *priv = usb_get_serial_port_data(port);
struct device *dev = &port->dev;
unsigned char *data = urb->transfer_buffer;
int status = urb->status;
unsigned long flags;
int result;
/* the urb might have been killed. */
if (status)
return;
usb_serial_debug_data(dev, __func__, urb->actual_length, data);
/* React only to interrupts signaling a bulk_in transfer */
if (urb->actual_length == 4 && data[0] == 0x01) {
short old_rdtodo;
/* This is an announcement of coming bulk_ins. */
unsigned short size = ((unsigned short)data[3]<<8)+data[2]+3;
spin_lock_irqsave(&priv->lock, flags);
old_rdtodo = priv->rdtodo;
if (old_rdtodo > SHRT_MAX - size) {
dev_dbg(dev, "Too many bulk_in urbs to do.\n");
spin_unlock_irqrestore(&priv->lock, flags);
goto resubmit;
}
/* "+=" is probably more fault tolerant than "=" */
priv->rdtodo += size;
dev_dbg(dev, "%s - rdtodo: %d\n", __func__, priv->rdtodo);
spin_unlock_irqrestore(&priv->lock, flags);
if (!old_rdtodo) {
result = usb_submit_urb(port->read_urb, GFP_ATOMIC);
if (result)
dev_err(dev, "%s - failed resubmitting read urb, error %d\n",
__func__, result);
dev_dbg(dev, "%s - usb_submit_urb(read urb)\n", __func__);
}
}
resubmit:
result = usb_submit_urb(port->interrupt_in_urb, GFP_ATOMIC);
if (result)
dev_err(&port->dev, "usb_submit_urb(read int) failed\n");
dev_dbg(dev, "%s - usb_submit_urb(int urb)\n", __func__);
}
static void cyberjack_read_bulk_callback(struct urb *urb)
{
struct usb_serial_port *port = urb->context;
struct cyberjack_private *priv = usb_get_serial_port_data(port);
struct device *dev = &port->dev;
unsigned char *data = urb->transfer_buffer;
unsigned long flags;
short todo;
int result;
int status = urb->status;
usb_serial_debug_data(dev, __func__, urb->actual_length, data);
if (status) {
dev_dbg(dev, "%s - nonzero read bulk status received: %d\n",
__func__, status);
return;
}
if (urb->actual_length) {
tty_insert_flip_string(&port->port, data, urb->actual_length);
tty_flip_buffer_push(&port->port);
}
spin_lock_irqsave(&priv->lock, flags);
/* Reduce urbs to do by one. */
priv->rdtodo -= urb->actual_length;
/* Just to be sure */
if (priv->rdtodo < 0)
priv->rdtodo = 0;
todo = priv->rdtodo;
spin_unlock_irqrestore(&priv->lock, flags);
dev_dbg(dev, "%s - rdtodo: %d\n", __func__, todo);
/* Continue to read if we have still urbs to do. */
if (todo /* || (urb->actual_length==port->bulk_in_endpointAddress)*/) {
result = usb_submit_urb(port->read_urb, GFP_ATOMIC);
if (result)
dev_err(dev, "%s - failed resubmitting read urb, error %d\n",
__func__, result);
dev_dbg(dev, "%s - usb_submit_urb(read urb)\n", __func__);
}
}
static void cyberjack_write_bulk_callback(struct urb *urb)
{
struct usb_serial_port *port = urb->context;
struct cyberjack_private *priv = usb_get_serial_port_data(port);
struct device *dev = &port->dev;
int status = urb->status;
unsigned long flags;
bool resubmitted = false;
if (status) {
dev_dbg(dev, "%s - nonzero write bulk status received: %d\n",
__func__, status);
set_bit(0, &port->write_urbs_free);
return;
}
spin_lock_irqsave(&priv->lock, flags);
/* only do something if we have more data to send */
if (priv->wrfilled) {
int length, blksize, result;
dev_dbg(dev, "%s - transmitting data (frame n)\n", __func__);
length = ((priv->wrfilled - priv->wrsent) > port->bulk_out_size) ?
port->bulk_out_size : (priv->wrfilled - priv->wrsent);
memcpy(port->write_urb->transfer_buffer,
priv->wrbuf + priv->wrsent, length);
priv->wrsent += length;
/* set up our urb */
port->write_urb->transfer_buffer_length = length;
/* send the data out the bulk port */
result = usb_submit_urb(port->write_urb, GFP_ATOMIC);
if (result) {
dev_err(dev, "%s - failed submitting write urb, error %d\n",
__func__, result);
/* Throw away data. No better idea what to do with it. */
priv->wrfilled = 0;
priv->wrsent = 0;
goto exit;
}
resubmitted = true;
dev_dbg(dev, "%s - priv->wrsent=%d\n", __func__, priv->wrsent);
dev_dbg(dev, "%s - priv->wrfilled=%d\n", __func__, priv->wrfilled);
blksize = ((int)priv->wrbuf[2]<<8)+priv->wrbuf[1]+3;
if (priv->wrsent >= priv->wrfilled ||
priv->wrsent >= blksize) {
dev_dbg(dev, "%s - buffer cleaned\n", __func__);
memset(priv->wrbuf, 0, sizeof(priv->wrbuf));
priv->wrfilled = 0;
priv->wrsent = 0;
}
}
exit:
spin_unlock_irqrestore(&priv->lock, flags);
if (!resubmitted)
set_bit(0, &port->write_urbs_free);
usb_serial_port_softint(port);
}
module_usb_serial_driver(serial_drivers, id_table);
MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL");
| linux-master | drivers/usb/serial/cyberjack.c |
// SPDX-License-Identifier: GPL-2.0
/*
* usb-serial driver for Quatech USB 2 devices
*
* Copyright (C) 2012 Bill Pemberton ([email protected])
*
* These devices all have only 1 bulk in and 1 bulk out that is shared
* for all serial ports.
*
*/
#include <asm/unaligned.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/tty.h>
#include <linux/tty_driver.h>
#include <linux/tty_flip.h>
#include <linux/module.h>
#include <linux/serial.h>
#include <linux/usb.h>
#include <linux/usb/serial.h>
#include <linux/serial_reg.h>
#include <linux/uaccess.h>
/* default urb timeout for usb operations */
#define QT2_USB_TIMEOUT USB_CTRL_SET_TIMEOUT
#define QT_OPEN_CLOSE_CHANNEL 0xca
#define QT_SET_GET_DEVICE 0xc2
#define QT_SET_GET_REGISTER 0xc0
#define QT_GET_SET_PREBUF_TRIG_LVL 0xcc
#define QT_SET_ATF 0xcd
#define QT_TRANSFER_IN 0xc0
#define QT_HW_FLOW_CONTROL_MASK 0xc5
#define QT_SW_FLOW_CONTROL_MASK 0xc6
#define QT2_BREAK_CONTROL 0xc8
#define QT2_GET_SET_UART 0xc1
#define QT2_FLUSH_DEVICE 0xc4
#define QT2_GET_SET_QMCR 0xe1
#define QT2_QMCR_RS232 0x40
#define QT2_QMCR_RS422 0x10
#define SERIAL_CRTSCTS ((UART_MCR_RTS << 8) | UART_MSR_CTS)
#define SERIAL_EVEN_PARITY (UART_LCR_PARITY | UART_LCR_EPAR)
/* status bytes for the device */
#define QT2_CONTROL_BYTE 0x1b
#define QT2_LINE_STATUS 0x00 /* following 1 byte is line status */
#define QT2_MODEM_STATUS 0x01 /* following 1 byte is modem status */
#define QT2_XMIT_HOLD 0x02 /* following 2 bytes are ?? */
#define QT2_CHANGE_PORT 0x03 /* following 1 byte is port to change to */
#define QT2_REC_FLUSH 0x04 /* no following info */
#define QT2_XMIT_FLUSH 0x05 /* no following info */
#define QT2_CONTROL_ESCAPE 0xff /* pass through previous 2 control bytes */
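/*
 * Example escape sequences as decoded by qt2_process_read_urb() (an
 * illustrative sketch of the assumed on-the-wire layout, not taken from
 * vendor documentation):
 *   0x1b 0x1b 0x00 <lsr> - line status update for the current port
 *   0x1b 0x1b 0x03 <n>   - switch subsequent data to port <n>
 *   0x1b 0x1b 0xff       - literal 0x1b 0x1b data bytes
 */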
#define MAX_BAUD_RATE 921600
#define DEFAULT_BAUD_RATE 9600
#define QT2_READ_BUFFER_SIZE 512 /* size of read buffer */
#define QT2_WRITE_BUFFER_SIZE 512 /* size of write buffer */
#define QT2_WRITE_CONTROL_SIZE 5 /* control bytes used for a write */
#define DRIVER_DESC "Quatech 2nd gen USB to Serial Driver"
#define USB_VENDOR_ID_QUATECH 0x061d
#define QUATECH_SSU2_100 0xC120 /* RS232 single port */
#define QUATECH_DSU2_100 0xC140 /* RS232 dual port */
#define QUATECH_DSU2_400 0xC150 /* RS232/422/485 dual port */
#define QUATECH_QSU2_100 0xC160 /* RS232 four port */
#define QUATECH_QSU2_400 0xC170 /* RS232/422/485 four port */
#define QUATECH_ESU2_100 0xC1A0 /* RS232 eight port */
#define QUATECH_ESU2_400 0xC180 /* RS232/422/485 eight port */
struct qt2_device_detail {
int product_id;
int num_ports;
};
#define QT_DETAILS(prod, ports) \
.product_id = (prod), \
.num_ports = (ports)
static const struct qt2_device_detail qt2_device_details[] = {
{QT_DETAILS(QUATECH_SSU2_100, 1)},
{QT_DETAILS(QUATECH_DSU2_400, 2)},
{QT_DETAILS(QUATECH_DSU2_100, 2)},
{QT_DETAILS(QUATECH_QSU2_400, 4)},
{QT_DETAILS(QUATECH_QSU2_100, 4)},
{QT_DETAILS(QUATECH_ESU2_400, 8)},
{QT_DETAILS(QUATECH_ESU2_100, 8)},
{QT_DETAILS(0, 0)} /* Terminating entry */
};
static const struct usb_device_id id_table[] = {
{USB_DEVICE(USB_VENDOR_ID_QUATECH, QUATECH_SSU2_100)},
{USB_DEVICE(USB_VENDOR_ID_QUATECH, QUATECH_DSU2_100)},
{USB_DEVICE(USB_VENDOR_ID_QUATECH, QUATECH_DSU2_400)},
{USB_DEVICE(USB_VENDOR_ID_QUATECH, QUATECH_QSU2_100)},
{USB_DEVICE(USB_VENDOR_ID_QUATECH, QUATECH_QSU2_400)},
{USB_DEVICE(USB_VENDOR_ID_QUATECH, QUATECH_ESU2_100)},
{USB_DEVICE(USB_VENDOR_ID_QUATECH, QUATECH_ESU2_400)},
{} /* Terminating entry */
};
MODULE_DEVICE_TABLE(usb, id_table);
struct qt2_serial_private {
unsigned char current_port; /* current port for incoming data */
struct urb *read_urb; /* shared among all ports */
char *read_buffer;
};
struct qt2_port_private {
u8 device_port;
spinlock_t urb_lock;
bool urb_in_use;
struct urb *write_urb;
char *write_buffer;
spinlock_t lock;
u8 shadowLSR;
u8 shadowMSR;
struct usb_serial_port *port;
};
static void qt2_update_lsr(struct usb_serial_port *port, unsigned char *ch);
static void qt2_update_msr(struct usb_serial_port *port, unsigned char *ch);
static void qt2_write_bulk_callback(struct urb *urb);
static void qt2_read_bulk_callback(struct urb *urb);
static void qt2_release(struct usb_serial *serial)
{
struct qt2_serial_private *serial_priv;
serial_priv = usb_get_serial_data(serial);
usb_kill_urb(serial_priv->read_urb);
usb_free_urb(serial_priv->read_urb);
kfree(serial_priv->read_buffer);
kfree(serial_priv);
}
static inline int calc_baud_divisor(int baudrate)
{
int divisor, rem;
divisor = MAX_BAUD_RATE / baudrate;
rem = MAX_BAUD_RATE % baudrate;
/* Round to nearest divisor */
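/* e.g. 921600/115200 -> 8, 921600/38400 -> 24; 110 baud keeps the truncated divisor */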
if (((rem * 2) >= baudrate) && (baudrate != 110))
divisor++;
return divisor;
}
static inline int qt2_set_port_config(struct usb_device *dev,
unsigned char port_number,
u16 baudrate, u16 lcr)
{
int divisor = calc_baud_divisor(baudrate);
u16 index = ((u16) (lcr << 8) | (u16) (port_number));
return usb_control_msg(dev, usb_sndctrlpipe(dev, 0),
QT2_GET_SET_UART, 0x40,
divisor, index, NULL, 0, QT2_USB_TIMEOUT);
}
static inline int qt2_control_msg(struct usb_device *dev,
u8 request, u16 data, u16 index)
{
return usb_control_msg(dev, usb_sndctrlpipe(dev, 0),
request, 0x40, data, index,
NULL, 0, QT2_USB_TIMEOUT);
}
static inline int qt2_getregister(struct usb_device *dev,
u8 uart,
u8 reg,
u8 *data)
{
int ret;
ret = usb_control_msg(dev, usb_rcvctrlpipe(dev, 0),
QT_SET_GET_REGISTER, 0xc0, reg,
uart, data, sizeof(*data), QT2_USB_TIMEOUT);
if (ret < (int)sizeof(*data)) {
if (ret >= 0)
ret = -EIO;
}
return ret;
}
static inline int qt2_setregister(struct usb_device *dev,
u8 uart, u8 reg, u16 data)
{
u16 value = (data << 8) | reg;
return usb_control_msg(dev, usb_sndctrlpipe(dev, 0),
QT_SET_GET_REGISTER, 0x40, value, uart,
NULL, 0, QT2_USB_TIMEOUT);
}
static inline int update_mctrl(struct qt2_port_private *port_priv,
unsigned int set, unsigned int clear)
{
struct usb_serial_port *port = port_priv->port;
struct usb_device *dev = port->serial->dev;
unsigned urb_value;
int status;
if (((set | clear) & (TIOCM_DTR | TIOCM_RTS)) == 0) {
dev_dbg(&port->dev,
"update_mctrl - DTR|RTS not being set|cleared\n");
return 0; /* no change */
}
clear &= ~set; /* 'set' takes precedence over 'clear' */
urb_value = 0;
if (set & TIOCM_DTR)
urb_value |= UART_MCR_DTR;
if (set & TIOCM_RTS)
urb_value |= UART_MCR_RTS;
status = qt2_setregister(dev, port_priv->device_port, UART_MCR,
urb_value);
if (status < 0)
dev_err(&port->dev,
"update_mctrl - Error from MODEM_CTRL urb: %i\n",
status);
return status;
}
static int qt2_calc_num_ports(struct usb_serial *serial,
struct usb_serial_endpoints *epds)
{
struct qt2_device_detail d;
int i;
for (i = 0; d = qt2_device_details[i], d.product_id != 0; i++) {
if (d.product_id == le16_to_cpu(serial->dev->descriptor.idProduct))
return d.num_ports;
}
/* we didn't recognize the device */
dev_err(&serial->dev->dev,
"don't know the number of ports, assuming 1\n");
return 1;
}
static void qt2_set_termios(struct tty_struct *tty,
struct usb_serial_port *port,
const struct ktermios *old_termios)
{
struct usb_device *dev = port->serial->dev;
struct qt2_port_private *port_priv;
struct ktermios *termios = &tty->termios;
u16 baud;
unsigned int cflag = termios->c_cflag;
u16 new_lcr = 0;
int status;
port_priv = usb_get_serial_port_data(port);
if (cflag & PARENB) {
if (cflag & PARODD)
new_lcr |= UART_LCR_PARITY;
else
new_lcr |= SERIAL_EVEN_PARITY;
}
new_lcr |= UART_LCR_WLEN(tty_get_char_size(cflag));
baud = tty_get_baud_rate(tty);
if (!baud)
baud = 9600;
status = qt2_set_port_config(dev, port_priv->device_port, baud,
new_lcr);
if (status < 0)
dev_err(&port->dev, "%s - qt2_set_port_config failed: %i\n",
__func__, status);
if (cflag & CRTSCTS)
status = qt2_control_msg(dev, QT_HW_FLOW_CONTROL_MASK,
SERIAL_CRTSCTS,
port_priv->device_port);
else
status = qt2_control_msg(dev, QT_HW_FLOW_CONTROL_MASK,
0, port_priv->device_port);
if (status < 0)
dev_err(&port->dev, "%s - set HW flow control failed: %i\n",
__func__, status);
if (I_IXOFF(tty) || I_IXON(tty)) {
u16 x = ((u16) (START_CHAR(tty) << 8) | (u16) (STOP_CHAR(tty)));
status = qt2_control_msg(dev, QT_SW_FLOW_CONTROL_MASK,
x, port_priv->device_port);
} else
status = qt2_control_msg(dev, QT_SW_FLOW_CONTROL_MASK,
0, port_priv->device_port);
if (status < 0)
dev_err(&port->dev, "%s - set SW flow control failed: %i\n",
__func__, status);
}
static int qt2_open(struct tty_struct *tty, struct usb_serial_port *port)
{
struct usb_serial *serial;
struct qt2_port_private *port_priv;
u8 *data;
u16 device_port;
int status;
unsigned long flags;
device_port = port->port_number;
serial = port->serial;
port_priv = usb_get_serial_port_data(port);
/* set the port to RS232 mode */
status = qt2_control_msg(serial->dev, QT2_GET_SET_QMCR,
QT2_QMCR_RS232, device_port);
if (status < 0) {
dev_err(&port->dev,
"%s failed to set RS232 mode for port %i error %i\n",
__func__, device_port, status);
return status;
}
data = kzalloc(2, GFP_KERNEL);
if (!data)
return -ENOMEM;
/* open the port */
status = usb_control_msg(serial->dev,
usb_rcvctrlpipe(serial->dev, 0),
QT_OPEN_CLOSE_CHANNEL,
0xc0, 0,
device_port, data, 2, QT2_USB_TIMEOUT);
if (status < 2) {
dev_err(&port->dev, "%s - open port failed %i\n", __func__,
status);
if (status >= 0)
status = -EIO;
kfree(data);
return status;
}
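/* The two bytes returned by the open request are the port's initial line and modem status. */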
spin_lock_irqsave(&port_priv->lock, flags);
port_priv->shadowLSR = data[0];
port_priv->shadowMSR = data[1];
spin_unlock_irqrestore(&port_priv->lock, flags);
kfree(data);
/* set to default speed and 8bit word size */
status = qt2_set_port_config(serial->dev, device_port,
DEFAULT_BAUD_RATE, UART_LCR_WLEN8);
if (status < 0) {
dev_err(&port->dev, "%s - initial setup failed (%i)\n",
__func__, device_port);
return status;
}
port_priv->device_port = (u8) device_port;
if (tty)
qt2_set_termios(tty, port, &tty->termios);
return 0;
}
static void qt2_close(struct usb_serial_port *port)
{
struct usb_serial *serial;
struct qt2_port_private *port_priv;
int i;
serial = port->serial;
port_priv = usb_get_serial_port_data(port);
usb_kill_urb(port_priv->write_urb);
/* flush the port transmit buffer */
i = usb_control_msg(serial->dev,
usb_sndctrlpipe(serial->dev, 0),
QT2_FLUSH_DEVICE, 0x40, 1,
port_priv->device_port, NULL, 0, QT2_USB_TIMEOUT);
if (i < 0)
dev_err(&port->dev, "%s - transmit buffer flush failed: %i\n",
__func__, i);
/* flush the port receive buffer */
i = usb_control_msg(serial->dev,
usb_sndctrlpipe(serial->dev, 0),
QT2_FLUSH_DEVICE, 0x40, 0,
port_priv->device_port, NULL, 0, QT2_USB_TIMEOUT);
if (i < 0)
dev_err(&port->dev, "%s - receive buffer flush failed: %i\n",
__func__, i);
/* close the port */
i = usb_control_msg(serial->dev,
usb_sndctrlpipe(serial->dev, 0),
QT_OPEN_CLOSE_CHANNEL,
0x40, 0,
port_priv->device_port, NULL, 0, QT2_USB_TIMEOUT);
if (i < 0)
dev_err(&port->dev, "%s - close port failed %i\n",
__func__, i);
}
static void qt2_disconnect(struct usb_serial *serial)
{
struct qt2_serial_private *serial_priv = usb_get_serial_data(serial);
usb_kill_urb(serial_priv->read_urb);
}
static void qt2_process_status(struct usb_serial_port *port, unsigned char *ch)
{
switch (*ch) {
case QT2_LINE_STATUS:
qt2_update_lsr(port, ch + 1);
break;
case QT2_MODEM_STATUS:
qt2_update_msr(port, ch + 1);
break;
}
}
static void qt2_process_read_urb(struct urb *urb)
{
struct usb_serial *serial;
struct qt2_serial_private *serial_priv;
struct usb_serial_port *port;
bool escapeflag;
unsigned char *ch;
int i;
unsigned char newport;
int len = urb->actual_length;
if (!len)
return;
ch = urb->transfer_buffer;
serial = urb->context;
serial_priv = usb_get_serial_data(serial);
port = serial->port[serial_priv->current_port];
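/*
 * The single bulk-in stream is shared by all ports: scan it for
 * QT2_CONTROL_BYTE escape sequences carrying status updates, port
 * switches and flush notifications, and hand everything else to the
 * currently selected port as received data.
 */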
for (i = 0; i < urb->actual_length; i++) {
ch = (unsigned char *)urb->transfer_buffer + i;
if ((i <= (len - 3)) &&
(*ch == QT2_CONTROL_BYTE) &&
(*(ch + 1) == QT2_CONTROL_BYTE)) {
escapeflag = false;
switch (*(ch + 2)) {
case QT2_LINE_STATUS:
case QT2_MODEM_STATUS:
if (i > (len - 4)) {
dev_warn(&port->dev,
"%s - status message too short\n",
__func__);
break;
}
qt2_process_status(port, ch + 2);
i += 3;
escapeflag = true;
break;
case QT2_XMIT_HOLD:
if (i > (len - 5)) {
dev_warn(&port->dev,
"%s - xmit_empty message too short\n",
__func__);
break;
}
/* bytes_written = (ch[1] << 4) + ch[0]; */
i += 4;
escapeflag = true;
break;
case QT2_CHANGE_PORT:
if (i > (len - 4)) {
dev_warn(&port->dev,
"%s - change_port message too short\n",
__func__);
break;
}
tty_flip_buffer_push(&port->port);
newport = *(ch + 3);
if (newport > serial->num_ports) {
dev_err(&port->dev,
"%s - port change to invalid port: %i\n",
__func__, newport);
break;
}
serial_priv->current_port = newport;
port = serial->port[serial_priv->current_port];
i += 3;
escapeflag = true;
break;
case QT2_REC_FLUSH:
case QT2_XMIT_FLUSH:
i += 2;
escapeflag = true;
break;
case QT2_CONTROL_ESCAPE:
tty_insert_flip_string(&port->port, ch, 2);
i += 2;
escapeflag = true;
break;
default:
dev_warn(&port->dev,
"%s - unsupported command %i\n",
__func__, *(ch + 2));
break;
}
if (escapeflag)
continue;
}
tty_insert_flip_char(&port->port, *ch, TTY_NORMAL);
}
tty_flip_buffer_push(&port->port);
}
static void qt2_write_bulk_callback(struct urb *urb)
{
struct usb_serial_port *port;
struct qt2_port_private *port_priv;
unsigned long flags;
port = urb->context;
port_priv = usb_get_serial_port_data(port);
spin_lock_irqsave(&port_priv->urb_lock, flags);
port_priv->urb_in_use = false;
usb_serial_port_softint(port);
spin_unlock_irqrestore(&port_priv->urb_lock, flags);
}
static void qt2_read_bulk_callback(struct urb *urb)
{
struct usb_serial *serial = urb->context;
int status;
if (urb->status) {
dev_warn(&serial->dev->dev,
"%s - non-zero urb status: %i\n", __func__,
urb->status);
return;
}
qt2_process_read_urb(urb);
status = usb_submit_urb(urb, GFP_ATOMIC);
if (status != 0)
dev_err(&serial->dev->dev,
"%s - resubmit read urb failed: %i\n",
__func__, status);
}
static int qt2_setup_urbs(struct usb_serial *serial)
{
struct usb_serial_port *port0;
struct qt2_serial_private *serial_priv;
int status;
port0 = serial->port[0];
serial_priv = usb_get_serial_data(serial);
serial_priv->read_urb = usb_alloc_urb(0, GFP_KERNEL);
if (!serial_priv->read_urb)
return -ENOMEM;
usb_fill_bulk_urb(serial_priv->read_urb, serial->dev,
usb_rcvbulkpipe(serial->dev,
port0->bulk_in_endpointAddress),
serial_priv->read_buffer,
QT2_READ_BUFFER_SIZE,
qt2_read_bulk_callback, serial);
status = usb_submit_urb(serial_priv->read_urb, GFP_KERNEL);
if (status != 0) {
dev_err(&serial->dev->dev,
"%s - submit read urb failed %i\n", __func__, status);
usb_free_urb(serial_priv->read_urb);
return status;
}
return 0;
}
static int qt2_attach(struct usb_serial *serial)
{
struct qt2_serial_private *serial_priv;
int status;
/* power on unit */
status = usb_control_msg(serial->dev, usb_sndctrlpipe(serial->dev, 0),
0xc2, 0x40, 0x8000, 0, NULL, 0,
QT2_USB_TIMEOUT);
if (status < 0) {
dev_err(&serial->dev->dev,
"%s - failed to power on unit: %i\n", __func__, status);
return status;
}
serial_priv = kzalloc(sizeof(*serial_priv), GFP_KERNEL);
if (!serial_priv)
return -ENOMEM;
serial_priv->read_buffer = kmalloc(QT2_READ_BUFFER_SIZE, GFP_KERNEL);
if (!serial_priv->read_buffer) {
status = -ENOMEM;
goto err_buf;
}
usb_set_serial_data(serial, serial_priv);
status = qt2_setup_urbs(serial);
if (status != 0)
goto attach_failed;
return 0;
attach_failed:
kfree(serial_priv->read_buffer);
err_buf:
kfree(serial_priv);
return status;
}
static int qt2_port_probe(struct usb_serial_port *port)
{
struct usb_serial *serial = port->serial;
struct qt2_port_private *port_priv;
u8 bEndpointAddress;
port_priv = kzalloc(sizeof(*port_priv), GFP_KERNEL);
if (!port_priv)
return -ENOMEM;
spin_lock_init(&port_priv->lock);
spin_lock_init(&port_priv->urb_lock);
port_priv->port = port;
port_priv->write_buffer = kmalloc(QT2_WRITE_BUFFER_SIZE, GFP_KERNEL);
if (!port_priv->write_buffer)
goto err_buf;
port_priv->write_urb = usb_alloc_urb(0, GFP_KERNEL);
if (!port_priv->write_urb)
goto err_urb;
bEndpointAddress = serial->port[0]->bulk_out_endpointAddress;
usb_fill_bulk_urb(port_priv->write_urb, serial->dev,
usb_sndbulkpipe(serial->dev, bEndpointAddress),
port_priv->write_buffer,
QT2_WRITE_BUFFER_SIZE,
qt2_write_bulk_callback, port);
usb_set_serial_port_data(port, port_priv);
return 0;
err_urb:
kfree(port_priv->write_buffer);
err_buf:
kfree(port_priv);
return -ENOMEM;
}
static void qt2_port_remove(struct usb_serial_port *port)
{
struct qt2_port_private *port_priv;
port_priv = usb_get_serial_port_data(port);
usb_free_urb(port_priv->write_urb);
kfree(port_priv->write_buffer);
kfree(port_priv);
}
static int qt2_tiocmget(struct tty_struct *tty)
{
struct usb_serial_port *port = tty->driver_data;
struct usb_device *dev = port->serial->dev;
struct qt2_port_private *port_priv = usb_get_serial_port_data(port);
u8 *d;
int r;
d = kzalloc(2, GFP_KERNEL);
if (!d)
return -ENOMEM;
r = qt2_getregister(dev, port_priv->device_port, UART_MCR, d);
if (r < 0)
goto mget_out;
r = qt2_getregister(dev, port_priv->device_port, UART_MSR, d + 1);
if (r < 0)
goto mget_out;
r = (d[0] & UART_MCR_DTR ? TIOCM_DTR : 0) |
(d[0] & UART_MCR_RTS ? TIOCM_RTS : 0) |
(d[1] & UART_MSR_CTS ? TIOCM_CTS : 0) |
(d[1] & UART_MSR_DCD ? TIOCM_CAR : 0) |
(d[1] & UART_MSR_RI ? TIOCM_RI : 0) |
(d[1] & UART_MSR_DSR ? TIOCM_DSR : 0);
mget_out:
kfree(d);
return r;
}
static int qt2_tiocmset(struct tty_struct *tty,
unsigned int set, unsigned int clear)
{
struct qt2_port_private *port_priv;
port_priv = usb_get_serial_port_data(tty->driver_data);
return update_mctrl(port_priv, set, clear);
}
static int qt2_break_ctl(struct tty_struct *tty, int break_state)
{
struct usb_serial_port *port = tty->driver_data;
struct qt2_port_private *port_priv;
int status;
u16 val;
port_priv = usb_get_serial_port_data(port);
val = (break_state == -1) ? 1 : 0;
status = qt2_control_msg(port->serial->dev, QT2_BREAK_CONTROL,
val, port_priv->device_port);
if (status < 0) {
dev_warn(&port->dev,
"%s - failed to send control message: %i\n", __func__,
status);
return status;
}
return 0;
}
static void qt2_dtr_rts(struct usb_serial_port *port, int on)
{
struct usb_device *dev = port->serial->dev;
struct qt2_port_private *port_priv = usb_get_serial_port_data(port);
/* Disable flow control */
if (!on) {
if (qt2_setregister(dev, port_priv->device_port,
UART_MCR, 0) < 0)
dev_warn(&port->dev, "error from flowcontrol urb\n");
}
/* raise or drop RTS and DTR */
if (on)
update_mctrl(port_priv, TIOCM_DTR | TIOCM_RTS, 0);
else
update_mctrl(port_priv, 0, TIOCM_DTR | TIOCM_RTS);
}
static void qt2_update_msr(struct usb_serial_port *port, unsigned char *ch)
{
struct qt2_port_private *port_priv;
u8 newMSR = (u8) *ch;
unsigned long flags;
/* May be called from qt2_process_read_urb() for an unbound port. */
port_priv = usb_get_serial_port_data(port);
if (!port_priv)
return;
spin_lock_irqsave(&port_priv->lock, flags);
port_priv->shadowMSR = newMSR;
spin_unlock_irqrestore(&port_priv->lock, flags);
if (newMSR & UART_MSR_ANY_DELTA) {
/* update input line counters */
if (newMSR & UART_MSR_DCTS)
port->icount.cts++;
if (newMSR & UART_MSR_DDSR)
port->icount.dsr++;
if (newMSR & UART_MSR_DDCD)
port->icount.dcd++;
if (newMSR & UART_MSR_TERI)
port->icount.rng++;
wake_up_interruptible(&port->port.delta_msr_wait);
}
}
static void qt2_update_lsr(struct usb_serial_port *port, unsigned char *ch)
{
struct qt2_port_private *port_priv;
struct async_icount *icount;
unsigned long flags;
u8 newLSR = (u8) *ch;
/* May be called from qt2_process_read_urb() for an unbound port. */
port_priv = usb_get_serial_port_data(port);
if (!port_priv)
return;
if (newLSR & UART_LSR_BI)
newLSR &= (u8) (UART_LSR_OE | UART_LSR_BI);
spin_lock_irqsave(&port_priv->lock, flags);
port_priv->shadowLSR = newLSR;
spin_unlock_irqrestore(&port_priv->lock, flags);
icount = &port->icount;
if (newLSR & UART_LSR_BRK_ERROR_BITS) {
if (newLSR & UART_LSR_BI)
icount->brk++;
if (newLSR & UART_LSR_OE)
icount->overrun++;
if (newLSR & UART_LSR_PE)
icount->parity++;
if (newLSR & UART_LSR_FE)
icount->frame++;
}
}
static unsigned int qt2_write_room(struct tty_struct *tty)
{
struct usb_serial_port *port = tty->driver_data;
struct qt2_port_private *port_priv;
unsigned long flags;
unsigned int r;
port_priv = usb_get_serial_port_data(port);
spin_lock_irqsave(&port_priv->urb_lock, flags);
if (port_priv->urb_in_use)
r = 0;
else
r = QT2_WRITE_BUFFER_SIZE - QT2_WRITE_CONTROL_SIZE;
spin_unlock_irqrestore(&port_priv->urb_lock, flags);
return r;
}
static int qt2_write(struct tty_struct *tty,
struct usb_serial_port *port,
const unsigned char *buf, int count)
{
struct qt2_port_private *port_priv;
struct urb *write_urb;
unsigned char *data;
unsigned long flags;
int status;
int bytes_out = 0;
port_priv = usb_get_serial_port_data(port);
if (port_priv->write_urb == NULL) {
dev_err(&port->dev, "%s - no output urb\n", __func__);
return 0;
}
write_urb = port_priv->write_urb;
count = min(count, QT2_WRITE_BUFFER_SIZE - QT2_WRITE_CONTROL_SIZE);
data = write_urb->transfer_buffer;
spin_lock_irqsave(&port_priv->urb_lock, flags);
if (port_priv->urb_in_use) {
dev_err(&port->dev, "qt2_write - urb is in use\n");
goto write_out;
}
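/*
 * Prefix the payload with the five-byte write header (QT2_WRITE_CONTROL_SIZE):
 * two QT2_CONTROL_BYTE markers, the device port number and a little-endian
 * 16-bit payload length.
 */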
*data++ = QT2_CONTROL_BYTE;
*data++ = QT2_CONTROL_BYTE;
*data++ = port_priv->device_port;
put_unaligned_le16(count, data);
data += 2;
memcpy(data, buf, count);
write_urb->transfer_buffer_length = count + QT2_WRITE_CONTROL_SIZE;
status = usb_submit_urb(write_urb, GFP_ATOMIC);
if (status == 0) {
port_priv->urb_in_use = true;
bytes_out += count;
}
write_out:
spin_unlock_irqrestore(&port_priv->urb_lock, flags);
return bytes_out;
}
static struct usb_serial_driver qt2_device = {
.driver = {
.owner = THIS_MODULE,
.name = "quatech-serial",
},
.description = DRIVER_DESC,
.id_table = id_table,
.open = qt2_open,
.close = qt2_close,
.write = qt2_write,
.write_room = qt2_write_room,
.calc_num_ports = qt2_calc_num_ports,
.attach = qt2_attach,
.release = qt2_release,
.disconnect = qt2_disconnect,
.port_probe = qt2_port_probe,
.port_remove = qt2_port_remove,
.dtr_rts = qt2_dtr_rts,
.break_ctl = qt2_break_ctl,
.tiocmget = qt2_tiocmget,
.tiocmset = qt2_tiocmset,
.tiocmiwait = usb_serial_generic_tiocmiwait,
.get_icount = usb_serial_generic_get_icount,
.set_termios = qt2_set_termios,
};
static struct usb_serial_driver *const serial_drivers[] = {
&qt2_device, NULL
};
module_usb_serial_driver(serial_drivers, id_table);
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL v2");
| linux-master | drivers/usb/serial/quatech2.c |
// SPDX-License-Identifier: GPL-2.0
/*
* USB Debug cable driver
*
* Copyright (C) 2006 Greg Kroah-Hartman <[email protected]>
*/
#include <linux/gfp.h>
#include <linux/kernel.h>
#include <linux/tty.h>
#include <linux/module.h>
#include <linux/usb.h>
#include <linux/usb/serial.h>
#define USB_DEBUG_MAX_PACKET_SIZE 8
#define USB_DEBUG_BRK_SIZE 8
static const char USB_DEBUG_BRK[USB_DEBUG_BRK_SIZE] = {
0x00,
0xff,
0x01,
0xfe,
0x00,
0xfe,
0x01,
0xff,
};
static const struct usb_device_id id_table[] = {
{ USB_DEVICE(0x0525, 0x127a) },
{ },
};
static const struct usb_device_id dbc_id_table[] = {
{ USB_DEVICE(0x1d6b, 0x0010) },
{ USB_DEVICE(0x1d6b, 0x0011) },
{ },
};
static const struct usb_device_id id_table_combined[] = {
{ USB_DEVICE(0x0525, 0x127a) },
{ USB_DEVICE(0x1d6b, 0x0010) },
{ USB_DEVICE(0x1d6b, 0x0011) },
{ },
};
MODULE_DEVICE_TABLE(usb, id_table_combined);
/* This HW really does not support a serial break, so one will be
* emulated whenever the break state is set to true.
*/
static int usb_debug_break_ctl(struct tty_struct *tty, int break_state)
{
struct usb_serial_port *port = tty->driver_data;
int ret;
if (!break_state)
return 0;
ret = usb_serial_generic_write(tty, port, USB_DEBUG_BRK, USB_DEBUG_BRK_SIZE);
if (ret < 0)
return ret;
return 0;
}
static void usb_debug_process_read_urb(struct urb *urb)
{
struct usb_serial_port *port = urb->context;
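/* A read that exactly matches the break sentinel is treated as a break, not as data. */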
if (urb->actual_length == USB_DEBUG_BRK_SIZE &&
memcmp(urb->transfer_buffer, USB_DEBUG_BRK,
USB_DEBUG_BRK_SIZE) == 0) {
usb_serial_handle_break(port);
return;
}
usb_serial_generic_process_read_urb(urb);
}
static struct usb_serial_driver debug_device = {
.driver = {
.owner = THIS_MODULE,
.name = "debug",
},
.id_table = id_table,
.num_ports = 1,
.bulk_out_size = USB_DEBUG_MAX_PACKET_SIZE,
.break_ctl = usb_debug_break_ctl,
.process_read_urb = usb_debug_process_read_urb,
};
static struct usb_serial_driver dbc_device = {
.driver = {
.owner = THIS_MODULE,
.name = "xhci_dbc",
},
.id_table = dbc_id_table,
.num_ports = 1,
.break_ctl = usb_debug_break_ctl,
.process_read_urb = usb_debug_process_read_urb,
};
static struct usb_serial_driver * const serial_drivers[] = {
&debug_device, &dbc_device, NULL
};
module_usb_serial_driver(serial_drivers, id_table_combined);
MODULE_LICENSE("GPL v2");
| linux-master | drivers/usb/serial/usb_debug.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* USB FTDI SIO driver
*
* Copyright (C) 2009 - 2013
* Johan Hovold ([email protected])
* Copyright (C) 1999 - 2001
* Greg Kroah-Hartman ([email protected])
* Bill Ryder ([email protected])
* Copyright (C) 2002
* Kuba Ober ([email protected])
*
* See Documentation/usb/usb-serial.rst for more information on using this
* driver
*
* See http://ftdi-usb-sio.sourceforge.net for up to date testing info
* and extra documentation
*
* Change entries from 2004 and earlier can be found in versions of this
* file in kernel versions prior to the 2.6.24 release.
*
*/
/* Bill Ryder - [email protected] - wrote the FTDI_SIO implementation */
/* Thanx to FTDI for so kindly providing details of the protocol required */
/* to talk to the device */
/* Thanx to gkh and the rest of the usb dev group for all code I have
assimilated :-) */
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/tty.h>
#include <linux/tty_driver.h>
#include <linux/tty_flip.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/mutex.h>
#include <linux/uaccess.h>
#include <linux/usb.h>
#include <linux/serial.h>
#include <linux/gpio/driver.h>
#include <linux/usb/serial.h>
#include "ftdi_sio.h"
#include "ftdi_sio_ids.h"
#define DRIVER_AUTHOR "Greg Kroah-Hartman <[email protected]>, Bill Ryder <[email protected]>, Kuba Ober <[email protected]>, Andreas Mohr, Johan Hovold <[email protected]>"
#define DRIVER_DESC "USB FTDI Serial Converters Driver"
enum ftdi_chip_type {
SIO,
FT232A,
FT232B,
FT2232C,
FT232R,
FT232H,
FT2232H,
FT4232H,
FT4232HA,
FT232HP,
FT233HP,
FT2232HP,
FT2233HP,
FT4232HP,
FT4233HP,
FTX,
};
struct ftdi_private {
enum ftdi_chip_type chip_type;
int baud_base; /* baud base clock for divisor setting */
int custom_divisor; /* custom_divisor kludge, this is for
baud_base (different from what goes to the
chip!) */
u16 last_set_data_value; /* the last data state set - needed for doing
* a break
*/
int flags; /* some ASYNC_xxxx flags are supported */
unsigned long last_dtr_rts; /* saved modem control outputs */
char prev_status; /* Used for TIOCMIWAIT */
char transmit_empty; /* If transmitter is empty or not */
u16 channel; /* channel index, or 0 for legacy types */
speed_t force_baud; /* if non-zero, force the baud rate to
this value */
int force_rtscts; /* if non-zero, force RTS-CTS to always
be enabled */
unsigned int latency; /* latency setting in use */
unsigned short max_packet_size;
struct mutex cfg_lock; /* Avoid mess by parallel calls of config ioctl() and change_speed() */
#ifdef CONFIG_GPIOLIB
struct gpio_chip gc;
struct mutex gpio_lock; /* protects GPIO state */
bool gpio_registered; /* is the gpiochip in kernel registered */
bool gpio_used; /* true if the user requested a gpio */
u8 gpio_altfunc; /* which pins are in gpio mode */
u8 gpio_output; /* pin directions cache */
u8 gpio_value; /* pin value for outputs */
#endif
};
struct ftdi_quirk {
int (*probe)(struct usb_serial *);
/* Special settings for probed ports. */
void (*port_probe)(struct ftdi_private *);
};
static int ftdi_jtag_probe(struct usb_serial *serial);
static int ftdi_NDI_device_setup(struct usb_serial *serial);
static int ftdi_stmclite_probe(struct usb_serial *serial);
static int ftdi_8u2232c_probe(struct usb_serial *serial);
static void ftdi_USB_UIRT_setup(struct ftdi_private *priv);
static void ftdi_HE_TIRA1_setup(struct ftdi_private *priv);
static const struct ftdi_quirk ftdi_jtag_quirk = {
.probe = ftdi_jtag_probe,
};
static const struct ftdi_quirk ftdi_NDI_device_quirk = {
.probe = ftdi_NDI_device_setup,
};
static const struct ftdi_quirk ftdi_USB_UIRT_quirk = {
.port_probe = ftdi_USB_UIRT_setup,
};
static const struct ftdi_quirk ftdi_HE_TIRA1_quirk = {
.port_probe = ftdi_HE_TIRA1_setup,
};
static const struct ftdi_quirk ftdi_stmclite_quirk = {
.probe = ftdi_stmclite_probe,
};
static const struct ftdi_quirk ftdi_8u2232c_quirk = {
.probe = ftdi_8u2232c_probe,
};
/*
* The 8U232AM has the same API as the sio except for:
* - it can support MUCH higher baud rates; up to:
*   o 921600 for RS232 and 2000000 for RS422/485 at 48MHz
*   o 230400 at 12MHz
*   so the 8U232AM's baud rate setting codes are different
* - it has a two-byte status code.
* - it returns characters every 16ms (the FTDI SIO does it every 40ms)
*
* the bcdDevice value is used to differentiate FT232BM and FT245BM from
* the earlier FT8U232AM and FT8U232BM. For now, include all known VID/PID
* combinations in both tables.
* FIXME: perhaps bcdDevice can also identify 12MHz FT8U232AM devices,
* but I don't know if those ever went into mass production. [Ian Abbott]
*/
/*
* Device ID not listed? Test it using
* /sys/bus/usb-serial/drivers/ftdi_sio/new_id and send a patch or report.
*/
static const struct usb_device_id id_table_combined[] = {
{ USB_DEVICE(FTDI_VID, FTDI_BRICK_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_ZEITCONTROL_TAGTRACE_MIFARE_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_CTI_MINI_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_CTI_NANO_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_AMC232_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_CANUSB_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_CANDAPTER_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_BM_ATOM_NANO_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_NXTCAM_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_EV3CON_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_SCS_DEVICE_0_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_SCS_DEVICE_1_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_SCS_DEVICE_2_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_SCS_DEVICE_3_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_SCS_DEVICE_4_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_SCS_DEVICE_5_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_SCS_DEVICE_6_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_SCS_DEVICE_7_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_USINT_CAT_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_USINT_WKEY_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_USINT_RS232_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_ACTZWAVE_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_IRTRANS_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_IPLUS_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_IPLUS2_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_DMX4ALL) },
{ USB_DEVICE(FTDI_VID, FTDI_SIO_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_8U232AM_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_8U232AM_ALT_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_232RL_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_8U2232C_PID) ,
.driver_info = (kernel_ulong_t)&ftdi_8u2232c_quirk },
{ USB_DEVICE(FTDI_VID, FTDI_4232H_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_232H_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_FTX_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_FT2233HP_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_FT4233HP_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_FT2232HP_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_FT4232HP_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_FT233HP_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_FT232HP_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_FT4232HA_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_MICRO_CHAMELEON_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_RELAIS_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_OPENDCC_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_OPENDCC_SNIFFER_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_OPENDCC_THROTTLE_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_OPENDCC_GATEWAY_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_OPENDCC_GBM_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_OPENDCC_GBM_BOOST_PID) },
{ USB_DEVICE(NEWPORT_VID, NEWPORT_AGILIS_PID) },
{ USB_DEVICE(NEWPORT_VID, NEWPORT_CONEX_CC_PID) },
{ USB_DEVICE(NEWPORT_VID, NEWPORT_CONEX_AGP_PID) },
{ USB_DEVICE(INTERBIOMETRICS_VID, INTERBIOMETRICS_IOBOARD_PID) },
{ USB_DEVICE(INTERBIOMETRICS_VID, INTERBIOMETRICS_MINI_IOBOARD_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_SPROG_II) },
{ USB_DEVICE(FTDI_VID, FTDI_TAGSYS_LP101_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_TAGSYS_P200X_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_LENZ_LIUSB_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_XF_632_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_XF_634_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_XF_547_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_XF_633_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_XF_631_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_XF_635_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_XF_640_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_XF_642_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_DSS20_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_URBAN_0_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_URBAN_1_PID) },
{ USB_DEVICE(FTDI_NF_RIC_VID, FTDI_NF_RIC_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_VNHCPCUSB_D_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_MTXORB_0_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_MTXORB_1_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_MTXORB_2_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_MTXORB_3_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_MTXORB_4_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_MTXORB_5_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_MTXORB_6_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_R2000KU_TRUE_RNG) },
{ USB_DEVICE(FTDI_VID, FTDI_VARDAAN_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_AUTO_M3_OP_COM_V2_PID) },
{ USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_0100_PID) },
{ USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_0101_PID) },
{ USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_0102_PID) },
{ USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_0103_PID) },
{ USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_0104_PID) },
{ USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_0105_PID) },
{ USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_0106_PID) },
{ USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_0107_PID) },
{ USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_0108_PID) },
{ USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_0109_PID) },
{ USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_010A_PID) },
{ USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_010B_PID) },
{ USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_010C_PID) },
{ USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_010D_PID) },
{ USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_010E_PID) },
{ USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_010F_PID) },
{ USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_0110_PID) },
{ USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_0111_PID) },
{ USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_0112_PID) },
{ USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_0113_PID) },
{ USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_0114_PID) },
{ USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_0115_PID) },
{ USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_0116_PID) },
{ USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_0117_PID) },
{ USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_0118_PID) },
{ USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_0119_PID) },
{ USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_011A_PID) },
{ USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_011B_PID) },
{ USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_011C_PID) },
{ USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_011D_PID) },
{ USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_011E_PID) },
{ USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_011F_PID) },
{ USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_0120_PID) },
{ USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_0121_PID) },
{ USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_0122_PID) },
{ USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_0123_PID) },
{ USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_0124_PID) },
{ USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_0125_PID) },
{ USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_0126_PID) },
{ USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_0127_PID) },
{ USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_0128_PID) },
{ USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_0129_PID) },
{ USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_012A_PID) },
{ USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_012B_PID) },
{ USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_012C_PID) },
{ USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_012D_PID) },
{ USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_012E_PID) },
{ USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_012F_PID) },
{ USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_0130_PID) },
{ USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_0131_PID) },
{ USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_0132_PID) },
{ USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_0133_PID) },
{ USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_0134_PID) },
{ USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_0135_PID) },
{ USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_0136_PID) },
{ USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_0137_PID) },
{ USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_0138_PID) },
{ USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_0139_PID) },
{ USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_013A_PID) },
{ USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_013B_PID) },
{ USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_013C_PID) },
{ USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_013D_PID) },
{ USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_013E_PID) },
{ USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_013F_PID) },
{ USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_0140_PID) },
{ USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_0141_PID) },
{ USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_0142_PID) },
{ USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_0143_PID) },
{ USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_0144_PID) },
{ USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_0145_PID) },
{ USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_0146_PID) },
{ USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_0147_PID) },
{ USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_0148_PID) },
{ USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_0149_PID) },
{ USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_014A_PID) },
{ USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_014B_PID) },
{ USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_014C_PID) },
{ USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_014D_PID) },
{ USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_014E_PID) },
{ USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_014F_PID) },
{ USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_0150_PID) },
{ USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_0151_PID) },
{ USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_0152_PID) },
{ USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_0153_PID) },
{ USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_0154_PID) },
{ USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_0155_PID) },
{ USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_0156_PID) },
{ USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_0157_PID) },
{ USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_0158_PID) },
{ USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_0159_PID) },
{ USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_015A_PID) },
{ USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_015B_PID) },
{ USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_015C_PID) },
{ USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_015D_PID) },
{ USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_015E_PID) },
{ USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_015F_PID) },
{ USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_0160_PID) },
{ USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_0161_PID) },
{ USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_0162_PID) },
{ USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_0163_PID) },
{ USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_0164_PID) },
{ USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_0165_PID) },
{ USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_0166_PID) },
{ USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_0167_PID) },
{ USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_0168_PID) },
{ USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_0169_PID) },
{ USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_016A_PID) },
{ USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_016B_PID) },
{ USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_016C_PID) },
{ USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_016D_PID) },
{ USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_016E_PID) },
{ USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_016F_PID) },
{ USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_0170_PID) },
{ USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_0171_PID) },
{ USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_0172_PID) },
{ USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_0173_PID) },
{ USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_0174_PID) },
{ USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_0175_PID) },
{ USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_0176_PID) },
{ USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_0177_PID) },
{ USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_0178_PID) },
{ USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_0179_PID) },
{ USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_017A_PID) },
{ USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_017B_PID) },
{ USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_017C_PID) },
{ USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_017D_PID) },
{ USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_017E_PID) },
{ USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_017F_PID) },
{ USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_0180_PID) },
{ USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_0181_PID) },
{ USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_0182_PID) },
{ USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_0183_PID) },
{ USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_0184_PID) },
{ USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_0185_PID) },
{ USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_0186_PID) },
{ USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_0187_PID) },
{ USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_0188_PID) },
{ USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_0189_PID) },
{ USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_018A_PID) },
{ USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_018B_PID) },
{ USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_018C_PID) },
{ USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_018D_PID) },
{ USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_018E_PID) },
{ USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_018F_PID) },
{ USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_0190_PID) },
{ USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_0191_PID) },
{ USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_0192_PID) },
{ USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_0193_PID) },
{ USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_0194_PID) },
{ USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_0195_PID) },
{ USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_0196_PID) },
{ USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_0197_PID) },
{ USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_0198_PID) },
{ USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_0199_PID) },
{ USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_019A_PID) },
{ USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_019B_PID) },
{ USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_019C_PID) },
{ USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_019D_PID) },
{ USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_019E_PID) },
{ USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_019F_PID) },
{ USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_01A0_PID) },
{ USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_01A1_PID) },
{ USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_01A2_PID) },
{ USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_01A3_PID) },
{ USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_01A4_PID) },
{ USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_01A5_PID) },
{ USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_01A6_PID) },
{ USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_01A7_PID) },
{ USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_01A8_PID) },
{ USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_01A9_PID) },
{ USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_01AA_PID) },
{ USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_01AB_PID) },
{ USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_01AC_PID) },
{ USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_01AD_PID) },
{ USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_01AE_PID) },
{ USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_01AF_PID) },
{ USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_01B0_PID) },
{ USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_01B1_PID) },
{ USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_01B2_PID) },
{ USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_01B3_PID) },
{ USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_01B4_PID) },
{ USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_01B5_PID) },
{ USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_01B6_PID) },
{ USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_01B7_PID) },
{ USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_01B8_PID) },
{ USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_01B9_PID) },
{ USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_01BA_PID) },
{ USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_01BB_PID) },
{ USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_01BC_PID) },
{ USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_01BD_PID) },
{ USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_01BE_PID) },
{ USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_01BF_PID) },
{ USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_01C0_PID) },
{ USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_01C1_PID) },
{ USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_01C2_PID) },
{ USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_01C3_PID) },
{ USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_01C4_PID) },
{ USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_01C5_PID) },
{ USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_01C6_PID) },
{ USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_01C7_PID) },
{ USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_01C8_PID) },
{ USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_01C9_PID) },
{ USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_01CA_PID) },
{ USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_01CB_PID) },
{ USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_01CC_PID) },
{ USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_01CD_PID) },
{ USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_01CE_PID) },
{ USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_01CF_PID) },
{ USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_01D0_PID) },
{ USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_01D1_PID) },
{ USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_01D2_PID) },
{ USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_01D3_PID) },
{ USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_01D4_PID) },
{ USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_01D5_PID) },
{ USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_01D6_PID) },
{ USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_01D7_PID) },
{ USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_01D8_PID) },
{ USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_01D9_PID) },
{ USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_01DA_PID) },
{ USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_01DB_PID) },
{ USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_01DC_PID) },
{ USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_01DD_PID) },
{ USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_01DE_PID) },
{ USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_01DF_PID) },
{ USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_01E0_PID) },
{ USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_01E1_PID) },
{ USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_01E2_PID) },
{ USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_01E3_PID) },
{ USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_01E4_PID) },
{ USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_01E5_PID) },
{ USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_01E6_PID) },
{ USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_01E7_PID) },
{ USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_01E8_PID) },
{ USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_01E9_PID) },
{ USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_01EA_PID) },
{ USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_01EB_PID) },
{ USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_01EC_PID) },
{ USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_01ED_PID) },
{ USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_01EE_PID) },
{ USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_01EF_PID) },
{ USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_01F0_PID) },
{ USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_01F1_PID) },
{ USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_01F2_PID) },
{ USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_01F3_PID) },
{ USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_01F4_PID) },
{ USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_01F5_PID) },
{ USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_01F6_PID) },
{ USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_01F7_PID) },
{ USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_01F8_PID) },
{ USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_01F9_PID) },
{ USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_01FA_PID) },
{ USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_01FB_PID) },
{ USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_01FC_PID) },
{ USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_01FD_PID) },
{ USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_01FE_PID) },
{ USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_01FF_PID) },
{ USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_4701_PID) },
{ USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_9300_PID) },
{ USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_9301_PID) },
{ USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_9302_PID) },
{ USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_9303_PID) },
{ USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_9304_PID) },
{ USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_9305_PID) },
{ USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_9306_PID) },
{ USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_9307_PID) },
{ USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_9308_PID) },
{ USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_9309_PID) },
{ USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_930A_PID) },
{ USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_930B_PID) },
{ USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_930C_PID) },
{ USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_930D_PID) },
{ USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_930E_PID) },
{ USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_930F_PID) },
{ USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_9310_PID) },
{ USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_9311_PID) },
{ USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_9312_PID) },
{ USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_9313_PID) },
{ USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_9314_PID) },
{ USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_9315_PID) },
{ USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_9316_PID) },
{ USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_9317_PID) },
{ USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_9318_PID) },
{ USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_9319_PID) },
{ USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_931A_PID) },
{ USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_931B_PID) },
{ USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_931C_PID) },
{ USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_931D_PID) },
{ USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_931E_PID) },
{ USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_931F_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_PERLE_ULTRAPORT_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_PIEGROUP_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_TNC_X_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_USBX_707_PID) },
{ USB_DEVICE(SEALEVEL_VID, SEALEVEL_2101_PID) },
{ USB_DEVICE(SEALEVEL_VID, SEALEVEL_2102_PID) },
{ USB_DEVICE(SEALEVEL_VID, SEALEVEL_2103_PID) },
{ USB_DEVICE(SEALEVEL_VID, SEALEVEL_2104_PID) },
{ USB_DEVICE(SEALEVEL_VID, SEALEVEL_2106_PID) },
{ USB_DEVICE(SEALEVEL_VID, SEALEVEL_2201_1_PID) },
{ USB_DEVICE(SEALEVEL_VID, SEALEVEL_2201_2_PID) },
{ USB_DEVICE(SEALEVEL_VID, SEALEVEL_2202_1_PID) },
{ USB_DEVICE(SEALEVEL_VID, SEALEVEL_2202_2_PID) },
{ USB_DEVICE(SEALEVEL_VID, SEALEVEL_2203_1_PID) },
{ USB_DEVICE(SEALEVEL_VID, SEALEVEL_2203_2_PID) },
{ USB_DEVICE(SEALEVEL_VID, SEALEVEL_2401_1_PID) },
{ USB_DEVICE(SEALEVEL_VID, SEALEVEL_2401_2_PID) },
{ USB_DEVICE(SEALEVEL_VID, SEALEVEL_2401_3_PID) },
{ USB_DEVICE(SEALEVEL_VID, SEALEVEL_2401_4_PID) },
{ USB_DEVICE(SEALEVEL_VID, SEALEVEL_2402_1_PID) },
{ USB_DEVICE(SEALEVEL_VID, SEALEVEL_2402_2_PID) },
{ USB_DEVICE(SEALEVEL_VID, SEALEVEL_2402_3_PID) },
{ USB_DEVICE(SEALEVEL_VID, SEALEVEL_2402_4_PID) },
{ USB_DEVICE(SEALEVEL_VID, SEALEVEL_2403_1_PID) },
{ USB_DEVICE(SEALEVEL_VID, SEALEVEL_2403_2_PID) },
{ USB_DEVICE(SEALEVEL_VID, SEALEVEL_2403_3_PID) },
{ USB_DEVICE(SEALEVEL_VID, SEALEVEL_2403_4_PID) },
{ USB_DEVICE(SEALEVEL_VID, SEALEVEL_2801_1_PID) },
{ USB_DEVICE(SEALEVEL_VID, SEALEVEL_2801_2_PID) },
{ USB_DEVICE(SEALEVEL_VID, SEALEVEL_2801_3_PID) },
{ USB_DEVICE(SEALEVEL_VID, SEALEVEL_2801_4_PID) },
{ USB_DEVICE(SEALEVEL_VID, SEALEVEL_2801_5_PID) },
{ USB_DEVICE(SEALEVEL_VID, SEALEVEL_2801_6_PID) },
{ USB_DEVICE(SEALEVEL_VID, SEALEVEL_2801_7_PID) },
{ USB_DEVICE(SEALEVEL_VID, SEALEVEL_2801_8_PID) },
{ USB_DEVICE(SEALEVEL_VID, SEALEVEL_2802_1_PID) },
{ USB_DEVICE(SEALEVEL_VID, SEALEVEL_2802_2_PID) },
{ USB_DEVICE(SEALEVEL_VID, SEALEVEL_2802_3_PID) },
{ USB_DEVICE(SEALEVEL_VID, SEALEVEL_2802_4_PID) },
{ USB_DEVICE(SEALEVEL_VID, SEALEVEL_2802_5_PID) },
{ USB_DEVICE(SEALEVEL_VID, SEALEVEL_2802_6_PID) },
{ USB_DEVICE(SEALEVEL_VID, SEALEVEL_2802_7_PID) },
{ USB_DEVICE(SEALEVEL_VID, SEALEVEL_2802_8_PID) },
{ USB_DEVICE(SEALEVEL_VID, SEALEVEL_2803_1_PID) },
{ USB_DEVICE(SEALEVEL_VID, SEALEVEL_2803_2_PID) },
{ USB_DEVICE(SEALEVEL_VID, SEALEVEL_2803_3_PID) },
{ USB_DEVICE(SEALEVEL_VID, SEALEVEL_2803_4_PID) },
{ USB_DEVICE(SEALEVEL_VID, SEALEVEL_2803_5_PID) },
{ USB_DEVICE(SEALEVEL_VID, SEALEVEL_2803_6_PID) },
{ USB_DEVICE(SEALEVEL_VID, SEALEVEL_2803_7_PID) },
{ USB_DEVICE(SEALEVEL_VID, SEALEVEL_2803_8_PID) },
{ USB_DEVICE(SEALEVEL_VID, SEALEVEL_2803R_1_PID) },
{ USB_DEVICE(SEALEVEL_VID, SEALEVEL_2803R_2_PID) },
{ USB_DEVICE(SEALEVEL_VID, SEALEVEL_2803R_3_PID) },
{ USB_DEVICE(SEALEVEL_VID, SEALEVEL_2803R_4_PID) },
{ USB_DEVICE(IDTECH_VID, IDTECH_IDT1221U_PID) },
{ USB_DEVICE(OCT_VID, OCT_US101_PID) },
{ USB_DEVICE(OCT_VID, OCT_DK201_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_HE_TIRA1_PID),
.driver_info = (kernel_ulong_t)&ftdi_HE_TIRA1_quirk },
{ USB_DEVICE(FTDI_VID, FTDI_USB_UIRT_PID),
.driver_info = (kernel_ulong_t)&ftdi_USB_UIRT_quirk },
{ USB_DEVICE(FTDI_VID, PROTEGO_SPECIAL_1) },
{ USB_DEVICE(FTDI_VID, PROTEGO_R2X0) },
{ USB_DEVICE(FTDI_VID, PROTEGO_SPECIAL_3) },
{ USB_DEVICE(FTDI_VID, PROTEGO_SPECIAL_4) },
{ USB_DEVICE(FTDI_VID, FTDI_GUDEADS_E808_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_GUDEADS_E809_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_GUDEADS_E80A_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_GUDEADS_E80B_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_GUDEADS_E80C_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_GUDEADS_E80D_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_GUDEADS_E80E_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_GUDEADS_E80F_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_GUDEADS_E888_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_GUDEADS_E889_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_GUDEADS_E88A_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_GUDEADS_E88B_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_GUDEADS_E88C_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_GUDEADS_E88D_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_GUDEADS_E88E_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_GUDEADS_E88F_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_ELV_UO100_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_ELV_UM100_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_ELV_UR100_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_ELV_ALC8500_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_PYRAMID_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_ELV_FHZ1000PC_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_IBS_US485_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_IBS_PICPRO_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_IBS_PCMCIA_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_IBS_PK1_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_IBS_RS232MON_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_IBS_APP70_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_IBS_PEDO_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_IBS_PROD_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_TAVIR_STK500_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_TIAO_UMPA_PID),
.driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
{ USB_DEVICE(FTDI_VID, FTDI_NT_ORIONLXM_PID),
.driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
{ USB_DEVICE(FTDI_VID, FTDI_NT_ORIONLX_PLUS_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_NT_ORION_IO_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_NT_ORIONMX_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_SYNAPSE_SS200_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_CUSTOMWARE_MINIPLEX_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_CUSTOMWARE_MINIPLEX2_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_CUSTOMWARE_MINIPLEX2WI_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_CUSTOMWARE_MINIPLEX3_PID) },
/*
* ELV devices:
*/
{ USB_DEVICE(FTDI_ELV_VID, FTDI_ELV_WS300_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_ELV_USR_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_ELV_MSM1_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_ELV_KL100_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_ELV_WS550_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_ELV_EC3000_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_ELV_WS888_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_ELV_TWS550_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_ELV_FEM_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_ELV_CLI7000_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_ELV_PPS7330_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_ELV_TFM100_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_ELV_UDF77_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_ELV_UIO88_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_ELV_UAD8_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_ELV_UDA7_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_ELV_USI2_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_ELV_T1100_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_ELV_PCD200_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_ELV_ULA200_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_ELV_CSI8_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_ELV_EM1000DL_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_ELV_PCK100_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_ELV_RFP500_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_ELV_FS20SIG_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_ELV_UTP8_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_ELV_WS300PC_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_ELV_WS444PC_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_ELV_FHZ1300PC_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_ELV_EM1010PC_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_ELV_WS500_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_ELV_HS485_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_ELV_UMS100_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_ELV_TFD128_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_ELV_FM3RX_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_ELV_WS777_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_PALMSENS_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_IVIUM_XSTAT_PID) },
{ USB_DEVICE(FTDI_VID, LINX_SDMUSBQSS_PID) },
{ USB_DEVICE(FTDI_VID, LINX_MASTERDEVEL2_PID) },
{ USB_DEVICE(FTDI_VID, LINX_FUTURE_0_PID) },
{ USB_DEVICE(FTDI_VID, LINX_FUTURE_1_PID) },
{ USB_DEVICE(FTDI_VID, LINX_FUTURE_2_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_CCSICDU20_0_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_CCSICDU40_1_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_CCSMACHX_2_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_CCSLOAD_N_GO_3_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_CCSICDU64_4_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_CCSPRIME8_5_PID) },
{ USB_DEVICE(FTDI_VID, INSIDE_ACCESSO) },
{ USB_DEVICE(INTREPID_VID, INTREPID_VALUECAN_PID) },
{ USB_DEVICE(INTREPID_VID, INTREPID_NEOVI_PID) },
{ USB_DEVICE(FALCOM_VID, FALCOM_TWIST_PID) },
{ USB_DEVICE(FALCOM_VID, FALCOM_SAMBA_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_SUUNTO_SPORTS_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_OCEANIC_PID) },
{ USB_DEVICE(TTI_VID, TTI_QL355P_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_RM_CANVIEW_PID) },
{ USB_DEVICE(ACTON_VID, ACTON_SPECTRAPRO_PID) },
{ USB_DEVICE(CONTEC_VID, CONTEC_COM1USBH_PID) },
{ USB_DEVICE(MITSUBISHI_VID, MITSUBISHI_FXUSB_PID) },
{ USB_DEVICE(BANDB_VID, BANDB_USOTL4_PID) },
{ USB_DEVICE(BANDB_VID, BANDB_USTL4_PID) },
{ USB_DEVICE(BANDB_VID, BANDB_USO9ML2_PID) },
{ USB_DEVICE(BANDB_VID, BANDB_USOPTL4_PID) },
{ USB_DEVICE(BANDB_VID, BANDB_USPTL4_PID) },
{ USB_DEVICE(BANDB_VID, BANDB_USO9ML2DR_2_PID) },
{ USB_DEVICE(BANDB_VID, BANDB_USO9ML2DR_PID) },
{ USB_DEVICE(BANDB_VID, BANDB_USOPTL4DR2_PID) },
{ USB_DEVICE(BANDB_VID, BANDB_USOPTL4DR_PID) },
{ USB_DEVICE(BANDB_VID, BANDB_485USB9F_2W_PID) },
{ USB_DEVICE(BANDB_VID, BANDB_485USB9F_4W_PID) },
{ USB_DEVICE(BANDB_VID, BANDB_232USB9M_PID) },
{ USB_DEVICE(BANDB_VID, BANDB_485USBTB_2W_PID) },
{ USB_DEVICE(BANDB_VID, BANDB_485USBTB_4W_PID) },
{ USB_DEVICE(BANDB_VID, BANDB_TTL5USB9M_PID) },
{ USB_DEVICE(BANDB_VID, BANDB_TTL3USB9M_PID) },
{ USB_DEVICE(BANDB_VID, BANDB_ZZ_PROG1_USB_PID) },
{ USB_DEVICE(FTDI_VID, EVER_ECO_PRO_CDS) },
{ USB_DEVICE(FTDI_VID, FTDI_4N_GALAXY_DE_1_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_4N_GALAXY_DE_2_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_4N_GALAXY_DE_3_PID) },
{ USB_DEVICE(FTDI_VID, XSENS_CONVERTER_0_PID) },
{ USB_DEVICE(FTDI_VID, XSENS_CONVERTER_1_PID) },
{ USB_DEVICE(FTDI_VID, XSENS_CONVERTER_2_PID) },
{ USB_DEVICE(FTDI_VID, XSENS_CONVERTER_3_PID) },
{ USB_DEVICE(FTDI_VID, XSENS_CONVERTER_4_PID) },
{ USB_DEVICE(FTDI_VID, XSENS_CONVERTER_5_PID) },
{ USB_DEVICE(FTDI_VID, XSENS_CONVERTER_6_PID) },
{ USB_DEVICE(FTDI_VID, XSENS_CONVERTER_7_PID) },
{ USB_DEVICE(XSENS_VID, XSENS_AWINDA_DONGLE_PID) },
{ USB_DEVICE(XSENS_VID, XSENS_AWINDA_STATION_PID) },
{ USB_DEVICE(XSENS_VID, XSENS_CONVERTER_PID) },
{ USB_DEVICE(XSENS_VID, XSENS_MTDEVBOARD_PID) },
{ USB_DEVICE(XSENS_VID, XSENS_MTIUSBCONVERTER_PID) },
{ USB_DEVICE(XSENS_VID, XSENS_MTW_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_OMNI1509) },
{ USB_DEVICE(MOBILITY_VID, MOBILITY_USB_SERIAL_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_ACTIVE_ROBOTS_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_MHAM_KW_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_MHAM_YS_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_MHAM_Y6_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_MHAM_Y8_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_MHAM_IC_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_MHAM_DB9_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_MHAM_RS232_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_MHAM_Y9_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_TERATRONIK_VCP_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_TERATRONIK_D2XX_PID) },
{ USB_DEVICE(EVOLUTION_VID, EVOLUTION_ER1_PID) },
{ USB_DEVICE(EVOLUTION_VID, EVO_HYBRID_PID) },
{ USB_DEVICE(EVOLUTION_VID, EVO_RCM4_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_ARTEMIS_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_ATIK_ATK16_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_ATIK_ATK16C_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_ATIK_ATK16HR_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_ATIK_ATK16HRC_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_ATIK_ATK16IC_PID) },
{ USB_DEVICE(KOBIL_VID, KOBIL_CONV_B1_PID) },
{ USB_DEVICE(KOBIL_VID, KOBIL_CONV_KAAN_PID) },
{ USB_DEVICE(POSIFLEX_VID, POSIFLEX_PP7000_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_TTUSB_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_ECLO_COM_1WIRE_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_WESTREX_MODEL_777_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_WESTREX_MODEL_8900F_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_PCDJ_DAC2_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_RRCIRKITS_LOCOBUFFER_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_ASK_RDR400_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_NZR_SEM_USB_PID) },
{ USB_DEVICE(ICOM_VID, ICOM_ID_1_PID) },
{ USB_DEVICE(ICOM_VID, ICOM_OPC_U_UC_PID) },
{ USB_DEVICE(ICOM_VID, ICOM_ID_RP2C1_PID) },
{ USB_DEVICE(ICOM_VID, ICOM_ID_RP2C2_PID) },
{ USB_DEVICE(ICOM_VID, ICOM_ID_RP2D_PID) },
{ USB_DEVICE(ICOM_VID, ICOM_ID_RP2VT_PID) },
{ USB_DEVICE(ICOM_VID, ICOM_ID_RP2VR_PID) },
{ USB_DEVICE(ICOM_VID, ICOM_ID_RP4KVT_PID) },
{ USB_DEVICE(ICOM_VID, ICOM_ID_RP4KVR_PID) },
{ USB_DEVICE(ICOM_VID, ICOM_ID_RP2KVT_PID) },
{ USB_DEVICE(ICOM_VID, ICOM_ID_RP2KVR_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_ACG_HFDUAL_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_YEI_SERVOCENTER31_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_THORLABS_PID) },
{ USB_DEVICE(TESTO_VID, TESTO_1_PID) },
{ USB_DEVICE(TESTO_VID, TESTO_3_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_GAMMA_SCOUT_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_TACTRIX_OPENPORT_13M_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_TACTRIX_OPENPORT_13S_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_TACTRIX_OPENPORT_13U_PID) },
{ USB_DEVICE(ELEKTOR_VID, ELEKTOR_FT323R_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_NDI_HUC_PID),
.driver_info = (kernel_ulong_t)&ftdi_NDI_device_quirk },
{ USB_DEVICE(FTDI_VID, FTDI_NDI_SPECTRA_SCU_PID),
.driver_info = (kernel_ulong_t)&ftdi_NDI_device_quirk },
{ USB_DEVICE(FTDI_VID, FTDI_NDI_FUTURE_2_PID),
.driver_info = (kernel_ulong_t)&ftdi_NDI_device_quirk },
{ USB_DEVICE(FTDI_VID, FTDI_NDI_FUTURE_3_PID),
.driver_info = (kernel_ulong_t)&ftdi_NDI_device_quirk },
{ USB_DEVICE(FTDI_VID, FTDI_NDI_AURORA_SCU_PID),
.driver_info = (kernel_ulong_t)&ftdi_NDI_device_quirk },
{ USB_DEVICE(TELLDUS_VID, TELLDUS_TELLSTICK_PID) },
{ USB_DEVICE(NOVITUS_VID, NOVITUS_BONO_E_PID) },
{ USB_DEVICE(FTDI_VID, RTSYSTEMS_USB_VX8_PID) },
{ USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_USB_S03_PID) },
{ USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_USB_59_PID) },
{ USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_USB_57A_PID) },
{ USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_USB_57B_PID) },
{ USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_USB_29A_PID) },
{ USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_USB_29B_PID) },
{ USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_USB_29F_PID) },
{ USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_USB_62B_PID) },
{ USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_USB_S01_PID) },
{ USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_USB_63_PID) },
{ USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_USB_29C_PID) },
{ USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_USB_81B_PID) },
{ USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_USB_82B_PID) },
{ USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_USB_K5D_PID) },
{ USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_USB_K4Y_PID) },
{ USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_USB_K5G_PID) },
{ USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_USB_S05_PID) },
{ USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_USB_60_PID) },
{ USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_USB_61_PID) },
{ USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_USB_62_PID) },
{ USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_USB_63B_PID) },
{ USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_USB_64_PID) },
{ USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_USB_65_PID) },
{ USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_USB_92_PID) },
{ USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_USB_92D_PID) },
{ USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_USB_W5R_PID) },
{ USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_USB_A5R_PID) },
{ USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_USB_PW1_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_MAXSTREAM_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_PHI_FISCO_PID) },
{ USB_DEVICE(TML_VID, TML_USB_SERIAL_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_ELSTER_UNICOM_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_PROPOX_JTAGCABLEII_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_PROPOX_ISPCABLEIII_PID) },
{ USB_DEVICE(FTDI_VID, CYBER_CORTEX_AV_PID),
.driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
{ USB_DEVICE_INTERFACE_NUMBER(OLIMEX_VID, OLIMEX_ARM_USB_OCD_PID, 1) },
{ USB_DEVICE_INTERFACE_NUMBER(OLIMEX_VID, OLIMEX_ARM_USB_OCD_H_PID, 1) },
{ USB_DEVICE_INTERFACE_NUMBER(OLIMEX_VID, OLIMEX_ARM_USB_TINY_PID, 1) },
{ USB_DEVICE_INTERFACE_NUMBER(OLIMEX_VID, OLIMEX_ARM_USB_TINY_H_PID, 1) },
{ USB_DEVICE(FIC_VID, FIC_NEO1973_DEBUG_PID),
.driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
{ USB_DEVICE(FTDI_VID, FTDI_OOCDLINK_PID),
.driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
{ USB_DEVICE(FTDI_VID, LMI_LM3S_DEVEL_BOARD_PID),
.driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
{ USB_DEVICE(FTDI_VID, LMI_LM3S_EVAL_BOARD_PID),
.driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
{ USB_DEVICE(FTDI_VID, LMI_LM3S_ICDI_BOARD_PID),
.driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
{ USB_DEVICE(FTDI_VID, FTDI_TURTELIZER_PID),
.driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
{ USB_DEVICE(RATOC_VENDOR_ID, RATOC_PRODUCT_ID_USB60F) },
{ USB_DEVICE(RATOC_VENDOR_ID, RATOC_PRODUCT_ID_SCU18) },
{ USB_DEVICE(FTDI_VID, FTDI_REU_TINY_PID) },
/* Papouch devices based on FTDI chip */
{ USB_DEVICE(PAPOUCH_VID, PAPOUCH_SB485_PID) },
{ USB_DEVICE(PAPOUCH_VID, PAPOUCH_AP485_PID) },
{ USB_DEVICE(PAPOUCH_VID, PAPOUCH_SB422_PID) },
{ USB_DEVICE(PAPOUCH_VID, PAPOUCH_SB485_2_PID) },
{ USB_DEVICE(PAPOUCH_VID, PAPOUCH_AP485_2_PID) },
{ USB_DEVICE(PAPOUCH_VID, PAPOUCH_SB422_2_PID) },
{ USB_DEVICE(PAPOUCH_VID, PAPOUCH_SB485S_PID) },
{ USB_DEVICE(PAPOUCH_VID, PAPOUCH_SB485C_PID) },
{ USB_DEVICE(PAPOUCH_VID, PAPOUCH_LEC_PID) },
{ USB_DEVICE(PAPOUCH_VID, PAPOUCH_SB232_PID) },
{ USB_DEVICE(PAPOUCH_VID, PAPOUCH_TMU_PID) },
{ USB_DEVICE(PAPOUCH_VID, PAPOUCH_IRAMP_PID) },
{ USB_DEVICE(PAPOUCH_VID, PAPOUCH_DRAK5_PID) },
{ USB_DEVICE(PAPOUCH_VID, PAPOUCH_QUIDO8x8_PID) },
{ USB_DEVICE(PAPOUCH_VID, PAPOUCH_QUIDO4x4_PID) },
{ USB_DEVICE(PAPOUCH_VID, PAPOUCH_QUIDO2x2_PID) },
{ USB_DEVICE(PAPOUCH_VID, PAPOUCH_QUIDO10x1_PID) },
{ USB_DEVICE(PAPOUCH_VID, PAPOUCH_QUIDO30x3_PID) },
{ USB_DEVICE(PAPOUCH_VID, PAPOUCH_QUIDO60x3_PID) },
{ USB_DEVICE(PAPOUCH_VID, PAPOUCH_QUIDO2x16_PID) },
{ USB_DEVICE(PAPOUCH_VID, PAPOUCH_QUIDO3x32_PID) },
{ USB_DEVICE(PAPOUCH_VID, PAPOUCH_DRAK6_PID) },
{ USB_DEVICE(PAPOUCH_VID, PAPOUCH_UPSUSB_PID) },
{ USB_DEVICE(PAPOUCH_VID, PAPOUCH_MU_PID) },
{ USB_DEVICE(PAPOUCH_VID, PAPOUCH_SIMUKEY_PID) },
{ USB_DEVICE(PAPOUCH_VID, PAPOUCH_AD4USB_PID) },
{ USB_DEVICE(PAPOUCH_VID, PAPOUCH_GMUX_PID) },
{ USB_DEVICE(PAPOUCH_VID, PAPOUCH_GMSR_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_DOMINTELL_DGQG_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_DOMINTELL_DUSB_PID) },
{ USB_DEVICE(ALTI2_VID, ALTI2_N3_PID) },
{ USB_DEVICE(FTDI_VID, DIEBOLD_BCS_SE923_PID) },
{ USB_DEVICE(ATMEL_VID, STK541_PID) },
{ USB_DEVICE(DE_VID, STB_PID) },
{ USB_DEVICE(DE_VID, WHT_PID) },
{ USB_DEVICE(ADI_VID, ADI_GNICE_PID),
.driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
{ USB_DEVICE(ADI_VID, ADI_GNICEPLUS_PID),
.driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
{ USB_DEVICE_AND_INTERFACE_INFO(MICROCHIP_VID, MICROCHIP_USB_BOARD_PID,
USB_CLASS_VENDOR_SPEC,
USB_SUBCLASS_VENDOR_SPEC, 0x00) },
{ USB_DEVICE_INTERFACE_NUMBER(ACTEL_VID, MICROSEMI_ARROW_SF2PLUS_BOARD_PID, 2) },
{ USB_DEVICE(JETI_VID, JETI_SPC1201_PID) },
{ USB_DEVICE(MARVELL_VID, MARVELL_SHEEVAPLUG_PID),
.driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
{ USB_DEVICE(LARSENBRUSGAARD_VID, LB_ALTITRACK_PID) },
{ USB_DEVICE(GN_OTOMETRICS_VID, AURICAL_USB_PID) },
{ USB_DEVICE(FTDI_VID, PI_C865_PID) },
{ USB_DEVICE(FTDI_VID, PI_C857_PID) },
{ USB_DEVICE(PI_VID, PI_C866_PID) },
{ USB_DEVICE(PI_VID, PI_C663_PID) },
{ USB_DEVICE(PI_VID, PI_C725_PID) },
{ USB_DEVICE(PI_VID, PI_E517_PID) },
{ USB_DEVICE(PI_VID, PI_C863_PID) },
{ USB_DEVICE(PI_VID, PI_E861_PID) },
{ USB_DEVICE(PI_VID, PI_C867_PID) },
{ USB_DEVICE(PI_VID, PI_E609_PID) },
{ USB_DEVICE(PI_VID, PI_E709_PID) },
{ USB_DEVICE(PI_VID, PI_100F_PID) },
{ USB_DEVICE(PI_VID, PI_1011_PID) },
{ USB_DEVICE(PI_VID, PI_1012_PID) },
{ USB_DEVICE(PI_VID, PI_1013_PID) },
{ USB_DEVICE(PI_VID, PI_1014_PID) },
{ USB_DEVICE(PI_VID, PI_1015_PID) },
{ USB_DEVICE(PI_VID, PI_1016_PID) },
{ USB_DEVICE(KONDO_VID, KONDO_USB_SERIAL_PID) },
{ USB_DEVICE(BAYER_VID, BAYER_CONTOUR_CABLE_PID) },
{ USB_DEVICE(FTDI_VID, MARVELL_OPENRD_PID),
.driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
{ USB_DEVICE(FTDI_VID, TI_XDS100V2_PID),
.driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
{ USB_DEVICE(FTDI_VID, HAMEG_HO820_PID) },
{ USB_DEVICE(FTDI_VID, HAMEG_HO720_PID) },
{ USB_DEVICE(FTDI_VID, HAMEG_HO730_PID) },
{ USB_DEVICE(FTDI_VID, HAMEG_HO870_PID) },
{ USB_DEVICE(FTDI_VID, MJSG_GENERIC_PID) },
{ USB_DEVICE(FTDI_VID, MJSG_SR_RADIO_PID) },
{ USB_DEVICE(FTDI_VID, MJSG_HD_RADIO_PID) },
{ USB_DEVICE(FTDI_VID, MJSG_XM_RADIO_PID) },
{ USB_DEVICE(FTDI_VID, XVERVE_SIGNALYZER_ST_PID),
.driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
{ USB_DEVICE(FTDI_VID, XVERVE_SIGNALYZER_SLITE_PID),
.driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
{ USB_DEVICE(FTDI_VID, XVERVE_SIGNALYZER_SH2_PID),
.driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
{ USB_DEVICE(FTDI_VID, XVERVE_SIGNALYZER_SH4_PID),
.driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
{ USB_DEVICE(FTDI_VID, SEGWAY_RMP200_PID) },
{ USB_DEVICE(FTDI_VID, ACCESIO_COM4SM_PID) },
{ USB_DEVICE(IONICS_VID, IONICS_PLUGCOMPUTER_PID),
.driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
{ USB_DEVICE(FTDI_VID, FTDI_CHAMSYS_24_MASTER_WING_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_CHAMSYS_PC_WING_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_CHAMSYS_USB_DMX_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_CHAMSYS_MIDI_TIMECODE_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_CHAMSYS_MINI_WING_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_CHAMSYS_MAXI_WING_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_CHAMSYS_MEDIA_WING_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_CHAMSYS_WING_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_SCIENCESCOPE_LOGBOOKML_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_SCIENCESCOPE_LS_LOGBOOK_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_SCIENCESCOPE_HS_LOGBOOK_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_CINTERION_MC55I_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_FHE_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_DOTEC_PID) },
{ USB_DEVICE(QIHARDWARE_VID, MILKYMISTONE_JTAGSERIAL_PID),
.driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
{ USB_DEVICE(ST_VID, ST_STMCLT_2232_PID),
.driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
{ USB_DEVICE(ST_VID, ST_STMCLT_4232_PID),
.driver_info = (kernel_ulong_t)&ftdi_stmclite_quirk },
{ USB_DEVICE(FTDI_VID, FTDI_RF_R106) },
{ USB_DEVICE(FTDI_VID, FTDI_DISTORTEC_JTAG_LOCK_PICK_PID),
.driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
{ USB_DEVICE(FTDI_VID, FTDI_LUMEL_PD12_PID) },
/* Crucible Devices */
{ USB_DEVICE(FTDI_VID, FTDI_CT_COMET_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_Z3X_PID) },
/* Cressi Devices */
{ USB_DEVICE(FTDI_VID, FTDI_CRESSI_PID) },
/* Brainboxes Devices */
{ USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_VX_001_PID) },
{ USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_VX_012_PID) },
{ USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_VX_023_PID) },
{ USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_VX_034_PID) },
{ USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_101_PID) },
{ USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_159_PID) },
{ USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_160_1_PID) },
{ USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_160_2_PID) },
{ USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_160_3_PID) },
{ USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_160_4_PID) },
{ USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_160_5_PID) },
{ USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_160_6_PID) },
{ USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_160_7_PID) },
{ USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_160_8_PID) },
{ USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_235_PID) },
{ USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_257_PID) },
{ USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_279_1_PID) },
{ USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_279_2_PID) },
{ USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_279_3_PID) },
{ USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_279_4_PID) },
{ USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_313_PID) },
{ USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_320_PID) },
{ USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_324_PID) },
{ USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_346_1_PID) },
{ USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_346_2_PID) },
{ USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_357_PID) },
{ USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_606_1_PID) },
{ USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_606_2_PID) },
{ USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_606_3_PID) },
{ USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_701_1_PID) },
{ USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_701_2_PID) },
{ USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_842_1_PID) },
{ USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_842_2_PID) },
{ USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_842_3_PID) },
{ USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_842_4_PID) },
/* ekey Devices */
{ USB_DEVICE(FTDI_VID, FTDI_EKEY_CONV_USB_PID) },
/* Infineon Devices */
{ USB_DEVICE_INTERFACE_NUMBER(INFINEON_VID, INFINEON_TRIBOARD_TC1798_PID, 1) },
{ USB_DEVICE_INTERFACE_NUMBER(INFINEON_VID, INFINEON_TRIBOARD_TC2X7_PID, 1) },
/* GE Healthcare devices */
{ USB_DEVICE(GE_HEALTHCARE_VID, GE_HEALTHCARE_NEMO_TRACKER_PID) },
/* Active Research (Actisense) devices */
{ USB_DEVICE(FTDI_VID, ACTISENSE_NDC_PID) },
{ USB_DEVICE(FTDI_VID, ACTISENSE_USG_PID) },
{ USB_DEVICE(FTDI_VID, ACTISENSE_NGT_PID) },
{ USB_DEVICE(FTDI_VID, ACTISENSE_NGW_PID) },
{ USB_DEVICE(FTDI_VID, ACTISENSE_D9AC_PID) },
{ USB_DEVICE(FTDI_VID, ACTISENSE_D9AD_PID) },
{ USB_DEVICE(FTDI_VID, ACTISENSE_D9AE_PID) },
{ USB_DEVICE(FTDI_VID, ACTISENSE_D9AF_PID) },
{ USB_DEVICE(FTDI_VID, CHETCO_SEAGAUGE_PID) },
{ USB_DEVICE(FTDI_VID, CHETCO_SEASWITCH_PID) },
{ USB_DEVICE(FTDI_VID, CHETCO_SEASMART_NMEA2000_PID) },
{ USB_DEVICE(FTDI_VID, CHETCO_SEASMART_ETHERNET_PID) },
{ USB_DEVICE(FTDI_VID, CHETCO_SEASMART_WIFI_PID) },
{ USB_DEVICE(FTDI_VID, CHETCO_SEASMART_DISPLAY_PID) },
{ USB_DEVICE(FTDI_VID, CHETCO_SEASMART_LITE_PID) },
{ USB_DEVICE(FTDI_VID, CHETCO_SEASMART_ANALOG_PID) },
/* Belimo Automation devices */
{ USB_DEVICE(FTDI_VID, BELIMO_ZTH_PID) },
{ USB_DEVICE(FTDI_VID, BELIMO_ZIP_PID) },
/* ICP DAS I-756xU devices */
{ USB_DEVICE(ICPDAS_VID, ICPDAS_I7560U_PID) },
{ USB_DEVICE(ICPDAS_VID, ICPDAS_I7561U_PID) },
{ USB_DEVICE(ICPDAS_VID, ICPDAS_I7563U_PID) },
{ USB_DEVICE(WICED_VID, WICED_USB20706V2_PID) },
{ USB_DEVICE(TI_VID, TI_CC3200_LAUNCHPAD_PID),
.driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
{ USB_DEVICE(CYPRESS_VID, CYPRESS_WICED_BT_USB_PID) },
{ USB_DEVICE(CYPRESS_VID, CYPRESS_WICED_WL_USB_PID) },
{ USB_DEVICE(AIRBUS_DS_VID, AIRBUS_DS_P8GR) },
/* EZPrototypes devices */
{ USB_DEVICE(EZPROTOTYPES_VID, HJELMSLUND_USB485_ISO_PID) },
{ USB_DEVICE_INTERFACE_NUMBER(UNJO_VID, UNJO_ISODEBUG_V1_PID, 1) },
/* Sienna devices */
{ USB_DEVICE(FTDI_VID, FTDI_SIENNA_PID) },
{ USB_DEVICE(ECHELON_VID, ECHELON_U20_PID) },
/* IDS GmbH devices */
{ USB_DEVICE(IDS_VID, IDS_SI31A_PID) },
{ USB_DEVICE(IDS_VID, IDS_CM31A_PID) },
/* Omron devices */
{ USB_DEVICE(OMRON_VID, OMRON_CS1W_CIF31_PID) },
/* U-Blox devices */
{ USB_DEVICE(UBLOX_VID, UBLOX_C099F9P_ZED_PID) },
{ USB_DEVICE(UBLOX_VID, UBLOX_C099F9P_ODIN_PID) },
/* FreeCalypso USB adapters */
{ USB_DEVICE(FTDI_VID, FTDI_FALCONIA_JTAG_BUF_PID),
.driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
{ USB_DEVICE(FTDI_VID, FTDI_FALCONIA_JTAG_UNBUF_PID),
.driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
{ } /* Terminating entry */
};
MODULE_DEVICE_TABLE(usb, id_table_combined);
static const char *ftdi_chip_name[] = {
[SIO] = "SIO", /* the serial part of FT8U100AX */
[FT232A] = "FT232A",
[FT232B] = "FT232B",
[FT2232C] = "FT2232C/D",
[FT232R] = "FT232R",
[FT232H] = "FT232H",
[FT2232H] = "FT2232H",
[FT4232H] = "FT4232H",
[FT4232HA] = "FT4232HA",
[FT232HP] = "FT232HP",
[FT233HP] = "FT233HP",
[FT2232HP] = "FT2232HP",
[FT2233HP] = "FT2233HP",
[FT4232HP] = "FT4232HP",
[FT4233HP] = "FT4233HP",
[FTX] = "FT-X",
};
/* Used for TIOCMIWAIT */
#define FTDI_STATUS_B0_MASK (FTDI_RS0_CTS | FTDI_RS0_DSR | FTDI_RS0_RI | FTDI_RS0_RLSD)
#define FTDI_STATUS_B1_MASK (FTDI_RS_BI)
/* End TIOCMIWAIT */
static void ftdi_set_termios(struct tty_struct *tty,
struct usb_serial_port *port,
const struct ktermios *old_termios);
static int ftdi_get_modem_status(struct usb_serial_port *port,
unsigned char status[2]);
#define WDR_TIMEOUT 5000 /* default urb timeout */
#define WDR_SHORT_TIMEOUT 1000 /* shorter urb timeout */
/*
* ***************************************************************************
* Utility functions
* ***************************************************************************
*/
static unsigned short int ftdi_232am_baud_base_to_divisor(int baud, int base)
{
unsigned short int divisor;
/* divisor shifted 3 bits to the left */
int divisor3 = DIV_ROUND_CLOSEST(base, 2 * baud);
if ((divisor3 & 0x7) == 7)
divisor3++; /* round x.7/8 up to x+1 */
divisor = divisor3 >> 3;
divisor3 &= 0x7;
if (divisor3 == 1)
divisor |= 0xc000; /* +0.125 */
else if (divisor3 >= 4)
divisor |= 0x4000; /* +0.5 */
else if (divisor3 != 0)
divisor |= 0x8000; /* +0.25 */
else if (divisor == 1)
divisor = 0; /* special case for maximum baud rate */
return divisor;
}
static unsigned short int ftdi_232am_baud_to_divisor(int baud)
{
return ftdi_232am_baud_base_to_divisor(baud, 48000000);
}
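/*
 * Worked example (illustrative) of the encoding above: for 9600 baud on
 * the 48MHz base, DIV_ROUND_CLOSEST(48000000, 2 * 9600) = 2500, i.e.
 * integer divisor 312 with a remainder of 4/8. The integer part fills the
 * low 14 bits and the remainder selects the +0.5 code, giving 0x4138, so
 * the chip runs at 48MHz / (16 * 312.5) = 9600 baud exactly.
 */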
static u32 ftdi_232bm_baud_base_to_divisor(int baud, int base)
{
static const unsigned char divfrac[8] = { 0, 3, 2, 4, 1, 5, 6, 7 };
u32 divisor;
/* divisor shifted 3 bits to the left */
int divisor3 = DIV_ROUND_CLOSEST(base, 2 * baud);
divisor = divisor3 >> 3;
divisor |= (u32)divfrac[divisor3 & 0x7] << 14;
/* Deal with special cases for highest baud rates. */
if (divisor == 1) /* 1.0 */
divisor = 0;
else if (divisor == 0x4001) /* 1.5 */
divisor = 1;
return divisor;
}
static u32 ftdi_232bm_baud_to_divisor(int baud)
{
return ftdi_232bm_baud_base_to_divisor(baud, 48000000);
}
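/*
 * Worked example (illustrative): for 115200 baud on the 48MHz base,
 * DIV_ROUND_CLOSEST(48000000, 2 * 115200) = 208, i.e. integer divisor 26
 * with no fractional part (divfrac[0] == 0), giving 0x1A. The resulting
 * rate is 48MHz / (16 * 26), roughly 115385 baud, about 0.16% high and
 * well within the usual async tolerance.
 */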
static u32 ftdi_2232h_baud_base_to_divisor(int baud, int base)
{
static const unsigned char divfrac[8] = { 0, 3, 2, 4, 1, 5, 6, 7 };
u32 divisor;
int divisor3;
/* hi-speed baud rate is 10-bit sampling instead of 16-bit */
divisor3 = DIV_ROUND_CLOSEST(8 * base, 10 * baud);
divisor = divisor3 >> 3;
divisor |= (u32)divfrac[divisor3 & 0x7] << 14;
/* Deal with special cases for highest baud rates. */
if (divisor == 1) /* 1.0 */
divisor = 0;
else if (divisor == 0x4001) /* 1.5 */
divisor = 1;
/*
* Set this bit to turn off the divide-by-2.5 on the baud rate generator.
* This enables baud rates up to 12Mbaud, but the generator cannot reach
* below 1200 baud with this bit set.
*/
divisor |= 0x00020000;
return divisor;
}
static u32 ftdi_2232h_baud_to_divisor(int baud)
{
return ftdi_2232h_baud_base_to_divisor(baud, 120000000);
}
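/*
 * Worked example (illustrative): with the 120MHz base and 10-bit
 * sampling, 115200 baud gives DIV_ROUND_CLOSEST(960000000, 1152000) =
 * 833, i.e. divisor 104 + 1/8 encoded as 0xc068, and 0x2c068 once the
 * divide-by-2.5 disable bit is set. A request for 12000000 baud hits the
 * divisor == 1 special case and is sent as 0x20000.
 */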
#define set_mctrl(port, set) update_mctrl((port), (set), 0)
#define clear_mctrl(port, clear) update_mctrl((port), 0, (clear))
static int update_mctrl(struct usb_serial_port *port, unsigned int set,
unsigned int clear)
{
struct ftdi_private *priv = usb_get_serial_port_data(port);
struct device *dev = &port->dev;
unsigned value;
int rv;
if (((set | clear) & (TIOCM_DTR | TIOCM_RTS)) == 0) {
dev_dbg(dev, "%s - DTR|RTS not being set|cleared\n", __func__);
return 0; /* no change */
}
clear &= ~set; /* 'set' takes precedence over 'clear' */
value = 0;
if (clear & TIOCM_DTR)
value |= FTDI_SIO_SET_DTR_LOW;
if (clear & TIOCM_RTS)
value |= FTDI_SIO_SET_RTS_LOW;
if (set & TIOCM_DTR)
value |= FTDI_SIO_SET_DTR_HIGH;
if (set & TIOCM_RTS)
value |= FTDI_SIO_SET_RTS_HIGH;
rv = usb_control_msg(port->serial->dev,
usb_sndctrlpipe(port->serial->dev, 0),
FTDI_SIO_SET_MODEM_CTRL_REQUEST,
FTDI_SIO_SET_MODEM_CTRL_REQUEST_TYPE,
value, priv->channel,
NULL, 0, WDR_TIMEOUT);
if (rv < 0) {
dev_dbg(dev, "%s Error from MODEM_CTRL urb: DTR %s, RTS %s\n",
__func__,
(set & TIOCM_DTR) ? "HIGH" : (clear & TIOCM_DTR) ? "LOW" : "unchanged",
(set & TIOCM_RTS) ? "HIGH" : (clear & TIOCM_RTS) ? "LOW" : "unchanged");
rv = usb_translate_errors(rv);
} else {
dev_dbg(dev, "%s - DTR %s, RTS %s\n", __func__,
(set & TIOCM_DTR) ? "HIGH" : (clear & TIOCM_DTR) ? "LOW" : "unchanged",
(set & TIOCM_RTS) ? "HIGH" : (clear & TIOCM_RTS) ? "LOW" : "unchanged");
/* FIXME: locking on last_dtr_rts */
priv->last_dtr_rts = (priv->last_dtr_rts & ~clear) | set;
}
return rv;
}
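/*
 * Usage sketch (illustrative): set_mctrl(port, TIOCM_DTR | TIOCM_RTS)
 * issues a single SET_MODEM_CTRL request with wValue =
 * FTDI_SIO_SET_DTR_HIGH | FTDI_SIO_SET_RTS_HIGH and wIndex =
 * priv->channel, while clear_mctrl(port, TIOCM_RTS) sends only
 * FTDI_SIO_SET_RTS_LOW; a line that is neither set nor cleared is not
 * mentioned in the request at all.
 */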
static u32 get_ftdi_divisor(struct tty_struct *tty,
struct usb_serial_port *port)
{
struct ftdi_private *priv = usb_get_serial_port_data(port);
struct device *dev = &port->dev;
u32 div_value = 0;
int div_okay = 1;
int baud;
baud = tty_get_baud_rate(tty);
dev_dbg(dev, "%s - tty_get_baud_rate reports speed %d\n", __func__, baud);
/*
* Observe deprecated async-compatible custom_divisor hack, update
* baudrate if needed.
*/
if (baud == 38400 &&
((priv->flags & ASYNC_SPD_MASK) == ASYNC_SPD_CUST) &&
(priv->custom_divisor)) {
baud = priv->baud_base / priv->custom_divisor;
dev_dbg(dev, "%s - custom divisor %d sets baud rate to %d\n",
__func__, priv->custom_divisor, baud);
}
if (!baud)
baud = 9600;
switch (priv->chip_type) {
case SIO:
switch (baud) {
case 300: div_value = ftdi_sio_b300; break;
case 600: div_value = ftdi_sio_b600; break;
case 1200: div_value = ftdi_sio_b1200; break;
case 2400: div_value = ftdi_sio_b2400; break;
case 4800: div_value = ftdi_sio_b4800; break;
case 9600: div_value = ftdi_sio_b9600; break;
case 19200: div_value = ftdi_sio_b19200; break;
case 38400: div_value = ftdi_sio_b38400; break;
case 57600: div_value = ftdi_sio_b57600; break;
case 115200: div_value = ftdi_sio_b115200; break;
default:
dev_dbg(dev, "%s - Baudrate (%d) requested is not supported\n",
__func__, baud);
div_value = ftdi_sio_b9600;
baud = 9600;
div_okay = 0;
}
break;
case FT232A:
if (baud <= 3000000) {
div_value = ftdi_232am_baud_to_divisor(baud);
} else {
dev_dbg(dev, "%s - Baud rate too high!\n", __func__);
baud = 9600;
div_value = ftdi_232am_baud_to_divisor(9600);
div_okay = 0;
}
break;
case FT232B:
case FT2232C:
case FT232R:
case FTX:
if (baud <= 3000000) {
u16 product_id = le16_to_cpu(
port->serial->dev->descriptor.idProduct);
if (((product_id == FTDI_NDI_HUC_PID) ||
(product_id == FTDI_NDI_SPECTRA_SCU_PID) ||
(product_id == FTDI_NDI_FUTURE_2_PID) ||
(product_id == FTDI_NDI_FUTURE_3_PID) ||
(product_id == FTDI_NDI_AURORA_SCU_PID)) &&
(baud == 19200)) {
baud = 1200000;
}
div_value = ftdi_232bm_baud_to_divisor(baud);
} else {
dev_dbg(dev, "%s - Baud rate too high!\n", __func__);
div_value = ftdi_232bm_baud_to_divisor(9600);
div_okay = 0;
baud = 9600;
}
break;
default:
if ((baud <= 12000000) && (baud >= 1200)) {
div_value = ftdi_2232h_baud_to_divisor(baud);
} else if (baud < 1200) {
div_value = ftdi_232bm_baud_to_divisor(baud);
} else {
dev_dbg(dev, "%s - Baud rate too high!\n", __func__);
div_value = ftdi_232bm_baud_to_divisor(9600);
div_okay = 0;
baud = 9600;
}
break;
}
if (div_okay) {
dev_dbg(dev, "%s - Baud rate set to %d (divisor 0x%lX) on chip %s\n",
__func__, baud, (unsigned long)div_value,
ftdi_chip_name[priv->chip_type]);
}
tty_encode_baud_rate(tty, baud, baud);
return div_value;
}
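/*
 * Deprecated-path example (illustrative): on an FT232R, baud_base is
 * 24000000 (48MHz / 2), so ASYNC_SPD_CUST with custom_divisor = 208 turns
 * a requested 38400 into 24000000 / 208 = 115384 baud before the chip
 * divisor is computed as usual.
 */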
static int change_speed(struct tty_struct *tty, struct usb_serial_port *port)
{
struct ftdi_private *priv = usb_get_serial_port_data(port);
u16 value;
u16 index;
u32 index_value;
int rv;
index_value = get_ftdi_divisor(tty, port);
value = (u16)index_value;
index = (u16)(index_value >> 16);
if (priv->channel)
index = (u16)((index << 8) | priv->channel);
rv = usb_control_msg(port->serial->dev,
usb_sndctrlpipe(port->serial->dev, 0),
FTDI_SIO_SET_BAUDRATE_REQUEST,
FTDI_SIO_SET_BAUDRATE_REQUEST_TYPE,
value, index,
NULL, 0, WDR_SHORT_TIMEOUT);
return rv;
}
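/*
 * Illustrative encoding: get_ftdi_divisor() can return more than 16 bits
 * on the hi-speed types, e.g. 0x2c068 for 115200 baud (see
 * ftdi_2232h_baud_to_divisor() above). That splits into wValue = 0xc068
 * and divisor bits 0x2 in wIndex; when priv->channel is non-zero, the
 * channel index goes into the low byte of wIndex and the extra divisor
 * bits are shifted into its high byte.
 */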
static int write_latency_timer(struct usb_serial_port *port)
{
struct ftdi_private *priv = usb_get_serial_port_data(port);
struct usb_device *udev = port->serial->dev;
int rv;
int l = priv->latency;
if (priv->chip_type == SIO || priv->chip_type == FT232A)
return -EINVAL;
if (priv->flags & ASYNC_LOW_LATENCY)
l = 1;
dev_dbg(&port->dev, "%s: setting latency timer = %i\n", __func__, l);
rv = usb_control_msg(udev,
usb_sndctrlpipe(udev, 0),
FTDI_SIO_SET_LATENCY_TIMER_REQUEST,
FTDI_SIO_SET_LATENCY_TIMER_REQUEST_TYPE,
l, priv->channel,
NULL, 0, WDR_TIMEOUT);
if (rv < 0)
dev_err(&port->dev, "Unable to write latency timer: %i\n", rv);
return rv;
}
static int _read_latency_timer(struct usb_serial_port *port)
{
struct ftdi_private *priv = usb_get_serial_port_data(port);
struct usb_device *udev = port->serial->dev;
u8 buf;
int rv;
rv = usb_control_msg_recv(udev, 0, FTDI_SIO_GET_LATENCY_TIMER_REQUEST,
FTDI_SIO_GET_LATENCY_TIMER_REQUEST_TYPE, 0,
priv->channel, &buf, 1, WDR_TIMEOUT,
GFP_KERNEL);
if (rv == 0)
rv = buf;
return rv;
}
static int read_latency_timer(struct usb_serial_port *port)
{
struct ftdi_private *priv = usb_get_serial_port_data(port);
int rv;
if (priv->chip_type == SIO || priv->chip_type == FT232A)
return -EINVAL;
rv = _read_latency_timer(port);
if (rv < 0) {
dev_err(&port->dev, "Unable to read latency timer: %i\n", rv);
return rv;
}
priv->latency = rv;
return 0;
}
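/*
 * The latency timer is expressed in milliseconds and determines roughly how
 * long the chip may buffer received data before flushing it to the host. In
 * this driver ASYNC_LOW_LATENCY forces a value of 1, and ftdi_port_probe()
 * falls back to 16 when the timer cannot be read. SIO and FT232A parts have
 * no latency timer, hence the -EINVAL above.
 */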
static void get_serial_info(struct tty_struct *tty, struct serial_struct *ss)
{
struct usb_serial_port *port = tty->driver_data;
struct ftdi_private *priv = usb_get_serial_port_data(port);
ss->flags = priv->flags;
ss->baud_base = priv->baud_base;
ss->custom_divisor = priv->custom_divisor;
}
static int set_serial_info(struct tty_struct *tty, struct serial_struct *ss)
{
struct usb_serial_port *port = tty->driver_data;
struct ftdi_private *priv = usb_get_serial_port_data(port);
int old_flags, old_divisor;
mutex_lock(&priv->cfg_lock);
if (!capable(CAP_SYS_ADMIN)) {
if ((ss->flags ^ priv->flags) & ~ASYNC_USR_MASK) {
mutex_unlock(&priv->cfg_lock);
return -EPERM;
}
}
old_flags = priv->flags;
old_divisor = priv->custom_divisor;
priv->flags = ss->flags & ASYNC_FLAGS;
priv->custom_divisor = ss->custom_divisor;
write_latency_timer(port);
if ((priv->flags ^ old_flags) & ASYNC_SPD_MASK ||
((priv->flags & ASYNC_SPD_MASK) == ASYNC_SPD_CUST &&
priv->custom_divisor != old_divisor)) {
/* warn about deprecation unless clearing */
if (priv->flags & ASYNC_SPD_MASK)
dev_warn_ratelimited(&port->dev, "use of SPD flags is deprecated\n");
change_speed(tty, port);
}
mutex_unlock(&priv->cfg_lock);
return 0;
}
static int get_lsr_info(struct usb_serial_port *port,
unsigned int __user *retinfo)
{
struct ftdi_private *priv = usb_get_serial_port_data(port);
unsigned int result = 0;
if (priv->transmit_empty)
result = TIOCSER_TEMT;
if (copy_to_user(retinfo, &result, sizeof(unsigned int)))
return -EFAULT;
return 0;
}
static int ftdi_determine_type(struct usb_serial_port *port)
{
struct ftdi_private *priv = usb_get_serial_port_data(port);
struct usb_serial *serial = port->serial;
struct usb_device *udev = serial->dev;
unsigned int version, ifnum;
version = le16_to_cpu(udev->descriptor.bcdDevice);
ifnum = serial->interface->cur_altsetting->desc.bInterfaceNumber;
/* Assume Hi-Speed type */
priv->baud_base = 120000000 / 2;
priv->channel = CHANNEL_A + ifnum;
switch (version) {
case 0x200:
priv->chip_type = FT232A;
priv->baud_base = 48000000 / 2;
priv->channel = 0;
/*
* FT232B devices have a bug where bcdDevice gets set to 0x200
* when iSerialNumber is 0. Assume it is an FT232B in case the
* latency timer is readable.
*/
if (udev->descriptor.iSerialNumber == 0 &&
_read_latency_timer(port) >= 0) {
priv->chip_type = FT232B;
}
break;
case 0x400:
priv->chip_type = FT232B;
priv->baud_base = 48000000 / 2;
priv->channel = 0;
break;
case 0x500:
priv->chip_type = FT2232C;
priv->baud_base = 48000000 / 2;
break;
case 0x600:
priv->chip_type = FT232R;
priv->baud_base = 48000000 / 2;
priv->channel = 0;
break;
case 0x700:
priv->chip_type = FT2232H;
break;
case 0x800:
priv->chip_type = FT4232H;
break;
case 0x900:
priv->chip_type = FT232H;
break;
case 0x1000:
priv->chip_type = FTX;
priv->baud_base = 48000000 / 2;
break;
case 0x2800:
priv->chip_type = FT2233HP;
break;
case 0x2900:
priv->chip_type = FT4233HP;
break;
case 0x3000:
priv->chip_type = FT2232HP;
break;
case 0x3100:
priv->chip_type = FT4232HP;
break;
case 0x3200:
priv->chip_type = FT233HP;
break;
case 0x3300:
priv->chip_type = FT232HP;
break;
case 0x3600:
priv->chip_type = FT4232HA;
break;
default:
if (version < 0x200) {
priv->chip_type = SIO;
priv->baud_base = 12000000 / 16;
priv->channel = 0;
} else {
dev_err(&port->dev, "unknown device type: 0x%02x\n", version);
return -ENODEV;
}
}
dev_info(&udev->dev, "Detected %s\n", ftdi_chip_name[priv->chip_type]);
return 0;
}
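/*
 * Example of the mapping above: an FT2232C reports bcdDevice 0x500, so on
 * interface 1 it ends up with channel CHANNEL_A + 1 and a baud_base of
 * 48 MHz / 2, whereas the Hi-Speed parts (0x700 and later, FTX excepted)
 * keep the 120 MHz / 2 default assumed before the switch.
 */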
/*
* Determine the maximum packet size for the device. This depends on the chip
* type and the USB host capabilities. The value should be obtained from the
* device descriptor as the chip will use the appropriate values for the host.
*/
static void ftdi_set_max_packet_size(struct usb_serial_port *port)
{
struct ftdi_private *priv = usb_get_serial_port_data(port);
struct usb_interface *interface = port->serial->interface;
struct usb_endpoint_descriptor *ep_desc;
unsigned num_endpoints;
unsigned i;
num_endpoints = interface->cur_altsetting->desc.bNumEndpoints;
if (!num_endpoints)
return;
/*
* NOTE: Some customers have programmed FT232R/FT245R devices
* with an endpoint size of 0 - not good. In this case, we
* want to override the endpoint descriptor setting and use a
* value of 64 for wMaxPacketSize.
*/
for (i = 0; i < num_endpoints; i++) {
ep_desc = &interface->cur_altsetting->endpoint[i].desc;
if (!ep_desc->wMaxPacketSize) {
ep_desc->wMaxPacketSize = cpu_to_le16(0x40);
dev_warn(&port->dev, "Overriding wMaxPacketSize on endpoint %d\n",
usb_endpoint_num(ep_desc));
}
}
/* Set max packet size based on last descriptor. */
priv->max_packet_size = usb_endpoint_maxp(ep_desc);
}
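/*
 * max_packet_size is used later both when chunking outgoing data in
 * ftdi_prepare_write_buffer() and when splitting incoming transfers into
 * per-packet status headers in ftdi_process_read_urb().
 */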
/*
* ***************************************************************************
* Sysfs Attribute
* ***************************************************************************
*/
static ssize_t latency_timer_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct usb_serial_port *port = to_usb_serial_port(dev);
struct ftdi_private *priv = usb_get_serial_port_data(port);
if (priv->flags & ASYNC_LOW_LATENCY)
return sprintf(buf, "1\n");
else
return sprintf(buf, "%u\n", priv->latency);
}
/* Write a new value of the latency timer, in units of milliseconds. */
static ssize_t latency_timer_store(struct device *dev,
struct device_attribute *attr,
const char *valbuf, size_t count)
{
struct usb_serial_port *port = to_usb_serial_port(dev);
struct ftdi_private *priv = usb_get_serial_port_data(port);
u8 v;
int rv;
if (kstrtou8(valbuf, 10, &v))
return -EINVAL;
priv->latency = v;
rv = write_latency_timer(port);
if (rv < 0)
return -EIO;
return count;
}
static DEVICE_ATTR_RW(latency_timer);
/* Write an event character directly to the FTDI register. The ASCII
value is in the low 8 bits, with the enable bit in the 9th bit. */
static ssize_t event_char_store(struct device *dev,
struct device_attribute *attr, const char *valbuf, size_t count)
{
struct usb_serial_port *port = to_usb_serial_port(dev);
struct ftdi_private *priv = usb_get_serial_port_data(port);
struct usb_device *udev = port->serial->dev;
unsigned int v;
int rv;
if (kstrtouint(valbuf, 0, &v) || v >= 0x200)
return -EINVAL;
dev_dbg(&port->dev, "%s: setting event char = 0x%03x\n", __func__, v);
rv = usb_control_msg(udev,
usb_sndctrlpipe(udev, 0),
FTDI_SIO_SET_EVENT_CHAR_REQUEST,
FTDI_SIO_SET_EVENT_CHAR_REQUEST_TYPE,
v, priv->channel,
NULL, 0, WDR_TIMEOUT);
if (rv < 0) {
dev_dbg(&port->dev, "Unable to write event character: %i\n", rv);
return -EIO;
}
return count;
}
static DEVICE_ATTR_WO(event_char);
static struct attribute *ftdi_attrs[] = {
&dev_attr_event_char.attr,
&dev_attr_latency_timer.attr,
NULL
};
static umode_t ftdi_is_visible(struct kobject *kobj, struct attribute *attr, int idx)
{
struct device *dev = kobj_to_dev(kobj);
struct usb_serial_port *port = to_usb_serial_port(dev);
struct ftdi_private *priv = usb_get_serial_port_data(port);
enum ftdi_chip_type type = priv->chip_type;
if (attr == &dev_attr_event_char.attr) {
if (type == SIO)
return 0;
}
if (attr == &dev_attr_latency_timer.attr) {
if (type == SIO || type == FT232A)
return 0;
}
return attr->mode;
}
static const struct attribute_group ftdi_group = {
.attrs = ftdi_attrs,
.is_visible = ftdi_is_visible,
};
static const struct attribute_group *ftdi_groups[] = {
&ftdi_group,
NULL
};
#ifdef CONFIG_GPIOLIB
static int ftdi_set_bitmode(struct usb_serial_port *port, u8 mode)
{
struct ftdi_private *priv = usb_get_serial_port_data(port);
struct usb_serial *serial = port->serial;
int result;
u16 val;
result = usb_autopm_get_interface(serial->interface);
if (result)
return result;
val = (mode << 8) | (priv->gpio_output << 4) | priv->gpio_value;
result = usb_control_msg(serial->dev,
usb_sndctrlpipe(serial->dev, 0),
FTDI_SIO_SET_BITMODE_REQUEST,
FTDI_SIO_SET_BITMODE_REQUEST_TYPE, val,
priv->channel, NULL, 0, WDR_TIMEOUT);
if (result < 0) {
dev_err(&serial->interface->dev,
"bitmode request failed for value 0x%04x: %d\n",
val, result);
}
usb_autopm_put_interface(serial->interface);
return result;
}
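/*
 * The request value built above places the mode in the high byte, the CBUS
 * direction mask in bits 7..4 and the pin levels in bits 3..0. For example,
 * with only CBUS2 configured as an output and driven high, the CBUS bitmode
 * request would carry (FTDI_SIO_BITMODE_CBUS << 8) | (0x4 << 4) | 0x4.
 */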
static int ftdi_set_cbus_pins(struct usb_serial_port *port)
{
return ftdi_set_bitmode(port, FTDI_SIO_BITMODE_CBUS);
}
static int ftdi_exit_cbus_mode(struct usb_serial_port *port)
{
struct ftdi_private *priv = usb_get_serial_port_data(port);
priv->gpio_output = 0;
priv->gpio_value = 0;
return ftdi_set_bitmode(port, FTDI_SIO_BITMODE_RESET);
}
static int ftdi_gpio_request(struct gpio_chip *gc, unsigned int offset)
{
struct usb_serial_port *port = gpiochip_get_data(gc);
struct ftdi_private *priv = usb_get_serial_port_data(port);
int result;
mutex_lock(&priv->gpio_lock);
if (!priv->gpio_used) {
/* Set default pin states, as we cannot get them from device */
priv->gpio_output = 0x00;
priv->gpio_value = 0x00;
result = ftdi_set_cbus_pins(port);
if (result) {
mutex_unlock(&priv->gpio_lock);
return result;
}
priv->gpio_used = true;
}
mutex_unlock(&priv->gpio_lock);
return 0;
}
static int ftdi_read_cbus_pins(struct usb_serial_port *port)
{
struct ftdi_private *priv = usb_get_serial_port_data(port);
struct usb_serial *serial = port->serial;
u8 buf;
int result;
result = usb_autopm_get_interface(serial->interface);
if (result)
return result;
result = usb_control_msg_recv(serial->dev, 0,
FTDI_SIO_READ_PINS_REQUEST,
FTDI_SIO_READ_PINS_REQUEST_TYPE, 0,
priv->channel, &buf, 1, WDR_TIMEOUT,
GFP_KERNEL);
if (result == 0)
result = buf;
usb_autopm_put_interface(serial->interface);
return result;
}
static int ftdi_gpio_get(struct gpio_chip *gc, unsigned int gpio)
{
struct usb_serial_port *port = gpiochip_get_data(gc);
int result;
result = ftdi_read_cbus_pins(port);
if (result < 0)
return result;
return !!(result & BIT(gpio));
}
static void ftdi_gpio_set(struct gpio_chip *gc, unsigned int gpio, int value)
{
struct usb_serial_port *port = gpiochip_get_data(gc);
struct ftdi_private *priv = usb_get_serial_port_data(port);
mutex_lock(&priv->gpio_lock);
if (value)
priv->gpio_value |= BIT(gpio);
else
priv->gpio_value &= ~BIT(gpio);
ftdi_set_cbus_pins(port);
mutex_unlock(&priv->gpio_lock);
}
static int ftdi_gpio_get_multiple(struct gpio_chip *gc, unsigned long *mask,
unsigned long *bits)
{
struct usb_serial_port *port = gpiochip_get_data(gc);
int result;
result = ftdi_read_cbus_pins(port);
if (result < 0)
return result;
*bits = result & *mask;
return 0;
}
static void ftdi_gpio_set_multiple(struct gpio_chip *gc, unsigned long *mask,
unsigned long *bits)
{
struct usb_serial_port *port = gpiochip_get_data(gc);
struct ftdi_private *priv = usb_get_serial_port_data(port);
mutex_lock(&priv->gpio_lock);
priv->gpio_value &= ~(*mask);
priv->gpio_value |= *bits & *mask;
ftdi_set_cbus_pins(port);
mutex_unlock(&priv->gpio_lock);
}
static int ftdi_gpio_direction_get(struct gpio_chip *gc, unsigned int gpio)
{
struct usb_serial_port *port = gpiochip_get_data(gc);
struct ftdi_private *priv = usb_get_serial_port_data(port);
return !(priv->gpio_output & BIT(gpio));
}
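/*
 * gpiolib expects get_direction() to return 1 for an input and 0 for an
 * output (GPIO_LINE_DIRECTION_IN/OUT), hence the negated output mask above.
 */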
static int ftdi_gpio_direction_input(struct gpio_chip *gc, unsigned int gpio)
{
struct usb_serial_port *port = gpiochip_get_data(gc);
struct ftdi_private *priv = usb_get_serial_port_data(port);
int result;
mutex_lock(&priv->gpio_lock);
priv->gpio_output &= ~BIT(gpio);
result = ftdi_set_cbus_pins(port);
mutex_unlock(&priv->gpio_lock);
return result;
}
static int ftdi_gpio_direction_output(struct gpio_chip *gc, unsigned int gpio,
int value)
{
struct usb_serial_port *port = gpiochip_get_data(gc);
struct ftdi_private *priv = usb_get_serial_port_data(port);
int result;
mutex_lock(&priv->gpio_lock);
priv->gpio_output |= BIT(gpio);
if (value)
priv->gpio_value |= BIT(gpio);
else
priv->gpio_value &= ~BIT(gpio);
result = ftdi_set_cbus_pins(port);
mutex_unlock(&priv->gpio_lock);
return result;
}
static int ftdi_gpio_init_valid_mask(struct gpio_chip *gc,
unsigned long *valid_mask,
unsigned int ngpios)
{
struct usb_serial_port *port = gpiochip_get_data(gc);
struct ftdi_private *priv = usb_get_serial_port_data(port);
unsigned long map = priv->gpio_altfunc;
bitmap_complement(valid_mask, &map, ngpios);
if (bitmap_empty(valid_mask, ngpios))
dev_dbg(&port->dev, "no CBUS pin configured for GPIO\n");
else
dev_dbg(&port->dev, "CBUS%*pbl configured for GPIO\n", ngpios,
valid_mask);
return 0;
}
static int ftdi_read_eeprom(struct usb_serial *serial, void *dst, u16 addr,
u16 nbytes)
{
int read = 0;
if (addr % 2 != 0)
return -EINVAL;
if (nbytes % 2 != 0)
return -EINVAL;
/* Read EEPROM two bytes at a time */
while (read < nbytes) {
int rv;
rv = usb_control_msg(serial->dev,
usb_rcvctrlpipe(serial->dev, 0),
FTDI_SIO_READ_EEPROM_REQUEST,
FTDI_SIO_READ_EEPROM_REQUEST_TYPE,
0, (addr + read) / 2, dst + read, 2,
WDR_TIMEOUT);
if (rv < 2) {
if (rv >= 0)
return -EIO;
else
return rv;
}
read += rv;
}
return 0;
}
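/*
 * The EEPROM is word addressed: wIndex carries the 16-bit word index, hence
 * the (addr + read) / 2 above and the requirement that both addr and nbytes
 * be even.
 */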
static int ftdi_gpio_init_ft232h(struct usb_serial_port *port)
{
struct ftdi_private *priv = usb_get_serial_port_data(port);
u16 cbus_config;
u8 *buf;
int ret;
int i;
buf = kmalloc(4, GFP_KERNEL);
if (!buf)
return -ENOMEM;
ret = ftdi_read_eeprom(port->serial, buf, 0x1a, 4);
if (ret < 0)
goto out_free;
/*
* FT232H CBUS Memory Map
*
* 0x1a: X- (upper nibble -> AC5)
* 0x1b: -X (lower nibble -> AC6)
* 0x1c: XX (upper nibble -> AC9 | lower nibble -> AC8)
*/
cbus_config = buf[2] << 8 | (buf[1] & 0xf) << 4 | (buf[0] & 0xf0) >> 4;
priv->gc.ngpio = 4;
priv->gpio_altfunc = 0xff;
for (i = 0; i < priv->gc.ngpio; ++i) {
if ((cbus_config & 0xf) == FTDI_FTX_CBUS_MUX_GPIO)
priv->gpio_altfunc &= ~BIT(i);
cbus_config >>= 4;
}
out_free:
kfree(buf);
return ret;
}
static int ftdi_gpio_init_ft232r(struct usb_serial_port *port)
{
struct ftdi_private *priv = usb_get_serial_port_data(port);
u16 cbus_config;
u8 *buf;
int ret;
int i;
buf = kmalloc(2, GFP_KERNEL);
if (!buf)
return -ENOMEM;
ret = ftdi_read_eeprom(port->serial, buf, 0x14, 2);
if (ret < 0)
goto out_free;
cbus_config = le16_to_cpup((__le16 *)buf);
dev_dbg(&port->dev, "cbus_config = 0x%04x\n", cbus_config);
priv->gc.ngpio = 4;
priv->gpio_altfunc = 0xff;
for (i = 0; i < priv->gc.ngpio; ++i) {
if ((cbus_config & 0xf) == FTDI_FT232R_CBUS_MUX_GPIO)
priv->gpio_altfunc &= ~BIT(i);
cbus_config >>= 4;
}
out_free:
kfree(buf);
return ret;
}
static int ftdi_gpio_init_ftx(struct usb_serial_port *port)
{
struct ftdi_private *priv = usb_get_serial_port_data(port);
struct usb_serial *serial = port->serial;
const u16 cbus_cfg_addr = 0x1a;
const u16 cbus_cfg_size = 4;
u8 *cbus_cfg_buf;
int result;
u8 i;
cbus_cfg_buf = kmalloc(cbus_cfg_size, GFP_KERNEL);
if (!cbus_cfg_buf)
return -ENOMEM;
result = ftdi_read_eeprom(serial, cbus_cfg_buf,
cbus_cfg_addr, cbus_cfg_size);
if (result < 0)
goto out_free;
/* FIXME: FT234XD alone has 1 GPIO, but how to recognize this IC? */
priv->gc.ngpio = 4;
/* Determine which pins are configured for CBUS bitbanging */
priv->gpio_altfunc = 0xff;
for (i = 0; i < priv->gc.ngpio; ++i) {
if (cbus_cfg_buf[i] == FTDI_FTX_CBUS_MUX_GPIO)
priv->gpio_altfunc &= ~BIT(i);
}
out_free:
kfree(cbus_cfg_buf);
return result;
}
static int ftdi_gpio_init(struct usb_serial_port *port)
{
struct ftdi_private *priv = usb_get_serial_port_data(port);
struct usb_serial *serial = port->serial;
int result;
switch (priv->chip_type) {
case FT232H:
result = ftdi_gpio_init_ft232h(port);
break;
case FT232R:
result = ftdi_gpio_init_ft232r(port);
break;
case FTX:
result = ftdi_gpio_init_ftx(port);
break;
default:
return 0;
}
if (result < 0)
return result;
mutex_init(&priv->gpio_lock);
priv->gc.label = "ftdi-cbus";
priv->gc.request = ftdi_gpio_request;
priv->gc.get_direction = ftdi_gpio_direction_get;
priv->gc.direction_input = ftdi_gpio_direction_input;
priv->gc.direction_output = ftdi_gpio_direction_output;
priv->gc.init_valid_mask = ftdi_gpio_init_valid_mask;
priv->gc.get = ftdi_gpio_get;
priv->gc.set = ftdi_gpio_set;
priv->gc.get_multiple = ftdi_gpio_get_multiple;
priv->gc.set_multiple = ftdi_gpio_set_multiple;
priv->gc.owner = THIS_MODULE;
priv->gc.parent = &serial->interface->dev;
priv->gc.base = -1;
priv->gc.can_sleep = true;
result = gpiochip_add_data(&priv->gc, port);
if (!result)
priv->gpio_registered = true;
return result;
}
static void ftdi_gpio_remove(struct usb_serial_port *port)
{
struct ftdi_private *priv = usb_get_serial_port_data(port);
if (priv->gpio_registered) {
gpiochip_remove(&priv->gc);
priv->gpio_registered = false;
}
if (priv->gpio_used) {
/* Exiting CBUS-mode does not reset pin states. */
ftdi_exit_cbus_mode(port);
priv->gpio_used = false;
}
}
#else
static int ftdi_gpio_init(struct usb_serial_port *port)
{
return 0;
}
static void ftdi_gpio_remove(struct usb_serial_port *port) { }
#endif /* CONFIG_GPIOLIB */
/*
* ***************************************************************************
* FTDI driver specific functions
* ***************************************************************************
*/
static int ftdi_probe(struct usb_serial *serial, const struct usb_device_id *id)
{
const struct ftdi_quirk *quirk = (struct ftdi_quirk *)id->driver_info;
if (quirk && quirk->probe) {
int ret = quirk->probe(serial);
if (ret != 0)
return ret;
}
usb_set_serial_data(serial, (void *)id->driver_info);
return 0;
}
static int ftdi_port_probe(struct usb_serial_port *port)
{
const struct ftdi_quirk *quirk = usb_get_serial_data(port->serial);
struct ftdi_private *priv;
int result;
priv = kzalloc(sizeof(struct ftdi_private), GFP_KERNEL);
if (!priv)
return -ENOMEM;
mutex_init(&priv->cfg_lock);
if (quirk && quirk->port_probe)
quirk->port_probe(priv);
usb_set_serial_port_data(port, priv);
result = ftdi_determine_type(port);
if (result)
goto err_free;
ftdi_set_max_packet_size(port);
if (read_latency_timer(port) < 0)
priv->latency = 16;
write_latency_timer(port);
result = ftdi_gpio_init(port);
if (result < 0) {
dev_err(&port->serial->interface->dev,
"GPIO initialisation failed: %d\n",
result);
}
return 0;
err_free:
kfree(priv);
return result;
}
/* Setup for the USB-UIRT device, which requires hardwired
* baudrate (38400 gets mapped to 312500) */
/* Called from usbserial:serial_probe */
static void ftdi_USB_UIRT_setup(struct ftdi_private *priv)
{
priv->flags |= ASYNC_SPD_CUST;
priv->custom_divisor = 77;
priv->force_baud = 38400;
}
/* Setup for the HE-TIRA1 device, which requires hardwired
* baudrate (38400 gets mapped to 100000) and RTS-CTS enabled. */
static void ftdi_HE_TIRA1_setup(struct ftdi_private *priv)
{
priv->flags |= ASYNC_SPD_CUST;
priv->custom_divisor = 240;
priv->force_baud = 38400;
priv->force_rtscts = 1;
}
/*
* Module parameter to control latency timer for NDI FTDI-based USB devices.
 * If this value is not set in /etc/modprobe.d/ it defaults to 1 ms.
*/
static int ndi_latency_timer = 1;
/* Setup for the NDI FTDI-based USB devices, which requires hardwired
* baudrate (19200 gets mapped to 1200000).
*
* Called from usbserial:serial_probe.
*/
static int ftdi_NDI_device_setup(struct usb_serial *serial)
{
struct usb_device *udev = serial->dev;
int latency = ndi_latency_timer;
if (latency == 0)
latency = 1;
if (latency > 99)
latency = 99;
dev_dbg(&udev->dev, "%s setting NDI device latency to %d\n", __func__, latency);
dev_info(&udev->dev, "NDI device with a latency value of %d\n", latency);
/* FIXME: errors are not returned */
usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
FTDI_SIO_SET_LATENCY_TIMER_REQUEST,
FTDI_SIO_SET_LATENCY_TIMER_REQUEST_TYPE,
latency, 0, NULL, 0, WDR_TIMEOUT);
return 0;
}
/*
* First port on JTAG adaptors such as Olimex arm-usb-ocd or the FIC/OpenMoko
* Neo1973 Debug Board is reserved for JTAG interface and can be accessed from
* userspace using openocd.
*/
static int ftdi_jtag_probe(struct usb_serial *serial)
{
struct usb_interface *intf = serial->interface;
int ifnum = intf->cur_altsetting->desc.bInterfaceNumber;
if (ifnum == 0) {
dev_info(&intf->dev, "Ignoring interface reserved for JTAG\n");
return -ENODEV;
}
return 0;
}
static int ftdi_8u2232c_probe(struct usb_serial *serial)
{
struct usb_device *udev = serial->dev;
if (udev->manufacturer && !strcmp(udev->manufacturer, "CALAO Systems"))
return ftdi_jtag_probe(serial);
if (udev->product &&
(!strcmp(udev->product, "Arrow USB Blaster") ||
!strcmp(udev->product, "BeagleBone/XDS100V2") ||
!strcmp(udev->product, "SNAP Connect E10")))
return ftdi_jtag_probe(serial);
return 0;
}
/*
* First two ports on JTAG adaptors using an FT4232 such as STMicroelectronics's
* ST Micro Connect Lite are reserved for JTAG or other non-UART interfaces and
* can be accessed from userspace.
* The next two ports are enabled as UARTs by default, where port 2 is
* a conventional RS-232 UART.
*/
static int ftdi_stmclite_probe(struct usb_serial *serial)
{
struct usb_interface *intf = serial->interface;
int ifnum = intf->cur_altsetting->desc.bInterfaceNumber;
if (ifnum < 2) {
dev_info(&intf->dev, "Ignoring interface reserved for JTAG\n");
return -ENODEV;
}
return 0;
}
static void ftdi_port_remove(struct usb_serial_port *port)
{
struct ftdi_private *priv = usb_get_serial_port_data(port);
ftdi_gpio_remove(port);
kfree(priv);
}
static int ftdi_open(struct tty_struct *tty, struct usb_serial_port *port)
{
struct usb_device *dev = port->serial->dev;
struct ftdi_private *priv = usb_get_serial_port_data(port);
/* No error checking for this (will get errors later anyway) */
/* See ftdi_sio.h for description of what is reset */
usb_control_msg(dev, usb_sndctrlpipe(dev, 0),
FTDI_SIO_RESET_REQUEST, FTDI_SIO_RESET_REQUEST_TYPE,
FTDI_SIO_RESET_SIO,
priv->channel, NULL, 0, WDR_TIMEOUT);
	/* Termios defaults are set by usb_serial_init. We don't change
	   port->tty->termios - this would lose speed settings, etc.
	   This is the same behaviour as serial.c/rs_open() - Kuba */
/* ftdi_set_termios will send usb control messages */
if (tty)
ftdi_set_termios(tty, port, NULL);
return usb_serial_generic_open(tty, port);
}
static void ftdi_dtr_rts(struct usb_serial_port *port, int on)
{
struct ftdi_private *priv = usb_get_serial_port_data(port);
/* Disable flow control */
if (!on) {
if (usb_control_msg(port->serial->dev,
usb_sndctrlpipe(port->serial->dev, 0),
FTDI_SIO_SET_FLOW_CTRL_REQUEST,
FTDI_SIO_SET_FLOW_CTRL_REQUEST_TYPE,
0, priv->channel, NULL, 0,
WDR_TIMEOUT) < 0) {
dev_err(&port->dev, "error from flowcontrol urb\n");
}
}
/* drop RTS and DTR */
if (on)
set_mctrl(port, TIOCM_DTR | TIOCM_RTS);
else
clear_mctrl(port, TIOCM_DTR | TIOCM_RTS);
}
/* The SIO requires the first byte to have:
* B0 1
* B1 0
* B2..7 length of message excluding byte 0
*
* The new devices do not require this byte
*/
static int ftdi_prepare_write_buffer(struct usb_serial_port *port,
void *dest, size_t size)
{
struct ftdi_private *priv;
int count;
unsigned long flags;
priv = usb_get_serial_port_data(port);
if (priv->chip_type == SIO) {
unsigned char *buffer = dest;
int i, len, c;
count = 0;
spin_lock_irqsave(&port->lock, flags);
for (i = 0; i < size - 1; i += priv->max_packet_size) {
len = min_t(int, size - i, priv->max_packet_size) - 1;
c = kfifo_out(&port->write_fifo, &buffer[i + 1], len);
if (!c)
break;
port->icount.tx += c;
buffer[i] = (c << 2) + 1;
count += c + 1;
}
spin_unlock_irqrestore(&port->lock, flags);
} else {
count = kfifo_out_locked(&port->write_fifo, dest, size,
&port->lock);
port->icount.tx += count;
}
return count;
}
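/*
 * Example of the SIO header described above: a chunk carrying 12 data bytes
 * is prefixed with (12 << 2) | 1 = 0x31, i.e. B0 set and the length in
 * bits 2..7.
 */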
#define FTDI_RS_ERR_MASK (FTDI_RS_BI | FTDI_RS_PE | FTDI_RS_FE | FTDI_RS_OE)
static int ftdi_process_packet(struct usb_serial_port *port,
struct ftdi_private *priv, unsigned char *buf, int len)
{
unsigned char status;
bool brkint = false;
int i;
char flag;
if (len < 2) {
dev_dbg(&port->dev, "malformed packet\n");
return 0;
}
	/* Compare new line status to the old one, signal if different.
	   N.B. packet may be processed more than once, but differences
	   are only processed once. */
status = buf[0] & FTDI_STATUS_B0_MASK;
if (status != priv->prev_status) {
char diff_status = status ^ priv->prev_status;
if (diff_status & FTDI_RS0_CTS)
port->icount.cts++;
if (diff_status & FTDI_RS0_DSR)
port->icount.dsr++;
if (diff_status & FTDI_RS0_RI)
port->icount.rng++;
if (diff_status & FTDI_RS0_RLSD) {
struct tty_struct *tty;
port->icount.dcd++;
tty = tty_port_tty_get(&port->port);
if (tty)
usb_serial_handle_dcd_change(port, tty,
status & FTDI_RS0_RLSD);
tty_kref_put(tty);
}
wake_up_interruptible(&port->port.delta_msr_wait);
priv->prev_status = status;
}
/* save if the transmitter is empty or not */
if (buf[1] & FTDI_RS_TEMT)
priv->transmit_empty = 1;
else
priv->transmit_empty = 0;
if (len == 2)
return 0; /* status only */
/*
* Break and error status must only be processed for packets with
* data payload to avoid over-reporting.
*/
flag = TTY_NORMAL;
if (buf[1] & FTDI_RS_ERR_MASK) {
/*
* Break takes precedence over parity, which takes precedence
* over framing errors. Note that break is only associated
* with the last character in the buffer and only when it's a
* NUL.
*/
if (buf[1] & FTDI_RS_BI && buf[len - 1] == '\0') {
port->icount.brk++;
brkint = true;
}
if (buf[1] & FTDI_RS_PE) {
flag = TTY_PARITY;
port->icount.parity++;
} else if (buf[1] & FTDI_RS_FE) {
flag = TTY_FRAME;
port->icount.frame++;
}
/* Overrun is special, not associated with a char */
if (buf[1] & FTDI_RS_OE) {
port->icount.overrun++;
tty_insert_flip_char(&port->port, 0, TTY_OVERRUN);
}
}
port->icount.rx += len - 2;
if (brkint || port->sysrq) {
for (i = 2; i < len; i++) {
if (brkint && i == len - 1) {
if (usb_serial_handle_break(port))
return len - 3;
flag = TTY_BREAK;
}
if (usb_serial_handle_sysrq_char(port, buf[i]))
continue;
tty_insert_flip_char(&port->port, buf[i], flag);
}
} else {
tty_insert_flip_string_fixed_flag(&port->port, buf + 2, flag,
len - 2);
}
return len - 2;
}
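/*
 * Every packet handled above starts with a two-byte status header: buf[0]
 * carries the modem-status bits (CTS/DSR/RI/RLSD) and buf[1] the line-status
 * bits (BI/PE/FE/OE and TEMT); any data payload starts at buf[2].
 */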
static void ftdi_process_read_urb(struct urb *urb)
{
struct usb_serial_port *port = urb->context;
struct ftdi_private *priv = usb_get_serial_port_data(port);
char *data = urb->transfer_buffer;
int i;
int len;
int count = 0;
for (i = 0; i < urb->actual_length; i += priv->max_packet_size) {
len = min_t(int, urb->actual_length - i, priv->max_packet_size);
count += ftdi_process_packet(port, priv, &data[i], len);
}
if (count)
tty_flip_buffer_push(&port->port);
}
static int ftdi_break_ctl(struct tty_struct *tty, int break_state)
{
struct usb_serial_port *port = tty->driver_data;
struct ftdi_private *priv = usb_get_serial_port_data(port);
u16 value;
int ret;
/* break_state = -1 to turn on break, and 0 to turn off break */
	/* see drivers/tty/tty_io.c to see how it is used */
/* last_set_data_value NEVER has the break bit set in it */
if (break_state)
value = priv->last_set_data_value | FTDI_SIO_SET_BREAK;
else
value = priv->last_set_data_value;
ret = usb_control_msg(port->serial->dev,
usb_sndctrlpipe(port->serial->dev, 0),
FTDI_SIO_SET_DATA_REQUEST,
FTDI_SIO_SET_DATA_REQUEST_TYPE,
value, priv->channel,
NULL, 0, WDR_TIMEOUT);
if (ret < 0) {
dev_err(&port->dev, "%s FAILED to enable/disable break state (state was %d)\n",
__func__, break_state);
return ret;
}
dev_dbg(&port->dev, "%s break state is %d - urb is %d\n", __func__,
break_state, value);
return 0;
}
static bool ftdi_tx_empty(struct usb_serial_port *port)
{
unsigned char buf[2];
int ret;
ret = ftdi_get_modem_status(port, buf);
if (ret == 2) {
if (!(buf[1] & FTDI_RS_TEMT))
return false;
}
return true;
}
/* old_termios contains the original termios settings and tty->termios contains
* the new setting to be used
* WARNING: set_termios calls this with old_termios in kernel space
*/
static void ftdi_set_termios(struct tty_struct *tty,
struct usb_serial_port *port,
const struct ktermios *old_termios)
{
struct usb_device *dev = port->serial->dev;
struct device *ddev = &port->dev;
struct ftdi_private *priv = usb_get_serial_port_data(port);
struct ktermios *termios = &tty->termios;
unsigned int cflag = termios->c_cflag;
u16 value, index;
int ret;
/* Force baud rate if this device requires it, unless it is set to
B0. */
if (priv->force_baud && ((termios->c_cflag & CBAUD) != B0)) {
dev_dbg(ddev, "%s: forcing baud rate for this device\n", __func__);
tty_encode_baud_rate(tty, priv->force_baud,
priv->force_baud);
}
/* Force RTS-CTS if this device requires it. */
if (priv->force_rtscts) {
dev_dbg(ddev, "%s: forcing rtscts for this device\n", __func__);
termios->c_cflag |= CRTSCTS;
}
/*
* All FTDI UART chips are limited to CS7/8. We shouldn't pretend to
* support CS5/6 and revert the CSIZE setting instead.
*
* CS5 however is used to control some smartcard readers which abuse
* this limitation to switch modes. Original FTDI chips fall back to
* eight data bits.
*
* TODO: Implement a quirk to only allow this with mentioned
* readers. One I know of (Argolis Smartreader V1)
* returns "USB smartcard server" as iInterface string.
* The vendor didn't bother with a custom VID/PID of
* course.
*/
if (C_CSIZE(tty) == CS6) {
dev_warn(ddev, "requested CSIZE setting not supported\n");
termios->c_cflag &= ~CSIZE;
if (old_termios)
termios->c_cflag |= old_termios->c_cflag & CSIZE;
else
termios->c_cflag |= CS8;
}
cflag = termios->c_cflag;
if (!old_termios)
goto no_skip;
if (old_termios->c_cflag == termios->c_cflag
&& old_termios->c_ispeed == termios->c_ispeed
&& old_termios->c_ospeed == termios->c_ospeed)
goto no_c_cflag_changes;
/* NOTE These routines can get interrupted by
ftdi_sio_read_bulk_callback - need to examine what this means -
don't see any problems yet */
if ((old_termios->c_cflag & (CSIZE|PARODD|PARENB|CMSPAR|CSTOPB)) ==
(termios->c_cflag & (CSIZE|PARODD|PARENB|CMSPAR|CSTOPB)))
goto no_data_parity_stop_changes;
no_skip:
/* Set number of data bits, parity, stop bits */
value = 0;
value |= (cflag & CSTOPB ? FTDI_SIO_SET_DATA_STOP_BITS_2 :
FTDI_SIO_SET_DATA_STOP_BITS_1);
if (cflag & PARENB) {
if (cflag & CMSPAR)
value |= cflag & PARODD ?
FTDI_SIO_SET_DATA_PARITY_MARK :
FTDI_SIO_SET_DATA_PARITY_SPACE;
else
value |= cflag & PARODD ?
FTDI_SIO_SET_DATA_PARITY_ODD :
FTDI_SIO_SET_DATA_PARITY_EVEN;
} else {
value |= FTDI_SIO_SET_DATA_PARITY_NONE;
}
switch (cflag & CSIZE) {
case CS5:
dev_dbg(ddev, "Setting CS5 quirk\n");
break;
case CS7:
value |= 7;
dev_dbg(ddev, "Setting CS7\n");
break;
default:
case CS8:
value |= 8;
dev_dbg(ddev, "Setting CS8\n");
break;
}
/* This is needed by the break command since it uses the same command
- but is or'ed with this value */
priv->last_set_data_value = value;
if (usb_control_msg(dev, usb_sndctrlpipe(dev, 0),
FTDI_SIO_SET_DATA_REQUEST,
FTDI_SIO_SET_DATA_REQUEST_TYPE,
value, priv->channel,
NULL, 0, WDR_SHORT_TIMEOUT) < 0) {
dev_err(ddev, "%s FAILED to set databits/stopbits/parity\n",
__func__);
}
/* Now do the baudrate */
no_data_parity_stop_changes:
if ((cflag & CBAUD) == B0) {
/* Disable flow control */
if (usb_control_msg(dev, usb_sndctrlpipe(dev, 0),
FTDI_SIO_SET_FLOW_CTRL_REQUEST,
FTDI_SIO_SET_FLOW_CTRL_REQUEST_TYPE,
0, priv->channel,
NULL, 0, WDR_TIMEOUT) < 0) {
dev_err(ddev, "%s error from disable flowcontrol urb\n",
__func__);
}
/* Drop RTS and DTR */
clear_mctrl(port, TIOCM_DTR | TIOCM_RTS);
} else {
/* set the baudrate determined before */
mutex_lock(&priv->cfg_lock);
if (change_speed(tty, port))
dev_err(ddev, "%s urb failed to set baudrate\n", __func__);
mutex_unlock(&priv->cfg_lock);
/* Ensure RTS and DTR are raised when baudrate changed from 0 */
if (old_termios && (old_termios->c_cflag & CBAUD) == B0)
set_mctrl(port, TIOCM_DTR | TIOCM_RTS);
}
no_c_cflag_changes:
/* Set hardware-assisted flow control */
value = 0;
if (C_CRTSCTS(tty)) {
dev_dbg(&port->dev, "enabling rts/cts flow control\n");
index = FTDI_SIO_RTS_CTS_HS;
} else if (I_IXON(tty)) {
dev_dbg(&port->dev, "enabling xon/xoff flow control\n");
index = FTDI_SIO_XON_XOFF_HS;
value = STOP_CHAR(tty) << 8 | START_CHAR(tty);
} else {
dev_dbg(&port->dev, "disabling flow control\n");
index = FTDI_SIO_DISABLE_FLOW_CTRL;
}
index |= priv->channel;
ret = usb_control_msg(dev, usb_sndctrlpipe(dev, 0),
FTDI_SIO_SET_FLOW_CTRL_REQUEST,
FTDI_SIO_SET_FLOW_CTRL_REQUEST_TYPE,
value, index, NULL, 0, WDR_TIMEOUT);
if (ret < 0)
dev_err(&port->dev, "failed to set flow control: %d\n", ret);
}
/*
* Get modem-control status.
*
 * Returns the number of status bytes retrieved (device dependent), or
* negative error code.
*/
static int ftdi_get_modem_status(struct usb_serial_port *port,
unsigned char status[2])
{
struct ftdi_private *priv = usb_get_serial_port_data(port);
unsigned char *buf;
int len;
int ret;
buf = kmalloc(2, GFP_KERNEL);
if (!buf)
return -ENOMEM;
/*
* The device returns a two byte value (the SIO a 1 byte value) in the
* same format as the data returned from the IN endpoint.
*/
if (priv->chip_type == SIO)
len = 1;
else
len = 2;
ret = usb_control_msg(port->serial->dev,
usb_rcvctrlpipe(port->serial->dev, 0),
FTDI_SIO_GET_MODEM_STATUS_REQUEST,
FTDI_SIO_GET_MODEM_STATUS_REQUEST_TYPE,
0, priv->channel,
buf, len, WDR_TIMEOUT);
/* NOTE: We allow short responses and handle that below. */
if (ret < 1) {
dev_err(&port->dev, "failed to get modem status: %d\n", ret);
if (ret >= 0)
ret = -EIO;
ret = usb_translate_errors(ret);
goto out;
}
status[0] = buf[0];
if (ret > 1)
status[1] = buf[1];
else
status[1] = 0;
dev_dbg(&port->dev, "%s - 0x%02x%02x\n", __func__, status[0],
status[1]);
out:
kfree(buf);
return ret;
}
static int ftdi_tiocmget(struct tty_struct *tty)
{
struct usb_serial_port *port = tty->driver_data;
struct ftdi_private *priv = usb_get_serial_port_data(port);
unsigned char buf[2];
int ret;
ret = ftdi_get_modem_status(port, buf);
if (ret < 0)
return ret;
ret = (buf[0] & FTDI_SIO_DSR_MASK ? TIOCM_DSR : 0) |
(buf[0] & FTDI_SIO_CTS_MASK ? TIOCM_CTS : 0) |
(buf[0] & FTDI_SIO_RI_MASK ? TIOCM_RI : 0) |
(buf[0] & FTDI_SIO_RLSD_MASK ? TIOCM_CD : 0) |
priv->last_dtr_rts;
return ret;
}
static int ftdi_tiocmset(struct tty_struct *tty,
unsigned int set, unsigned int clear)
{
struct usb_serial_port *port = tty->driver_data;
return update_mctrl(port, set, clear);
}
static int ftdi_ioctl(struct tty_struct *tty,
unsigned int cmd, unsigned long arg)
{
struct usb_serial_port *port = tty->driver_data;
void __user *argp = (void __user *)arg;
switch (cmd) {
case TIOCSERGETLSR:
return get_lsr_info(port, argp);
default:
break;
}
return -ENOIOCTLCMD;
}
static struct usb_serial_driver ftdi_device = {
.driver = {
.owner = THIS_MODULE,
.name = "ftdi_sio",
.dev_groups = ftdi_groups,
},
.description = "FTDI USB Serial Device",
.id_table = id_table_combined,
.num_ports = 1,
.bulk_in_size = 512,
.bulk_out_size = 256,
.probe = ftdi_probe,
.port_probe = ftdi_port_probe,
.port_remove = ftdi_port_remove,
.open = ftdi_open,
.dtr_rts = ftdi_dtr_rts,
.throttle = usb_serial_generic_throttle,
.unthrottle = usb_serial_generic_unthrottle,
.process_read_urb = ftdi_process_read_urb,
.prepare_write_buffer = ftdi_prepare_write_buffer,
.tiocmget = ftdi_tiocmget,
.tiocmset = ftdi_tiocmset,
.tiocmiwait = usb_serial_generic_tiocmiwait,
.get_icount = usb_serial_generic_get_icount,
.ioctl = ftdi_ioctl,
.get_serial = get_serial_info,
.set_serial = set_serial_info,
.set_termios = ftdi_set_termios,
.break_ctl = ftdi_break_ctl,
.tx_empty = ftdi_tx_empty,
};
static struct usb_serial_driver * const serial_drivers[] = {
&ftdi_device, NULL
};
module_usb_serial_driver(serial_drivers, id_table_combined);
MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL");
module_param(ndi_latency_timer, int, 0644);
MODULE_PARM_DESC(ndi_latency_timer, "NDI device latency timer override");
| linux-master | drivers/usb/serial/ftdi_sio.c |
// SPDX-License-Identifier: GPL-2.0+
/*
Keyspan USB to Serial Converter driver
(C) Copyright (C) 2000-2001 Hugh Blemings <[email protected]>
(C) Copyright (C) 2002 Greg Kroah-Hartman <[email protected]>
See http://blemings.org/hugh/keyspan.html for more information.
Code in this driver inspired by and in a number of places taken
from Brian Warner's original Keyspan-PDA driver.
This driver has been put together with the support of Innosys, Inc.
and Keyspan, Inc the manufacturers of the Keyspan USB-serial products.
Thanks Guys :)
Thanks to Paulus for miscellaneous tidy ups, some largish chunks
of much nicer and/or completely new code and (perhaps most uniquely)
having the patience to sit down and explain why and where he'd changed
stuff.
Tip 'o the hat to IBM (and previously Linuxcare :) for supporting
staff in their work on open source projects.
*/
#include <linux/kernel.h>
#include <linux/jiffies.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/tty.h>
#include <linux/tty_driver.h>
#include <linux/tty_flip.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/uaccess.h>
#include <linux/usb.h>
#include <linux/usb/serial.h>
#include <linux/usb/ezusb.h>
#define DRIVER_AUTHOR "Hugh Blemings <[email protected]"
#define DRIVER_DESC "Keyspan USB to Serial Converter Driver"
static void keyspan_send_setup(struct usb_serial_port *port, int reset_port);
static int keyspan_usa19_calc_baud(struct usb_serial_port *port,
u32 baud_rate, u32 baudclk,
u8 *rate_hi, u8 *rate_low,
u8 *prescaler, int portnum);
static int keyspan_usa19w_calc_baud(struct usb_serial_port *port,
u32 baud_rate, u32 baudclk,
u8 *rate_hi, u8 *rate_low,
u8 *prescaler, int portnum);
static int keyspan_usa28_calc_baud(struct usb_serial_port *port,
u32 baud_rate, u32 baudclk,
u8 *rate_hi, u8 *rate_low,
u8 *prescaler, int portnum);
static int keyspan_usa19hs_calc_baud(struct usb_serial_port *port,
u32 baud_rate, u32 baudclk,
u8 *rate_hi, u8 *rate_low,
u8 *prescaler, int portnum);
static int keyspan_usa28_send_setup(struct usb_serial *serial,
struct usb_serial_port *port,
int reset_port);
static int keyspan_usa26_send_setup(struct usb_serial *serial,
struct usb_serial_port *port,
int reset_port);
static int keyspan_usa49_send_setup(struct usb_serial *serial,
struct usb_serial_port *port,
int reset_port);
static int keyspan_usa90_send_setup(struct usb_serial *serial,
struct usb_serial_port *port,
int reset_port);
static int keyspan_usa67_send_setup(struct usb_serial *serial,
struct usb_serial_port *port,
int reset_port);
/* Values used for baud rate calculation - device specific */
#define KEYSPAN_INVALID_BAUD_RATE (-1)
#define KEYSPAN_BAUD_RATE_OK (0)
#define KEYSPAN_USA18X_BAUDCLK (12000000L) /* a guess */
#define KEYSPAN_USA19_BAUDCLK (12000000L)
#define KEYSPAN_USA19W_BAUDCLK (24000000L)
#define KEYSPAN_USA19HS_BAUDCLK (14769231L)
#define KEYSPAN_USA28_BAUDCLK (1843200L)
#define KEYSPAN_USA28X_BAUDCLK (12000000L)
#define KEYSPAN_USA49W_BAUDCLK (48000000L)
/* Some constants used to characterise each device. */
#define KEYSPAN_MAX_NUM_PORTS (4)
#define KEYSPAN_MAX_FLIPS (2)
/*
* Device info for the Keyspan serial converter, used by the overall
* usb-serial probe function.
*/
#define KEYSPAN_VENDOR_ID (0x06cd)
/* Product IDs for the products supported, pre-renumeration */
#define keyspan_usa18x_pre_product_id 0x0105
#define keyspan_usa19_pre_product_id 0x0103
#define keyspan_usa19qi_pre_product_id 0x010b
#define keyspan_mpr_pre_product_id 0x011b
#define keyspan_usa19qw_pre_product_id 0x0118
#define keyspan_usa19w_pre_product_id 0x0106
#define keyspan_usa28_pre_product_id 0x0101
#define keyspan_usa28x_pre_product_id 0x0102
#define keyspan_usa28xa_pre_product_id 0x0114
#define keyspan_usa28xb_pre_product_id 0x0113
#define keyspan_usa49w_pre_product_id 0x0109
#define keyspan_usa49wlc_pre_product_id 0x011a
/*
* Product IDs post-renumeration. Note that the 28x and 28xb have the same
* id's post-renumeration but behave identically so it's not an issue. As
* such, the 28xb is not listed in any of the device tables.
*/
#define keyspan_usa18x_product_id 0x0112
#define keyspan_usa19_product_id 0x0107
#define keyspan_usa19qi_product_id 0x010c
#define keyspan_usa19hs_product_id 0x0121
#define keyspan_mpr_product_id 0x011c
#define keyspan_usa19qw_product_id 0x0119
#define keyspan_usa19w_product_id 0x0108
#define keyspan_usa28_product_id 0x010f
#define keyspan_usa28x_product_id 0x0110
#define keyspan_usa28xa_product_id 0x0115
#define keyspan_usa28xb_product_id 0x0110
#define keyspan_usa28xg_product_id 0x0135
#define keyspan_usa49w_product_id 0x010a
#define keyspan_usa49wlc_product_id 0x012a
#define keyspan_usa49wg_product_id 0x0131
struct keyspan_device_details {
/* product ID value */
int product_id;
enum {msg_usa26, msg_usa28, msg_usa49, msg_usa90, msg_usa67} msg_format;
/* Number of physical ports */
int num_ports;
/* 1 if endpoint flipping used on input, 0 if not */
int indat_endp_flip;
/* 1 if endpoint flipping used on output, 0 if not */
int outdat_endp_flip;
/*
* Table mapping input data endpoint IDs to physical port
* number and flip if used
*/
int indat_endpoints[KEYSPAN_MAX_NUM_PORTS];
/* Same for output endpoints */
int outdat_endpoints[KEYSPAN_MAX_NUM_PORTS];
/* Input acknowledge endpoints */
int inack_endpoints[KEYSPAN_MAX_NUM_PORTS];
/* Output control endpoints */
int outcont_endpoints[KEYSPAN_MAX_NUM_PORTS];
/* Endpoint used for input status */
int instat_endpoint;
/* Endpoint used for input data 49WG only */
int indat_endpoint;
/* Endpoint used for global control functions */
int glocont_endpoint;
int (*calculate_baud_rate)(struct usb_serial_port *port,
u32 baud_rate, u32 baudclk,
u8 *rate_hi, u8 *rate_low, u8 *prescaler,
int portnum);
u32 baudclk;
};
/*
* Now for each device type we setup the device detail structure with the
* appropriate information (provided in Keyspan's documentation)
*/
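/*
 * In the endpoint tables below the usual USB addressing convention applies:
 * 0x8x entries are IN endpoints, 0x0x entries are OUT endpoints, and -1
 * marks an endpoint the adapter does not provide.
 */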
static const struct keyspan_device_details usa18x_device_details = {
.product_id = keyspan_usa18x_product_id,
.msg_format = msg_usa26,
.num_ports = 1,
.indat_endp_flip = 0,
.outdat_endp_flip = 1,
.indat_endpoints = {0x81},
.outdat_endpoints = {0x01},
.inack_endpoints = {0x85},
.outcont_endpoints = {0x05},
.instat_endpoint = 0x87,
.indat_endpoint = -1,
.glocont_endpoint = 0x07,
.calculate_baud_rate = keyspan_usa19w_calc_baud,
.baudclk = KEYSPAN_USA18X_BAUDCLK,
};
static const struct keyspan_device_details usa19_device_details = {
.product_id = keyspan_usa19_product_id,
.msg_format = msg_usa28,
.num_ports = 1,
.indat_endp_flip = 1,
.outdat_endp_flip = 1,
.indat_endpoints = {0x81},
.outdat_endpoints = {0x01},
.inack_endpoints = {0x83},
.outcont_endpoints = {0x03},
.instat_endpoint = 0x84,
.indat_endpoint = -1,
.glocont_endpoint = -1,
.calculate_baud_rate = keyspan_usa19_calc_baud,
.baudclk = KEYSPAN_USA19_BAUDCLK,
};
static const struct keyspan_device_details usa19qi_device_details = {
.product_id = keyspan_usa19qi_product_id,
.msg_format = msg_usa28,
.num_ports = 1,
.indat_endp_flip = 1,
.outdat_endp_flip = 1,
.indat_endpoints = {0x81},
.outdat_endpoints = {0x01},
.inack_endpoints = {0x83},
.outcont_endpoints = {0x03},
.instat_endpoint = 0x84,
.indat_endpoint = -1,
.glocont_endpoint = -1,
.calculate_baud_rate = keyspan_usa28_calc_baud,
.baudclk = KEYSPAN_USA19_BAUDCLK,
};
static const struct keyspan_device_details mpr_device_details = {
.product_id = keyspan_mpr_product_id,
.msg_format = msg_usa28,
.num_ports = 1,
.indat_endp_flip = 1,
.outdat_endp_flip = 1,
.indat_endpoints = {0x81},
.outdat_endpoints = {0x01},
.inack_endpoints = {0x83},
.outcont_endpoints = {0x03},
.instat_endpoint = 0x84,
.indat_endpoint = -1,
.glocont_endpoint = -1,
.calculate_baud_rate = keyspan_usa28_calc_baud,
.baudclk = KEYSPAN_USA19_BAUDCLK,
};
static const struct keyspan_device_details usa19qw_device_details = {
.product_id = keyspan_usa19qw_product_id,
.msg_format = msg_usa26,
.num_ports = 1,
.indat_endp_flip = 0,
.outdat_endp_flip = 1,
.indat_endpoints = {0x81},
.outdat_endpoints = {0x01},
.inack_endpoints = {0x85},
.outcont_endpoints = {0x05},
.instat_endpoint = 0x87,
.indat_endpoint = -1,
.glocont_endpoint = 0x07,
.calculate_baud_rate = keyspan_usa19w_calc_baud,
.baudclk = KEYSPAN_USA19W_BAUDCLK,
};
static const struct keyspan_device_details usa19w_device_details = {
.product_id = keyspan_usa19w_product_id,
.msg_format = msg_usa26,
.num_ports = 1,
.indat_endp_flip = 0,
.outdat_endp_flip = 1,
.indat_endpoints = {0x81},
.outdat_endpoints = {0x01},
.inack_endpoints = {0x85},
.outcont_endpoints = {0x05},
.instat_endpoint = 0x87,
.indat_endpoint = -1,
.glocont_endpoint = 0x07,
.calculate_baud_rate = keyspan_usa19w_calc_baud,
.baudclk = KEYSPAN_USA19W_BAUDCLK,
};
static const struct keyspan_device_details usa19hs_device_details = {
.product_id = keyspan_usa19hs_product_id,
.msg_format = msg_usa90,
.num_ports = 1,
.indat_endp_flip = 0,
.outdat_endp_flip = 0,
.indat_endpoints = {0x81},
.outdat_endpoints = {0x01},
.inack_endpoints = {-1},
.outcont_endpoints = {0x02},
.instat_endpoint = 0x82,
.indat_endpoint = -1,
.glocont_endpoint = -1,
.calculate_baud_rate = keyspan_usa19hs_calc_baud,
.baudclk = KEYSPAN_USA19HS_BAUDCLK,
};
static const struct keyspan_device_details usa28_device_details = {
.product_id = keyspan_usa28_product_id,
.msg_format = msg_usa28,
.num_ports = 2,
.indat_endp_flip = 1,
.outdat_endp_flip = 1,
.indat_endpoints = {0x81, 0x83},
.outdat_endpoints = {0x01, 0x03},
.inack_endpoints = {0x85, 0x86},
.outcont_endpoints = {0x05, 0x06},
.instat_endpoint = 0x87,
.indat_endpoint = -1,
.glocont_endpoint = 0x07,
.calculate_baud_rate = keyspan_usa28_calc_baud,
.baudclk = KEYSPAN_USA28_BAUDCLK,
};
static const struct keyspan_device_details usa28x_device_details = {
.product_id = keyspan_usa28x_product_id,
.msg_format = msg_usa26,
.num_ports = 2,
.indat_endp_flip = 0,
.outdat_endp_flip = 1,
.indat_endpoints = {0x81, 0x83},
.outdat_endpoints = {0x01, 0x03},
.inack_endpoints = {0x85, 0x86},
.outcont_endpoints = {0x05, 0x06},
.instat_endpoint = 0x87,
.indat_endpoint = -1,
.glocont_endpoint = 0x07,
.calculate_baud_rate = keyspan_usa19w_calc_baud,
.baudclk = KEYSPAN_USA28X_BAUDCLK,
};
static const struct keyspan_device_details usa28xa_device_details = {
.product_id = keyspan_usa28xa_product_id,
.msg_format = msg_usa26,
.num_ports = 2,
.indat_endp_flip = 0,
.outdat_endp_flip = 1,
.indat_endpoints = {0x81, 0x83},
.outdat_endpoints = {0x01, 0x03},
.inack_endpoints = {0x85, 0x86},
.outcont_endpoints = {0x05, 0x06},
.instat_endpoint = 0x87,
.indat_endpoint = -1,
.glocont_endpoint = 0x07,
.calculate_baud_rate = keyspan_usa19w_calc_baud,
.baudclk = KEYSPAN_USA28X_BAUDCLK,
};
static const struct keyspan_device_details usa28xg_device_details = {
.product_id = keyspan_usa28xg_product_id,
.msg_format = msg_usa67,
.num_ports = 2,
.indat_endp_flip = 0,
.outdat_endp_flip = 0,
.indat_endpoints = {0x84, 0x88},
.outdat_endpoints = {0x02, 0x06},
.inack_endpoints = {-1, -1},
.outcont_endpoints = {-1, -1},
.instat_endpoint = 0x81,
.indat_endpoint = -1,
.glocont_endpoint = 0x01,
.calculate_baud_rate = keyspan_usa19w_calc_baud,
.baudclk = KEYSPAN_USA28X_BAUDCLK,
};
/*
* We don't need a separate entry for the usa28xb as it appears as a 28x
* anyway.
*/
static const struct keyspan_device_details usa49w_device_details = {
.product_id = keyspan_usa49w_product_id,
.msg_format = msg_usa49,
.num_ports = 4,
.indat_endp_flip = 0,
.outdat_endp_flip = 0,
.indat_endpoints = {0x81, 0x82, 0x83, 0x84},
.outdat_endpoints = {0x01, 0x02, 0x03, 0x04},
.inack_endpoints = {-1, -1, -1, -1},
.outcont_endpoints = {-1, -1, -1, -1},
.instat_endpoint = 0x87,
.indat_endpoint = -1,
.glocont_endpoint = 0x07,
.calculate_baud_rate = keyspan_usa19w_calc_baud,
.baudclk = KEYSPAN_USA49W_BAUDCLK,
};
static const struct keyspan_device_details usa49wlc_device_details = {
.product_id = keyspan_usa49wlc_product_id,
.msg_format = msg_usa49,
.num_ports = 4,
.indat_endp_flip = 0,
.outdat_endp_flip = 0,
.indat_endpoints = {0x81, 0x82, 0x83, 0x84},
.outdat_endpoints = {0x01, 0x02, 0x03, 0x04},
.inack_endpoints = {-1, -1, -1, -1},
.outcont_endpoints = {-1, -1, -1, -1},
.instat_endpoint = 0x87,
.indat_endpoint = -1,
.glocont_endpoint = 0x07,
.calculate_baud_rate = keyspan_usa19w_calc_baud,
.baudclk = KEYSPAN_USA19W_BAUDCLK,
};
static const struct keyspan_device_details usa49wg_device_details = {
.product_id = keyspan_usa49wg_product_id,
.msg_format = msg_usa49,
.num_ports = 4,
.indat_endp_flip = 0,
.outdat_endp_flip = 0,
.indat_endpoints = {-1, -1, -1, -1}, /* single 'global' data in EP */
.outdat_endpoints = {0x01, 0x02, 0x04, 0x06},
.inack_endpoints = {-1, -1, -1, -1},
.outcont_endpoints = {-1, -1, -1, -1},
.instat_endpoint = 0x81,
.indat_endpoint = 0x88,
.glocont_endpoint = 0x00, /* uses control EP */
.calculate_baud_rate = keyspan_usa19w_calc_baud,
.baudclk = KEYSPAN_USA19W_BAUDCLK,
};
static const struct keyspan_device_details *keyspan_devices[] = {
&usa18x_device_details,
&usa19_device_details,
&usa19qi_device_details,
&mpr_device_details,
&usa19qw_device_details,
&usa19w_device_details,
&usa19hs_device_details,
&usa28_device_details,
&usa28x_device_details,
&usa28xa_device_details,
&usa28xg_device_details,
/* 28xb not required as it renumerates as a 28x */
&usa49w_device_details,
&usa49wlc_device_details,
&usa49wg_device_details,
NULL,
};
static const struct usb_device_id keyspan_ids_combined[] = {
{ USB_DEVICE(KEYSPAN_VENDOR_ID, keyspan_usa18x_pre_product_id) },
{ USB_DEVICE(KEYSPAN_VENDOR_ID, keyspan_usa19_pre_product_id) },
{ USB_DEVICE(KEYSPAN_VENDOR_ID, keyspan_usa19w_pre_product_id) },
{ USB_DEVICE(KEYSPAN_VENDOR_ID, keyspan_usa19qi_pre_product_id) },
{ USB_DEVICE(KEYSPAN_VENDOR_ID, keyspan_usa19qw_pre_product_id) },
{ USB_DEVICE(KEYSPAN_VENDOR_ID, keyspan_mpr_pre_product_id) },
{ USB_DEVICE(KEYSPAN_VENDOR_ID, keyspan_usa28_pre_product_id) },
{ USB_DEVICE(KEYSPAN_VENDOR_ID, keyspan_usa28x_pre_product_id) },
{ USB_DEVICE(KEYSPAN_VENDOR_ID, keyspan_usa28xa_pre_product_id) },
{ USB_DEVICE(KEYSPAN_VENDOR_ID, keyspan_usa28xb_pre_product_id) },
{ USB_DEVICE(KEYSPAN_VENDOR_ID, keyspan_usa49w_pre_product_id) },
{ USB_DEVICE(KEYSPAN_VENDOR_ID, keyspan_usa49wlc_pre_product_id) },
{ USB_DEVICE(KEYSPAN_VENDOR_ID, keyspan_usa18x_product_id) },
{ USB_DEVICE(KEYSPAN_VENDOR_ID, keyspan_usa19_product_id) },
{ USB_DEVICE(KEYSPAN_VENDOR_ID, keyspan_usa19w_product_id) },
{ USB_DEVICE(KEYSPAN_VENDOR_ID, keyspan_usa19qi_product_id) },
{ USB_DEVICE(KEYSPAN_VENDOR_ID, keyspan_usa19qw_product_id) },
{ USB_DEVICE(KEYSPAN_VENDOR_ID, keyspan_usa19hs_product_id) },
{ USB_DEVICE(KEYSPAN_VENDOR_ID, keyspan_mpr_product_id) },
{ USB_DEVICE(KEYSPAN_VENDOR_ID, keyspan_usa28_product_id) },
{ USB_DEVICE(KEYSPAN_VENDOR_ID, keyspan_usa28x_product_id) },
{ USB_DEVICE(KEYSPAN_VENDOR_ID, keyspan_usa28xa_product_id) },
{ USB_DEVICE(KEYSPAN_VENDOR_ID, keyspan_usa28xg_product_id) },
{ USB_DEVICE(KEYSPAN_VENDOR_ID, keyspan_usa49w_product_id)},
{ USB_DEVICE(KEYSPAN_VENDOR_ID, keyspan_usa49wlc_product_id)},
{ USB_DEVICE(KEYSPAN_VENDOR_ID, keyspan_usa49wg_product_id)},
{ } /* Terminating entry */
};
MODULE_DEVICE_TABLE(usb, keyspan_ids_combined);
/* usb_device_id table for the pre-firmware download keyspan devices */
static const struct usb_device_id keyspan_pre_ids[] = {
{ USB_DEVICE(KEYSPAN_VENDOR_ID, keyspan_usa18x_pre_product_id) },
{ USB_DEVICE(KEYSPAN_VENDOR_ID, keyspan_usa19_pre_product_id) },
{ USB_DEVICE(KEYSPAN_VENDOR_ID, keyspan_usa19qi_pre_product_id) },
{ USB_DEVICE(KEYSPAN_VENDOR_ID, keyspan_usa19qw_pre_product_id) },
{ USB_DEVICE(KEYSPAN_VENDOR_ID, keyspan_usa19w_pre_product_id) },
{ USB_DEVICE(KEYSPAN_VENDOR_ID, keyspan_mpr_pre_product_id) },
{ USB_DEVICE(KEYSPAN_VENDOR_ID, keyspan_usa28_pre_product_id) },
{ USB_DEVICE(KEYSPAN_VENDOR_ID, keyspan_usa28x_pre_product_id) },
{ USB_DEVICE(KEYSPAN_VENDOR_ID, keyspan_usa28xa_pre_product_id) },
{ USB_DEVICE(KEYSPAN_VENDOR_ID, keyspan_usa28xb_pre_product_id) },
{ USB_DEVICE(KEYSPAN_VENDOR_ID, keyspan_usa49w_pre_product_id) },
{ USB_DEVICE(KEYSPAN_VENDOR_ID, keyspan_usa49wlc_pre_product_id) },
{ } /* Terminating entry */
};
static const struct usb_device_id keyspan_1port_ids[] = {
{ USB_DEVICE(KEYSPAN_VENDOR_ID, keyspan_usa18x_product_id) },
{ USB_DEVICE(KEYSPAN_VENDOR_ID, keyspan_usa19_product_id) },
{ USB_DEVICE(KEYSPAN_VENDOR_ID, keyspan_usa19qi_product_id) },
{ USB_DEVICE(KEYSPAN_VENDOR_ID, keyspan_usa19qw_product_id) },
{ USB_DEVICE(KEYSPAN_VENDOR_ID, keyspan_usa19w_product_id) },
{ USB_DEVICE(KEYSPAN_VENDOR_ID, keyspan_usa19hs_product_id) },
{ USB_DEVICE(KEYSPAN_VENDOR_ID, keyspan_mpr_product_id) },
{ } /* Terminating entry */
};
static const struct usb_device_id keyspan_2port_ids[] = {
{ USB_DEVICE(KEYSPAN_VENDOR_ID, keyspan_usa28_product_id) },
{ USB_DEVICE(KEYSPAN_VENDOR_ID, keyspan_usa28x_product_id) },
{ USB_DEVICE(KEYSPAN_VENDOR_ID, keyspan_usa28xa_product_id) },
{ USB_DEVICE(KEYSPAN_VENDOR_ID, keyspan_usa28xg_product_id) },
{ } /* Terminating entry */
};
static const struct usb_device_id keyspan_4port_ids[] = {
{ USB_DEVICE(KEYSPAN_VENDOR_ID, keyspan_usa49w_product_id) },
{ USB_DEVICE(KEYSPAN_VENDOR_ID, keyspan_usa49wlc_product_id)},
{ USB_DEVICE(KEYSPAN_VENDOR_ID, keyspan_usa49wg_product_id)},
{ } /* Terminating entry */
};
#define INSTAT_BUFLEN 32
#define GLOCONT_BUFLEN 64
#define INDAT49W_BUFLEN 512
#define IN_BUFLEN 64
#define OUT_BUFLEN 64
#define INACK_BUFLEN 1
#define OUTCONT_BUFLEN 64
/* Per device and per port private data */
struct keyspan_serial_private {
const struct keyspan_device_details *device_details;
struct urb *instat_urb;
char *instat_buf;
	/* added to support the 49WG, where data from all four ports
	   arrives on a single endpoint and high speed is supported */
struct urb *indat_urb;
char *indat_buf;
/* XXX this one probably will need a lock */
struct urb *glocont_urb;
char *glocont_buf;
char *ctrl_buf; /* for EP0 control message */
};
struct keyspan_port_private {
/* Keep track of which input & output endpoints to use */
int in_flip;
int out_flip;
/* Keep duplicate of device details in each port
structure as well - simplifies some of the
callback functions etc. */
const struct keyspan_device_details *device_details;
/* Input endpoints and buffer for this port */
struct urb *in_urbs[2];
char *in_buffer[2];
/* Output endpoints and buffer for this port */
struct urb *out_urbs[2];
char *out_buffer[2];
/* Input ack endpoint */
struct urb *inack_urb;
char *inack_buffer;
/* Output control endpoint */
struct urb *outcont_urb;
char *outcont_buffer;
/* Settings for the port */
int baud;
int old_baud;
unsigned int cflag;
unsigned int old_cflag;
enum {flow_none, flow_cts, flow_xon} flow_control;
int rts_state; /* Handshaking pins (outputs) */
int dtr_state;
int cts_state; /* Handshaking pins (inputs) */
int dsr_state;
int dcd_state;
int ri_state;
int break_on;
unsigned long tx_start_time[2];
int resend_cont; /* need to resend control packet */
};
/* Include Keyspan message headers. All current Keyspan Adapters
make use of one of five message formats which are referred
to as USA-26, USA-28, USA-49, USA-90, USA-67 by Keyspan and
within this driver. */
#include "keyspan_usa26msg.h"
#include "keyspan_usa28msg.h"
#include "keyspan_usa49msg.h"
#include "keyspan_usa90msg.h"
#include "keyspan_usa67msg.h"
static int keyspan_break_ctl(struct tty_struct *tty, int break_state)
{
struct usb_serial_port *port = tty->driver_data;
struct keyspan_port_private *p_priv;
p_priv = usb_get_serial_port_data(port);
if (break_state == -1)
p_priv->break_on = 1;
else
p_priv->break_on = 0;
/* FIXME: return errors */
keyspan_send_setup(port, 0);
return 0;
}
static void keyspan_set_termios(struct tty_struct *tty,
struct usb_serial_port *port,
const struct ktermios *old_termios)
{
int baud_rate, device_port;
struct keyspan_port_private *p_priv;
const struct keyspan_device_details *d_details;
unsigned int cflag;
p_priv = usb_get_serial_port_data(port);
d_details = p_priv->device_details;
cflag = tty->termios.c_cflag;
device_port = port->port_number;
/* Baud rate calculation takes baud rate as an integer
so other rates can be generated if desired. */
baud_rate = tty_get_baud_rate(tty);
/* If no match or invalid, don't change */
if (d_details->calculate_baud_rate(port, baud_rate, d_details->baudclk,
NULL, NULL, NULL, device_port) == KEYSPAN_BAUD_RATE_OK) {
/* FIXME - more to do here to ensure rate changes cleanly */
/* FIXME - calculate exact rate from divisor ? */
p_priv->baud = baud_rate;
} else
baud_rate = tty_termios_baud_rate(old_termios);
tty_encode_baud_rate(tty, baud_rate, baud_rate);
/* set CTS/RTS handshake etc. */
p_priv->cflag = cflag;
p_priv->flow_control = (cflag & CRTSCTS) ? flow_cts : flow_none;
/* Mark/Space not supported */
tty->termios.c_cflag &= ~CMSPAR;
keyspan_send_setup(port, 0);
}
static int keyspan_tiocmget(struct tty_struct *tty)
{
struct usb_serial_port *port = tty->driver_data;
struct keyspan_port_private *p_priv = usb_get_serial_port_data(port);
unsigned int value;
value = ((p_priv->rts_state) ? TIOCM_RTS : 0) |
((p_priv->dtr_state) ? TIOCM_DTR : 0) |
((p_priv->cts_state) ? TIOCM_CTS : 0) |
((p_priv->dsr_state) ? TIOCM_DSR : 0) |
((p_priv->dcd_state) ? TIOCM_CAR : 0) |
((p_priv->ri_state) ? TIOCM_RNG : 0);
return value;
}
static int keyspan_tiocmset(struct tty_struct *tty,
unsigned int set, unsigned int clear)
{
struct usb_serial_port *port = tty->driver_data;
struct keyspan_port_private *p_priv = usb_get_serial_port_data(port);
if (set & TIOCM_RTS)
p_priv->rts_state = 1;
if (set & TIOCM_DTR)
p_priv->dtr_state = 1;
if (clear & TIOCM_RTS)
p_priv->rts_state = 0;
if (clear & TIOCM_DTR)
p_priv->dtr_state = 0;
keyspan_send_setup(port, 0);
return 0;
}
/* The write function is similar for all of the message formats used,
   with only a minor change required for usa90 (usa19hs) */
static int keyspan_write(struct tty_struct *tty,
struct usb_serial_port *port, const unsigned char *buf, int count)
{
struct keyspan_port_private *p_priv;
const struct keyspan_device_details *d_details;
int flip;
int left, todo;
struct urb *this_urb;
int err, maxDataLen, dataOffset;
p_priv = usb_get_serial_port_data(port);
d_details = p_priv->device_details;
if (d_details->msg_format == msg_usa90) {
maxDataLen = 64;
dataOffset = 0;
} else {
maxDataLen = 63;
dataOffset = 1;
}
dev_dbg(&port->dev, "%s - %d chars, flip=%d\n", __func__, count,
p_priv->out_flip);
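	/*
	 * Data is sent in chunks of at most maxDataLen bytes, alternating
	 * between the two output urbs (out_flip selects which one is used
	 * next); any bytes that cannot be queued are accounted for in the
	 * return value (count - left).
	 */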
for (left = count; left > 0; left -= todo) {
todo = left;
if (todo > maxDataLen)
todo = maxDataLen;
flip = p_priv->out_flip;
/* Check we have a valid urb/endpoint before we use it... */
this_urb = p_priv->out_urbs[flip];
if (this_urb == NULL) {
			/* no bulk out urb; the data is dropped but reported as written */
dev_dbg(&port->dev, "%s - no output urb :(\n", __func__);
return count;
}
dev_dbg(&port->dev, "%s - endpoint %x flip %d\n",
__func__, usb_pipeendpoint(this_urb->pipe), flip);
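		/*
		 * If this urb is still in flight, stop queueing more data for
		 * now; if it appears to have been stuck for more than 10
		 * seconds, unlink it first so a later write can make progress.
		 */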
if (this_urb->status == -EINPROGRESS) {
if (time_before(jiffies,
p_priv->tx_start_time[flip] + 10 * HZ))
break;
usb_unlink_urb(this_urb);
break;
}
		/* First byte in buffer is "last flag" (except for usa19hs)
		   - currently unused, so set it to zero */
((char *)this_urb->transfer_buffer)[0] = 0;
memcpy(this_urb->transfer_buffer + dataOffset, buf, todo);
buf += todo;
/* send the data out the bulk port */
this_urb->transfer_buffer_length = todo + dataOffset;
err = usb_submit_urb(this_urb, GFP_ATOMIC);
if (err != 0)
dev_dbg(&port->dev, "usb_submit_urb(write bulk) failed (%d)\n", err);
p_priv->tx_start_time[flip] = jiffies;
/* Flip for next time if usa26 or usa28 interface
(not used on usa49) */
p_priv->out_flip = (flip + 1) & d_details->outdat_endp_flip;
}
return count - left;
}
static void usa26_indat_callback(struct urb *urb)
{
int i, err;
int endpoint;
struct usb_serial_port *port;
unsigned char *data = urb->transfer_buffer;
int status = urb->status;
endpoint = usb_pipeendpoint(urb->pipe);
if (status) {
dev_dbg(&urb->dev->dev, "%s - nonzero status %d on endpoint %x\n",
__func__, status, endpoint);
return;
}
port = urb->context;
if (urb->actual_length) {
/* 0x80 bit is error flag */
if ((data[0] & 0x80) == 0) {
/* no errors on individual bytes, only
possible overrun err */
if (data[0] & RXERROR_OVERRUN) {
tty_insert_flip_char(&port->port, 0,
TTY_OVERRUN);
}
for (i = 1; i < urb->actual_length ; ++i)
tty_insert_flip_char(&port->port, data[i],
TTY_NORMAL);
} else {
/* some bytes had errors, every byte has status */
dev_dbg(&port->dev, "%s - RX error!!!!\n", __func__);
for (i = 0; i + 1 < urb->actual_length; i += 2) {
int stat = data[i];
int flag = TTY_NORMAL;
if (stat & RXERROR_OVERRUN) {
tty_insert_flip_char(&port->port, 0,
TTY_OVERRUN);
}
/* XXX should handle break (0x10) */
if (stat & RXERROR_PARITY)
flag = TTY_PARITY;
else if (stat & RXERROR_FRAMING)
flag = TTY_FRAME;
tty_insert_flip_char(&port->port, data[i+1],
flag);
}
}
tty_flip_buffer_push(&port->port);
}
/* Resubmit urb so we continue receiving */
err = usb_submit_urb(urb, GFP_ATOMIC);
if (err != 0)
dev_dbg(&port->dev, "%s - resubmit read urb failed. (%d)\n", __func__, err);
}
/* Outdat handling is common for all devices */
static void usa2x_outdat_callback(struct urb *urb)
{
struct usb_serial_port *port;
struct keyspan_port_private *p_priv;
port = urb->context;
p_priv = usb_get_serial_port_data(port);
dev_dbg(&port->dev, "%s - urb %d\n", __func__, urb == p_priv->out_urbs[1]);
usb_serial_port_softint(port);
}
static void usa26_inack_callback(struct urb *urb)
{
}
static void usa26_outcont_callback(struct urb *urb)
{
struct usb_serial_port *port;
struct keyspan_port_private *p_priv;
port = urb->context;
p_priv = usb_get_serial_port_data(port);
if (p_priv->resend_cont) {
dev_dbg(&port->dev, "%s - sending setup\n", __func__);
keyspan_usa26_send_setup(port->serial, port,
p_priv->resend_cont - 1);
}
}
static void usa26_instat_callback(struct urb *urb)
{
unsigned char *data = urb->transfer_buffer;
struct keyspan_usa26_portStatusMessage *msg;
struct usb_serial *serial;
struct usb_serial_port *port;
struct keyspan_port_private *p_priv;
int old_dcd_state, err;
int status = urb->status;
serial = urb->context;
if (status) {
dev_dbg(&urb->dev->dev, "%s - nonzero status: %d\n",
__func__, status);
return;
}
if (urb->actual_length != 9) {
dev_dbg(&urb->dev->dev, "%s - %d byte report??\n", __func__, urb->actual_length);
goto exit;
}
msg = (struct keyspan_usa26_portStatusMessage *)data;
/* Check port number from message and retrieve private data */
if (msg->port >= serial->num_ports) {
dev_dbg(&urb->dev->dev, "%s - Unexpected port number %d\n", __func__, msg->port);
goto exit;
}
port = serial->port[msg->port];
p_priv = usb_get_serial_port_data(port);
if (!p_priv)
goto resubmit;
/* Update handshaking pin state information */
old_dcd_state = p_priv->dcd_state;
p_priv->cts_state = ((msg->hskia_cts) ? 1 : 0);
p_priv->dsr_state = ((msg->dsr) ? 1 : 0);
p_priv->dcd_state = ((msg->gpia_dcd) ? 1 : 0);
p_priv->ri_state = ((msg->ri) ? 1 : 0);
if (old_dcd_state != p_priv->dcd_state)
tty_port_tty_hangup(&port->port, true);
resubmit:
/* Resubmit urb so we continue receiving */
err = usb_submit_urb(urb, GFP_ATOMIC);
if (err != 0)
dev_dbg(&port->dev, "%s - resubmit read urb failed. (%d)\n", __func__, err);
exit: ;
}
static void usa26_glocont_callback(struct urb *urb)
{
}
static void usa28_indat_callback(struct urb *urb)
{
int err;
struct usb_serial_port *port;
unsigned char *data;
struct keyspan_port_private *p_priv;
int status = urb->status;
port = urb->context;
p_priv = usb_get_serial_port_data(port);
data = urb->transfer_buffer;
if (urb != p_priv->in_urbs[p_priv->in_flip])
return;
do {
if (status) {
dev_dbg(&urb->dev->dev, "%s - nonzero status %d on endpoint %x\n",
__func__, status, usb_pipeendpoint(urb->pipe));
return;
}
port = urb->context;
p_priv = usb_get_serial_port_data(port);
data = urb->transfer_buffer;
if (urb->actual_length) {
tty_insert_flip_string(&port->port, data,
urb->actual_length);
tty_flip_buffer_push(&port->port);
}
/* Resubmit urb so we continue receiving */
err = usb_submit_urb(urb, GFP_ATOMIC);
if (err != 0)
dev_dbg(&port->dev, "%s - resubmit read urb failed. (%d)\n",
__func__, err);
p_priv->in_flip ^= 1;
urb = p_priv->in_urbs[p_priv->in_flip];
} while (urb->status != -EINPROGRESS);
}
static void usa28_inack_callback(struct urb *urb)
{
}
static void usa28_outcont_callback(struct urb *urb)
{
struct usb_serial_port *port;
struct keyspan_port_private *p_priv;
port = urb->context;
p_priv = usb_get_serial_port_data(port);
if (p_priv->resend_cont) {
dev_dbg(&port->dev, "%s - sending setup\n", __func__);
keyspan_usa28_send_setup(port->serial, port,
p_priv->resend_cont - 1);
}
}
static void usa28_instat_callback(struct urb *urb)
{
int err;
unsigned char *data = urb->transfer_buffer;
struct keyspan_usa28_portStatusMessage *msg;
struct usb_serial *serial;
struct usb_serial_port *port;
struct keyspan_port_private *p_priv;
int old_dcd_state;
int status = urb->status;
serial = urb->context;
if (status) {
dev_dbg(&urb->dev->dev, "%s - nonzero status: %d\n",
__func__, status);
return;
}
if (urb->actual_length != sizeof(struct keyspan_usa28_portStatusMessage)) {
dev_dbg(&urb->dev->dev, "%s - bad length %d\n", __func__, urb->actual_length);
goto exit;
}
msg = (struct keyspan_usa28_portStatusMessage *)data;
/* Check port number from message and retrieve private data */
if (msg->port >= serial->num_ports) {
dev_dbg(&urb->dev->dev, "%s - Unexpected port number %d\n", __func__, msg->port);
goto exit;
}
port = serial->port[msg->port];
p_priv = usb_get_serial_port_data(port);
if (!p_priv)
goto resubmit;
/* Update handshaking pin state information */
old_dcd_state = p_priv->dcd_state;
p_priv->cts_state = ((msg->cts) ? 1 : 0);
p_priv->dsr_state = ((msg->dsr) ? 1 : 0);
p_priv->dcd_state = ((msg->dcd) ? 1 : 0);
p_priv->ri_state = ((msg->ri) ? 1 : 0);
if (old_dcd_state != p_priv->dcd_state && old_dcd_state)
tty_port_tty_hangup(&port->port, true);
resubmit:
/* Resubmit urb so we continue receiving */
err = usb_submit_urb(urb, GFP_ATOMIC);
if (err != 0)
dev_dbg(&port->dev, "%s - resubmit read urb failed. (%d)\n", __func__, err);
exit: ;
}
static void usa28_glocont_callback(struct urb *urb)
{
}
static void usa49_glocont_callback(struct urb *urb)
{
struct usb_serial *serial;
struct usb_serial_port *port;
struct keyspan_port_private *p_priv;
int i;
serial = urb->context;
for (i = 0; i < serial->num_ports; ++i) {
port = serial->port[i];
p_priv = usb_get_serial_port_data(port);
if (!p_priv)
continue;
if (p_priv->resend_cont) {
dev_dbg(&port->dev, "%s - sending setup\n", __func__);
keyspan_usa49_send_setup(serial, port,
p_priv->resend_cont - 1);
break;
}
}
}
/* This is actually called glostat in the Keyspan documentation */
static void usa49_instat_callback(struct urb *urb)
{
int err;
unsigned char *data = urb->transfer_buffer;
struct keyspan_usa49_portStatusMessage *msg;
struct usb_serial *serial;
struct usb_serial_port *port;
struct keyspan_port_private *p_priv;
int old_dcd_state;
int status = urb->status;
serial = urb->context;
if (status) {
dev_dbg(&urb->dev->dev, "%s - nonzero status: %d\n",
__func__, status);
return;
}
if (urb->actual_length !=
sizeof(struct keyspan_usa49_portStatusMessage)) {
dev_dbg(&urb->dev->dev, "%s - bad length %d\n", __func__, urb->actual_length);
goto exit;
}
msg = (struct keyspan_usa49_portStatusMessage *)data;
/* Check port number from message and retrieve private data */
if (msg->portNumber >= serial->num_ports) {
dev_dbg(&urb->dev->dev, "%s - Unexpected port number %d\n",
__func__, msg->portNumber);
goto exit;
}
port = serial->port[msg->portNumber];
p_priv = usb_get_serial_port_data(port);
if (!p_priv)
goto resubmit;
/* Update handshaking pin state information */
old_dcd_state = p_priv->dcd_state;
p_priv->cts_state = ((msg->cts) ? 1 : 0);
p_priv->dsr_state = ((msg->dsr) ? 1 : 0);
p_priv->dcd_state = ((msg->dcd) ? 1 : 0);
p_priv->ri_state = ((msg->ri) ? 1 : 0);
if (old_dcd_state != p_priv->dcd_state && old_dcd_state)
tty_port_tty_hangup(&port->port, true);
resubmit:
/* Resubmit urb so we continue receiving */
err = usb_submit_urb(urb, GFP_ATOMIC);
if (err != 0)
dev_dbg(&port->dev, "%s - resubmit read urb failed. (%d)\n", __func__, err);
exit: ;
}
static void usa49_inack_callback(struct urb *urb)
{
}
static void usa49_indat_callback(struct urb *urb)
{
int i, err;
int endpoint;
struct usb_serial_port *port;
unsigned char *data = urb->transfer_buffer;
int status = urb->status;
endpoint = usb_pipeendpoint(urb->pipe);
if (status) {
dev_dbg(&urb->dev->dev, "%s - nonzero status %d on endpoint %x\n",
__func__, status, endpoint);
return;
}
port = urb->context;
if (urb->actual_length) {
/* 0x80 bit is error flag */
if ((data[0] & 0x80) == 0) {
/* no error on any byte */
tty_insert_flip_string(&port->port, data + 1,
urb->actual_length - 1);
} else {
/* some bytes had errors, every byte has status */
for (i = 0; i + 1 < urb->actual_length; i += 2) {
int stat = data[i];
int flag = TTY_NORMAL;
if (stat & RXERROR_OVERRUN) {
tty_insert_flip_char(&port->port, 0,
TTY_OVERRUN);
}
/* XXX should handle break (0x10) */
if (stat & RXERROR_PARITY)
flag = TTY_PARITY;
else if (stat & RXERROR_FRAMING)
flag = TTY_FRAME;
tty_insert_flip_char(&port->port, data[i+1],
flag);
}
}
tty_flip_buffer_push(&port->port);
}
/* Resubmit urb so we continue receiving */
err = usb_submit_urb(urb, GFP_ATOMIC);
if (err != 0)
dev_dbg(&port->dev, "%s - resubmit read urb failed. (%d)\n", __func__, err);
}
static void usa49wg_indat_callback(struct urb *urb)
{
int i, len, x, err;
struct usb_serial *serial;
struct usb_serial_port *port;
unsigned char *data = urb->transfer_buffer;
int status = urb->status;
serial = urb->context;
if (status) {
dev_dbg(&urb->dev->dev, "%s - nonzero status: %d\n",
__func__, status);
return;
}
/* inbound data is in the form P#, len, status, data */
i = 0;
len = 0;
while (i < urb->actual_length) {
/* Check port number from message */
if (data[i] >= serial->num_ports) {
dev_dbg(&urb->dev->dev, "%s - Unexpected port number %d\n",
__func__, data[i]);
return;
}
port = serial->port[data[i++]];
len = data[i++];
/* 0x80 bit is error flag */
if ((data[i] & 0x80) == 0) {
/* no error on any byte */
i++;
for (x = 1; x < len && i < urb->actual_length; ++x)
tty_insert_flip_char(&port->port,
data[i++], 0);
} else {
/*
* some bytes had errors, every byte has status
*/
for (x = 0; x + 1 < len &&
i + 1 < urb->actual_length; x += 2) {
int stat = data[i];
int flag = TTY_NORMAL;
if (stat & RXERROR_OVERRUN) {
tty_insert_flip_char(&port->port, 0,
TTY_OVERRUN);
}
/* XXX should handle break (0x10) */
if (stat & RXERROR_PARITY)
flag = TTY_PARITY;
else if (stat & RXERROR_FRAMING)
flag = TTY_FRAME;
tty_insert_flip_char(&port->port, data[i+1],
flag);
i += 2;
}
}
tty_flip_buffer_push(&port->port);
}
/* Resubmit urb so we continue receiving */
err = usb_submit_urb(urb, GFP_ATOMIC);
if (err != 0)
dev_dbg(&urb->dev->dev, "%s - resubmit read urb failed. (%d)\n", __func__, err);
}
/* not used, usa-49 doesn't have per-port control endpoints */
static void usa49_outcont_callback(struct urb *urb)
{
}
static void usa90_indat_callback(struct urb *urb)
{
int i, err;
int endpoint;
struct usb_serial_port *port;
struct keyspan_port_private *p_priv;
unsigned char *data = urb->transfer_buffer;
int status = urb->status;
endpoint = usb_pipeendpoint(urb->pipe);
if (status) {
dev_dbg(&urb->dev->dev, "%s - nonzero status %d on endpoint %x\n",
__func__, status, endpoint);
return;
}
port = urb->context;
p_priv = usb_get_serial_port_data(port);
if (urb->actual_length) {
		/* in DMA mode (rates above 57600) the data looks like the
		   usa28 format; otherwise it looks like the usa26 format */
if (p_priv->baud > 57600)
tty_insert_flip_string(&port->port, data,
urb->actual_length);
else {
/* 0x80 bit is error flag */
if ((data[0] & 0x80) == 0) {
/* no errors on individual bytes, only
possible overrun err*/
if (data[0] & RXERROR_OVERRUN) {
tty_insert_flip_char(&port->port, 0,
TTY_OVERRUN);
}
for (i = 1; i < urb->actual_length ; ++i)
tty_insert_flip_char(&port->port,
data[i], TTY_NORMAL);
} else {
/* some bytes had errors, every byte has status */
dev_dbg(&port->dev, "%s - RX error!!!!\n", __func__);
for (i = 0; i + 1 < urb->actual_length; i += 2) {
int stat = data[i];
int flag = TTY_NORMAL;
if (stat & RXERROR_OVERRUN) {
tty_insert_flip_char(
&port->port, 0,
TTY_OVERRUN);
}
/* XXX should handle break (0x10) */
if (stat & RXERROR_PARITY)
flag = TTY_PARITY;
else if (stat & RXERROR_FRAMING)
flag = TTY_FRAME;
tty_insert_flip_char(&port->port,
data[i+1], flag);
}
}
}
tty_flip_buffer_push(&port->port);
}
/* Resubmit urb so we continue receiving */
err = usb_submit_urb(urb, GFP_ATOMIC);
if (err != 0)
dev_dbg(&port->dev, "%s - resubmit read urb failed. (%d)\n", __func__, err);
}
static void usa90_instat_callback(struct urb *urb)
{
unsigned char *data = urb->transfer_buffer;
struct keyspan_usa90_portStatusMessage *msg;
struct usb_serial *serial;
struct usb_serial_port *port;
struct keyspan_port_private *p_priv;
int old_dcd_state, err;
int status = urb->status;
serial = urb->context;
if (status) {
dev_dbg(&urb->dev->dev, "%s - nonzero status: %d\n",
__func__, status);
return;
}
if (urb->actual_length < 14) {
dev_dbg(&urb->dev->dev, "%s - %d byte report??\n", __func__, urb->actual_length);
goto exit;
}
msg = (struct keyspan_usa90_portStatusMessage *)data;
/* Now do something useful with the data */
port = serial->port[0];
p_priv = usb_get_serial_port_data(port);
if (!p_priv)
goto resubmit;
/* Update handshaking pin state information */
old_dcd_state = p_priv->dcd_state;
p_priv->cts_state = ((msg->cts) ? 1 : 0);
p_priv->dsr_state = ((msg->dsr) ? 1 : 0);
p_priv->dcd_state = ((msg->dcd) ? 1 : 0);
p_priv->ri_state = ((msg->ri) ? 1 : 0);
if (old_dcd_state != p_priv->dcd_state && old_dcd_state)
tty_port_tty_hangup(&port->port, true);
resubmit:
/* Resubmit urb so we continue receiving */
err = usb_submit_urb(urb, GFP_ATOMIC);
if (err != 0)
dev_dbg(&port->dev, "%s - resubmit read urb failed. (%d)\n", __func__, err);
exit:
;
}
static void usa90_outcont_callback(struct urb *urb)
{
struct usb_serial_port *port;
struct keyspan_port_private *p_priv;
port = urb->context;
p_priv = usb_get_serial_port_data(port);
if (p_priv->resend_cont) {
dev_dbg(&urb->dev->dev, "%s - sending setup\n", __func__);
keyspan_usa90_send_setup(port->serial, port,
p_priv->resend_cont - 1);
}
}
/* Status messages from the 28xg */
static void usa67_instat_callback(struct urb *urb)
{
int err;
unsigned char *data = urb->transfer_buffer;
struct keyspan_usa67_portStatusMessage *msg;
struct usb_serial *serial;
struct usb_serial_port *port;
struct keyspan_port_private *p_priv;
int old_dcd_state;
int status = urb->status;
serial = urb->context;
if (status) {
dev_dbg(&urb->dev->dev, "%s - nonzero status: %d\n",
__func__, status);
return;
}
if (urb->actual_length !=
sizeof(struct keyspan_usa67_portStatusMessage)) {
dev_dbg(&urb->dev->dev, "%s - bad length %d\n", __func__, urb->actual_length);
return;
}
/* Now do something useful with the data */
msg = (struct keyspan_usa67_portStatusMessage *)data;
/* Check port number from message and retrieve private data */
if (msg->port >= serial->num_ports) {
dev_dbg(&urb->dev->dev, "%s - Unexpected port number %d\n", __func__, msg->port);
return;
}
port = serial->port[msg->port];
p_priv = usb_get_serial_port_data(port);
if (!p_priv)
goto resubmit;
/* Update handshaking pin state information */
old_dcd_state = p_priv->dcd_state;
p_priv->cts_state = ((msg->hskia_cts) ? 1 : 0);
p_priv->dcd_state = ((msg->gpia_dcd) ? 1 : 0);
if (old_dcd_state != p_priv->dcd_state && old_dcd_state)
tty_port_tty_hangup(&port->port, true);
resubmit:
/* Resubmit urb so we continue receiving */
err = usb_submit_urb(urb, GFP_ATOMIC);
if (err != 0)
dev_dbg(&port->dev, "%s - resubmit read urb failed. (%d)\n", __func__, err);
}
static void usa67_glocont_callback(struct urb *urb)
{
struct usb_serial *serial;
struct usb_serial_port *port;
struct keyspan_port_private *p_priv;
int i;
serial = urb->context;
for (i = 0; i < serial->num_ports; ++i) {
port = serial->port[i];
p_priv = usb_get_serial_port_data(port);
if (!p_priv)
continue;
if (p_priv->resend_cont) {
dev_dbg(&port->dev, "%s - sending setup\n", __func__);
keyspan_usa67_send_setup(serial, port,
p_priv->resend_cont - 1);
break;
}
}
}
static unsigned int keyspan_write_room(struct tty_struct *tty)
{
struct usb_serial_port *port = tty->driver_data;
struct keyspan_port_private *p_priv;
const struct keyspan_device_details *d_details;
int flip;
unsigned int data_len;
struct urb *this_urb;
p_priv = usb_get_serial_port_data(port);
d_details = p_priv->device_details;
/* FIXME: locking */
if (d_details->msg_format == msg_usa90)
data_len = 64;
else
data_len = 63;
flip = p_priv->out_flip;
/* Check both endpoints to see if any are available. */
this_urb = p_priv->out_urbs[flip];
if (this_urb != NULL) {
if (this_urb->status != -EINPROGRESS)
return data_len;
flip = (flip + 1) & d_details->outdat_endp_flip;
this_urb = p_priv->out_urbs[flip];
if (this_urb != NULL) {
if (this_urb->status != -EINPROGRESS)
return data_len;
}
}
return 0;
}
static int keyspan_open(struct tty_struct *tty, struct usb_serial_port *port)
{
struct keyspan_port_private *p_priv;
const struct keyspan_device_details *d_details;
int i, err;
int baud_rate, device_port;
struct urb *urb;
unsigned int cflag = 0;
p_priv = usb_get_serial_port_data(port);
d_details = p_priv->device_details;
/* Set some sane defaults */
p_priv->rts_state = 1;
p_priv->dtr_state = 1;
p_priv->baud = 9600;
/* force baud and lcr to be set on open */
p_priv->old_baud = 0;
p_priv->old_cflag = 0;
p_priv->out_flip = 0;
p_priv->in_flip = 0;
/* Reset low level data toggle and start reading from endpoints */
for (i = 0; i < 2; i++) {
urb = p_priv->in_urbs[i];
if (urb == NULL)
continue;
/* make sure endpoint data toggle is synchronized
with the device */
usb_clear_halt(urb->dev, urb->pipe);
err = usb_submit_urb(urb, GFP_KERNEL);
if (err != 0)
dev_dbg(&port->dev, "%s - submit urb %d failed (%d)\n", __func__, i, err);
}
/* Reset low level data toggle on out endpoints */
for (i = 0; i < 2; i++) {
urb = p_priv->out_urbs[i];
if (urb == NULL)
continue;
/* usb_settoggle(urb->dev, usb_pipeendpoint(urb->pipe),
usb_pipeout(urb->pipe), 0); */
}
/* get the terminal config for the setup message now so we don't
* need to send 2 of them */
device_port = port->port_number;
if (tty) {
cflag = tty->termios.c_cflag;
/* Baud rate calculation takes baud rate as an integer
so other rates can be generated if desired. */
baud_rate = tty_get_baud_rate(tty);
/* If no match or invalid, leave as default */
if (baud_rate >= 0
&& d_details->calculate_baud_rate(port, baud_rate, d_details->baudclk,
NULL, NULL, NULL, device_port) == KEYSPAN_BAUD_RATE_OK) {
p_priv->baud = baud_rate;
}
}
/* set CTS/RTS handshake etc. */
p_priv->cflag = cflag;
p_priv->flow_control = (cflag & CRTSCTS) ? flow_cts : flow_none;
keyspan_send_setup(port, 1);
/* mdelay(100); */
/* keyspan_set_termios(port, NULL); */
return 0;
}
static void keyspan_dtr_rts(struct usb_serial_port *port, int on)
{
struct keyspan_port_private *p_priv = usb_get_serial_port_data(port);
p_priv->rts_state = on;
p_priv->dtr_state = on;
keyspan_send_setup(port, 0);
}
static void keyspan_close(struct usb_serial_port *port)
{
int i;
struct keyspan_port_private *p_priv;
p_priv = usb_get_serial_port_data(port);
p_priv->rts_state = 0;
p_priv->dtr_state = 0;
keyspan_send_setup(port, 2);
/* pilot-xfer seems to work best with this delay */
mdelay(100);
p_priv->out_flip = 0;
p_priv->in_flip = 0;
usb_kill_urb(p_priv->inack_urb);
for (i = 0; i < 2; i++) {
usb_kill_urb(p_priv->in_urbs[i]);
usb_kill_urb(p_priv->out_urbs[i]);
}
}
/* download the firmware to a pre-renumeration device */
static int keyspan_fake_startup(struct usb_serial *serial)
{
char *fw_name;
dev_dbg(&serial->dev->dev, "Keyspan startup version %04x product %04x\n",
le16_to_cpu(serial->dev->descriptor.bcdDevice),
le16_to_cpu(serial->dev->descriptor.idProduct));
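	/*
	 * The check below treats a set top bit in bcdDevice as "firmware not
	 * yet loaded"; once the firmware is running the bit is clear and the
	 * renumerated device binds to the real driver instead.
	 */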
if ((le16_to_cpu(serial->dev->descriptor.bcdDevice) & 0x8000)
!= 0x8000) {
dev_dbg(&serial->dev->dev, "Firmware already loaded. Quitting.\n");
return 1;
}
/* Select firmware image on the basis of idProduct */
switch (le16_to_cpu(serial->dev->descriptor.idProduct)) {
case keyspan_usa28_pre_product_id:
fw_name = "keyspan/usa28.fw";
break;
case keyspan_usa28x_pre_product_id:
fw_name = "keyspan/usa28x.fw";
break;
case keyspan_usa28xa_pre_product_id:
fw_name = "keyspan/usa28xa.fw";
break;
case keyspan_usa28xb_pre_product_id:
fw_name = "keyspan/usa28xb.fw";
break;
case keyspan_usa19_pre_product_id:
fw_name = "keyspan/usa19.fw";
break;
case keyspan_usa19qi_pre_product_id:
fw_name = "keyspan/usa19qi.fw";
break;
case keyspan_mpr_pre_product_id:
fw_name = "keyspan/mpr.fw";
break;
case keyspan_usa19qw_pre_product_id:
fw_name = "keyspan/usa19qw.fw";
break;
case keyspan_usa18x_pre_product_id:
fw_name = "keyspan/usa18x.fw";
break;
case keyspan_usa19w_pre_product_id:
fw_name = "keyspan/usa19w.fw";
break;
case keyspan_usa49w_pre_product_id:
fw_name = "keyspan/usa49w.fw";
break;
case keyspan_usa49wlc_pre_product_id:
fw_name = "keyspan/usa49wlc.fw";
break;
default:
dev_err(&serial->dev->dev, "Unknown product ID (%04x)\n",
le16_to_cpu(serial->dev->descriptor.idProduct));
return 1;
}
dev_dbg(&serial->dev->dev, "Uploading Keyspan %s firmware.\n", fw_name);
if (ezusb_fx1_ihex_firmware_download(serial->dev, fw_name) < 0) {
dev_err(&serial->dev->dev, "failed to load firmware \"%s\"\n",
fw_name);
return -ENOENT;
}
	/*
	 * After the firmware download, renumeration will occur in a moment
	 * and the new device will bind to the real driver.  We don't want
	 * this pre-renumeration device to have a driver assigned to it.
	 */
return 1;
}
/* Helper functions used by keyspan_setup_urbs */
static struct usb_endpoint_descriptor const *find_ep(struct usb_serial const *serial,
int endpoint)
{
struct usb_host_interface *iface_desc;
struct usb_endpoint_descriptor *ep;
int i;
iface_desc = serial->interface->cur_altsetting;
for (i = 0; i < iface_desc->desc.bNumEndpoints; ++i) {
ep = &iface_desc->endpoint[i].desc;
if (ep->bEndpointAddress == endpoint)
return ep;
}
dev_warn(&serial->interface->dev, "found no endpoint descriptor for endpoint %x\n",
endpoint);
return NULL;
}
static struct urb *keyspan_setup_urb(struct usb_serial *serial, int endpoint,
int dir, void *ctx, char *buf, int len,
void (*callback)(struct urb *))
{
struct urb *urb;
struct usb_endpoint_descriptor const *ep_desc;
char const *ep_type_name;
if (endpoint == -1)
return NULL; /* endpoint not needed */
dev_dbg(&serial->interface->dev, "%s - alloc for endpoint %x\n",
__func__, endpoint);
urb = usb_alloc_urb(0, GFP_KERNEL); /* No ISO */
if (!urb)
return NULL;
if (endpoint == 0) {
/* control EP filled in when used */
return urb;
}
ep_desc = find_ep(serial, endpoint);
if (!ep_desc) {
usb_free_urb(urb);
return NULL;
}
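	/*
	 * The pipe is built with usb_snd*pipe() and the requested direction
	 * bit is then ORed in, so passing USB_DIR_IN here yields the
	 * corresponding receive pipe for the same endpoint number.
	 */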
if (usb_endpoint_xfer_int(ep_desc)) {
ep_type_name = "INT";
usb_fill_int_urb(urb, serial->dev,
usb_sndintpipe(serial->dev, endpoint) | dir,
buf, len, callback, ctx,
ep_desc->bInterval);
} else if (usb_endpoint_xfer_bulk(ep_desc)) {
ep_type_name = "BULK";
usb_fill_bulk_urb(urb, serial->dev,
usb_sndbulkpipe(serial->dev, endpoint) | dir,
buf, len, callback, ctx);
} else {
dev_warn(&serial->interface->dev,
"unsupported endpoint type %x\n",
usb_endpoint_type(ep_desc));
usb_free_urb(urb);
return NULL;
}
dev_dbg(&serial->interface->dev, "%s - using urb %p for %s endpoint %x\n",
__func__, urb, ep_type_name, endpoint);
return urb;
}
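/*
 * Per-message-format completion callbacks, indexed by d_details->msg_format;
 * the entries follow the order of the formats labelled below (USA-26,
 * USA-28, USA-49, USA-90, USA-67).
 */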
static struct callbacks {
void (*instat_callback)(struct urb *);
void (*glocont_callback)(struct urb *);
void (*indat_callback)(struct urb *);
void (*outdat_callback)(struct urb *);
void (*inack_callback)(struct urb *);
void (*outcont_callback)(struct urb *);
} keyspan_callbacks[] = {
{
/* msg_usa26 callbacks */
.instat_callback = usa26_instat_callback,
.glocont_callback = usa26_glocont_callback,
.indat_callback = usa26_indat_callback,
.outdat_callback = usa2x_outdat_callback,
.inack_callback = usa26_inack_callback,
.outcont_callback = usa26_outcont_callback,
}, {
/* msg_usa28 callbacks */
.instat_callback = usa28_instat_callback,
.glocont_callback = usa28_glocont_callback,
.indat_callback = usa28_indat_callback,
.outdat_callback = usa2x_outdat_callback,
.inack_callback = usa28_inack_callback,
.outcont_callback = usa28_outcont_callback,
}, {
/* msg_usa49 callbacks */
.instat_callback = usa49_instat_callback,
.glocont_callback = usa49_glocont_callback,
.indat_callback = usa49_indat_callback,
.outdat_callback = usa2x_outdat_callback,
.inack_callback = usa49_inack_callback,
.outcont_callback = usa49_outcont_callback,
}, {
/* msg_usa90 callbacks */
.instat_callback = usa90_instat_callback,
.glocont_callback = usa28_glocont_callback,
.indat_callback = usa90_indat_callback,
.outdat_callback = usa2x_outdat_callback,
.inack_callback = usa28_inack_callback,
.outcont_callback = usa90_outcont_callback,
}, {
/* msg_usa67 callbacks */
.instat_callback = usa67_instat_callback,
.glocont_callback = usa67_glocont_callback,
.indat_callback = usa26_indat_callback,
.outdat_callback = usa2x_outdat_callback,
.inack_callback = usa26_inack_callback,
.outcont_callback = usa26_outcont_callback,
}
};
/* Generic setup urbs function that uses
data in device_details */
static void keyspan_setup_urbs(struct usb_serial *serial)
{
struct keyspan_serial_private *s_priv;
const struct keyspan_device_details *d_details;
struct callbacks *cback;
s_priv = usb_get_serial_data(serial);
d_details = s_priv->device_details;
/* Setup values for the various callback routines */
cback = &keyspan_callbacks[d_details->msg_format];
/* Allocate and set up urbs for each one that is in use,
starting with instat endpoints */
s_priv->instat_urb = keyspan_setup_urb
(serial, d_details->instat_endpoint, USB_DIR_IN,
serial, s_priv->instat_buf, INSTAT_BUFLEN,
cback->instat_callback);
s_priv->indat_urb = keyspan_setup_urb
(serial, d_details->indat_endpoint, USB_DIR_IN,
serial, s_priv->indat_buf, INDAT49W_BUFLEN,
usa49wg_indat_callback);
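	/*
	 * indat_urb uses the 49WG style framing (usa49wg_indat_callback);
	 * devices without a shared in-data endpoint are expected to have
	 * indat_endpoint set to -1, in which case keyspan_setup_urb()
	 * returns NULL and the urb is simply not used.
	 */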
s_priv->glocont_urb = keyspan_setup_urb
(serial, d_details->glocont_endpoint, USB_DIR_OUT,
serial, s_priv->glocont_buf, GLOCONT_BUFLEN,
cback->glocont_callback);
}
/* usa19 function doesn't require prescaler */
static int keyspan_usa19_calc_baud(struct usb_serial_port *port,
u32 baud_rate, u32 baudclk, u8 *rate_hi,
u8 *rate_low, u8 *prescaler, int portnum)
{
u32 b16, /* baud rate times 16 (actual rate used internally) */
div, /* divisor */
cnt; /* inverse of divisor (programmed into 8051) */
dev_dbg(&port->dev, "%s - %d.\n", __func__, baud_rate);
/* prevent divide by zero... */
b16 = baud_rate * 16L;
if (b16 == 0)
return KEYSPAN_INVALID_BAUD_RATE;
/* Any "standard" rate over 57k6 is marginal on the USA-19
as we run out of divisor resolution. */
if (baud_rate > 57600)
return KEYSPAN_INVALID_BAUD_RATE;
/* calculate the divisor and the counter (its inverse) */
div = baudclk / b16;
if (div == 0)
return KEYSPAN_INVALID_BAUD_RATE;
else
cnt = 0 - div;
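	/* cnt is the two's complement of div, i.e. the counter reload value */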
if (div > 0xffff)
return KEYSPAN_INVALID_BAUD_RATE;
/* return the counter values if non-null */
if (rate_low)
*rate_low = (u8) (cnt & 0xff);
if (rate_hi)
*rate_hi = (u8) ((cnt >> 8) & 0xff);
if (rate_low && rate_hi)
dev_dbg(&port->dev, "%s - %d %02x %02x.\n",
__func__, baud_rate, *rate_hi, *rate_low);
return KEYSPAN_BAUD_RATE_OK;
}
/* usa19hs function doesn't require prescaler */
static int keyspan_usa19hs_calc_baud(struct usb_serial_port *port,
u32 baud_rate, u32 baudclk, u8 *rate_hi,
u8 *rate_low, u8 *prescaler, int portnum)
{
u32 b16, /* baud rate times 16 (actual rate used internally) */
div; /* divisor */
dev_dbg(&port->dev, "%s - %d.\n", __func__, baud_rate);
/* prevent divide by zero... */
b16 = baud_rate * 16L;
if (b16 == 0)
return KEYSPAN_INVALID_BAUD_RATE;
/* calculate the divisor */
div = baudclk / b16;
if (div == 0)
return KEYSPAN_INVALID_BAUD_RATE;
if (div > 0xffff)
return KEYSPAN_INVALID_BAUD_RATE;
/* return the counter values if non-null */
if (rate_low)
*rate_low = (u8) (div & 0xff);
if (rate_hi)
*rate_hi = (u8) ((div >> 8) & 0xff);
if (rate_low && rate_hi)
dev_dbg(&port->dev, "%s - %d %02x %02x.\n",
__func__, baud_rate, *rate_hi, *rate_low);
return KEYSPAN_BAUD_RATE_OK;
}
static int keyspan_usa19w_calc_baud(struct usb_serial_port *port,
u32 baud_rate, u32 baudclk, u8 *rate_hi,
u8 *rate_low, u8 *prescaler, int portnum)
{
u32 b16, /* baud rate times 16 (actual rate used internally) */
clk, /* clock with 13/8 prescaler */
div, /* divisor using 13/8 prescaler */
res, /* resulting baud rate using 13/8 prescaler */
diff, /* error using 13/8 prescaler */
smallest_diff;
u8 best_prescaler;
int i;
dev_dbg(&port->dev, "%s - %d.\n", __func__, baud_rate);
/* prevent divide by zero */
b16 = baud_rate * 16L;
if (b16 == 0)
return KEYSPAN_INVALID_BAUD_RATE;
/* Calculate prescaler by trying them all and looking
for best fit */
/* start with largest possible difference */
smallest_diff = 0xffffffff;
/* 0 is an invalid prescaler, used as a flag */
best_prescaler = 0;
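	/*
	 * Brute-force search: for each candidate prescaler i (8..255, giving
	 * an effective clock of baudclk * 8 / i) pick the one whose achievable
	 * 16x rate is closest to the requested b16.
	 */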
for (i = 8; i <= 0xff; ++i) {
clk = (baudclk * 8) / (u32) i;
div = clk / b16;
if (div == 0)
continue;
res = clk / div;
diff = (res > b16) ? (res-b16) : (b16-res);
if (diff < smallest_diff) {
best_prescaler = i;
smallest_diff = diff;
}
}
if (best_prescaler == 0)
return KEYSPAN_INVALID_BAUD_RATE;
clk = (baudclk * 8) / (u32) best_prescaler;
div = clk / b16;
/* return the divisor and prescaler if non-null */
if (rate_low)
*rate_low = (u8) (div & 0xff);
if (rate_hi)
*rate_hi = (u8) ((div >> 8) & 0xff);
if (prescaler) {
*prescaler = best_prescaler;
/* dev_dbg(&port->dev, "%s - %d %d\n", __func__, *prescaler, div); */
}
return KEYSPAN_BAUD_RATE_OK;
}
/* USA-28 supports different maximum baud rates on each port */
static int keyspan_usa28_calc_baud(struct usb_serial_port *port,
u32 baud_rate, u32 baudclk, u8 *rate_hi,
u8 *rate_low, u8 *prescaler, int portnum)
{
u32 b16, /* baud rate times 16 (actual rate used internally) */
div, /* divisor */
cnt; /* inverse of divisor (programmed into 8051) */
dev_dbg(&port->dev, "%s - %d.\n", __func__, baud_rate);
/* prevent divide by zero */
b16 = baud_rate * 16L;
if (b16 == 0)
return KEYSPAN_INVALID_BAUD_RATE;
/* calculate the divisor and the counter (its inverse) */
div = KEYSPAN_USA28_BAUDCLK / b16;
if (div == 0)
return KEYSPAN_INVALID_BAUD_RATE;
else
cnt = 0 - div;
/* check for out of range, based on portnum,
and return result */
if (portnum == 0) {
if (div > 0xffff)
return KEYSPAN_INVALID_BAUD_RATE;
} else {
if (portnum == 1) {
if (div > 0xff)
return KEYSPAN_INVALID_BAUD_RATE;
} else
return KEYSPAN_INVALID_BAUD_RATE;
}
	/* return the counter values if not NULL
	   (port 1 will ignore rate_hi) */
if (rate_low)
*rate_low = (u8) (cnt & 0xff);
if (rate_hi)
*rate_hi = (u8) ((cnt >> 8) & 0xff);
dev_dbg(&port->dev, "%s - %d OK.\n", __func__, baud_rate);
return KEYSPAN_BAUD_RATE_OK;
}
static int keyspan_usa26_send_setup(struct usb_serial *serial,
struct usb_serial_port *port,
int reset_port)
{
struct keyspan_usa26_portControlMessage msg;
struct keyspan_serial_private *s_priv;
struct keyspan_port_private *p_priv;
const struct keyspan_device_details *d_details;
struct urb *this_urb;
int device_port, err;
dev_dbg(&port->dev, "%s reset=%d\n", __func__, reset_port);
s_priv = usb_get_serial_data(serial);
p_priv = usb_get_serial_port_data(port);
d_details = s_priv->device_details;
device_port = port->port_number;
this_urb = p_priv->outcont_urb;
/* Make sure we have an urb then send the message */
if (this_urb == NULL) {
dev_dbg(&port->dev, "%s - oops no urb.\n", __func__);
return -1;
}
dev_dbg(&port->dev, "%s - endpoint %x\n",
__func__, usb_pipeendpoint(this_urb->pipe));
/* Save reset port val for resend.
Don't overwrite resend for open/close condition. */
if ((reset_port + 1) > p_priv->resend_cont)
p_priv->resend_cont = reset_port + 1;
if (this_urb->status == -EINPROGRESS) {
/* dev_dbg(&port->dev, "%s - already writing\n", __func__); */
mdelay(5);
return -1;
}
memset(&msg, 0, sizeof(struct keyspan_usa26_portControlMessage));
/* Only set baud rate if it's changed */
if (p_priv->old_baud != p_priv->baud) {
p_priv->old_baud = p_priv->baud;
msg.setClocking = 0xff;
if (d_details->calculate_baud_rate(port, p_priv->baud, d_details->baudclk,
&msg.baudHi, &msg.baudLo, &msg.prescaler,
device_port) == KEYSPAN_INVALID_BAUD_RATE) {
dev_dbg(&port->dev, "%s - Invalid baud rate %d requested, using 9600.\n",
__func__, p_priv->baud);
msg.baudLo = 0;
msg.baudHi = 125; /* Values for 9600 baud */
msg.prescaler = 10;
}
msg.setPrescaler = 0xff;
}
msg.lcr = (p_priv->cflag & CSTOPB) ? STOPBITS_678_2 : STOPBITS_5678_1;
switch (p_priv->cflag & CSIZE) {
case CS5:
msg.lcr |= USA_DATABITS_5;
break;
case CS6:
msg.lcr |= USA_DATABITS_6;
break;
case CS7:
msg.lcr |= USA_DATABITS_7;
break;
case CS8:
msg.lcr |= USA_DATABITS_8;
break;
}
if (p_priv->cflag & PARENB) {
/* note USA_PARITY_NONE == 0 */
msg.lcr |= (p_priv->cflag & PARODD) ?
USA_PARITY_ODD : USA_PARITY_EVEN;
}
msg.setLcr = 0xff;
msg.ctsFlowControl = (p_priv->flow_control == flow_cts);
msg.xonFlowControl = 0;
msg.setFlowControl = 0xff;
msg.forwardingLength = 16;
msg.xonChar = 17;
msg.xoffChar = 19;
/* Opening port */
if (reset_port == 1) {
msg._txOn = 1;
msg._txOff = 0;
msg.txFlush = 0;
msg.txBreak = 0;
msg.rxOn = 1;
msg.rxOff = 0;
msg.rxFlush = 1;
msg.rxForward = 0;
msg.returnStatus = 0;
msg.resetDataToggle = 0xff;
}
/* Closing port */
else if (reset_port == 2) {
msg._txOn = 0;
msg._txOff = 1;
msg.txFlush = 0;
msg.txBreak = 0;
msg.rxOn = 0;
msg.rxOff = 1;
msg.rxFlush = 1;
msg.rxForward = 0;
msg.returnStatus = 0;
msg.resetDataToggle = 0;
}
/* Sending intermediate configs */
else {
msg._txOn = (!p_priv->break_on);
msg._txOff = 0;
msg.txFlush = 0;
msg.txBreak = (p_priv->break_on);
msg.rxOn = 0;
msg.rxOff = 0;
msg.rxFlush = 0;
msg.rxForward = 0;
msg.returnStatus = 0;
msg.resetDataToggle = 0x0;
}
/* Do handshaking outputs */
msg.setTxTriState_setRts = 0xff;
msg.txTriState_rts = p_priv->rts_state;
msg.setHskoa_setDtr = 0xff;
msg.hskoa_dtr = p_priv->dtr_state;
p_priv->resend_cont = 0;
memcpy(this_urb->transfer_buffer, &msg, sizeof(msg));
/* send the data out the device on control endpoint */
this_urb->transfer_buffer_length = sizeof(msg);
err = usb_submit_urb(this_urb, GFP_ATOMIC);
if (err != 0)
dev_dbg(&port->dev, "%s - usb_submit_urb(setup) failed (%d)\n", __func__, err);
return 0;
}
static int keyspan_usa28_send_setup(struct usb_serial *serial,
struct usb_serial_port *port,
int reset_port)
{
struct keyspan_usa28_portControlMessage msg;
struct keyspan_serial_private *s_priv;
struct keyspan_port_private *p_priv;
const struct keyspan_device_details *d_details;
struct urb *this_urb;
int device_port, err;
s_priv = usb_get_serial_data(serial);
p_priv = usb_get_serial_port_data(port);
d_details = s_priv->device_details;
device_port = port->port_number;
/* only do something if we have a bulk out endpoint */
this_urb = p_priv->outcont_urb;
if (this_urb == NULL) {
dev_dbg(&port->dev, "%s - oops no urb.\n", __func__);
return -1;
}
/* Save reset port val for resend.
Don't overwrite resend for open/close condition. */
if ((reset_port + 1) > p_priv->resend_cont)
p_priv->resend_cont = reset_port + 1;
if (this_urb->status == -EINPROGRESS) {
dev_dbg(&port->dev, "%s already writing\n", __func__);
mdelay(5);
return -1;
}
memset(&msg, 0, sizeof(struct keyspan_usa28_portControlMessage));
msg.setBaudRate = 1;
if (d_details->calculate_baud_rate(port, p_priv->baud, d_details->baudclk,
&msg.baudHi, &msg.baudLo, NULL,
device_port) == KEYSPAN_INVALID_BAUD_RATE) {
dev_dbg(&port->dev, "%s - Invalid baud rate requested %d.\n",
__func__, p_priv->baud);
msg.baudLo = 0xff;
msg.baudHi = 0xb2; /* Values for 9600 baud */
}
/* If parity is enabled, we must calculate it ourselves. */
msg.parity = 0; /* XXX for now */
msg.ctsFlowControl = (p_priv->flow_control == flow_cts);
msg.xonFlowControl = 0;
/* Do handshaking outputs, DTR is inverted relative to RTS */
msg.rts = p_priv->rts_state;
msg.dtr = p_priv->dtr_state;
msg.forwardingLength = 16;
msg.forwardMs = 10;
msg.breakThreshold = 45;
msg.xonChar = 17;
msg.xoffChar = 19;
/*msg.returnStatus = 1;
msg.resetDataToggle = 0xff;*/
/* Opening port */
if (reset_port == 1) {
msg._txOn = 1;
msg._txOff = 0;
msg.txFlush = 0;
msg.txForceXoff = 0;
msg.txBreak = 0;
msg.rxOn = 1;
msg.rxOff = 0;
msg.rxFlush = 1;
msg.rxForward = 0;
msg.returnStatus = 0;
msg.resetDataToggle = 0xff;
}
/* Closing port */
else if (reset_port == 2) {
msg._txOn = 0;
msg._txOff = 1;
msg.txFlush = 0;
msg.txForceXoff = 0;
msg.txBreak = 0;
msg.rxOn = 0;
msg.rxOff = 1;
msg.rxFlush = 1;
msg.rxForward = 0;
msg.returnStatus = 0;
msg.resetDataToggle = 0;
}
/* Sending intermediate configs */
else {
msg._txOn = (!p_priv->break_on);
msg._txOff = 0;
msg.txFlush = 0;
msg.txForceXoff = 0;
msg.txBreak = (p_priv->break_on);
msg.rxOn = 0;
msg.rxOff = 0;
msg.rxFlush = 0;
msg.rxForward = 0;
msg.returnStatus = 0;
msg.resetDataToggle = 0x0;
}
p_priv->resend_cont = 0;
memcpy(this_urb->transfer_buffer, &msg, sizeof(msg));
/* send the data out the device on control endpoint */
this_urb->transfer_buffer_length = sizeof(msg);
err = usb_submit_urb(this_urb, GFP_ATOMIC);
if (err != 0)
dev_dbg(&port->dev, "%s - usb_submit_urb(setup) failed\n", __func__);
return 0;
}
static int keyspan_usa49_send_setup(struct usb_serial *serial,
struct usb_serial_port *port,
int reset_port)
{
struct keyspan_usa49_portControlMessage msg;
struct usb_ctrlrequest *dr = NULL;
struct keyspan_serial_private *s_priv;
struct keyspan_port_private *p_priv;
const struct keyspan_device_details *d_details;
struct urb *this_urb;
int err, device_port;
s_priv = usb_get_serial_data(serial);
p_priv = usb_get_serial_port_data(port);
d_details = s_priv->device_details;
this_urb = s_priv->glocont_urb;
/* Work out which port within the device is being setup */
device_port = port->port_number;
/* Make sure we have an urb then send the message */
if (this_urb == NULL) {
dev_dbg(&port->dev, "%s - oops no urb for port.\n", __func__);
return -1;
}
dev_dbg(&port->dev, "%s - endpoint %x (%d)\n",
__func__, usb_pipeendpoint(this_urb->pipe), device_port);
/* Save reset port val for resend.
Don't overwrite resend for open/close condition. */
if ((reset_port + 1) > p_priv->resend_cont)
p_priv->resend_cont = reset_port + 1;
if (this_urb->status == -EINPROGRESS) {
/* dev_dbg(&port->dev, "%s - already writing\n", __func__); */
mdelay(5);
return -1;
}
memset(&msg, 0, sizeof(struct keyspan_usa49_portControlMessage));
msg.portNumber = device_port;
/* Only set baud rate if it's changed */
if (p_priv->old_baud != p_priv->baud) {
p_priv->old_baud = p_priv->baud;
msg.setClocking = 0xff;
if (d_details->calculate_baud_rate(port, p_priv->baud, d_details->baudclk,
&msg.baudHi, &msg.baudLo, &msg.prescaler,
device_port) == KEYSPAN_INVALID_BAUD_RATE) {
dev_dbg(&port->dev, "%s - Invalid baud rate %d requested, using 9600.\n",
__func__, p_priv->baud);
msg.baudLo = 0;
msg.baudHi = 125; /* Values for 9600 baud */
msg.prescaler = 10;
}
/* msg.setPrescaler = 0xff; */
}
msg.lcr = (p_priv->cflag & CSTOPB) ? STOPBITS_678_2 : STOPBITS_5678_1;
switch (p_priv->cflag & CSIZE) {
case CS5:
msg.lcr |= USA_DATABITS_5;
break;
case CS6:
msg.lcr |= USA_DATABITS_6;
break;
case CS7:
msg.lcr |= USA_DATABITS_7;
break;
case CS8:
msg.lcr |= USA_DATABITS_8;
break;
}
if (p_priv->cflag & PARENB) {
/* note USA_PARITY_NONE == 0 */
msg.lcr |= (p_priv->cflag & PARODD) ?
USA_PARITY_ODD : USA_PARITY_EVEN;
}
msg.setLcr = 0xff;
msg.ctsFlowControl = (p_priv->flow_control == flow_cts);
msg.xonFlowControl = 0;
msg.setFlowControl = 0xff;
msg.forwardingLength = 16;
msg.xonChar = 17;
msg.xoffChar = 19;
/* Opening port */
if (reset_port == 1) {
msg._txOn = 1;
msg._txOff = 0;
msg.txFlush = 0;
msg.txBreak = 0;
msg.rxOn = 1;
msg.rxOff = 0;
msg.rxFlush = 1;
msg.rxForward = 0;
msg.returnStatus = 0;
msg.resetDataToggle = 0xff;
msg.enablePort = 1;
msg.disablePort = 0;
}
/* Closing port */
else if (reset_port == 2) {
msg._txOn = 0;
msg._txOff = 1;
msg.txFlush = 0;
msg.txBreak = 0;
msg.rxOn = 0;
msg.rxOff = 1;
msg.rxFlush = 1;
msg.rxForward = 0;
msg.returnStatus = 0;
msg.resetDataToggle = 0;
msg.enablePort = 0;
msg.disablePort = 1;
}
/* Sending intermediate configs */
else {
msg._txOn = (!p_priv->break_on);
msg._txOff = 0;
msg.txFlush = 0;
msg.txBreak = (p_priv->break_on);
msg.rxOn = 0;
msg.rxOff = 0;
msg.rxFlush = 0;
msg.rxForward = 0;
msg.returnStatus = 0;
msg.resetDataToggle = 0x0;
msg.enablePort = 0;
msg.disablePort = 0;
}
/* Do handshaking outputs */
msg.setRts = 0xff;
msg.rts = p_priv->rts_state;
msg.setDtr = 0xff;
msg.dtr = p_priv->dtr_state;
p_priv->resend_cont = 0;
	/* if the device is a 49WG, send the control message on USB
	   control endpoint 0 */
if (d_details->product_id == keyspan_usa49wg_product_id) {
dr = (void *)(s_priv->ctrl_buf);
dr->bRequestType = USB_TYPE_VENDOR | USB_DIR_OUT;
dr->bRequest = 0xB0; /* 49wg control message */
dr->wValue = 0;
dr->wIndex = 0;
dr->wLength = cpu_to_le16(sizeof(msg));
memcpy(s_priv->glocont_buf, &msg, sizeof(msg));
usb_fill_control_urb(this_urb, serial->dev,
usb_sndctrlpipe(serial->dev, 0),
(unsigned char *)dr, s_priv->glocont_buf,
sizeof(msg), usa49_glocont_callback, serial);
} else {
memcpy(this_urb->transfer_buffer, &msg, sizeof(msg));
/* send the data out the device on control endpoint */
this_urb->transfer_buffer_length = sizeof(msg);
}
err = usb_submit_urb(this_urb, GFP_ATOMIC);
if (err != 0)
dev_dbg(&port->dev, "%s - usb_submit_urb(setup) failed (%d)\n", __func__, err);
return 0;
}
static int keyspan_usa90_send_setup(struct usb_serial *serial,
struct usb_serial_port *port,
int reset_port)
{
struct keyspan_usa90_portControlMessage msg;
struct keyspan_serial_private *s_priv;
struct keyspan_port_private *p_priv;
const struct keyspan_device_details *d_details;
struct urb *this_urb;
int err;
u8 prescaler;
s_priv = usb_get_serial_data(serial);
p_priv = usb_get_serial_port_data(port);
d_details = s_priv->device_details;
/* only do something if we have a bulk out endpoint */
this_urb = p_priv->outcont_urb;
if (this_urb == NULL) {
dev_dbg(&port->dev, "%s - oops no urb.\n", __func__);
return -1;
}
/* Save reset port val for resend.
Don't overwrite resend for open/close condition. */
if ((reset_port + 1) > p_priv->resend_cont)
p_priv->resend_cont = reset_port + 1;
if (this_urb->status == -EINPROGRESS) {
dev_dbg(&port->dev, "%s already writing\n", __func__);
mdelay(5);
return -1;
}
memset(&msg, 0, sizeof(struct keyspan_usa90_portControlMessage));
/* Only set baud rate if it's changed */
if (p_priv->old_baud != p_priv->baud) {
p_priv->old_baud = p_priv->baud;
msg.setClocking = 0x01;
if (d_details->calculate_baud_rate(port, p_priv->baud, d_details->baudclk,
&msg.baudHi, &msg.baudLo, &prescaler, 0) == KEYSPAN_INVALID_BAUD_RATE) {
dev_dbg(&port->dev, "%s - Invalid baud rate %d requested, using 9600.\n",
__func__, p_priv->baud);
p_priv->baud = 9600;
d_details->calculate_baud_rate(port, p_priv->baud, d_details->baudclk,
&msg.baudHi, &msg.baudLo, &prescaler, 0);
}
msg.setRxMode = 1;
msg.setTxMode = 1;
}
/* modes must always be correctly specified */
if (p_priv->baud > 57600) {
msg.rxMode = RXMODE_DMA;
msg.txMode = TXMODE_DMA;
} else {
msg.rxMode = RXMODE_BYHAND;
msg.txMode = TXMODE_BYHAND;
}
msg.lcr = (p_priv->cflag & CSTOPB) ? STOPBITS_678_2 : STOPBITS_5678_1;
switch (p_priv->cflag & CSIZE) {
case CS5:
msg.lcr |= USA_DATABITS_5;
break;
case CS6:
msg.lcr |= USA_DATABITS_6;
break;
case CS7:
msg.lcr |= USA_DATABITS_7;
break;
case CS8:
msg.lcr |= USA_DATABITS_8;
break;
}
if (p_priv->cflag & PARENB) {
/* note USA_PARITY_NONE == 0 */
msg.lcr |= (p_priv->cflag & PARODD) ?
USA_PARITY_ODD : USA_PARITY_EVEN;
}
if (p_priv->old_cflag != p_priv->cflag) {
p_priv->old_cflag = p_priv->cflag;
msg.setLcr = 0x01;
}
if (p_priv->flow_control == flow_cts)
msg.txFlowControl = TXFLOW_CTS;
msg.setTxFlowControl = 0x01;
msg.setRxFlowControl = 0x01;
msg.rxForwardingLength = 16;
msg.rxForwardingTimeout = 16;
msg.txAckSetting = 0;
msg.xonChar = 17;
msg.xoffChar = 19;
/* Opening port */
if (reset_port == 1) {
msg.portEnabled = 1;
msg.rxFlush = 1;
msg.txBreak = (p_priv->break_on);
}
/* Closing port */
else if (reset_port == 2)
msg.portEnabled = 0;
/* Sending intermediate configs */
else {
msg.portEnabled = 1;
msg.txBreak = (p_priv->break_on);
}
/* Do handshaking outputs */
msg.setRts = 0x01;
msg.rts = p_priv->rts_state;
msg.setDtr = 0x01;
msg.dtr = p_priv->dtr_state;
p_priv->resend_cont = 0;
memcpy(this_urb->transfer_buffer, &msg, sizeof(msg));
/* send the data out the device on control endpoint */
this_urb->transfer_buffer_length = sizeof(msg);
err = usb_submit_urb(this_urb, GFP_ATOMIC);
if (err != 0)
dev_dbg(&port->dev, "%s - usb_submit_urb(setup) failed (%d)\n", __func__, err);
return 0;
}
static int keyspan_usa67_send_setup(struct usb_serial *serial,
struct usb_serial_port *port,
int reset_port)
{
struct keyspan_usa67_portControlMessage msg;
struct keyspan_serial_private *s_priv;
struct keyspan_port_private *p_priv;
const struct keyspan_device_details *d_details;
struct urb *this_urb;
int err, device_port;
s_priv = usb_get_serial_data(serial);
p_priv = usb_get_serial_port_data(port);
d_details = s_priv->device_details;
this_urb = s_priv->glocont_urb;
/* Work out which port within the device is being setup */
device_port = port->port_number;
/* Make sure we have an urb then send the message */
if (this_urb == NULL) {
dev_dbg(&port->dev, "%s - oops no urb for port.\n", __func__);
return -1;
}
/* Save reset port val for resend.
Don't overwrite resend for open/close condition. */
if ((reset_port + 1) > p_priv->resend_cont)
p_priv->resend_cont = reset_port + 1;
if (this_urb->status == -EINPROGRESS) {
/* dev_dbg(&port->dev, "%s - already writing\n", __func__); */
mdelay(5);
return -1;
}
memset(&msg, 0, sizeof(struct keyspan_usa67_portControlMessage));
msg.port = device_port;
/* Only set baud rate if it's changed */
if (p_priv->old_baud != p_priv->baud) {
p_priv->old_baud = p_priv->baud;
msg.setClocking = 0xff;
if (d_details->calculate_baud_rate(port, p_priv->baud, d_details->baudclk,
&msg.baudHi, &msg.baudLo, &msg.prescaler,
device_port) == KEYSPAN_INVALID_BAUD_RATE) {
dev_dbg(&port->dev, "%s - Invalid baud rate %d requested, using 9600.\n",
__func__, p_priv->baud);
msg.baudLo = 0;
msg.baudHi = 125; /* Values for 9600 baud */
msg.prescaler = 10;
}
msg.setPrescaler = 0xff;
}
msg.lcr = (p_priv->cflag & CSTOPB) ? STOPBITS_678_2 : STOPBITS_5678_1;
switch (p_priv->cflag & CSIZE) {
case CS5:
msg.lcr |= USA_DATABITS_5;
break;
case CS6:
msg.lcr |= USA_DATABITS_6;
break;
case CS7:
msg.lcr |= USA_DATABITS_7;
break;
case CS8:
msg.lcr |= USA_DATABITS_8;
break;
}
if (p_priv->cflag & PARENB) {
/* note USA_PARITY_NONE == 0 */
msg.lcr |= (p_priv->cflag & PARODD) ?
USA_PARITY_ODD : USA_PARITY_EVEN;
}
msg.setLcr = 0xff;
msg.ctsFlowControl = (p_priv->flow_control == flow_cts);
msg.xonFlowControl = 0;
msg.setFlowControl = 0xff;
msg.forwardingLength = 16;
msg.xonChar = 17;
msg.xoffChar = 19;
if (reset_port == 1) {
/* Opening port */
msg._txOn = 1;
msg._txOff = 0;
msg.txFlush = 0;
msg.txBreak = 0;
msg.rxOn = 1;
msg.rxOff = 0;
msg.rxFlush = 1;
msg.rxForward = 0;
msg.returnStatus = 0;
msg.resetDataToggle = 0xff;
} else if (reset_port == 2) {
/* Closing port */
msg._txOn = 0;
msg._txOff = 1;
msg.txFlush = 0;
msg.txBreak = 0;
msg.rxOn = 0;
msg.rxOff = 1;
msg.rxFlush = 1;
msg.rxForward = 0;
msg.returnStatus = 0;
msg.resetDataToggle = 0;
} else {
/* Sending intermediate configs */
msg._txOn = (!p_priv->break_on);
msg._txOff = 0;
msg.txFlush = 0;
msg.txBreak = (p_priv->break_on);
msg.rxOn = 0;
msg.rxOff = 0;
msg.rxFlush = 0;
msg.rxForward = 0;
msg.returnStatus = 0;
msg.resetDataToggle = 0x0;
}
/* Do handshaking outputs */
msg.setTxTriState_setRts = 0xff;
msg.txTriState_rts = p_priv->rts_state;
msg.setHskoa_setDtr = 0xff;
msg.hskoa_dtr = p_priv->dtr_state;
p_priv->resend_cont = 0;
memcpy(this_urb->transfer_buffer, &msg, sizeof(msg));
/* send the data out the device on control endpoint */
this_urb->transfer_buffer_length = sizeof(msg);
err = usb_submit_urb(this_urb, GFP_ATOMIC);
if (err != 0)
dev_dbg(&port->dev, "%s - usb_submit_urb(setup) failed (%d)\n", __func__, err);
return 0;
}
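/*
 * Dispatch to the message-format specific setup routine.  reset_port is 0
 * for ordinary configuration updates, 1 when the port is being opened and
 * 2 when it is being closed (see keyspan_open()/keyspan_close()).
 */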
static void keyspan_send_setup(struct usb_serial_port *port, int reset_port)
{
struct usb_serial *serial = port->serial;
struct keyspan_serial_private *s_priv;
const struct keyspan_device_details *d_details;
s_priv = usb_get_serial_data(serial);
d_details = s_priv->device_details;
switch (d_details->msg_format) {
case msg_usa26:
keyspan_usa26_send_setup(serial, port, reset_port);
break;
case msg_usa28:
keyspan_usa28_send_setup(serial, port, reset_port);
break;
case msg_usa49:
keyspan_usa49_send_setup(serial, port, reset_port);
break;
case msg_usa90:
keyspan_usa90_send_setup(serial, port, reset_port);
break;
case msg_usa67:
keyspan_usa67_send_setup(serial, port, reset_port);
break;
}
}
/* Gets called by the "real" driver (ie once firmware is loaded
and renumeration has taken place. */
static int keyspan_startup(struct usb_serial *serial)
{
int i, err;
struct keyspan_serial_private *s_priv;
const struct keyspan_device_details *d_details;
for (i = 0; (d_details = keyspan_devices[i]) != NULL; ++i)
if (d_details->product_id ==
le16_to_cpu(serial->dev->descriptor.idProduct))
break;
if (d_details == NULL) {
dev_err(&serial->dev->dev, "%s - unknown product id %x\n",
__func__, le16_to_cpu(serial->dev->descriptor.idProduct));
return -ENODEV;
}
/* Setup private data for serial driver */
s_priv = kzalloc(sizeof(struct keyspan_serial_private), GFP_KERNEL);
if (!s_priv)
return -ENOMEM;
s_priv->instat_buf = kzalloc(INSTAT_BUFLEN, GFP_KERNEL);
if (!s_priv->instat_buf)
goto err_instat_buf;
s_priv->indat_buf = kzalloc(INDAT49W_BUFLEN, GFP_KERNEL);
if (!s_priv->indat_buf)
goto err_indat_buf;
s_priv->glocont_buf = kzalloc(GLOCONT_BUFLEN, GFP_KERNEL);
if (!s_priv->glocont_buf)
goto err_glocont_buf;
s_priv->ctrl_buf = kzalloc(sizeof(struct usb_ctrlrequest), GFP_KERNEL);
if (!s_priv->ctrl_buf)
goto err_ctrl_buf;
s_priv->device_details = d_details;
usb_set_serial_data(serial, s_priv);
keyspan_setup_urbs(serial);
if (s_priv->instat_urb != NULL) {
err = usb_submit_urb(s_priv->instat_urb, GFP_KERNEL);
if (err != 0)
dev_dbg(&serial->dev->dev, "%s - submit instat urb failed %d\n", __func__, err);
}
if (s_priv->indat_urb != NULL) {
err = usb_submit_urb(s_priv->indat_urb, GFP_KERNEL);
if (err != 0)
dev_dbg(&serial->dev->dev, "%s - submit indat urb failed %d\n", __func__, err);
}
return 0;
err_ctrl_buf:
kfree(s_priv->glocont_buf);
err_glocont_buf:
kfree(s_priv->indat_buf);
err_indat_buf:
kfree(s_priv->instat_buf);
err_instat_buf:
kfree(s_priv);
return -ENOMEM;
}
static void keyspan_disconnect(struct usb_serial *serial)
{
struct keyspan_serial_private *s_priv;
s_priv = usb_get_serial_data(serial);
usb_kill_urb(s_priv->instat_urb);
usb_kill_urb(s_priv->glocont_urb);
usb_kill_urb(s_priv->indat_urb);
}
static void keyspan_release(struct usb_serial *serial)
{
struct keyspan_serial_private *s_priv;
s_priv = usb_get_serial_data(serial);
/* Make sure to unlink the URBs submitted in attach. */
usb_kill_urb(s_priv->instat_urb);
usb_kill_urb(s_priv->indat_urb);
usb_free_urb(s_priv->instat_urb);
usb_free_urb(s_priv->indat_urb);
usb_free_urb(s_priv->glocont_urb);
kfree(s_priv->ctrl_buf);
kfree(s_priv->glocont_buf);
kfree(s_priv->indat_buf);
kfree(s_priv->instat_buf);
kfree(s_priv);
}
static int keyspan_port_probe(struct usb_serial_port *port)
{
struct usb_serial *serial = port->serial;
struct keyspan_serial_private *s_priv;
struct keyspan_port_private *p_priv;
const struct keyspan_device_details *d_details;
struct callbacks *cback;
int endp;
int port_num;
int i;
s_priv = usb_get_serial_data(serial);
d_details = s_priv->device_details;
p_priv = kzalloc(sizeof(*p_priv), GFP_KERNEL);
if (!p_priv)
return -ENOMEM;
for (i = 0; i < ARRAY_SIZE(p_priv->in_buffer); ++i) {
p_priv->in_buffer[i] = kzalloc(IN_BUFLEN, GFP_KERNEL);
if (!p_priv->in_buffer[i])
goto err_free_in_buffer;
}
for (i = 0; i < ARRAY_SIZE(p_priv->out_buffer); ++i) {
p_priv->out_buffer[i] = kzalloc(OUT_BUFLEN, GFP_KERNEL);
if (!p_priv->out_buffer[i])
goto err_free_out_buffer;
}
p_priv->inack_buffer = kzalloc(INACK_BUFLEN, GFP_KERNEL);
if (!p_priv->inack_buffer)
goto err_free_out_buffer;
p_priv->outcont_buffer = kzalloc(OUTCONT_BUFLEN, GFP_KERNEL);
if (!p_priv->outcont_buffer)
goto err_free_inack_buffer;
p_priv->device_details = d_details;
/* Setup values for the various callback routines */
cback = &keyspan_callbacks[d_details->msg_format];
port_num = port->port_number;
/* Do indat endpoints first, once for each flip */
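	/*
	 * indat_endp_flip/outdat_endp_flip are used both as the loop bound
	 * here (claiming one or two consecutive endpoints per direction) and,
	 * for outdat, as the mask that advances out_flip in keyspan_write()
	 * and keyspan_write_room().
	 */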
endp = d_details->indat_endpoints[port_num];
for (i = 0; i <= d_details->indat_endp_flip; ++i, ++endp) {
p_priv->in_urbs[i] = keyspan_setup_urb(serial, endp,
USB_DIR_IN, port,
p_priv->in_buffer[i],
IN_BUFLEN,
cback->indat_callback);
}
/* outdat endpoints also have flip */
endp = d_details->outdat_endpoints[port_num];
for (i = 0; i <= d_details->outdat_endp_flip; ++i, ++endp) {
p_priv->out_urbs[i] = keyspan_setup_urb(serial, endp,
USB_DIR_OUT, port,
p_priv->out_buffer[i],
OUT_BUFLEN,
cback->outdat_callback);
}
/* inack endpoint */
p_priv->inack_urb = keyspan_setup_urb(serial,
d_details->inack_endpoints[port_num],
USB_DIR_IN, port,
p_priv->inack_buffer,
INACK_BUFLEN,
cback->inack_callback);
/* outcont endpoint */
p_priv->outcont_urb = keyspan_setup_urb(serial,
d_details->outcont_endpoints[port_num],
USB_DIR_OUT, port,
p_priv->outcont_buffer,
OUTCONT_BUFLEN,
cback->outcont_callback);
usb_set_serial_port_data(port, p_priv);
return 0;
err_free_inack_buffer:
kfree(p_priv->inack_buffer);
err_free_out_buffer:
for (i = 0; i < ARRAY_SIZE(p_priv->out_buffer); ++i)
kfree(p_priv->out_buffer[i]);
err_free_in_buffer:
for (i = 0; i < ARRAY_SIZE(p_priv->in_buffer); ++i)
kfree(p_priv->in_buffer[i]);
kfree(p_priv);
return -ENOMEM;
}
static void keyspan_port_remove(struct usb_serial_port *port)
{
struct keyspan_port_private *p_priv;
int i;
p_priv = usb_get_serial_port_data(port);
usb_kill_urb(p_priv->inack_urb);
usb_kill_urb(p_priv->outcont_urb);
for (i = 0; i < 2; i++) {
usb_kill_urb(p_priv->in_urbs[i]);
usb_kill_urb(p_priv->out_urbs[i]);
}
usb_free_urb(p_priv->inack_urb);
usb_free_urb(p_priv->outcont_urb);
for (i = 0; i < 2; i++) {
usb_free_urb(p_priv->in_urbs[i]);
usb_free_urb(p_priv->out_urbs[i]);
}
kfree(p_priv->outcont_buffer);
kfree(p_priv->inack_buffer);
for (i = 0; i < ARRAY_SIZE(p_priv->out_buffer); ++i)
kfree(p_priv->out_buffer[i]);
for (i = 0; i < ARRAY_SIZE(p_priv->in_buffer); ++i)
kfree(p_priv->in_buffer[i]);
kfree(p_priv);
}
/* Structs for the devices, pre and post renumeration. */
static struct usb_serial_driver keyspan_pre_device = {
.driver = {
.owner = THIS_MODULE,
.name = "keyspan_no_firm",
},
.description = "Keyspan - (without firmware)",
.id_table = keyspan_pre_ids,
.num_ports = 1,
.attach = keyspan_fake_startup,
};
static struct usb_serial_driver keyspan_1port_device = {
.driver = {
.owner = THIS_MODULE,
.name = "keyspan_1",
},
.description = "Keyspan 1 port adapter",
.id_table = keyspan_1port_ids,
.num_ports = 1,
.open = keyspan_open,
.close = keyspan_close,
.dtr_rts = keyspan_dtr_rts,
.write = keyspan_write,
.write_room = keyspan_write_room,
.set_termios = keyspan_set_termios,
.break_ctl = keyspan_break_ctl,
.tiocmget = keyspan_tiocmget,
.tiocmset = keyspan_tiocmset,
.attach = keyspan_startup,
.disconnect = keyspan_disconnect,
.release = keyspan_release,
.port_probe = keyspan_port_probe,
.port_remove = keyspan_port_remove,
};
static struct usb_serial_driver keyspan_2port_device = {
.driver = {
.owner = THIS_MODULE,
.name = "keyspan_2",
},
.description = "Keyspan 2 port adapter",
.id_table = keyspan_2port_ids,
.num_ports = 2,
.open = keyspan_open,
.close = keyspan_close,
.dtr_rts = keyspan_dtr_rts,
.write = keyspan_write,
.write_room = keyspan_write_room,
.set_termios = keyspan_set_termios,
.break_ctl = keyspan_break_ctl,
.tiocmget = keyspan_tiocmget,
.tiocmset = keyspan_tiocmset,
.attach = keyspan_startup,
.disconnect = keyspan_disconnect,
.release = keyspan_release,
.port_probe = keyspan_port_probe,
.port_remove = keyspan_port_remove,
};
static struct usb_serial_driver keyspan_4port_device = {
.driver = {
.owner = THIS_MODULE,
.name = "keyspan_4",
},
.description = "Keyspan 4 port adapter",
.id_table = keyspan_4port_ids,
.num_ports = 4,
.open = keyspan_open,
.close = keyspan_close,
.dtr_rts = keyspan_dtr_rts,
.write = keyspan_write,
.write_room = keyspan_write_room,
.set_termios = keyspan_set_termios,
.break_ctl = keyspan_break_ctl,
.tiocmget = keyspan_tiocmget,
.tiocmset = keyspan_tiocmset,
.attach = keyspan_startup,
.disconnect = keyspan_disconnect,
.release = keyspan_release,
.port_probe = keyspan_port_probe,
.port_remove = keyspan_port_remove,
};
static struct usb_serial_driver * const serial_drivers[] = {
&keyspan_pre_device, &keyspan_1port_device,
&keyspan_2port_device, &keyspan_4port_device, NULL
};
module_usb_serial_driver(serial_drivers, keyspan_ids_combined);
MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL");
MODULE_FIRMWARE("keyspan/usa28.fw");
MODULE_FIRMWARE("keyspan/usa28x.fw");
MODULE_FIRMWARE("keyspan/usa28xa.fw");
MODULE_FIRMWARE("keyspan/usa28xb.fw");
MODULE_FIRMWARE("keyspan/usa19.fw");
MODULE_FIRMWARE("keyspan/usa19qi.fw");
MODULE_FIRMWARE("keyspan/mpr.fw");
MODULE_FIRMWARE("keyspan/usa19qw.fw");
MODULE_FIRMWARE("keyspan/usa18x.fw");
MODULE_FIRMWARE("keyspan/usa19w.fw");
MODULE_FIRMWARE("keyspan/usa49w.fw");
MODULE_FIRMWARE("keyspan/usa49wlc.fw");
| linux-master | drivers/usb/serial/keyspan.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Symbol USB barcode to serial driver
*
* Copyright (C) 2013 Johan Hovold <[email protected]>
* Copyright (C) 2009 Greg Kroah-Hartman <[email protected]>
* Copyright (C) 2009 Novell Inc.
*/
#include <linux/kernel.h>
#include <linux/tty.h>
#include <linux/slab.h>
#include <linux/tty_driver.h>
#include <linux/tty_flip.h>
#include <linux/module.h>
#include <linux/usb.h>
#include <linux/usb/serial.h>
#include <linux/uaccess.h>
static const struct usb_device_id id_table[] = {
{ USB_DEVICE(0x05e0, 0x0600) },
{ },
};
MODULE_DEVICE_TABLE(usb, id_table);
struct symbol_private {
spinlock_t lock; /* protects the following flags */
bool throttled;
bool actually_throttled;
};
static void symbol_int_callback(struct urb *urb)
{
struct usb_serial_port *port = urb->context;
struct symbol_private *priv = usb_get_serial_port_data(port);
unsigned char *data = urb->transfer_buffer;
int status = urb->status;
unsigned long flags;
int result;
int data_length;
switch (status) {
case 0:
/* success */
break;
case -ECONNRESET:
case -ENOENT:
case -ESHUTDOWN:
/* this urb is terminated, clean up */
dev_dbg(&port->dev, "%s - urb shutting down with status: %d\n",
__func__, status);
return;
default:
dev_dbg(&port->dev, "%s - nonzero urb status received: %d\n",
__func__, status);
goto exit;
}
usb_serial_debug_data(&port->dev, __func__, urb->actual_length, data);
/*
* Data from the device comes with a 1 byte header:
*
* <size of data> <data>...
*/
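/*
 * For example (illustrative bytes, not from a real scanner): a 4-byte
 * transfer of 03 41 42 43 carries data_length = 3 and the payload "ABC";
 * the clamp below guards against a header byte that overstates the length.
 */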
if (urb->actual_length > 1) {
data_length = data[0];
if (data_length > (urb->actual_length - 1))
data_length = urb->actual_length - 1;
tty_insert_flip_string(&port->port, &data[1], data_length);
tty_flip_buffer_push(&port->port);
} else {
dev_dbg(&port->dev, "%s - short packet\n", __func__);
}
exit:
spin_lock_irqsave(&priv->lock, flags);
/* Keep reading unless we have been throttled */
if (!priv->throttled) {
result = usb_submit_urb(port->interrupt_in_urb, GFP_ATOMIC);
if (result)
dev_err(&port->dev,
"%s - failed resubmitting read urb, error %d\n",
__func__, result);
} else
priv->actually_throttled = true;
spin_unlock_irqrestore(&priv->lock, flags);
}
static int symbol_open(struct tty_struct *tty, struct usb_serial_port *port)
{
struct symbol_private *priv = usb_get_serial_port_data(port);
unsigned long flags;
int result = 0;
spin_lock_irqsave(&priv->lock, flags);
priv->throttled = false;
priv->actually_throttled = false;
spin_unlock_irqrestore(&priv->lock, flags);
/* Start reading from the device */
result = usb_submit_urb(port->interrupt_in_urb, GFP_KERNEL);
if (result)
dev_err(&port->dev,
"%s - failed resubmitting read urb, error %d\n",
__func__, result);
return result;
}
static void symbol_close(struct usb_serial_port *port)
{
usb_kill_urb(port->interrupt_in_urb);
}
static void symbol_throttle(struct tty_struct *tty)
{
struct usb_serial_port *port = tty->driver_data;
struct symbol_private *priv = usb_get_serial_port_data(port);
spin_lock_irq(&priv->lock);
priv->throttled = true;
spin_unlock_irq(&priv->lock);
}
static void symbol_unthrottle(struct tty_struct *tty)
{
struct usb_serial_port *port = tty->driver_data;
struct symbol_private *priv = usb_get_serial_port_data(port);
int result;
bool was_throttled;
spin_lock_irq(&priv->lock);
priv->throttled = false;
was_throttled = priv->actually_throttled;
priv->actually_throttled = false;
spin_unlock_irq(&priv->lock);
if (was_throttled) {
result = usb_submit_urb(port->interrupt_in_urb, GFP_KERNEL);
if (result)
dev_err(&port->dev,
"%s - failed submitting read urb, error %d\n",
__func__, result);
}
}
static int symbol_port_probe(struct usb_serial_port *port)
{
struct symbol_private *priv;
priv = kzalloc(sizeof(*priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
spin_lock_init(&priv->lock);
usb_set_serial_port_data(port, priv);
return 0;
}
static void symbol_port_remove(struct usb_serial_port *port)
{
struct symbol_private *priv = usb_get_serial_port_data(port);
kfree(priv);
}
static struct usb_serial_driver symbol_device = {
.driver = {
.owner = THIS_MODULE,
.name = "symbol",
},
.id_table = id_table,
.num_ports = 1,
.num_interrupt_in = 1,
.port_probe = symbol_port_probe,
.port_remove = symbol_port_remove,
.open = symbol_open,
.close = symbol_close,
.throttle = symbol_throttle,
.unthrottle = symbol_unthrottle,
.read_int_callback = symbol_int_callback,
};
static struct usb_serial_driver * const serial_drivers[] = {
&symbol_device, NULL
};
module_usb_serial_driver(serial_drivers, id_table);
MODULE_LICENSE("GPL v2");
| linux-master | drivers/usb/serial/symbolserial.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* USB Wishbone-Serial adapter driver
*
* Copyright (C) 2013 Wesley W. Terpstra <[email protected]>
* Copyright (C) 2013 GSI Helmholtz Centre for Heavy Ion Research GmbH
*/
#include <linux/kernel.h>
#include <linux/tty.h>
#include <linux/module.h>
#include <linux/usb.h>
#include <linux/usb/serial.h>
#include <linux/uaccess.h>
#define GSI_VENDOR_OPENCLOSE 0xB0
static const struct usb_device_id id_table[] = {
{ USB_DEVICE_AND_INTERFACE_INFO(0x1D50, 0x6062, 0xFF, 0xFF, 0xFF) },
{ },
};
MODULE_DEVICE_TABLE(usb, id_table);
/*
* Etherbone must be told that a new stream has begun before data arrives.
* This is necessary to restart the negotiation of Wishbone bus parameters.
* Similarly, when the stream ends, Etherbone must be told so that the cycle
* line can be driven low in the case that userspace failed to do so.
*/
static int usb_gsi_openclose(struct usb_serial_port *port, int value)
{
struct usb_device *dev = port->serial->dev;
return usb_control_msg(
dev,
usb_sndctrlpipe(dev, 0), /* Send to EP0OUT */
GSI_VENDOR_OPENCLOSE,
USB_DIR_OUT|USB_TYPE_VENDOR|USB_RECIP_INTERFACE,
value, /* wValue = device is open(1) or closed(0) */
port->serial->interface->cur_altsetting->desc.bInterfaceNumber,
NULL, 0, /* There is no data stage */
5000); /* give up after five seconds */
}
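/*
 * For reference, the call above issues a vendor control request with
 * bmRequestType 0x41 (OUT | vendor | interface), bRequest 0xB0, wValue
 * 0 (closed) or 1 (open), wIndex set to the interface number and no
 * data stage.
 */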
static int wishbone_serial_open(struct tty_struct *tty,
struct usb_serial_port *port)
{
int retval;
retval = usb_gsi_openclose(port, 1);
if (retval) {
dev_err(&port->serial->dev->dev,
"Could not mark device as open (%d)\n",
retval);
return retval;
}
retval = usb_serial_generic_open(tty, port);
if (retval)
usb_gsi_openclose(port, 0);
return retval;
}
static void wishbone_serial_close(struct usb_serial_port *port)
{
usb_serial_generic_close(port);
usb_gsi_openclose(port, 0);
}
static struct usb_serial_driver wishbone_serial_device = {
.driver = {
.owner = THIS_MODULE,
.name = "wishbone_serial",
},
.id_table = id_table,
.num_ports = 1,
.open = &wishbone_serial_open,
.close = &wishbone_serial_close,
};
static struct usb_serial_driver * const serial_drivers[] = {
&wishbone_serial_device, NULL
};
module_usb_serial_driver(serial_drivers, id_table);
MODULE_AUTHOR("Wesley W. Terpstra <[email protected]>");
MODULE_DESCRIPTION("USB Wishbone-Serial adapter");
MODULE_LICENSE("GPL");
| linux-master | drivers/usb/serial/wishbone-serial.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* USB Cypress M8 driver
*
* Copyright (C) 2004
* Lonnie Mendez ([email protected])
* Copyright (C) 2003,2004
* Neil Whelchel ([email protected])
*
* See Documentation/usb/usb-serial.rst for more information on using this
* driver
*
* See http://geocities.com/i0xox0i for information on this driver and the
* earthmate usb device.
*/
/* Thanks to Neil Whelchel for writing the first cypress m8 implementation
for linux. */
/* Thanks to cypress for providing references for the hid reports. */
/* Thanks to Jiang Zhang for providing links and for general help. */
/* Code originates and was built up from ftdi_sio, belkin, pl2303 and others.*/
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/tty.h>
#include <linux/tty_driver.h>
#include <linux/tty_flip.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/spinlock.h>
#include <linux/usb.h>
#include <linux/usb/serial.h>
#include <linux/serial.h>
#include <linux/kfifo.h>
#include <linux/delay.h>
#include <linux/uaccess.h>
#include <asm/unaligned.h>
#include "cypress_m8.h"
static bool stats;
static int interval;
static bool unstable_bauds;
#define DRIVER_AUTHOR "Lonnie Mendez <[email protected]>, Neil Whelchel <[email protected]>"
#define DRIVER_DESC "Cypress USB to Serial Driver"
/* write buffer size defines */
#define CYPRESS_BUF_SIZE 1024
static const struct usb_device_id id_table_earthmate[] = {
{ USB_DEVICE(VENDOR_ID_DELORME, PRODUCT_ID_EARTHMATEUSB) },
{ USB_DEVICE(VENDOR_ID_DELORME, PRODUCT_ID_EARTHMATEUSB_LT20) },
{ } /* Terminating entry */
};
static const struct usb_device_id id_table_cyphidcomrs232[] = {
{ USB_DEVICE(VENDOR_ID_CYPRESS, PRODUCT_ID_CYPHIDCOM) },
{ USB_DEVICE(VENDOR_ID_SAI, PRODUCT_ID_CYPHIDCOM) },
{ USB_DEVICE(VENDOR_ID_POWERCOM, PRODUCT_ID_UPS) },
{ USB_DEVICE(VENDOR_ID_FRWD, PRODUCT_ID_CYPHIDCOM_FRWD) },
{ } /* Terminating entry */
};
static const struct usb_device_id id_table_nokiaca42v2[] = {
{ USB_DEVICE(VENDOR_ID_DAZZLE, PRODUCT_ID_CA42) },
{ } /* Terminating entry */
};
static const struct usb_device_id id_table_combined[] = {
{ USB_DEVICE(VENDOR_ID_DELORME, PRODUCT_ID_EARTHMATEUSB) },
{ USB_DEVICE(VENDOR_ID_DELORME, PRODUCT_ID_EARTHMATEUSB_LT20) },
{ USB_DEVICE(VENDOR_ID_CYPRESS, PRODUCT_ID_CYPHIDCOM) },
{ USB_DEVICE(VENDOR_ID_SAI, PRODUCT_ID_CYPHIDCOM) },
{ USB_DEVICE(VENDOR_ID_POWERCOM, PRODUCT_ID_UPS) },
{ USB_DEVICE(VENDOR_ID_FRWD, PRODUCT_ID_CYPHIDCOM_FRWD) },
{ USB_DEVICE(VENDOR_ID_DAZZLE, PRODUCT_ID_CA42) },
{ } /* Terminating entry */
};
MODULE_DEVICE_TABLE(usb, id_table_combined);
enum packet_format {
packet_format_1, /* b0:status, b1:payload count */
packet_format_2 /* b0[7:3]:status, b0[2:0]:payload count */
};
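/*
 * Worked example (made-up bytes): with packet_format_1 a header of
 * 0xF8 0x03 means status 0xF8 followed by a 3-byte payload; with
 * packet_format_2 the single header byte 0xFB encodes the same thing,
 * since 0xFB & 0xF8 gives the status and 0xFB & 0x07 gives the count.
 */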
struct cypress_private {
spinlock_t lock; /* private lock */
int chiptype; /* identifier of device, for quirks/etc */
int bytes_in; /* used for statistics */
int bytes_out; /* used for statistics */
int cmd_count; /* used for statistics */
int cmd_ctrl; /* always set this to 1 before issuing a command */
struct kfifo write_fifo; /* write fifo */
int write_urb_in_use; /* write urb in use indicator */
int write_urb_interval; /* interval to use for write urb */
int read_urb_interval; /* interval to use for read urb */
int comm_is_ok; /* true if communication is (still) ok */
__u8 line_control; /* holds dtr / rts value */
__u8 current_status; /* received from last read - info on dsr,cts,cd,ri,etc */
__u8 current_config; /* stores the current configuration byte */
__u8 rx_flags; /* throttling - used from whiteheat/ftdi_sio */
enum packet_format pkt_fmt; /* format to use for packet send / receive */
int get_cfg_unsafe; /* If true, the CYPRESS_GET_CONFIG is unsafe */
int baud_rate; /* stores current baud rate in
integer form */
char prev_status; /* used for TIOCMIWAIT */
};
/* function prototypes for the Cypress USB to serial device */
static int cypress_earthmate_port_probe(struct usb_serial_port *port);
static int cypress_hidcom_port_probe(struct usb_serial_port *port);
static int cypress_ca42v2_port_probe(struct usb_serial_port *port);
static void cypress_port_remove(struct usb_serial_port *port);
static int cypress_open(struct tty_struct *tty, struct usb_serial_port *port);
static void cypress_close(struct usb_serial_port *port);
static void cypress_dtr_rts(struct usb_serial_port *port, int on);
static int cypress_write(struct tty_struct *tty, struct usb_serial_port *port,
const unsigned char *buf, int count);
static void cypress_send(struct usb_serial_port *port);
static unsigned int cypress_write_room(struct tty_struct *tty);
static void cypress_earthmate_init_termios(struct tty_struct *tty);
static void cypress_set_termios(struct tty_struct *tty,
struct usb_serial_port *port,
const struct ktermios *old_termios);
static int cypress_tiocmget(struct tty_struct *tty);
static int cypress_tiocmset(struct tty_struct *tty,
unsigned int set, unsigned int clear);
static unsigned int cypress_chars_in_buffer(struct tty_struct *tty);
static void cypress_throttle(struct tty_struct *tty);
static void cypress_unthrottle(struct tty_struct *tty);
static void cypress_set_dead(struct usb_serial_port *port);
static void cypress_read_int_callback(struct urb *urb);
static void cypress_write_int_callback(struct urb *urb);
static struct usb_serial_driver cypress_earthmate_device = {
.driver = {
.owner = THIS_MODULE,
.name = "earthmate",
},
.description = "DeLorme Earthmate USB",
.id_table = id_table_earthmate,
.num_ports = 1,
.port_probe = cypress_earthmate_port_probe,
.port_remove = cypress_port_remove,
.open = cypress_open,
.close = cypress_close,
.dtr_rts = cypress_dtr_rts,
.write = cypress_write,
.write_room = cypress_write_room,
.init_termios = cypress_earthmate_init_termios,
.set_termios = cypress_set_termios,
.tiocmget = cypress_tiocmget,
.tiocmset = cypress_tiocmset,
.tiocmiwait = usb_serial_generic_tiocmiwait,
.chars_in_buffer = cypress_chars_in_buffer,
.throttle = cypress_throttle,
.unthrottle = cypress_unthrottle,
.read_int_callback = cypress_read_int_callback,
.write_int_callback = cypress_write_int_callback,
};
static struct usb_serial_driver cypress_hidcom_device = {
.driver = {
.owner = THIS_MODULE,
.name = "cyphidcom",
},
.description = "HID->COM RS232 Adapter",
.id_table = id_table_cyphidcomrs232,
.num_ports = 1,
.port_probe = cypress_hidcom_port_probe,
.port_remove = cypress_port_remove,
.open = cypress_open,
.close = cypress_close,
.dtr_rts = cypress_dtr_rts,
.write = cypress_write,
.write_room = cypress_write_room,
.set_termios = cypress_set_termios,
.tiocmget = cypress_tiocmget,
.tiocmset = cypress_tiocmset,
.tiocmiwait = usb_serial_generic_tiocmiwait,
.chars_in_buffer = cypress_chars_in_buffer,
.throttle = cypress_throttle,
.unthrottle = cypress_unthrottle,
.read_int_callback = cypress_read_int_callback,
.write_int_callback = cypress_write_int_callback,
};
static struct usb_serial_driver cypress_ca42v2_device = {
.driver = {
.owner = THIS_MODULE,
.name = "nokiaca42v2",
},
.description = "Nokia CA-42 V2 Adapter",
.id_table = id_table_nokiaca42v2,
.num_ports = 1,
.port_probe = cypress_ca42v2_port_probe,
.port_remove = cypress_port_remove,
.open = cypress_open,
.close = cypress_close,
.dtr_rts = cypress_dtr_rts,
.write = cypress_write,
.write_room = cypress_write_room,
.set_termios = cypress_set_termios,
.tiocmget = cypress_tiocmget,
.tiocmset = cypress_tiocmset,
.tiocmiwait = usb_serial_generic_tiocmiwait,
.chars_in_buffer = cypress_chars_in_buffer,
.throttle = cypress_throttle,
.unthrottle = cypress_unthrottle,
.read_int_callback = cypress_read_int_callback,
.write_int_callback = cypress_write_int_callback,
};
static struct usb_serial_driver * const serial_drivers[] = {
&cypress_earthmate_device, &cypress_hidcom_device,
&cypress_ca42v2_device, NULL
};
/*****************************************************************************
* Cypress serial helper functions
*****************************************************************************/
/* FRWD Dongle hidcom needs to skip reset and speed checks */
static inline bool is_frwd(struct usb_device *dev)
{
return ((le16_to_cpu(dev->descriptor.idVendor) == VENDOR_ID_FRWD) &&
(le16_to_cpu(dev->descriptor.idProduct) == PRODUCT_ID_CYPHIDCOM_FRWD));
}
static int analyze_baud_rate(struct usb_serial_port *port, speed_t new_rate)
{
struct cypress_private *priv;
priv = usb_get_serial_port_data(port);
if (unstable_bauds)
return new_rate;
/* FRWD Dongle uses 115200 bps */
if (is_frwd(port->serial->dev))
return new_rate;
/*
* The general purpose firmware for the Cypress M8 allows for
* a maximum speed of 57600bps (I have no idea whether DeLorme
* chose to use the general purpose firmware or not). If you
* need to modify this speed setting for your own project,
* please add your own chiptype and modify the code likewise.
* The Cypress HID->COM device will work successfully up to
* 115200bps (but the actual throughput is around 3kBps).
*/
if (port->serial->dev->speed == USB_SPEED_LOW) {
/*
* Mike Isely <[email protected]> 2-Feb-2008: The
* Cypress app note that describes this mechanism
* states that the low-speed part can't handle more
* than 800 bytes/sec, in which case 4800 baud is the
* safest speed for a part like that.
*/
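/*
 * (At 8N1 a character takes roughly 10 bit times, so 4800 baud is
 * about 480 bytes/sec, comfortably under the 800 bytes/sec limit
 * quoted above.)
 */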
if (new_rate > 4800) {
dev_dbg(&port->dev,
"%s - failed setting baud rate, device incapable speed %d\n",
__func__, new_rate);
return -1;
}
}
switch (priv->chiptype) {
case CT_EARTHMATE:
if (new_rate <= 600) {
/* 300 and 600 baud rates are supported under
* the generic firmware, but are not used with
* NMEA and SiRF protocols */
dev_dbg(&port->dev,
"%s - failed setting baud rate, unsupported speed of %d on Earthmate GPS\n",
__func__, new_rate);
return -1;
}
break;
default:
break;
}
return new_rate;
}
/* This function can either set or retrieve the current serial line settings */
static int cypress_serial_control(struct tty_struct *tty,
struct usb_serial_port *port, speed_t baud_rate, int data_bits,
int stop_bits, int parity_enable, int parity_type, int reset,
int cypress_request_type)
{
int new_baudrate = 0, retval = 0, tries = 0;
struct cypress_private *priv;
struct device *dev = &port->dev;
u8 *feature_buffer;
const unsigned int feature_len = 5;
unsigned long flags;
priv = usb_get_serial_port_data(port);
if (!priv->comm_is_ok)
return -ENODEV;
feature_buffer = kcalloc(feature_len, sizeof(u8), GFP_KERNEL);
if (!feature_buffer)
return -ENOMEM;
switch (cypress_request_type) {
case CYPRESS_SET_CONFIG:
/* A requested rate of 0 means 'hang up', so it doesn't change the real bit rate */
new_baudrate = priv->baud_rate;
if (baud_rate && baud_rate != priv->baud_rate) {
dev_dbg(dev, "%s - baud rate is changing\n", __func__);
retval = analyze_baud_rate(port, baud_rate);
if (retval >= 0) {
new_baudrate = retval;
dev_dbg(dev, "%s - New baud rate set to %d\n",
__func__, new_baudrate);
}
}
dev_dbg(dev, "%s - baud rate is being sent as %d\n", __func__,
new_baudrate);
/* fill the feature_buffer with new configuration */
put_unaligned_le32(new_baudrate, feature_buffer);
feature_buffer[4] |= data_bits - 5; /* assign data bits in 2 bit space ( max 3 ) */
/* 1 bit gap */
feature_buffer[4] |= (stop_bits << 3); /* assign stop bits in 1 bit space */
feature_buffer[4] |= (parity_enable << 4); /* assign parity flag in 1 bit space */
feature_buffer[4] |= (parity_type << 5); /* assign parity type in 1 bit space */
/* 1 bit gap */
feature_buffer[4] |= (reset << 7); /* assign reset at end of byte, 1 bit space */
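/*
 * Worked example (illustrative values): 9600 baud, 8N1, no reset
 * packs as 80 25 00 00 03 -- 0x00002580 little-endian in bytes 0-3,
 * then data_bits - 5 = 3 in the low two bits of byte 4 with every
 * other flag bit clear.
 */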
dev_dbg(dev, "%s - device is being sent this feature report:\n", __func__);
dev_dbg(dev, "%s - %02X - %02X - %02X - %02X - %02X\n", __func__,
feature_buffer[0], feature_buffer[1],
feature_buffer[2], feature_buffer[3],
feature_buffer[4]);
do {
retval = usb_control_msg(port->serial->dev,
usb_sndctrlpipe(port->serial->dev, 0),
HID_REQ_SET_REPORT,
USB_DIR_OUT | USB_RECIP_INTERFACE | USB_TYPE_CLASS,
0x0300, 0, feature_buffer,
feature_len, 500);
if (tries++ >= 3)
break;
} while (retval != feature_len &&
retval != -ENODEV);
if (retval != feature_len) {
dev_err(dev, "%s - failed sending serial line settings - %d\n",
__func__, retval);
cypress_set_dead(port);
} else {
spin_lock_irqsave(&priv->lock, flags);
priv->baud_rate = new_baudrate;
priv->current_config = feature_buffer[4];
spin_unlock_irqrestore(&priv->lock, flags);
/* If we asked for a speed change encode it */
if (baud_rate)
tty_encode_baud_rate(tty,
new_baudrate, new_baudrate);
}
break;
case CYPRESS_GET_CONFIG:
if (priv->get_cfg_unsafe) {
/* Not implemented for this device,
and if we try to do it we're likely
to crash the hardware. */
retval = -ENOTTY;
goto out;
}
dev_dbg(dev, "%s - retrieving serial line settings\n", __func__);
do {
retval = usb_control_msg(port->serial->dev,
usb_rcvctrlpipe(port->serial->dev, 0),
HID_REQ_GET_REPORT,
USB_DIR_IN | USB_RECIP_INTERFACE | USB_TYPE_CLASS,
0x0300, 0, feature_buffer,
feature_len, 500);
if (tries++ >= 3)
break;
} while (retval != feature_len
&& retval != -ENODEV);
if (retval != feature_len) {
dev_err(dev, "%s - failed to retrieve serial line settings - %d\n",
__func__, retval);
cypress_set_dead(port);
goto out;
} else {
spin_lock_irqsave(&priv->lock, flags);
/* store the config in one byte, and later
use bit masks to check values */
priv->current_config = feature_buffer[4];
priv->baud_rate = get_unaligned_le32(feature_buffer);
spin_unlock_irqrestore(&priv->lock, flags);
}
}
spin_lock_irqsave(&priv->lock, flags);
++priv->cmd_count;
spin_unlock_irqrestore(&priv->lock, flags);
out:
kfree(feature_buffer);
return retval;
} /* cypress_serial_control */
static void cypress_set_dead(struct usb_serial_port *port)
{
struct cypress_private *priv = usb_get_serial_port_data(port);
unsigned long flags;
spin_lock_irqsave(&priv->lock, flags);
if (!priv->comm_is_ok) {
spin_unlock_irqrestore(&priv->lock, flags);
return;
}
priv->comm_is_ok = 0;
spin_unlock_irqrestore(&priv->lock, flags);
dev_err(&port->dev, "cypress_m8 suspending failing port %d - "
"interval might be too short\n", port->port_number);
}
/*****************************************************************************
* Cypress serial driver functions
*****************************************************************************/
static int cypress_generic_port_probe(struct usb_serial_port *port)
{
struct usb_serial *serial = port->serial;
struct cypress_private *priv;
if (!port->interrupt_out_urb || !port->interrupt_in_urb) {
dev_err(&port->dev, "required endpoint is missing\n");
return -ENODEV;
}
priv = kzalloc(sizeof(struct cypress_private), GFP_KERNEL);
if (!priv)
return -ENOMEM;
priv->comm_is_ok = !0;
spin_lock_init(&priv->lock);
if (kfifo_alloc(&priv->write_fifo, CYPRESS_BUF_SIZE, GFP_KERNEL)) {
kfree(priv);
return -ENOMEM;
}
/* Skip reset for the FRWD device. This is a workaround:
   the device hangs if it receives SET_CONFIGURATION while
   already in the Configured state. */
if (!is_frwd(serial->dev))
usb_reset_configuration(serial->dev);
priv->cmd_ctrl = 0;
priv->line_control = 0;
priv->rx_flags = 0;
/* Default packet format setting is determined by packet size.
Anything with a size larger than 9 must have a separate
count field since the 3 bit count field is otherwise too
small. Otherwise we can use the slightly more compact
format. This is in accordance with the cypress_m8 serial
converter app note. */
if (port->interrupt_out_size > 9)
priv->pkt_fmt = packet_format_1;
else
priv->pkt_fmt = packet_format_2;
if (interval > 0) {
priv->write_urb_interval = interval;
priv->read_urb_interval = interval;
dev_dbg(&port->dev, "%s - read & write intervals forced to %d\n",
__func__, interval);
} else {
priv->write_urb_interval = port->interrupt_out_urb->interval;
priv->read_urb_interval = port->interrupt_in_urb->interval;
dev_dbg(&port->dev, "%s - intervals: read=%d write=%d\n",
__func__, priv->read_urb_interval,
priv->write_urb_interval);
}
usb_set_serial_port_data(port, priv);
port->port.drain_delay = 256;
return 0;
}
static int cypress_earthmate_port_probe(struct usb_serial_port *port)
{
struct usb_serial *serial = port->serial;
struct cypress_private *priv;
int ret;
ret = cypress_generic_port_probe(port);
if (ret) {
dev_dbg(&port->dev, "%s - Failed setting up port\n", __func__);
return ret;
}
priv = usb_get_serial_port_data(port);
priv->chiptype = CT_EARTHMATE;
/* All Earthmate devices use the separated-count packet
format! Idiotic. */
priv->pkt_fmt = packet_format_1;
if (serial->dev->descriptor.idProduct !=
cpu_to_le16(PRODUCT_ID_EARTHMATEUSB)) {
/* The old original USB Earthmate seemed able to
handle GET_CONFIG requests; everything they've
produced since that time crashes if this command is
attempted :-( */
dev_dbg(&port->dev,
"%s - Marking this device as unsafe for GET_CONFIG commands\n",
__func__);
priv->get_cfg_unsafe = !0;
}
return 0;
}
static int cypress_hidcom_port_probe(struct usb_serial_port *port)
{
struct cypress_private *priv;
int ret;
ret = cypress_generic_port_probe(port);
if (ret) {
dev_dbg(&port->dev, "%s - Failed setting up port\n", __func__);
return ret;
}
priv = usb_get_serial_port_data(port);
priv->chiptype = CT_CYPHIDCOM;
return 0;
}
static int cypress_ca42v2_port_probe(struct usb_serial_port *port)
{
struct cypress_private *priv;
int ret;
ret = cypress_generic_port_probe(port);
if (ret) {
dev_dbg(&port->dev, "%s - Failed setting up port\n", __func__);
return ret;
}
priv = usb_get_serial_port_data(port);
priv->chiptype = CT_CA42V2;
return 0;
}
static void cypress_port_remove(struct usb_serial_port *port)
{
struct cypress_private *priv;
priv = usb_get_serial_port_data(port);
kfifo_free(&priv->write_fifo);
kfree(priv);
}
static int cypress_open(struct tty_struct *tty, struct usb_serial_port *port)
{
struct cypress_private *priv = usb_get_serial_port_data(port);
struct usb_serial *serial = port->serial;
unsigned long flags;
int result = 0;
if (!priv->comm_is_ok)
return -EIO;
/* clear halts before open */
usb_clear_halt(serial->dev, 0x81);
usb_clear_halt(serial->dev, 0x02);
spin_lock_irqsave(&priv->lock, flags);
/* reset read/write statistics */
priv->bytes_in = 0;
priv->bytes_out = 0;
priv->cmd_count = 0;
priv->rx_flags = 0;
spin_unlock_irqrestore(&priv->lock, flags);
/* Set termios */
cypress_send(port);
if (tty)
cypress_set_termios(tty, port, NULL);
/* setup the port and start reading from the device */
usb_fill_int_urb(port->interrupt_in_urb, serial->dev,
usb_rcvintpipe(serial->dev, port->interrupt_in_endpointAddress),
port->interrupt_in_urb->transfer_buffer,
port->interrupt_in_urb->transfer_buffer_length,
cypress_read_int_callback, port, priv->read_urb_interval);
result = usb_submit_urb(port->interrupt_in_urb, GFP_KERNEL);
if (result) {
dev_err(&port->dev,
"%s - failed submitting read urb, error %d\n",
__func__, result);
cypress_set_dead(port);
}
return result;
} /* cypress_open */
static void cypress_dtr_rts(struct usb_serial_port *port, int on)
{
struct cypress_private *priv = usb_get_serial_port_data(port);
/* drop dtr and rts */
spin_lock_irq(&priv->lock);
if (on == 0)
priv->line_control = 0;
else
priv->line_control = CONTROL_DTR | CONTROL_RTS;
priv->cmd_ctrl = 1;
spin_unlock_irq(&priv->lock);
cypress_write(NULL, port, NULL, 0);
}
static void cypress_close(struct usb_serial_port *port)
{
struct cypress_private *priv = usb_get_serial_port_data(port);
unsigned long flags;
spin_lock_irqsave(&priv->lock, flags);
kfifo_reset_out(&priv->write_fifo);
spin_unlock_irqrestore(&priv->lock, flags);
dev_dbg(&port->dev, "%s - stopping urbs\n", __func__);
usb_kill_urb(port->interrupt_in_urb);
usb_kill_urb(port->interrupt_out_urb);
if (stats)
dev_info(&port->dev, "Statistics: %d Bytes In | %d Bytes Out | %d Commands Issued\n",
priv->bytes_in, priv->bytes_out, priv->cmd_count);
} /* cypress_close */
static int cypress_write(struct tty_struct *tty, struct usb_serial_port *port,
const unsigned char *buf, int count)
{
struct cypress_private *priv = usb_get_serial_port_data(port);
dev_dbg(&port->dev, "%s - %d bytes\n", __func__, count);
/* line control commands, which need to be executed immediately,
are not put into the buffer for obvious reasons.
*/
if (priv->cmd_ctrl) {
count = 0;
goto finish;
}
if (!count)
return count;
count = kfifo_in_locked(&priv->write_fifo, buf, count, &priv->lock);
finish:
cypress_send(port);
return count;
} /* cypress_write */
static void cypress_send(struct usb_serial_port *port)
{
int count = 0, result, offset, actual_size;
struct cypress_private *priv = usb_get_serial_port_data(port);
struct device *dev = &port->dev;
unsigned long flags;
if (!priv->comm_is_ok)
return;
dev_dbg(dev, "%s - interrupt out size is %d\n", __func__,
port->interrupt_out_size);
spin_lock_irqsave(&priv->lock, flags);
if (priv->write_urb_in_use) {
dev_dbg(dev, "%s - can't write, urb in use\n", __func__);
spin_unlock_irqrestore(&priv->lock, flags);
return;
}
spin_unlock_irqrestore(&priv->lock, flags);
/* clear buffer */
memset(port->interrupt_out_urb->transfer_buffer, 0,
port->interrupt_out_size);
spin_lock_irqsave(&priv->lock, flags);
switch (priv->pkt_fmt) {
default:
case packet_format_1:
/* this is for the CY7C64013... */
offset = 2;
port->interrupt_out_buffer[0] = priv->line_control;
break;
case packet_format_2:
/* this is for the CY7C63743... */
offset = 1;
port->interrupt_out_buffer[0] = priv->line_control;
break;
}
if (priv->line_control & CONTROL_RESET)
priv->line_control &= ~CONTROL_RESET;
if (priv->cmd_ctrl) {
priv->cmd_count++;
dev_dbg(dev, "%s - line control command being issued\n", __func__);
spin_unlock_irqrestore(&priv->lock, flags);
goto send;
} else
spin_unlock_irqrestore(&priv->lock, flags);
count = kfifo_out_locked(&priv->write_fifo,
&port->interrupt_out_buffer[offset],
port->interrupt_out_size - offset,
&priv->lock);
if (count == 0)
return;
switch (priv->pkt_fmt) {
default:
case packet_format_1:
port->interrupt_out_buffer[1] = count;
break;
case packet_format_2:
port->interrupt_out_buffer[0] |= count;
}
dev_dbg(dev, "%s - count is %d\n", __func__, count);
send:
spin_lock_irqsave(&priv->lock, flags);
priv->write_urb_in_use = 1;
spin_unlock_irqrestore(&priv->lock, flags);
if (priv->cmd_ctrl)
actual_size = 1;
else
actual_size = count +
(priv->pkt_fmt == packet_format_1 ? 2 : 1);
usb_serial_debug_data(dev, __func__, port->interrupt_out_size,
port->interrupt_out_urb->transfer_buffer);
usb_fill_int_urb(port->interrupt_out_urb, port->serial->dev,
usb_sndintpipe(port->serial->dev, port->interrupt_out_endpointAddress),
port->interrupt_out_buffer, actual_size,
cypress_write_int_callback, port, priv->write_urb_interval);
result = usb_submit_urb(port->interrupt_out_urb, GFP_ATOMIC);
if (result) {
dev_err_console(port,
"%s - failed submitting write urb, error %d\n",
__func__, result);
priv->write_urb_in_use = 0;
cypress_set_dead(port);
}
spin_lock_irqsave(&priv->lock, flags);
if (priv->cmd_ctrl)
priv->cmd_ctrl = 0;
/* do not count the line control and size bytes */
priv->bytes_out += count;
spin_unlock_irqrestore(&priv->lock, flags);
usb_serial_port_softint(port);
} /* cypress_send */
/* returns how much space is available in the soft buffer */
static unsigned int cypress_write_room(struct tty_struct *tty)
{
struct usb_serial_port *port = tty->driver_data;
struct cypress_private *priv = usb_get_serial_port_data(port);
unsigned int room;
unsigned long flags;
spin_lock_irqsave(&priv->lock, flags);
room = kfifo_avail(&priv->write_fifo);
spin_unlock_irqrestore(&priv->lock, flags);
dev_dbg(&port->dev, "%s - returns %u\n", __func__, room);
return room;
}
static int cypress_tiocmget(struct tty_struct *tty)
{
struct usb_serial_port *port = tty->driver_data;
struct cypress_private *priv = usb_get_serial_port_data(port);
__u8 status, control;
unsigned int result = 0;
unsigned long flags;
spin_lock_irqsave(&priv->lock, flags);
control = priv->line_control;
status = priv->current_status;
spin_unlock_irqrestore(&priv->lock, flags);
result = ((control & CONTROL_DTR) ? TIOCM_DTR : 0)
| ((control & CONTROL_RTS) ? TIOCM_RTS : 0)
| ((status & UART_CTS) ? TIOCM_CTS : 0)
| ((status & UART_DSR) ? TIOCM_DSR : 0)
| ((status & UART_RI) ? TIOCM_RI : 0)
| ((status & UART_CD) ? TIOCM_CD : 0);
dev_dbg(&port->dev, "%s - result = %x\n", __func__, result);
return result;
}
static int cypress_tiocmset(struct tty_struct *tty,
unsigned int set, unsigned int clear)
{
struct usb_serial_port *port = tty->driver_data;
struct cypress_private *priv = usb_get_serial_port_data(port);
unsigned long flags;
spin_lock_irqsave(&priv->lock, flags);
if (set & TIOCM_RTS)
priv->line_control |= CONTROL_RTS;
if (set & TIOCM_DTR)
priv->line_control |= CONTROL_DTR;
if (clear & TIOCM_RTS)
priv->line_control &= ~CONTROL_RTS;
if (clear & TIOCM_DTR)
priv->line_control &= ~CONTROL_DTR;
priv->cmd_ctrl = 1;
spin_unlock_irqrestore(&priv->lock, flags);
return cypress_write(tty, port, NULL, 0);
}
static void cypress_earthmate_init_termios(struct tty_struct *tty)
{
tty_encode_baud_rate(tty, 4800, 4800);
}
static void cypress_set_termios(struct tty_struct *tty,
struct usb_serial_port *port,
const struct ktermios *old_termios)
{
struct cypress_private *priv = usb_get_serial_port_data(port);
struct device *dev = &port->dev;
int data_bits, stop_bits, parity_type, parity_enable;
unsigned int cflag;
unsigned long flags;
__u8 oldlines;
int linechange = 0;
/* Unsupported features need clearing */
tty->termios.c_cflag &= ~(CMSPAR|CRTSCTS);
cflag = tty->termios.c_cflag;
/* set number of data bits, parity, stop bits */
/* when parity is disabled the parity type bit is ignored */
/* 1 means 2 stop bits, 0 means 1 stop bit */
stop_bits = cflag & CSTOPB ? 1 : 0;
if (cflag & PARENB) {
parity_enable = 1;
/* 1 means odd parity, 0 means even parity */
parity_type = cflag & PARODD ? 1 : 0;
} else
parity_enable = parity_type = 0;
data_bits = tty_get_char_size(cflag);
spin_lock_irqsave(&priv->lock, flags);
oldlines = priv->line_control;
if ((cflag & CBAUD) == B0) {
/* drop dtr and rts */
dev_dbg(dev, "%s - dropping the lines, baud rate 0bps\n", __func__);
priv->line_control &= ~(CONTROL_DTR | CONTROL_RTS);
} else
priv->line_control = (CONTROL_DTR | CONTROL_RTS);
spin_unlock_irqrestore(&priv->lock, flags);
dev_dbg(dev, "%s - sending %d stop_bits, %d parity_enable, %d parity_type, %d data_bits (+5)\n",
__func__, stop_bits, parity_enable, parity_type, data_bits);
cypress_serial_control(tty, port, tty_get_baud_rate(tty),
data_bits, stop_bits,
parity_enable, parity_type,
0, CYPRESS_SET_CONFIG);
/* we perform a CYPRESS_GET_CONFIG so that the current settings are
* filled into the private structure this should confirm that all is
* working if it returns what we just set */
cypress_serial_control(tty, port, 0, 0, 0, 0, 0, 0, CYPRESS_GET_CONFIG);
/* Here we can define custom tty settings for devices; the main tty
* termios flag base comes from empeg.c */
spin_lock_irqsave(&priv->lock, flags);
if (priv->chiptype == CT_EARTHMATE && priv->baud_rate == 4800) {
dev_dbg(dev, "Using custom termios settings for a baud rate of 4800bps.\n");
/* define custom termios settings for NMEA protocol */
tty->termios.c_iflag /* input modes - */
&= ~(IGNBRK /* disable ignore break */
| BRKINT /* disable break causes interrupt */
| PARMRK /* disable mark parity errors */
| ISTRIP /* disable clear high bit of input char */
| INLCR /* disable translate NL to CR */
| IGNCR /* disable ignore CR */
| ICRNL /* disable translate CR to NL */
| IXON); /* disable enable XON/XOFF flow control */
tty->termios.c_oflag /* output modes */
&= ~OPOST; /* disable postprocess output char */
tty->termios.c_lflag /* line discipline modes */
&= ~(ECHO /* disable echo input characters */
| ECHONL /* disable echo new line */
| ICANON /* disable erase, kill, werase, and rprnt
special characters */
| ISIG /* disable interrupt, quit, and suspend
special characters */
| IEXTEN); /* disable non-POSIX special characters */
} /* CT_CYPHIDCOM: Application should handle this for device */
linechange = (priv->line_control != oldlines);
spin_unlock_irqrestore(&priv->lock, flags);
/* if necessary, set lines */
if (linechange) {
priv->cmd_ctrl = 1;
cypress_write(tty, port, NULL, 0);
}
} /* cypress_set_termios */
/* returns amount of data still left in soft buffer */
static unsigned int cypress_chars_in_buffer(struct tty_struct *tty)
{
struct usb_serial_port *port = tty->driver_data;
struct cypress_private *priv = usb_get_serial_port_data(port);
unsigned int chars;
unsigned long flags;
spin_lock_irqsave(&priv->lock, flags);
chars = kfifo_len(&priv->write_fifo);
spin_unlock_irqrestore(&priv->lock, flags);
dev_dbg(&port->dev, "%s - returns %u\n", __func__, chars);
return chars;
}
static void cypress_throttle(struct tty_struct *tty)
{
struct usb_serial_port *port = tty->driver_data;
struct cypress_private *priv = usb_get_serial_port_data(port);
spin_lock_irq(&priv->lock);
priv->rx_flags = THROTTLED;
spin_unlock_irq(&priv->lock);
}
static void cypress_unthrottle(struct tty_struct *tty)
{
struct usb_serial_port *port = tty->driver_data;
struct cypress_private *priv = usb_get_serial_port_data(port);
int actually_throttled, result;
spin_lock_irq(&priv->lock);
actually_throttled = priv->rx_flags & ACTUALLY_THROTTLED;
priv->rx_flags = 0;
spin_unlock_irq(&priv->lock);
if (!priv->comm_is_ok)
return;
if (actually_throttled) {
result = usb_submit_urb(port->interrupt_in_urb, GFP_KERNEL);
if (result) {
dev_err(&port->dev, "%s - failed submitting read urb, "
"error %d\n", __func__, result);
cypress_set_dead(port);
}
}
}
static void cypress_read_int_callback(struct urb *urb)
{
struct usb_serial_port *port = urb->context;
struct cypress_private *priv = usb_get_serial_port_data(port);
struct device *dev = &urb->dev->dev;
struct tty_struct *tty;
unsigned char *data = urb->transfer_buffer;
unsigned long flags;
char tty_flag = TTY_NORMAL;
int bytes = 0;
int result;
int i = 0;
int status = urb->status;
switch (status) {
case 0: /* success */
break;
case -ECONNRESET:
case -ENOENT:
case -ESHUTDOWN:
/* precursor to disconnect so just go away */
return;
case -EPIPE:
/* Can't call usb_clear_halt while in_interrupt */
fallthrough;
default:
/* something ugly is going on... */
dev_err(dev, "%s - unexpected nonzero read status received: %d\n",
__func__, status);
cypress_set_dead(port);
return;
}
spin_lock_irqsave(&priv->lock, flags);
if (priv->rx_flags & THROTTLED) {
dev_dbg(dev, "%s - now throttling\n", __func__);
priv->rx_flags |= ACTUALLY_THROTTLED;
spin_unlock_irqrestore(&priv->lock, flags);
return;
}
spin_unlock_irqrestore(&priv->lock, flags);
tty = tty_port_tty_get(&port->port);
if (!tty) {
dev_dbg(dev, "%s - bad tty pointer - exiting\n", __func__);
return;
}
spin_lock_irqsave(&priv->lock, flags);
result = urb->actual_length;
switch (priv->pkt_fmt) {
default:
case packet_format_1:
/* This is for the CY7C64013... */
priv->current_status = data[0] & 0xF8;
bytes = data[1] + 2;
i = 2;
break;
case packet_format_2:
/* This is for the CY7C63743... */
priv->current_status = data[0] & 0xF8;
bytes = (data[0] & 0x07) + 1;
i = 1;
break;
}
spin_unlock_irqrestore(&priv->lock, flags);
if (result < bytes) {
dev_dbg(dev,
"%s - wrong packet size - received %d bytes but packet said %d bytes\n",
__func__, result, bytes);
goto continue_read;
}
usb_serial_debug_data(&port->dev, __func__, urb->actual_length, data);
spin_lock_irqsave(&priv->lock, flags);
/* check to see if status has changed */
if (priv->current_status != priv->prev_status) {
u8 delta = priv->current_status ^ priv->prev_status;
if (delta & UART_MSR_MASK) {
if (delta & UART_CTS)
port->icount.cts++;
if (delta & UART_DSR)
port->icount.dsr++;
if (delta & UART_RI)
port->icount.rng++;
if (delta & UART_CD)
port->icount.dcd++;
wake_up_interruptible(&port->port.delta_msr_wait);
}
priv->prev_status = priv->current_status;
}
spin_unlock_irqrestore(&priv->lock, flags);
/* hangup, as defined in acm.c... this might be a bad place for it
* though */
if (tty && !C_CLOCAL(tty) && !(priv->current_status & UART_CD)) {
dev_dbg(dev, "%s - calling hangup\n", __func__);
tty_hangup(tty);
goto continue_read;
}
/* There is one error bit... I'm assuming it is a parity error
* indicator as the generic firmware will set this bit to 1 if a
* parity error occurs.
* I cannot find a reference to any other error events. */
spin_lock_irqsave(&priv->lock, flags);
if (priv->current_status & CYP_ERROR) {
spin_unlock_irqrestore(&priv->lock, flags);
tty_flag = TTY_PARITY;
dev_dbg(dev, "%s - Parity Error detected\n", __func__);
} else
spin_unlock_irqrestore(&priv->lock, flags);
/* process read if there is data other than line status */
if (bytes > i) {
tty_insert_flip_string_fixed_flag(&port->port, data + i,
tty_flag, bytes - i);
tty_flip_buffer_push(&port->port);
}
spin_lock_irqsave(&priv->lock, flags);
/* control and status byte(s) are also counted */
priv->bytes_in += bytes;
spin_unlock_irqrestore(&priv->lock, flags);
continue_read:
tty_kref_put(tty);
/* Keep reading as long as communication is still ok */
if (priv->comm_is_ok) {
usb_fill_int_urb(port->interrupt_in_urb, port->serial->dev,
usb_rcvintpipe(port->serial->dev,
port->interrupt_in_endpointAddress),
port->interrupt_in_urb->transfer_buffer,
port->interrupt_in_urb->transfer_buffer_length,
cypress_read_int_callback, port,
priv->read_urb_interval);
result = usb_submit_urb(port->interrupt_in_urb, GFP_ATOMIC);
if (result && result != -EPERM) {
dev_err(dev, "%s - failed resubmitting read urb, error %d\n",
__func__, result);
cypress_set_dead(port);
}
}
} /* cypress_read_int_callback */
static void cypress_write_int_callback(struct urb *urb)
{
struct usb_serial_port *port = urb->context;
struct cypress_private *priv = usb_get_serial_port_data(port);
struct device *dev = &urb->dev->dev;
int status = urb->status;
switch (status) {
case 0:
/* success */
break;
case -ECONNRESET:
case -ENOENT:
case -ESHUTDOWN:
/* this urb is terminated, clean up */
dev_dbg(dev, "%s - urb shutting down with status: %d\n",
__func__, status);
priv->write_urb_in_use = 0;
return;
case -EPIPE:
/* Cannot call usb_clear_halt while in_interrupt */
fallthrough;
default:
dev_err(dev, "%s - unexpected nonzero write status received: %d\n",
__func__, status);
cypress_set_dead(port);
break;
}
priv->write_urb_in_use = 0;
/* send any buffered data */
cypress_send(port);
}
module_usb_serial_driver(serial_drivers, id_table_combined);
MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL");
module_param(stats, bool, 0644);
MODULE_PARM_DESC(stats, "Enable statistics or not");
module_param(interval, int, 0644);
MODULE_PARM_DESC(interval, "Overrides interrupt interval");
module_param(unstable_bauds, bool, 0644);
MODULE_PARM_DESC(unstable_bauds, "Allow unstable baud rates");
| linux-master | drivers/usb/serial/cypress_m8.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Xsens MT USB driver
*
* Copyright (C) 2013 Xsens <[email protected]>
*/
#include <linux/kernel.h>
#include <linux/tty.h>
#include <linux/module.h>
#include <linux/usb.h>
#include <linux/usb/serial.h>
#include <linux/uaccess.h>
#define XSENS_VID 0x2639
#define MTi_10_IMU_PID 0x0001
#define MTi_20_VRU_PID 0x0002
#define MTi_30_AHRS_PID 0x0003
#define MTi_100_IMU_PID 0x0011
#define MTi_200_VRU_PID 0x0012
#define MTi_300_AHRS_PID 0x0013
#define MTi_G_700_GPS_INS_PID 0x0017
static const struct usb_device_id id_table[] = {
{ USB_DEVICE(XSENS_VID, MTi_10_IMU_PID) },
{ USB_DEVICE(XSENS_VID, MTi_20_VRU_PID) },
{ USB_DEVICE(XSENS_VID, MTi_30_AHRS_PID) },
{ USB_DEVICE(XSENS_VID, MTi_100_IMU_PID) },
{ USB_DEVICE(XSENS_VID, MTi_200_VRU_PID) },
{ USB_DEVICE(XSENS_VID, MTi_300_AHRS_PID) },
{ USB_DEVICE(XSENS_VID, MTi_G_700_GPS_INS_PID) },
{ },
};
MODULE_DEVICE_TABLE(usb, id_table);
static int xsens_mt_probe(struct usb_serial *serial,
const struct usb_device_id *id)
{
if (serial->interface->cur_altsetting->desc.bInterfaceNumber == 1)
return 0;
return -ENODEV;
}
static struct usb_serial_driver xsens_mt_device = {
.driver = {
.owner = THIS_MODULE,
.name = "xsens_mt",
},
.id_table = id_table,
.num_ports = 1,
.probe = xsens_mt_probe,
};
static struct usb_serial_driver * const serial_drivers[] = {
&xsens_mt_device, NULL
};
module_usb_serial_driver(serial_drivers, id_table);
MODULE_AUTHOR("Frans Klaver <[email protected]>");
MODULE_DESCRIPTION("USB-serial driver for Xsens motion trackers");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/usb/serial/xsens_mt.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* Edgeport USB Serial Converter driver
*
* Copyright (C) 2000-2002 Inside Out Networks, All rights reserved.
* Copyright (C) 2001-2002 Greg Kroah-Hartman <[email protected]>
*
* Supports the following devices:
* EP/1 EP/2 EP/4 EP/21 EP/22 EP/221 EP/42 EP/421 WATCHPORT
*
* For questions or problems with this driver, contact Inside Out
* Networks technical support, or Peter Berger <[email protected]>,
* or Al Borchers <[email protected]>.
*/
#include <linux/kernel.h>
#include <linux/jiffies.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/tty.h>
#include <linux/tty_driver.h>
#include <linux/tty_flip.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/mutex.h>
#include <linux/serial.h>
#include <linux/swab.h>
#include <linux/kfifo.h>
#include <linux/ioctl.h>
#include <linux/firmware.h>
#include <linux/uaccess.h>
#include <linux/usb.h>
#include <linux/usb/serial.h>
#include "io_16654.h"
#include "io_usbvend.h"
#include "io_ti.h"
#define DRIVER_AUTHOR "Greg Kroah-Hartman <[email protected]> and David Iacovelli"
#define DRIVER_DESC "Edgeport USB Serial Driver"
#define EPROM_PAGE_SIZE 64
/* different hardware types */
#define HARDWARE_TYPE_930 0
#define HARDWARE_TYPE_TIUMP 1
/* IOCTL_PRIVATE_TI_GET_MODE Definitions */
#define TI_MODE_CONFIGURING 0 /* Device has not entered start device */
#define TI_MODE_BOOT 1 /* Staying in boot mode */
#define TI_MODE_DOWNLOAD 2 /* Made it to download mode */
#define TI_MODE_TRANSITIONING 3 /*
* Currently in boot mode but
* transitioning to download mode
*/
/* read urb state */
#define EDGE_READ_URB_RUNNING 0
#define EDGE_READ_URB_STOPPING 1
#define EDGE_READ_URB_STOPPED 2
/* Product information read from the Edgeport */
struct product_info {
int TiMode; /* Current TI Mode */
u8 hardware_type; /* Type of hardware */
} __packed;
/*
* Edgeport firmware header
*
* "build_number" has been set to 0 in all three of the images I have
* seen, and Digi Tech Support suggests that it is safe to ignore it.
*
* "length" is the number of bytes of actual data following the header.
*
* "checksum" is the low order byte resulting from adding the values of
* all the data bytes.
*/
struct edgeport_fw_hdr {
u8 major_version;
u8 minor_version;
__le16 build_number;
__le16 length;
u8 checksum;
} __packed;
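/*
 * Minimal sketch of the checksum rule described above (the helper name
 * is ours and it is not used elsewhere in this driver): sum every data
 * byte and keep only the low-order byte of the result.
 */
static inline u8 edgeport_fw_checksum(const u8 *data, u16 length)
{
	u8 sum = 0;
	u16 i;

	for (i = 0; i < length; i++)
		sum += data[i];

	return sum;
}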
struct edgeport_port {
u16 uart_base;
u16 dma_address;
u8 shadow_msr;
u8 shadow_mcr;
u8 shadow_lsr;
u8 lsr_mask;
u32 ump_read_timeout; /*
* Number of milliseconds the UMP will
* wait without data before completing
* a short read
*/
int baud_rate;
int close_pending;
int lsr_event;
struct edgeport_serial *edge_serial;
struct usb_serial_port *port;
u8 bUartMode; /* Port type, 0: RS232, etc. */
spinlock_t ep_lock;
int ep_read_urb_state;
int ep_write_urb_in_use;
};
struct edgeport_serial {
struct product_info product_info;
u8 TI_I2C_Type; /* Type of I2C in UMP */
u8 TiReadI2C; /*
* Set to TRUE if we have read the
* I2c in Boot Mode
*/
struct mutex es_lock;
int num_ports_open;
struct usb_serial *serial;
struct delayed_work heartbeat_work;
int fw_version;
bool use_heartbeat;
};
/* Devices that this driver supports */
static const struct usb_device_id edgeport_1port_id_table[] = {
{ USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_TI_EDGEPORT_1) },
{ USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_TI_TI3410_EDGEPORT_1) },
{ USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_TI_TI3410_EDGEPORT_1I) },
{ USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_WP_PROXIMITY) },
{ USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_WP_MOTION) },
{ USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_WP_MOISTURE) },
{ USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_WP_TEMPERATURE) },
{ USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_WP_HUMIDITY) },
{ USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_WP_POWER) },
{ USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_WP_LIGHT) },
{ USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_WP_RADIATION) },
{ USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_WP_DISTANCE) },
{ USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_WP_ACCELERATION) },
{ USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_WP_PROX_DIST) },
{ USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_PLUS_PWR_HP4CD) },
{ USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_PLUS_PWR_PCI) },
{ }
};
static const struct usb_device_id edgeport_2port_id_table[] = {
{ USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_TI_EDGEPORT_2) },
{ USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_TI_EDGEPORT_2C) },
{ USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_TI_EDGEPORT_2I) },
{ USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_TI_EDGEPORT_421) },
{ USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_TI_EDGEPORT_21) },
{ USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_TI_EDGEPORT_42) },
{ USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_TI_EDGEPORT_4) },
{ USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_TI_EDGEPORT_4I) },
{ USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_TI_EDGEPORT_22I) },
{ USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_TI_EDGEPORT_221C) },
{ USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_TI_EDGEPORT_22C) },
{ USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_TI_EDGEPORT_21C) },
/* The 4, 8 and 16 port devices show up as multiple 2 port devices */
{ USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_TI_EDGEPORT_4S) },
{ USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_TI_EDGEPORT_8) },
{ USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_TI_EDGEPORT_8S) },
{ USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_TI_EDGEPORT_416) },
{ USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_TI_EDGEPORT_416B) },
{ USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_E5805A) },
{ }
};
/* Devices that this driver supports */
static const struct usb_device_id id_table_combined[] = {
{ USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_TI_EDGEPORT_1) },
{ USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_TI_TI3410_EDGEPORT_1) },
{ USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_TI_TI3410_EDGEPORT_1I) },
{ USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_WP_PROXIMITY) },
{ USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_WP_MOTION) },
{ USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_WP_MOISTURE) },
{ USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_WP_TEMPERATURE) },
{ USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_WP_HUMIDITY) },
{ USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_WP_POWER) },
{ USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_WP_LIGHT) },
{ USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_WP_RADIATION) },
{ USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_WP_DISTANCE) },
{ USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_WP_ACCELERATION) },
{ USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_WP_PROX_DIST) },
{ USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_PLUS_PWR_HP4CD) },
{ USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_PLUS_PWR_PCI) },
{ USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_TI_EDGEPORT_2) },
{ USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_TI_EDGEPORT_2C) },
{ USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_TI_EDGEPORT_2I) },
{ USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_TI_EDGEPORT_421) },
{ USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_TI_EDGEPORT_21) },
{ USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_TI_EDGEPORT_42) },
{ USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_TI_EDGEPORT_4) },
{ USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_TI_EDGEPORT_4I) },
{ USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_TI_EDGEPORT_22I) },
{ USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_TI_EDGEPORT_221C) },
{ USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_TI_EDGEPORT_22C) },
{ USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_TI_EDGEPORT_21C) },
{ USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_TI_EDGEPORT_4S) },
{ USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_TI_EDGEPORT_8) },
{ USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_TI_EDGEPORT_8S) },
{ USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_TI_EDGEPORT_416) },
{ USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_TI_EDGEPORT_416B) },
{ USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_E5805A) },
{ }
};
MODULE_DEVICE_TABLE(usb, id_table_combined);
static bool ignore_cpu_rev;
static int default_uart_mode; /* RS232 */
static void edge_tty_recv(struct usb_serial_port *port, unsigned char *data,
int length);
static void stop_read(struct edgeport_port *edge_port);
static int restart_read(struct edgeport_port *edge_port);
static void edge_set_termios(struct tty_struct *tty,
struct usb_serial_port *port,
const struct ktermios *old_termios);
static void edge_send(struct usb_serial_port *port, struct tty_struct *tty);
static int do_download_mode(struct edgeport_serial *serial,
const struct firmware *fw);
static int do_boot_mode(struct edgeport_serial *serial,
const struct firmware *fw);
/* sysfs attributes */
static int edge_create_sysfs_attrs(struct usb_serial_port *port);
static int edge_remove_sysfs_attrs(struct usb_serial_port *port);
/*
* Some release of Edgeport firmware "down3.bin" after version 4.80
* introduced code to automatically disconnect idle devices on some
* Edgeport models after periods of inactivity, typically ~60 seconds.
* This occurs without regard to whether ports on the device are open
* or not. Digi International Tech Support suggested:
*
* 1. Adding driver "heartbeat" code to reset the firmware timer by
* requesting a descriptor record every 15 seconds, which should be
* effective with newer firmware versions that require it, and benign
* with older versions that do not. In practice 40 seconds seems often
* enough.
* 2. The heartbeat code is currently required only on Edgeport/416 models.
*/
#define FW_HEARTBEAT_VERSION_CUTOFF ((4 << 8) + 80)
#define FW_HEARTBEAT_SECS 40
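/*
 * For reference, the cutoff above packs major.minor as (major << 8) +
 * minor, so version 4.80 is (4 << 8) + 80 = 1104 (0x450); fw_version
 * is presumably stored in the same encoding so the two compare directly.
 */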
/* Timeouts in msecs: firmware downloads take longer */
#define TI_VSEND_TIMEOUT_DEFAULT 1000
#define TI_VSEND_TIMEOUT_FW_DOWNLOAD 10000
static int ti_vread_sync(struct usb_device *dev, u8 request, u16 value,
u16 index, void *data, int size)
{
int status;
status = usb_control_msg(dev, usb_rcvctrlpipe(dev, 0), request,
(USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_IN),
value, index, data, size, 1000);
if (status < 0)
return status;
if (status != size) {
dev_dbg(&dev->dev, "%s - wanted to read %d, but only read %d\n",
__func__, size, status);
return -ECOMM;
}
return 0;
}
static int ti_vsend_sync(struct usb_device *dev, u8 request, u16 value,
u16 index, void *data, int size, int timeout)
{
int status;
status = usb_control_msg(dev, usb_sndctrlpipe(dev, 0), request,
(USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_OUT),
value, index, data, size, timeout);
if (status < 0)
return status;
return 0;
}
static int read_port_cmd(struct usb_serial_port *port, u8 command, u16 value,
void *data, int size)
{
return ti_vread_sync(port->serial->dev, command, value,
UMPM_UART1_PORT + port->port_number,
data, size);
}
static int send_port_cmd(struct usb_serial_port *port, u8 command, u16 value,
void *data, int size)
{
return ti_vsend_sync(port->serial->dev, command, value,
UMPM_UART1_PORT + port->port_number,
data, size, TI_VSEND_TIMEOUT_DEFAULT);
}
/* clear tx/rx buffers and fifo in TI UMP */
static int purge_port(struct usb_serial_port *port, u16 mask)
{
int port_number = port->port_number;
dev_dbg(&port->dev, "%s - port %d, mask %x\n", __func__, port_number, mask);
return send_port_cmd(port, UMPC_PURGE_PORT, mask, NULL, 0);
}
/**
* read_download_mem - Read edgeport memory from TI chip
* @dev: usb device pointer
* @start_address: Device CPU address at which to read
* @length: Length of above data
* @address_type: Can read both XDATA and I2C
* @buffer: pointer to input data buffer
*/
static int read_download_mem(struct usb_device *dev, int start_address,
int length, u8 address_type, u8 *buffer)
{
int status = 0;
u8 read_length;
u16 be_start_address;
dev_dbg(&dev->dev, "%s - @ %x for %d\n", __func__, start_address, length);
/*
* Read in blocks of 64 bytes
* (TI firmware can't handle more than 64 byte reads)
*/
while (length) {
if (length > 64)
read_length = 64;
else
read_length = (u8)length;
if (read_length > 1) {
dev_dbg(&dev->dev, "%s - @ %x for %d\n", __func__, start_address, read_length);
}
/*
* NOTE: Must use swab as wIndex is sent in little-endian
* byte order regardless of host byte order.
*/
be_start_address = swab16((u16)start_address);
status = ti_vread_sync(dev, UMPC_MEMORY_READ,
(u16)address_type,
be_start_address,
buffer, read_length);
if (status) {
dev_dbg(&dev->dev, "%s - ERROR %x\n", __func__, status);
return status;
}
if (read_length > 1)
usb_serial_debug_data(&dev->dev, __func__, read_length, buffer);
/* Update pointers/length */
start_address += read_length;
buffer += read_length;
length -= read_length;
}
return status;
}
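/* Read from the UMP XDATA (RAM) address space */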
static int read_ram(struct usb_device *dev, int start_address,
int length, u8 *buffer)
{
return read_download_mem(dev, start_address, length,
DTK_ADDR_SPACE_XDATA, buffer);
}
/* Read Edgeport boot memory into the given buffer */
static int read_boot_mem(struct edgeport_serial *serial,
int start_address, int length, u8 *buffer)
{
int status = 0;
int i;
for (i = 0; i < length; i++) {
status = ti_vread_sync(serial->serial->dev,
UMPC_MEMORY_READ, serial->TI_I2C_Type,
(u16)(start_address+i), &buffer[i], 0x01);
if (status) {
dev_dbg(&serial->serial->dev->dev, "%s - ERROR %x\n", __func__, status);
return status;
}
}
dev_dbg(&serial->serial->dev->dev, "%s - start_address = %x, length = %d\n",
__func__, start_address, length);
usb_serial_debug_data(&serial->serial->dev->dev, __func__, length, buffer);
serial->TiReadI2C = 1;
return status;
}
/* Write given block to TI EPROM memory */
static int write_boot_mem(struct edgeport_serial *serial,
int start_address, int length, u8 *buffer)
{
int status = 0;
int i;
u8 *temp;
/* Must do a read before write */
if (!serial->TiReadI2C) {
temp = kmalloc(1, GFP_KERNEL);
if (!temp)
return -ENOMEM;
status = read_boot_mem(serial, 0, 1, temp);
kfree(temp);
if (status)
return status;
}
for (i = 0; i < length; ++i) {
status = ti_vsend_sync(serial->serial->dev, UMPC_MEMORY_WRITE,
buffer[i], (u16)(i + start_address), NULL,
0, TI_VSEND_TIMEOUT_DEFAULT);
if (status)
return status;
}
dev_dbg(&serial->serial->dev->dev, "%s - start_addr = %x, length = %d\n", __func__, start_address, length);
usb_serial_debug_data(&serial->serial->dev->dev, __func__, length, buffer);
return status;
}
/* Write edgeport I2C memory to TI chip */
static int write_i2c_mem(struct edgeport_serial *serial,
int start_address, int length, u8 address_type, u8 *buffer)
{
struct device *dev = &serial->serial->dev->dev;
int status = 0;
int write_length;
u16 be_start_address;
/* We can only send a maximum of one aligned EPROM page at a time */
/* Calculate the number of bytes left in the first page */
write_length = EPROM_PAGE_SIZE -
(start_address & (EPROM_PAGE_SIZE - 1));
if (write_length > length)
write_length = length;
dev_dbg(dev, "%s - BytesInFirstPage Addr = %x, length = %d\n",
__func__, start_address, write_length);
usb_serial_debug_data(dev, __func__, write_length, buffer);
/*
* Write first page.
*
* NOTE: Must use swab as wIndex is sent in little-endian byte order
* regardless of host byte order.
*/
be_start_address = swab16((u16)start_address);
status = ti_vsend_sync(serial->serial->dev, UMPC_MEMORY_WRITE,
(u16)address_type, be_start_address,
buffer, write_length, TI_VSEND_TIMEOUT_DEFAULT);
if (status) {
dev_dbg(dev, "%s - ERROR %d\n", __func__, status);
return status;
}
length -= write_length;
start_address += write_length;
buffer += write_length;
/*
* We should be aligned now -- can write max page size bytes at a
* time.
*/
while (length) {
if (length > EPROM_PAGE_SIZE)
write_length = EPROM_PAGE_SIZE;
else
write_length = length;
dev_dbg(dev, "%s - Page Write Addr = %x, length = %d\n",
__func__, start_address, write_length);
usb_serial_debug_data(dev, __func__, write_length, buffer);
/*
* Write next page.
*
* NOTE: Must use swab as wIndex is sent in little-endian byte
* order regardless of host byte order.
*/
be_start_address = swab16((u16)start_address);
status = ti_vsend_sync(serial->serial->dev, UMPC_MEMORY_WRITE,
(u16)address_type, be_start_address, buffer,
write_length, TI_VSEND_TIMEOUT_DEFAULT);
if (status) {
dev_err(dev, "%s - ERROR %d\n", __func__, status);
return status;
}
length -= write_length;
start_address += write_length;
buffer += write_length;
}
return status;
}
/*
* Examine the UMP DMA registers and LSR
*
* Check the MSBit of the X and Y DMA byte count registers.
 * A zero in this bit indicates that the TX DMA buffers are empty;
 * then check the TX Empty bit in the UART.
*/
static int tx_active(struct edgeport_port *port)
{
int status;
struct out_endpoint_desc_block *oedb;
u8 *lsr;
int bytes_left = 0;
oedb = kmalloc(sizeof(*oedb), GFP_KERNEL);
if (!oedb)
return -ENOMEM;
/*
* Sigh, that's right, just one byte, as not all platforms can
* do DMA from stack
*/
lsr = kmalloc(1, GFP_KERNEL);
if (!lsr) {
kfree(oedb);
return -ENOMEM;
}
/* Read the DMA Count Registers */
status = read_ram(port->port->serial->dev, port->dma_address,
sizeof(*oedb), (void *)oedb);
if (status)
goto exit_is_tx_active;
dev_dbg(&port->port->dev, "%s - XByteCount 0x%X\n", __func__, oedb->XByteCount);
/* and the LSR */
status = read_ram(port->port->serial->dev,
port->uart_base + UMPMEM_OFFS_UART_LSR, 1, lsr);
if (status)
goto exit_is_tx_active;
dev_dbg(&port->port->dev, "%s - LSR = 0x%X\n", __func__, *lsr);
/* If either buffer has data or we are transmitting then return TRUE */
if ((oedb->XByteCount & 0x80) != 0)
bytes_left += 64;
if ((*lsr & UMP_UART_LSR_TX_MASK) == 0)
bytes_left += 1;
/* We return Not Active if we get any kind of error */
exit_is_tx_active:
dev_dbg(&port->port->dev, "%s - return %d\n", __func__, bytes_left);
kfree(lsr);
kfree(oedb);
return bytes_left;
}
static int choose_config(struct usb_device *dev)
{
/*
* There may be multiple configurations on this device, in which case
* we would need to read and parse all of them to find out which one
* we want. However, we just support one config at this point,
* configuration # 1, which is Config Descriptor 0.
*/
dev_dbg(&dev->dev, "%s - Number of Interfaces = %d\n",
__func__, dev->config->desc.bNumInterfaces);
dev_dbg(&dev->dev, "%s - MAX Power = %d\n",
__func__, dev->config->desc.bMaxPower * 2);
if (dev->config->desc.bNumInterfaces != 1) {
dev_err(&dev->dev, "%s - bNumInterfaces is not 1, ERROR!\n", __func__);
return -ENODEV;
}
return 0;
}
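/* Read I2C/EPROM memory using download- or boot-mode access as appropriate */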
static int read_rom(struct edgeport_serial *serial,
int start_address, int length, u8 *buffer)
{
int status;
if (serial->product_info.TiMode == TI_MODE_DOWNLOAD) {
status = read_download_mem(serial->serial->dev,
start_address,
length,
serial->TI_I2C_Type,
buffer);
} else {
status = read_boot_mem(serial, start_address, length,
buffer);
}
return status;
}
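/* Write I2C/EPROM memory using boot- or download-mode access as appropriate */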
static int write_rom(struct edgeport_serial *serial, int start_address,
int length, u8 *buffer)
{
if (serial->product_info.TiMode == TI_MODE_BOOT)
return write_boot_mem(serial, start_address, length,
buffer);
if (serial->product_info.TiMode == TI_MODE_DOWNLOAD)
return write_i2c_mem(serial, start_address, length,
serial->TI_I2C_Type, buffer);
return -EINVAL;
}
/* Read a descriptor header from I2C based on type */
static int get_descriptor_addr(struct edgeport_serial *serial,
int desc_type, struct ti_i2c_desc *rom_desc)
{
int start_address;
int status;
/* Search for requested descriptor in I2C */
start_address = 2;
do {
status = read_rom(serial,
start_address,
sizeof(struct ti_i2c_desc),
(u8 *)rom_desc);
if (status)
return 0;
if (rom_desc->Type == desc_type)
return start_address;
start_address = start_address + sizeof(struct ti_i2c_desc) +
le16_to_cpu(rom_desc->Size);
} while ((start_address < TI_MAX_I2C_SIZE) && rom_desc->Type);
return 0;
}
/* Validate descriptor checksum */
static int valid_csum(struct ti_i2c_desc *rom_desc, u8 *buffer)
{
u16 i;
u8 cs = 0;
for (i = 0; i < le16_to_cpu(rom_desc->Size); i++)
cs = (u8)(cs + buffer[i]);
if (cs != rom_desc->CheckSum) {
pr_debug("%s - Mismatch %x - %x\n", __func__, rom_desc->CheckSum, cs);
return -EINVAL;
}
return 0;
}
/* Make sure that the I2C image is good */
static int check_i2c_image(struct edgeport_serial *serial)
{
struct device *dev = &serial->serial->dev->dev;
int status = 0;
struct ti_i2c_desc *rom_desc;
int start_address = 2;
u8 *buffer;
u16 ttype;
rom_desc = kmalloc(sizeof(*rom_desc), GFP_KERNEL);
if (!rom_desc)
return -ENOMEM;
buffer = kmalloc(TI_MAX_I2C_SIZE, GFP_KERNEL);
if (!buffer) {
kfree(rom_desc);
return -ENOMEM;
}
/* Read the first byte (Signature0); it must be 0x52 or 0x10 */
status = read_rom(serial, 0, 1, buffer);
if (status)
goto out;
if (*buffer != UMP5152 && *buffer != UMP3410) {
dev_err(dev, "%s - invalid buffer signature\n", __func__);
status = -ENODEV;
goto out;
}
do {
/* Validate the I2C */
status = read_rom(serial,
start_address,
sizeof(struct ti_i2c_desc),
(u8 *)rom_desc);
if (status)
break;
if ((start_address + sizeof(struct ti_i2c_desc) +
le16_to_cpu(rom_desc->Size)) > TI_MAX_I2C_SIZE) {
status = -ENODEV;
dev_dbg(dev, "%s - structure too big, erroring out.\n", __func__);
break;
}
dev_dbg(dev, "%s Type = 0x%x\n", __func__, rom_desc->Type);
/* Skip type 2 record */
ttype = rom_desc->Type & 0x0f;
if (ttype != I2C_DESC_TYPE_FIRMWARE_BASIC
&& ttype != I2C_DESC_TYPE_FIRMWARE_AUTO) {
/* Read the descriptor data */
status = read_rom(serial, start_address +
sizeof(struct ti_i2c_desc),
le16_to_cpu(rom_desc->Size),
buffer);
if (status)
break;
status = valid_csum(rom_desc, buffer);
if (status)
break;
}
start_address = start_address + sizeof(struct ti_i2c_desc) +
le16_to_cpu(rom_desc->Size);
} while ((rom_desc->Type != I2C_DESC_TYPE_ION) &&
(start_address < TI_MAX_I2C_SIZE));
if ((rom_desc->Type != I2C_DESC_TYPE_ION) ||
(start_address > TI_MAX_I2C_SIZE))
status = -ENODEV;
out:
kfree(buffer);
kfree(rom_desc);
return status;
}
static int get_manuf_info(struct edgeport_serial *serial, u8 *buffer)
{
int status;
int start_address;
struct ti_i2c_desc *rom_desc;
struct edge_ti_manuf_descriptor *desc;
struct device *dev = &serial->serial->dev->dev;
rom_desc = kmalloc(sizeof(*rom_desc), GFP_KERNEL);
if (!rom_desc)
return -ENOMEM;
start_address = get_descriptor_addr(serial, I2C_DESC_TYPE_ION,
rom_desc);
if (!start_address) {
dev_dbg(dev, "%s - Edge Descriptor not found in I2C\n", __func__);
status = -ENODEV;
goto exit;
}
/* Read the descriptor data */
status = read_rom(serial, start_address+sizeof(struct ti_i2c_desc),
le16_to_cpu(rom_desc->Size), buffer);
if (status)
goto exit;
status = valid_csum(rom_desc, buffer);
desc = (struct edge_ti_manuf_descriptor *)buffer;
dev_dbg(dev, "%s - IonConfig 0x%x\n", __func__, desc->IonConfig);
dev_dbg(dev, "%s - Version %d\n", __func__, desc->Version);
dev_dbg(dev, "%s - Cpu/Board 0x%x\n", __func__, desc->CpuRev_BoardRev);
dev_dbg(dev, "%s - NumPorts %d\n", __func__, desc->NumPorts);
dev_dbg(dev, "%s - NumVirtualPorts %d\n", __func__, desc->NumVirtualPorts);
dev_dbg(dev, "%s - TotalPorts %d\n", __func__, desc->TotalPorts);
exit:
kfree(rom_desc);
return status;
}
/* Build firmware header used for firmware update */
static int build_i2c_fw_hdr(u8 *header, const struct firmware *fw)
{
u8 *buffer;
int buffer_size;
int i;
u8 cs = 0;
struct ti_i2c_desc *i2c_header;
struct ti_i2c_image_header *img_header;
struct ti_i2c_firmware_rec *firmware_rec;
struct edgeport_fw_hdr *fw_hdr = (struct edgeport_fw_hdr *)fw->data;
/*
* In order to update the I2C firmware we must change the type 2 record
* to type 0xF2. This will force the UMP to come up in Boot Mode.
* Then while in boot mode, the driver will download the latest
* firmware (padded to 15.5k) into the UMP ram. And finally when the
* device comes back up in download mode the driver will cause the new
* firmware to be copied from the UMP Ram to I2C and the firmware will
* update the record type from 0xf2 to 0x02.
*/
/*
* Allocate a 15.5k buffer + 2 bytes for version number (Firmware
* Record)
*/
buffer_size = (((1024 * 16) - 512) +
sizeof(struct ti_i2c_firmware_rec));
buffer = kmalloc(buffer_size, GFP_KERNEL);
if (!buffer)
return -ENOMEM;
/* Set entire image of 0xffs */
memset(buffer, 0xff, buffer_size);
/* Copy version number into firmware record */
firmware_rec = (struct ti_i2c_firmware_rec *)buffer;
firmware_rec->Ver_Major = fw_hdr->major_version;
firmware_rec->Ver_Minor = fw_hdr->minor_version;
/* Pointer to fw_down memory image */
img_header = (struct ti_i2c_image_header *)&fw->data[4];
memcpy(buffer + sizeof(struct ti_i2c_firmware_rec),
&fw->data[4 + sizeof(struct ti_i2c_image_header)],
le16_to_cpu(img_header->Length));
for (i = 0; i < buffer_size; i++) {
cs = (u8)(cs + buffer[i]);
}
kfree(buffer);
/* Build new header */
i2c_header = (struct ti_i2c_desc *)header;
firmware_rec = (struct ti_i2c_firmware_rec *)i2c_header->Data;
i2c_header->Type = I2C_DESC_TYPE_FIRMWARE_BLANK;
i2c_header->Size = cpu_to_le16(buffer_size);
i2c_header->CheckSum = cs;
firmware_rec->Ver_Major = fw_hdr->major_version;
firmware_rec->Ver_Minor = fw_hdr->minor_version;
return 0;
}
/* Try to figure out what type of I2c we have */
static int i2c_type_bootmode(struct edgeport_serial *serial)
{
struct device *dev = &serial->serial->dev->dev;
int status;
u8 *data;
data = kmalloc(1, GFP_KERNEL);
if (!data)
return -ENOMEM;
/* Try to read type 2 */
status = ti_vread_sync(serial->serial->dev, UMPC_MEMORY_READ,
DTK_ADDR_SPACE_I2C_TYPE_II, 0, data, 0x01);
if (status)
dev_dbg(dev, "%s - read 2 status error = %d\n", __func__, status);
else
dev_dbg(dev, "%s - read 2 data = 0x%x\n", __func__, *data);
if ((!status) && (*data == UMP5152 || *data == UMP3410)) {
dev_dbg(dev, "%s - ROM_TYPE_II\n", __func__);
serial->TI_I2C_Type = DTK_ADDR_SPACE_I2C_TYPE_II;
goto out;
}
/* Try to read type 3 */
status = ti_vread_sync(serial->serial->dev, UMPC_MEMORY_READ,
DTK_ADDR_SPACE_I2C_TYPE_III, 0, data, 0x01);
if (status)
dev_dbg(dev, "%s - read 3 status error = %d\n", __func__, status);
else
dev_dbg(dev, "%s - read 2 data = 0x%x\n", __func__, *data);
if ((!status) && (*data == UMP5152 || *data == UMP3410)) {
dev_dbg(dev, "%s - ROM_TYPE_III\n", __func__);
serial->TI_I2C_Type = DTK_ADDR_SPACE_I2C_TYPE_III;
goto out;
}
dev_dbg(dev, "%s - Unknown\n", __func__);
serial->TI_I2C_Type = DTK_ADDR_SPACE_I2C_TYPE_II;
status = -ENODEV;
out:
kfree(data);
return status;
}
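/* Send a buffer to the device over the first port's bulk-out endpoint */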
static int bulk_xfer(struct usb_serial *serial, void *buffer,
int length, int *num_sent)
{
int status;
status = usb_bulk_msg(serial->dev,
usb_sndbulkpipe(serial->dev,
serial->port[0]->bulk_out_endpointAddress),
buffer, length, num_sent, 1000);
return status;
}
/* Download given firmware image to the device (IN BOOT MODE) */
static int download_code(struct edgeport_serial *serial, u8 *image,
int image_length)
{
int status = 0;
int pos;
int transfer;
int done;
/* Transfer firmware image */
for (pos = 0; pos < image_length; ) {
/* Read the next buffer from file */
transfer = image_length - pos;
if (transfer > EDGE_FW_BULK_MAX_PACKET_SIZE)
transfer = EDGE_FW_BULK_MAX_PACKET_SIZE;
/* Transfer data */
status = bulk_xfer(serial->serial, &image[pos],
transfer, &done);
if (status)
break;
/* Advance buffer pointer */
pos += done;
}
return status;
}
/* FIXME!!! */
static int config_boot_dev(struct usb_device *dev)
{
return 0;
}
static int ti_cpu_rev(struct edge_ti_manuf_descriptor *desc)
{
return TI_GET_CPU_REVISION(desc->CpuRev_BoardRev);
}
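/* Verify the firmware image size and checksum against its header */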
static int check_fw_sanity(struct edgeport_serial *serial,
const struct firmware *fw)
{
u16 length_total;
u8 checksum = 0;
int pos;
struct device *dev = &serial->serial->interface->dev;
struct edgeport_fw_hdr *fw_hdr = (struct edgeport_fw_hdr *)fw->data;
if (fw->size < sizeof(struct edgeport_fw_hdr)) {
dev_err(dev, "incomplete fw header\n");
return -EINVAL;
}
length_total = le16_to_cpu(fw_hdr->length) +
sizeof(struct edgeport_fw_hdr);
if (fw->size != length_total) {
dev_err(dev, "bad fw size (expected: %u, got: %zu)\n",
length_total, fw->size);
return -EINVAL;
}
for (pos = sizeof(struct edgeport_fw_hdr); pos < fw->size; ++pos)
checksum += fw->data[pos];
if (checksum != fw_hdr->checksum) {
dev_err(dev, "bad fw checksum (expected: 0x%x, got: 0x%x)\n",
fw_hdr->checksum, checksum);
return -EINVAL;
}
return 0;
}
/*
* DownloadTIFirmware - Download run-time operating firmware to the TI5052
*
* This routine downloads the main operating code into the TI5052, using the
* boot code already burned into E2PROM or ROM.
*/
static int download_fw(struct edgeport_serial *serial)
{
struct device *dev = &serial->serial->interface->dev;
int status = 0;
struct usb_interface_descriptor *interface;
const struct firmware *fw;
const char *fw_name = "edgeport/down3.bin";
struct edgeport_fw_hdr *fw_hdr;
status = request_firmware(&fw, fw_name, dev);
if (status) {
dev_err(dev, "Failed to load image \"%s\" err %d\n",
fw_name, status);
return status;
}
if (check_fw_sanity(serial, fw)) {
status = -EINVAL;
goto out;
}
fw_hdr = (struct edgeport_fw_hdr *)fw->data;
/* If on-board version is newer, "fw_version" will be updated later. */
serial->fw_version = (fw_hdr->major_version << 8) +
fw_hdr->minor_version;
/*
 * This routine is entered in both boot mode and download mode.
 * We can determine which code is running by reading the config
 * descriptor: if we have only one bulk pipe, the device is in boot mode.
*/
serial->product_info.hardware_type = HARDWARE_TYPE_TIUMP;
/* Default to type 2 i2c */
serial->TI_I2C_Type = DTK_ADDR_SPACE_I2C_TYPE_II;
status = choose_config(serial->serial->dev);
if (status)
goto out;
interface = &serial->serial->interface->cur_altsetting->desc;
if (!interface) {
dev_err(dev, "%s - no interface set, error!\n", __func__);
status = -ENODEV;
goto out;
}
/*
* Setup initial mode -- the default mode 0 is TI_MODE_CONFIGURING
* if we have more than one endpoint we are definitely in download
* mode
*/
if (interface->bNumEndpoints > 1) {
serial->product_info.TiMode = TI_MODE_DOWNLOAD;
status = do_download_mode(serial, fw);
} else {
/* Otherwise we will remain in configuring mode */
serial->product_info.TiMode = TI_MODE_CONFIGURING;
status = do_boot_mode(serial, fw);
}
out:
release_firmware(fw);
return status;
}
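/*
 * Download-mode path: validate the I2C image and update the stored
 * firmware record if the driver's copy is newer.
 */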
static int do_download_mode(struct edgeport_serial *serial,
const struct firmware *fw)
{
struct device *dev = &serial->serial->interface->dev;
int status = 0;
int start_address;
struct edge_ti_manuf_descriptor *ti_manuf_desc;
int download_cur_ver;
int download_new_ver;
struct edgeport_fw_hdr *fw_hdr = (struct edgeport_fw_hdr *)fw->data;
struct ti_i2c_desc *rom_desc;
dev_dbg(dev, "%s - RUNNING IN DOWNLOAD MODE\n", __func__);
status = check_i2c_image(serial);
if (status) {
dev_dbg(dev, "%s - DOWNLOAD MODE -- BAD I2C\n", __func__);
return status;
}
/*
* Validate Hardware version number
* Read Manufacturing Descriptor from TI Based Edgeport
*/
ti_manuf_desc = kmalloc(sizeof(*ti_manuf_desc), GFP_KERNEL);
if (!ti_manuf_desc)
return -ENOMEM;
status = get_manuf_info(serial, (u8 *)ti_manuf_desc);
if (status) {
kfree(ti_manuf_desc);
return status;
}
/* Check version number of ION descriptor */
if (!ignore_cpu_rev && ti_cpu_rev(ti_manuf_desc) < 2) {
dev_dbg(dev, "%s - Wrong CPU Rev %d (Must be 2)\n",
__func__, ti_cpu_rev(ti_manuf_desc));
kfree(ti_manuf_desc);
return -EINVAL;
}
rom_desc = kmalloc(sizeof(*rom_desc), GFP_KERNEL);
if (!rom_desc) {
kfree(ti_manuf_desc);
return -ENOMEM;
}
/* Search for type 2 record (firmware record) */
start_address = get_descriptor_addr(serial,
I2C_DESC_TYPE_FIRMWARE_BASIC, rom_desc);
if (start_address != 0) {
struct ti_i2c_firmware_rec *firmware_version;
u8 *record;
dev_dbg(dev, "%s - Found Type FIRMWARE (Type 2) record\n",
__func__);
firmware_version = kmalloc(sizeof(*firmware_version),
GFP_KERNEL);
if (!firmware_version) {
kfree(rom_desc);
kfree(ti_manuf_desc);
return -ENOMEM;
}
/*
* Validate version number
* Read the descriptor data
*/
status = read_rom(serial, start_address +
sizeof(struct ti_i2c_desc),
sizeof(struct ti_i2c_firmware_rec),
(u8 *)firmware_version);
if (status) {
kfree(firmware_version);
kfree(rom_desc);
kfree(ti_manuf_desc);
return status;
}
/*
* Check version number of download with current
* version in I2c
*/
download_cur_ver = (firmware_version->Ver_Major << 8) +
(firmware_version->Ver_Minor);
download_new_ver = (fw_hdr->major_version << 8) +
(fw_hdr->minor_version);
dev_dbg(dev, "%s - >> FW Versions Device %d.%d Driver %d.%d\n",
__func__, firmware_version->Ver_Major,
firmware_version->Ver_Minor,
fw_hdr->major_version, fw_hdr->minor_version);
/*
* Check if we have an old version in the I2C and
* update if necessary
*/
if (download_cur_ver < download_new_ver) {
dev_dbg(dev, "%s - Update I2C dld from %d.%d to %d.%d\n",
__func__,
firmware_version->Ver_Major,
firmware_version->Ver_Minor,
fw_hdr->major_version,
fw_hdr->minor_version);
record = kmalloc(1, GFP_KERNEL);
if (!record) {
kfree(firmware_version);
kfree(rom_desc);
kfree(ti_manuf_desc);
return -ENOMEM;
}
/*
* In order to update the I2C firmware we must
* change the type 2 record to type 0xF2. This
* will force the UMP to come up in Boot Mode.
* Then while in boot mode, the driver will
* download the latest firmware (padded to
* 15.5k) into the UMP ram. Finally when the
* device comes back up in download mode the
* driver will cause the new firmware to be
* copied from the UMP Ram to I2C and the
* firmware will update the record type from
* 0xf2 to 0x02.
*/
*record = I2C_DESC_TYPE_FIRMWARE_BLANK;
/*
* Change the I2C Firmware record type to
* 0xf2 to trigger an update
*/
status = write_rom(serial, start_address,
sizeof(*record), record);
if (status) {
kfree(record);
kfree(firmware_version);
kfree(rom_desc);
kfree(ti_manuf_desc);
return status;
}
/*
* verify the write -- must do this in order
* for write to complete before we do the
* hardware reset
*/
status = read_rom(serial,
start_address,
sizeof(*record),
record);
if (status) {
kfree(record);
kfree(firmware_version);
kfree(rom_desc);
kfree(ti_manuf_desc);
return status;
}
if (*record != I2C_DESC_TYPE_FIRMWARE_BLANK) {
dev_err(dev, "%s - error resetting device\n",
__func__);
kfree(record);
kfree(firmware_version);
kfree(rom_desc);
kfree(ti_manuf_desc);
return -ENODEV;
}
dev_dbg(dev, "%s - HARDWARE RESET\n", __func__);
/* Reset UMP -- Back to BOOT MODE */
status = ti_vsend_sync(serial->serial->dev,
UMPC_HARDWARE_RESET,
0, 0, NULL, 0,
TI_VSEND_TIMEOUT_DEFAULT);
dev_dbg(dev, "%s - HARDWARE RESET return %d\n",
__func__, status);
/* return an error on purpose. */
kfree(record);
kfree(firmware_version);
kfree(rom_desc);
kfree(ti_manuf_desc);
return -ENODEV;
}
/* Same or newer fw version is already loaded */
serial->fw_version = download_cur_ver;
kfree(firmware_version);
}
/* Search for type 0xF2 record (firmware blank record) */
else {
start_address = get_descriptor_addr(serial,
I2C_DESC_TYPE_FIRMWARE_BLANK, rom_desc);
if (start_address != 0) {
#define HEADER_SIZE (sizeof(struct ti_i2c_desc) + \
sizeof(struct ti_i2c_firmware_rec))
u8 *header;
u8 *vheader;
header = kmalloc(HEADER_SIZE, GFP_KERNEL);
if (!header) {
kfree(rom_desc);
kfree(ti_manuf_desc);
return -ENOMEM;
}
vheader = kmalloc(HEADER_SIZE, GFP_KERNEL);
if (!vheader) {
kfree(header);
kfree(rom_desc);
kfree(ti_manuf_desc);
return -ENOMEM;
}
dev_dbg(dev, "%s - Found Type BLANK FIRMWARE (Type F2) record\n",
__func__);
/*
* In order to update the I2C firmware we must change
* the type 2 record to type 0xF2. This will force the
* UMP to come up in Boot Mode. Then while in boot
* mode, the driver will download the latest firmware
* (padded to 15.5k) into the UMP ram. Finally when the
* device comes back up in download mode the driver
* will cause the new firmware to be copied from the
* UMP Ram to I2C and the firmware will update the
* record type from 0xf2 to 0x02.
*/
status = build_i2c_fw_hdr(header, fw);
if (status) {
kfree(vheader);
kfree(header);
kfree(rom_desc);
kfree(ti_manuf_desc);
return -EINVAL;
}
/*
* Update I2C with type 0xf2 record with correct
* size and checksum
*/
status = write_rom(serial,
start_address,
HEADER_SIZE,
header);
if (status) {
kfree(vheader);
kfree(header);
kfree(rom_desc);
kfree(ti_manuf_desc);
return -EINVAL;
}
/*
* verify the write -- must do this in order for
* write to complete before we do the hardware reset
*/
status = read_rom(serial, start_address,
HEADER_SIZE, vheader);
if (status) {
dev_dbg(dev, "%s - can't read header back\n",
__func__);
kfree(vheader);
kfree(header);
kfree(rom_desc);
kfree(ti_manuf_desc);
return status;
}
if (memcmp(vheader, header, HEADER_SIZE)) {
dev_dbg(dev, "%s - write download record failed\n",
__func__);
kfree(vheader);
kfree(header);
kfree(rom_desc);
kfree(ti_manuf_desc);
return -EINVAL;
}
kfree(vheader);
kfree(header);
dev_dbg(dev, "%s - Start firmware update\n", __func__);
/* Tell firmware to copy download image into I2C */
status = ti_vsend_sync(serial->serial->dev,
UMPC_COPY_DNLD_TO_I2C,
0, 0, NULL, 0,
TI_VSEND_TIMEOUT_FW_DOWNLOAD);
dev_dbg(dev, "%s - Update complete 0x%x\n", __func__,
status);
if (status) {
dev_err(dev,
"%s - UMPC_COPY_DNLD_TO_I2C failed\n",
__func__);
kfree(rom_desc);
kfree(ti_manuf_desc);
return status;
}
}
}
/* The device is running the download code */
kfree(rom_desc);
kfree(ti_manuf_desc);
return 0;
}
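/*
 * Boot-mode path: download the operational firmware into UMP RAM,
 * or stay in boot mode if the I2C image cannot be used.
 */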
static int do_boot_mode(struct edgeport_serial *serial,
const struct firmware *fw)
{
struct device *dev = &serial->serial->interface->dev;
int status = 0;
struct edge_ti_manuf_descriptor *ti_manuf_desc;
struct edgeport_fw_hdr *fw_hdr = (struct edgeport_fw_hdr *)fw->data;
dev_dbg(dev, "%s - RUNNING IN BOOT MODE\n", __func__);
/* Configure the TI device so we can use the BULK pipes for download */
status = config_boot_dev(serial->serial->dev);
if (status)
return status;
if (le16_to_cpu(serial->serial->dev->descriptor.idVendor)
!= USB_VENDOR_ID_ION) {
dev_dbg(dev, "%s - VID = 0x%x\n", __func__,
le16_to_cpu(serial->serial->dev->descriptor.idVendor));
serial->TI_I2C_Type = DTK_ADDR_SPACE_I2C_TYPE_II;
goto stayinbootmode;
}
/*
* We have an ION device (I2c Must be programmed)
* Determine I2C image type
*/
if (i2c_type_bootmode(serial))
goto stayinbootmode;
/* Check for ION Vendor ID and that the I2C is valid */
if (!check_i2c_image(serial)) {
struct ti_i2c_image_header *header;
int i;
u8 cs = 0;
u8 *buffer;
int buffer_size;
/*
* Validate Hardware version number
* Read Manufacturing Descriptor from TI Based Edgeport
*/
ti_manuf_desc = kmalloc(sizeof(*ti_manuf_desc), GFP_KERNEL);
if (!ti_manuf_desc)
return -ENOMEM;
status = get_manuf_info(serial, (u8 *)ti_manuf_desc);
if (status) {
kfree(ti_manuf_desc);
goto stayinbootmode;
}
/* Check for version 2 */
if (!ignore_cpu_rev && ti_cpu_rev(ti_manuf_desc) < 2) {
dev_dbg(dev, "%s - Wrong CPU Rev %d (Must be 2)\n",
__func__, ti_cpu_rev(ti_manuf_desc));
kfree(ti_manuf_desc);
goto stayinbootmode;
}
kfree(ti_manuf_desc);
/*
* In order to update the I2C firmware we must change the type
* 2 record to type 0xF2. This will force the UMP to come up
* in Boot Mode. Then while in boot mode, the driver will
* download the latest firmware (padded to 15.5k) into the
* UMP ram. Finally when the device comes back up in download
* mode the driver will cause the new firmware to be copied
* from the UMP Ram to I2C and the firmware will update the
* record type from 0xf2 to 0x02.
*
* Do we really have to copy the whole firmware image,
* or could we do this in place!
*/
/* Allocate a 15.5k buffer + 3 byte header */
buffer_size = (((1024 * 16) - 512) +
sizeof(struct ti_i2c_image_header));
buffer = kmalloc(buffer_size, GFP_KERNEL);
if (!buffer)
return -ENOMEM;
/* Initialize the buffer to 0xff (pad the buffer) */
memset(buffer, 0xff, buffer_size);
memcpy(buffer, &fw->data[4], fw->size - 4);
for (i = sizeof(struct ti_i2c_image_header);
i < buffer_size; i++) {
cs = (u8)(cs + buffer[i]);
}
header = (struct ti_i2c_image_header *)buffer;
/* update length and checksum after padding */
header->Length = cpu_to_le16((u16)(buffer_size -
sizeof(struct ti_i2c_image_header)));
header->CheckSum = cs;
/* Download the operational code */
dev_dbg(dev, "%s - Downloading operational code image version %d.%d (TI UMP)\n",
__func__,
fw_hdr->major_version, fw_hdr->minor_version);
status = download_code(serial, buffer, buffer_size);
kfree(buffer);
if (status) {
dev_dbg(dev, "%s - Error downloading operational code image\n", __func__);
return status;
}
/* Device will reboot */
serial->product_info.TiMode = TI_MODE_TRANSITIONING;
dev_dbg(dev, "%s - Download successful -- Device rebooting...\n", __func__);
return 1;
}
stayinbootmode:
/* Eprom is invalid or blank stay in boot mode */
dev_dbg(dev, "%s - STAYING IN BOOT MODE\n", __func__);
serial->product_info.TiMode = TI_MODE_BOOT;
return 1;
}
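/* Set or clear a single UMP feature (DTR, RTS, loopback, break) on a port */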
static int ti_do_config(struct edgeport_port *port, int feature, int on)
{
on = !!on; /* 1 or 0 not bitmask */
return send_port_cmd(port->port, feature, on, NULL, 0);
}
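/* Push the shadow MCR settings (DTR, RTS, loopback) back to the device */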
static int restore_mcr(struct edgeport_port *port, u8 mcr)
{
int status = 0;
dev_dbg(&port->port->dev, "%s - %x\n", __func__, mcr);
status = ti_do_config(port, UMPC_SET_CLR_DTR, mcr & MCR_DTR);
if (status)
return status;
status = ti_do_config(port, UMPC_SET_CLR_RTS, mcr & MCR_RTS);
if (status)
return status;
return ti_do_config(port, UMPC_SET_CLR_LOOPBACK, mcr & MCR_LOOPBACK);
}
/* Convert TI LSR to standard UART flags */
static u8 map_line_status(u8 ti_lsr)
{
u8 lsr = 0;
#define MAP_FLAG(flagUmp, flagUart) \
if (ti_lsr & flagUmp) \
lsr |= flagUart;
MAP_FLAG(UMP_UART_LSR_OV_MASK, LSR_OVER_ERR) /* overrun */
MAP_FLAG(UMP_UART_LSR_PE_MASK, LSR_PAR_ERR) /* parity error */
MAP_FLAG(UMP_UART_LSR_FE_MASK, LSR_FRM_ERR) /* framing error */
MAP_FLAG(UMP_UART_LSR_BR_MASK, LSR_BREAK) /* break detected */
MAP_FLAG(UMP_UART_LSR_RX_MASK, LSR_RX_AVAIL) /* rx data available */
MAP_FLAG(UMP_UART_LSR_TX_MASK, LSR_TX_EMPTY) /* tx hold reg empty */
#undef MAP_FLAG
return lsr;
}
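/* Handle a modem-status event: update counters, wake waiters and apply CTS flow control */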
static void handle_new_msr(struct edgeport_port *edge_port, u8 msr)
{
struct async_icount *icount;
struct tty_struct *tty;
dev_dbg(&edge_port->port->dev, "%s - %02x\n", __func__, msr);
if (msr & (EDGEPORT_MSR_DELTA_CTS | EDGEPORT_MSR_DELTA_DSR |
EDGEPORT_MSR_DELTA_RI | EDGEPORT_MSR_DELTA_CD)) {
icount = &edge_port->port->icount;
/* update input line counters */
if (msr & EDGEPORT_MSR_DELTA_CTS)
icount->cts++;
if (msr & EDGEPORT_MSR_DELTA_DSR)
icount->dsr++;
if (msr & EDGEPORT_MSR_DELTA_CD)
icount->dcd++;
if (msr & EDGEPORT_MSR_DELTA_RI)
icount->rng++;
wake_up_interruptible(&edge_port->port->port.delta_msr_wait);
}
/* Save the new modem status */
edge_port->shadow_msr = msr & 0xf0;
tty = tty_port_tty_get(&edge_port->port->port);
/* handle CTS flow control */
if (tty && C_CRTSCTS(tty)) {
if (msr & EDGEPORT_MSR_CTS)
tty_wakeup(tty);
}
tty_kref_put(tty);
}
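/* Handle a line-status event: update error counters and optionally queue the data byte */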
static void handle_new_lsr(struct edgeport_port *edge_port, int lsr_data,
u8 lsr, u8 data)
{
struct async_icount *icount;
u8 new_lsr = (u8)(lsr & (u8)(LSR_OVER_ERR | LSR_PAR_ERR |
LSR_FRM_ERR | LSR_BREAK));
dev_dbg(&edge_port->port->dev, "%s - %02x\n", __func__, new_lsr);
edge_port->shadow_lsr = lsr;
if (new_lsr & LSR_BREAK)
/*
* Parity and Framing errors only count if they
* occur exclusive of a break being received.
*/
new_lsr &= (u8)(LSR_OVER_ERR | LSR_BREAK);
/* Place LSR data byte into Rx buffer */
if (lsr_data)
edge_tty_recv(edge_port->port, &data, 1);
/* update input line counters */
icount = &edge_port->port->icount;
if (new_lsr & LSR_BREAK)
icount->brk++;
if (new_lsr & LSR_OVER_ERR)
icount->overrun++;
if (new_lsr & LSR_PAR_ERR)
icount->parity++;
if (new_lsr & LSR_FRM_ERR)
icount->frame++;
}
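/* Interrupt-in completion handler: decode LSR/MSR events and dispatch them to the right port */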
static void edge_interrupt_callback(struct urb *urb)
{
struct edgeport_serial *edge_serial = urb->context;
struct usb_serial_port *port;
struct edgeport_port *edge_port;
struct device *dev;
unsigned char *data = urb->transfer_buffer;
int length = urb->actual_length;
int port_number;
int function;
int retval;
u8 lsr;
u8 msr;
int status = urb->status;
switch (status) {
case 0:
/* success */
break;
case -ECONNRESET:
case -ENOENT:
case -ESHUTDOWN:
/* this urb is terminated, clean up */
dev_dbg(&urb->dev->dev, "%s - urb shutting down with status: %d\n",
__func__, status);
return;
default:
dev_err(&urb->dev->dev, "%s - nonzero urb status received: "
"%d\n", __func__, status);
goto exit;
}
if (!length) {
dev_dbg(&urb->dev->dev, "%s - no data in urb\n", __func__);
goto exit;
}
dev = &edge_serial->serial->dev->dev;
usb_serial_debug_data(dev, __func__, length, data);
if (length != 2) {
dev_dbg(dev, "%s - expecting packet of size 2, got %d\n", __func__, length);
goto exit;
}
port_number = TIUMP_GET_PORT_FROM_CODE(data[0]);
function = TIUMP_GET_FUNC_FROM_CODE(data[0]);
dev_dbg(dev, "%s - port_number %d, function %d, info 0x%x\n", __func__,
port_number, function, data[1]);
if (port_number >= edge_serial->serial->num_ports) {
dev_err(dev, "bad port number %d\n", port_number);
goto exit;
}
port = edge_serial->serial->port[port_number];
edge_port = usb_get_serial_port_data(port);
if (!edge_port) {
dev_dbg(dev, "%s - edge_port not found\n", __func__);
return;
}
switch (function) {
case TIUMP_INTERRUPT_CODE_LSR:
lsr = map_line_status(data[1]);
if (lsr & UMP_UART_LSR_DATA_MASK) {
/*
* Save the LSR event for bulk read completion routine
*/
dev_dbg(dev, "%s - LSR Event Port %u LSR Status = %02x\n",
__func__, port_number, lsr);
edge_port->lsr_event = 1;
edge_port->lsr_mask = lsr;
} else {
dev_dbg(dev, "%s - ===== Port %d LSR Status = %02x ======\n",
__func__, port_number, lsr);
handle_new_lsr(edge_port, 0, lsr, 0);
}
break;
case TIUMP_INTERRUPT_CODE_MSR: /* MSR */
/* Copy MSR from UMP */
msr = data[1];
dev_dbg(dev, "%s - ===== Port %u MSR Status = %02x ======\n",
__func__, port_number, msr);
handle_new_msr(edge_port, msr);
break;
default:
dev_err(&urb->dev->dev,
"%s - Unknown Interrupt code from UMP %x\n",
__func__, data[1]);
break;
}
exit:
retval = usb_submit_urb(urb, GFP_ATOMIC);
if (retval)
dev_err(&urb->dev->dev,
"%s - usb_submit_urb failed with result %d\n",
__func__, retval);
}
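/* Bulk-in completion handler: pass received data to the tty layer and resubmit the urb */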
static void edge_bulk_in_callback(struct urb *urb)
{
struct edgeport_port *edge_port = urb->context;
struct device *dev = &edge_port->port->dev;
unsigned char *data = urb->transfer_buffer;
unsigned long flags;
int retval = 0;
int port_number;
int status = urb->status;
switch (status) {
case 0:
/* success */
break;
case -ECONNRESET:
case -ENOENT:
case -ESHUTDOWN:
/* this urb is terminated, clean up */
dev_dbg(&urb->dev->dev, "%s - urb shutting down with status: %d\n", __func__, status);
return;
default:
dev_err(&urb->dev->dev, "%s - nonzero read bulk status received: %d\n", __func__, status);
}
if (status == -EPIPE)
goto exit;
if (status) {
dev_err(&urb->dev->dev, "%s - stopping read!\n", __func__);
return;
}
port_number = edge_port->port->port_number;
if (urb->actual_length > 0 && edge_port->lsr_event) {
edge_port->lsr_event = 0;
dev_dbg(dev, "%s ===== Port %u LSR Status = %02x, Data = %02x ======\n",
__func__, port_number, edge_port->lsr_mask, *data);
handle_new_lsr(edge_port, 1, edge_port->lsr_mask, *data);
/* Adjust buffer length/pointer */
--urb->actual_length;
++data;
}
if (urb->actual_length) {
usb_serial_debug_data(dev, __func__, urb->actual_length, data);
if (edge_port->close_pending)
dev_dbg(dev, "%s - close pending, dropping data on the floor\n",
__func__);
else
edge_tty_recv(edge_port->port, data,
urb->actual_length);
edge_port->port->icount.rx += urb->actual_length;
}
exit:
/* continue read unless stopped */
spin_lock_irqsave(&edge_port->ep_lock, flags);
if (edge_port->ep_read_urb_state == EDGE_READ_URB_RUNNING)
retval = usb_submit_urb(urb, GFP_ATOMIC);
else if (edge_port->ep_read_urb_state == EDGE_READ_URB_STOPPING)
edge_port->ep_read_urb_state = EDGE_READ_URB_STOPPED;
spin_unlock_irqrestore(&edge_port->ep_lock, flags);
if (retval)
dev_err(dev, "%s - usb_submit_urb failed with result %d\n", __func__, retval);
}
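/* Queue received data to the tty flip buffers */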
static void edge_tty_recv(struct usb_serial_port *port, unsigned char *data,
int length)
{
int queued;
queued = tty_insert_flip_string(&port->port, data, length);
if (queued < length)
dev_err(&port->dev, "%s - dropping data, %d bytes lost\n",
__func__, length - queued);
tty_flip_buffer_push(&port->port);
}
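/* Bulk-out completion handler: release the write urb and send any remaining buffered data */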
static void edge_bulk_out_callback(struct urb *urb)
{
struct usb_serial_port *port = urb->context;
struct edgeport_port *edge_port = usb_get_serial_port_data(port);
int status = urb->status;
struct tty_struct *tty;
edge_port->ep_write_urb_in_use = 0;
switch (status) {
case 0:
/* success */
break;
case -ECONNRESET:
case -ENOENT:
case -ESHUTDOWN:
/* this urb is terminated, clean up */
dev_dbg(&urb->dev->dev, "%s - urb shutting down with status: %d\n",
__func__, status);
return;
default:
dev_err_console(port, "%s - nonzero write bulk status "
"received: %d\n", __func__, status);
}
/* send any buffered data */
tty = tty_port_tty_get(&port->port);
edge_send(port, tty);
tty_kref_put(tty);
}
static int edge_open(struct tty_struct *tty, struct usb_serial_port *port)
{
struct edgeport_port *edge_port = usb_get_serial_port_data(port);
struct edgeport_serial *edge_serial;
struct usb_device *dev;
struct urb *urb;
int status;
u16 open_settings;
u8 transaction_timeout;
if (edge_port == NULL)
return -ENODEV;
dev = port->serial->dev;
/* turn off loopback */
status = ti_do_config(edge_port, UMPC_SET_CLR_LOOPBACK, 0);
if (status) {
dev_err(&port->dev,
"%s - cannot send clear loopback command, %d\n",
__func__, status);
return status;
}
/* set up the port settings */
if (tty)
edge_set_termios(tty, port, &tty->termios);
/* open up the port */
/* milliseconds to timeout for DMA transfer */
transaction_timeout = 2;
edge_port->ump_read_timeout =
max(20, ((transaction_timeout * 3) / 2));
/* milliseconds to timeout for DMA transfer */
open_settings = (u8)(UMP_DMA_MODE_CONTINOUS |
UMP_PIPE_TRANS_TIMEOUT_ENA |
(transaction_timeout << 2));
dev_dbg(&port->dev, "%s - Sending UMPC_OPEN_PORT\n", __func__);
/* Tell TI to open and start the port */
status = send_port_cmd(port, UMPC_OPEN_PORT, open_settings, NULL, 0);
if (status) {
dev_err(&port->dev, "%s - cannot send open command, %d\n",
__func__, status);
return status;
}
/* Start the DMA? */
status = send_port_cmd(port, UMPC_START_PORT, 0, NULL, 0);
if (status) {
dev_err(&port->dev, "%s - cannot send start DMA command, %d\n",
__func__, status);
return status;
}
/* Clear TX and RX buffers in UMP */
status = purge_port(port, UMP_PORT_DIR_OUT | UMP_PORT_DIR_IN);
if (status) {
dev_err(&port->dev,
"%s - cannot send clear buffers command, %d\n",
__func__, status);
return status;
}
/* Read Initial MSR */
status = read_port_cmd(port, UMPC_READ_MSR, 0, &edge_port->shadow_msr, 1);
if (status) {
dev_err(&port->dev, "%s - cannot send read MSR command, %d\n",
__func__, status);
return status;
}
dev_dbg(&port->dev, "ShadowMSR 0x%X\n", edge_port->shadow_msr);
/* Set Initial MCR */
edge_port->shadow_mcr = MCR_RTS | MCR_DTR;
dev_dbg(&port->dev, "ShadowMCR 0x%X\n", edge_port->shadow_mcr);
edge_serial = edge_port->edge_serial;
if (mutex_lock_interruptible(&edge_serial->es_lock))
return -ERESTARTSYS;
if (edge_serial->num_ports_open == 0) {
/* we are the first port to open, post the interrupt urb */
urb = edge_serial->serial->port[0]->interrupt_in_urb;
urb->context = edge_serial;
status = usb_submit_urb(urb, GFP_KERNEL);
if (status) {
dev_err(&port->dev,
"%s - usb_submit_urb failed with value %d\n",
__func__, status);
goto release_es_lock;
}
}
/*
* reset the data toggle on the bulk endpoints to work around bug in
 * host controllers where things get out of sync sometimes
*/
usb_clear_halt(dev, port->write_urb->pipe);
usb_clear_halt(dev, port->read_urb->pipe);
/* start up our bulk read urb */
urb = port->read_urb;
edge_port->ep_read_urb_state = EDGE_READ_URB_RUNNING;
urb->context = edge_port;
status = usb_submit_urb(urb, GFP_KERNEL);
if (status) {
dev_err(&port->dev,
"%s - read bulk usb_submit_urb failed with value %d\n",
__func__, status);
goto unlink_int_urb;
}
++edge_serial->num_ports_open;
goto release_es_lock;
unlink_int_urb:
if (edge_port->edge_serial->num_ports_open == 0)
usb_kill_urb(port->serial->port[0]->interrupt_in_urb);
release_es_lock:
mutex_unlock(&edge_serial->es_lock);
return status;
}
static void edge_close(struct usb_serial_port *port)
{
struct edgeport_serial *edge_serial;
struct edgeport_port *edge_port;
unsigned long flags;
edge_serial = usb_get_serial_data(port->serial);
edge_port = usb_get_serial_port_data(port);
if (edge_serial == NULL || edge_port == NULL)
return;
/*
 * The bulk read completion routine will check
 * this flag and discard any read data
*/
edge_port->close_pending = 1;
usb_kill_urb(port->read_urb);
usb_kill_urb(port->write_urb);
edge_port->ep_write_urb_in_use = 0;
spin_lock_irqsave(&edge_port->ep_lock, flags);
kfifo_reset_out(&port->write_fifo);
spin_unlock_irqrestore(&edge_port->ep_lock, flags);
dev_dbg(&port->dev, "%s - send umpc_close_port\n", __func__);
send_port_cmd(port, UMPC_CLOSE_PORT, 0, NULL, 0);
mutex_lock(&edge_serial->es_lock);
--edge_port->edge_serial->num_ports_open;
if (edge_port->edge_serial->num_ports_open <= 0) {
/* last port is now closed, let's shut down our interrupt urb */
usb_kill_urb(port->serial->port[0]->interrupt_in_urb);
edge_port->edge_serial->num_ports_open = 0;
}
mutex_unlock(&edge_serial->es_lock);
edge_port->close_pending = 0;
}
static int edge_write(struct tty_struct *tty, struct usb_serial_port *port,
const unsigned char *data, int count)
{
struct edgeport_port *edge_port = usb_get_serial_port_data(port);
if (count == 0) {
dev_dbg(&port->dev, "%s - write request of 0 bytes\n", __func__);
return 0;
}
if (edge_port == NULL)
return -ENODEV;
if (edge_port->close_pending == 1)
return -ENODEV;
count = kfifo_in_locked(&port->write_fifo, data, count,
&edge_port->ep_lock);
edge_send(port, tty);
return count;
}
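/* Start a bulk-out transfer with whatever is queued in the write fifo */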
static void edge_send(struct usb_serial_port *port, struct tty_struct *tty)
{
int count, result;
struct edgeport_port *edge_port = usb_get_serial_port_data(port);
unsigned long flags;
spin_lock_irqsave(&edge_port->ep_lock, flags);
if (edge_port->ep_write_urb_in_use) {
spin_unlock_irqrestore(&edge_port->ep_lock, flags);
return;
}
count = kfifo_out(&port->write_fifo,
port->write_urb->transfer_buffer,
port->bulk_out_size);
if (count == 0) {
spin_unlock_irqrestore(&edge_port->ep_lock, flags);
return;
}
edge_port->ep_write_urb_in_use = 1;
spin_unlock_irqrestore(&edge_port->ep_lock, flags);
usb_serial_debug_data(&port->dev, __func__, count, port->write_urb->transfer_buffer);
/* set up our urb */
port->write_urb->transfer_buffer_length = count;
/* send the data out the bulk port */
result = usb_submit_urb(port->write_urb, GFP_ATOMIC);
if (result) {
dev_err_console(port,
"%s - failed submitting write urb, error %d\n",
__func__, result);
edge_port->ep_write_urb_in_use = 0;
/* TODO: reschedule edge_send */
} else
edge_port->port->icount.tx += count;
/*
* wakeup any process waiting for writes to complete
* there is now more room in the buffer for new writes
*/
if (tty)
tty_wakeup(tty);
}
static unsigned int edge_write_room(struct tty_struct *tty)
{
struct usb_serial_port *port = tty->driver_data;
struct edgeport_port *edge_port = usb_get_serial_port_data(port);
unsigned int room;
unsigned long flags;
if (edge_port == NULL)
return 0;
if (edge_port->close_pending == 1)
return 0;
spin_lock_irqsave(&edge_port->ep_lock, flags);
room = kfifo_avail(&port->write_fifo);
spin_unlock_irqrestore(&edge_port->ep_lock, flags);
dev_dbg(&port->dev, "%s - returns %u\n", __func__, room);
return room;
}
static unsigned int edge_chars_in_buffer(struct tty_struct *tty)
{
struct usb_serial_port *port = tty->driver_data;
struct edgeport_port *edge_port = usb_get_serial_port_data(port);
unsigned int chars;
unsigned long flags;
if (edge_port == NULL)
return 0;
spin_lock_irqsave(&edge_port->ep_lock, flags);
chars = kfifo_len(&port->write_fifo);
spin_unlock_irqrestore(&edge_port->ep_lock, flags);
dev_dbg(&port->dev, "%s - returns %u\n", __func__, chars);
return chars;
}
static bool edge_tx_empty(struct usb_serial_port *port)
{
struct edgeport_port *edge_port = usb_get_serial_port_data(port);
int ret;
ret = tx_active(edge_port);
if (ret > 0)
return false;
return true;
}
static void edge_throttle(struct tty_struct *tty)
{
struct usb_serial_port *port = tty->driver_data;
struct edgeport_port *edge_port = usb_get_serial_port_data(port);
int status;
if (edge_port == NULL)
return;
/* if we are implementing XON/XOFF, send the stop character */
if (I_IXOFF(tty)) {
unsigned char stop_char = STOP_CHAR(tty);
status = edge_write(tty, port, &stop_char, 1);
if (status <= 0) {
dev_err(&port->dev, "%s - failed to write stop character, %d\n", __func__, status);
}
}
/*
* if we are implementing RTS/CTS, stop reads
* and the Edgeport will clear the RTS line
*/
if (C_CRTSCTS(tty))
stop_read(edge_port);
}
static void edge_unthrottle(struct tty_struct *tty)
{
struct usb_serial_port *port = tty->driver_data;
struct edgeport_port *edge_port = usb_get_serial_port_data(port);
int status;
if (edge_port == NULL)
return;
/* if we are implementing XON/XOFF, send the start character */
if (I_IXOFF(tty)) {
unsigned char start_char = START_CHAR(tty);
status = edge_write(tty, port, &start_char, 1);
if (status <= 0) {
dev_err(&port->dev, "%s - failed to write start character, %d\n", __func__, status);
}
}
/*
* if we are implementing RTS/CTS, restart reads
 * and the Edgeport will assert the RTS line
*/
if (C_CRTSCTS(tty)) {
status = restart_read(edge_port);
if (status)
dev_err(&port->dev,
"%s - read bulk usb_submit_urb failed: %d\n",
__func__, status);
}
}
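/* Mark the read urb as stopping and drop RTS in the shadow MCR */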
static void stop_read(struct edgeport_port *edge_port)
{
unsigned long flags;
spin_lock_irqsave(&edge_port->ep_lock, flags);
if (edge_port->ep_read_urb_state == EDGE_READ_URB_RUNNING)
edge_port->ep_read_urb_state = EDGE_READ_URB_STOPPING;
edge_port->shadow_mcr &= ~MCR_RTS;
spin_unlock_irqrestore(&edge_port->ep_lock, flags);
}
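/* Resubmit the read urb if it was stopped and reassert RTS in the shadow MCR */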
static int restart_read(struct edgeport_port *edge_port)
{
struct urb *urb;
int status = 0;
unsigned long flags;
spin_lock_irqsave(&edge_port->ep_lock, flags);
if (edge_port->ep_read_urb_state == EDGE_READ_URB_STOPPED) {
urb = edge_port->port->read_urb;
status = usb_submit_urb(urb, GFP_ATOMIC);
}
edge_port->ep_read_urb_state = EDGE_READ_URB_RUNNING;
edge_port->shadow_mcr |= MCR_RTS;
spin_unlock_irqrestore(&edge_port->ep_lock, flags);
return status;
}
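/* Translate termios settings into a UMP UART config block and send it to the device */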
static void change_port_settings(struct tty_struct *tty,
struct edgeport_port *edge_port, const struct ktermios *old_termios)
{
struct device *dev = &edge_port->port->dev;
struct ump_uart_config *config;
int baud;
unsigned cflag;
int status;
config = kmalloc(sizeof(*config), GFP_KERNEL);
if (!config) {
tty->termios = *old_termios;
return;
}
cflag = tty->termios.c_cflag;
config->wFlags = 0;
/* These flags must be set */
config->wFlags |= UMP_MASK_UART_FLAGS_RECEIVE_MS_INT;
config->wFlags |= UMP_MASK_UART_FLAGS_AUTO_START_ON_ERR;
config->bUartMode = (u8)(edge_port->bUartMode);
switch (cflag & CSIZE) {
case CS5:
config->bDataBits = UMP_UART_CHAR5BITS;
dev_dbg(dev, "%s - data bits = 5\n", __func__);
break;
case CS6:
config->bDataBits = UMP_UART_CHAR6BITS;
dev_dbg(dev, "%s - data bits = 6\n", __func__);
break;
case CS7:
config->bDataBits = UMP_UART_CHAR7BITS;
dev_dbg(dev, "%s - data bits = 7\n", __func__);
break;
default:
case CS8:
config->bDataBits = UMP_UART_CHAR8BITS;
dev_dbg(dev, "%s - data bits = 8\n", __func__);
break;
}
if (cflag & PARENB) {
if (cflag & PARODD) {
config->wFlags |= UMP_MASK_UART_FLAGS_PARITY;
config->bParity = UMP_UART_ODDPARITY;
dev_dbg(dev, "%s - parity = odd\n", __func__);
} else {
config->wFlags |= UMP_MASK_UART_FLAGS_PARITY;
config->bParity = UMP_UART_EVENPARITY;
dev_dbg(dev, "%s - parity = even\n", __func__);
}
} else {
config->bParity = UMP_UART_NOPARITY;
dev_dbg(dev, "%s - parity = none\n", __func__);
}
if (cflag & CSTOPB) {
config->bStopBits = UMP_UART_STOPBIT2;
dev_dbg(dev, "%s - stop bits = 2\n", __func__);
} else {
config->bStopBits = UMP_UART_STOPBIT1;
dev_dbg(dev, "%s - stop bits = 1\n", __func__);
}
/* figure out the flow control settings */
if (cflag & CRTSCTS) {
config->wFlags |= UMP_MASK_UART_FLAGS_OUT_X_CTS_FLOW;
config->wFlags |= UMP_MASK_UART_FLAGS_RTS_FLOW;
dev_dbg(dev, "%s - RTS/CTS is enabled\n", __func__);
} else {
dev_dbg(dev, "%s - RTS/CTS is disabled\n", __func__);
restart_read(edge_port);
}
/*
* if we are implementing XON/XOFF, set the start and stop
* character in the device
*/
config->cXon = START_CHAR(tty);
config->cXoff = STOP_CHAR(tty);
/* if we are implementing INBOUND XON/XOFF */
if (I_IXOFF(tty)) {
config->wFlags |= UMP_MASK_UART_FLAGS_IN_X;
dev_dbg(dev, "%s - INBOUND XON/XOFF is enabled, XON = %2x, XOFF = %2x\n",
__func__, config->cXon, config->cXoff);
} else
dev_dbg(dev, "%s - INBOUND XON/XOFF is disabled\n", __func__);
/* if we are implementing OUTBOUND XON/XOFF */
if (I_IXON(tty)) {
config->wFlags |= UMP_MASK_UART_FLAGS_OUT_X;
dev_dbg(dev, "%s - OUTBOUND XON/XOFF is enabled, XON = %2x, XOFF = %2x\n",
__func__, config->cXon, config->cXoff);
} else
dev_dbg(dev, "%s - OUTBOUND XON/XOFF is disabled\n", __func__);
tty->termios.c_cflag &= ~CMSPAR;
/* Round the baud rate */
baud = tty_get_baud_rate(tty);
if (!baud) {
/* pick a default, any default... */
baud = 9600;
} else {
/* Avoid a zero divisor. */
baud = min(baud, 461550);
tty_encode_baud_rate(tty, baud, baud);
}
edge_port->baud_rate = baud;
config->wBaudRate = (u16)((461550L + baud/2) / baud);
/* FIXME: Recompute actual baud from divisor here */
dev_dbg(dev, "%s - baud rate = %d, wBaudRate = %d\n", __func__, baud, config->wBaudRate);
dev_dbg(dev, "wBaudRate: %d\n", (int)(461550L / config->wBaudRate));
dev_dbg(dev, "wFlags: 0x%x\n", config->wFlags);
dev_dbg(dev, "bDataBits: %d\n", config->bDataBits);
dev_dbg(dev, "bParity: %d\n", config->bParity);
dev_dbg(dev, "bStopBits: %d\n", config->bStopBits);
dev_dbg(dev, "cXon: %d\n", config->cXon);
dev_dbg(dev, "cXoff: %d\n", config->cXoff);
dev_dbg(dev, "bUartMode: %d\n", config->bUartMode);
/* move the word values into big endian mode */
cpu_to_be16s(&config->wFlags);
cpu_to_be16s(&config->wBaudRate);
status = send_port_cmd(edge_port->port, UMPC_SET_CONFIG, 0, config,
sizeof(*config));
if (status)
dev_dbg(dev, "%s - error %d when trying to write config to device\n",
__func__, status);
kfree(config);
}
static void edge_set_termios(struct tty_struct *tty,
struct usb_serial_port *port,
const struct ktermios *old_termios)
{
struct edgeport_port *edge_port = usb_get_serial_port_data(port);
if (edge_port == NULL)
return;
/* change the port settings to the new ones specified */
change_port_settings(tty, edge_port, old_termios);
}
static int edge_tiocmset(struct tty_struct *tty,
unsigned int set, unsigned int clear)
{
struct usb_serial_port *port = tty->driver_data;
struct edgeport_port *edge_port = usb_get_serial_port_data(port);
unsigned int mcr;
unsigned long flags;
spin_lock_irqsave(&edge_port->ep_lock, flags);
mcr = edge_port->shadow_mcr;
if (set & TIOCM_RTS)
mcr |= MCR_RTS;
if (set & TIOCM_DTR)
mcr |= MCR_DTR;
if (set & TIOCM_LOOP)
mcr |= MCR_LOOPBACK;
if (clear & TIOCM_RTS)
mcr &= ~MCR_RTS;
if (clear & TIOCM_DTR)
mcr &= ~MCR_DTR;
if (clear & TIOCM_LOOP)
mcr &= ~MCR_LOOPBACK;
edge_port->shadow_mcr = mcr;
spin_unlock_irqrestore(&edge_port->ep_lock, flags);
restore_mcr(edge_port, mcr);
return 0;
}
static int edge_tiocmget(struct tty_struct *tty)
{
struct usb_serial_port *port = tty->driver_data;
struct edgeport_port *edge_port = usb_get_serial_port_data(port);
unsigned int result = 0;
unsigned int msr;
unsigned int mcr;
unsigned long flags;
spin_lock_irqsave(&edge_port->ep_lock, flags);
msr = edge_port->shadow_msr;
mcr = edge_port->shadow_mcr;
result = ((mcr & MCR_DTR) ? TIOCM_DTR: 0) /* 0x002 */
| ((mcr & MCR_RTS) ? TIOCM_RTS: 0) /* 0x004 */
| ((msr & EDGEPORT_MSR_CTS) ? TIOCM_CTS: 0) /* 0x020 */
| ((msr & EDGEPORT_MSR_CD) ? TIOCM_CAR: 0) /* 0x040 */
| ((msr & EDGEPORT_MSR_RI) ? TIOCM_RI: 0) /* 0x080 */
| ((msr & EDGEPORT_MSR_DSR) ? TIOCM_DSR: 0); /* 0x100 */
dev_dbg(&port->dev, "%s -- %x\n", __func__, result);
spin_unlock_irqrestore(&edge_port->ep_lock, flags);
return result;
}
static int edge_break(struct tty_struct *tty, int break_state)
{
struct usb_serial_port *port = tty->driver_data;
struct edgeport_port *edge_port = usb_get_serial_port_data(port);
int status;
int bv = 0; /* Off */
if (break_state == -1)
bv = 1; /* On */
status = ti_do_config(edge_port, UMPC_SET_CLR_BREAK, bv);
if (status) {
dev_dbg(&port->dev, "%s - error %d sending break set/clear command.\n",
__func__, status);
return status;
}
return 0;
}
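/* (Re)arm the heartbeat that keeps newer firmware from disconnecting an idle device */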
static void edge_heartbeat_schedule(struct edgeport_serial *edge_serial)
{
if (!edge_serial->use_heartbeat)
return;
schedule_delayed_work(&edge_serial->heartbeat_work,
FW_HEARTBEAT_SECS * HZ);
}
static void edge_heartbeat_work(struct work_struct *work)
{
struct edgeport_serial *serial;
struct ti_i2c_desc *rom_desc;
serial = container_of(work, struct edgeport_serial,
heartbeat_work.work);
rom_desc = kmalloc(sizeof(*rom_desc), GFP_KERNEL);
/* Descriptor address request is enough to reset the firmware timer */
if (!rom_desc || !get_descriptor_addr(serial, I2C_DESC_TYPE_ION,
rom_desc)) {
dev_err(&serial->serial->interface->dev,
"%s - Incomplete heartbeat\n", __func__);
}
kfree(rom_desc);
edge_heartbeat_schedule(serial);
}
static int edge_calc_num_ports(struct usb_serial *serial,
struct usb_serial_endpoints *epds)
{
struct device *dev = &serial->interface->dev;
unsigned char num_ports = serial->type->num_ports;
/* Make sure we have the required endpoints when in download mode. */
if (serial->interface->cur_altsetting->desc.bNumEndpoints > 1) {
if (epds->num_bulk_in < num_ports ||
epds->num_bulk_out < num_ports ||
epds->num_interrupt_in < 1) {
dev_err(dev, "required endpoints missing\n");
return -ENODEV;
}
}
return num_ports;
}
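/*
 * attach() callback: set up per-device state and download firmware.
 * A positive return from download_fw() means the device is rebooting
 * or staying in boot mode, so bind without registering any ports.
 */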
static int edge_startup(struct usb_serial *serial)
{
struct edgeport_serial *edge_serial;
int status;
u16 product_id;
/* create our private serial structure */
edge_serial = kzalloc(sizeof(struct edgeport_serial), GFP_KERNEL);
if (!edge_serial)
return -ENOMEM;
mutex_init(&edge_serial->es_lock);
edge_serial->serial = serial;
INIT_DELAYED_WORK(&edge_serial->heartbeat_work, edge_heartbeat_work);
usb_set_serial_data(serial, edge_serial);
status = download_fw(edge_serial);
if (status < 0) {
kfree(edge_serial);
return status;
}
if (status > 0)
return 1; /* bind but do not register any ports */
product_id = le16_to_cpu(
edge_serial->serial->dev->descriptor.idProduct);
/* Currently only the EP/416 models require heartbeat support */
if (edge_serial->fw_version > FW_HEARTBEAT_VERSION_CUTOFF) {
if (product_id == ION_DEVICE_ID_TI_EDGEPORT_416 ||
product_id == ION_DEVICE_ID_TI_EDGEPORT_416B) {
edge_serial->use_heartbeat = true;
}
}
edge_heartbeat_schedule(edge_serial);
return 0;
}
static void edge_disconnect(struct usb_serial *serial)
{
struct edgeport_serial *edge_serial = usb_get_serial_data(serial);
cancel_delayed_work_sync(&edge_serial->heartbeat_work);
}
static void edge_release(struct usb_serial *serial)
{
struct edgeport_serial *edge_serial = usb_get_serial_data(serial);
cancel_delayed_work_sync(&edge_serial->heartbeat_work);
kfree(edge_serial);
}
static int edge_port_probe(struct usb_serial_port *port)
{
struct edgeport_port *edge_port;
int ret;
edge_port = kzalloc(sizeof(*edge_port), GFP_KERNEL);
if (!edge_port)
return -ENOMEM;
spin_lock_init(&edge_port->ep_lock);
edge_port->port = port;
edge_port->edge_serial = usb_get_serial_data(port->serial);
edge_port->bUartMode = default_uart_mode;
switch (port->port_number) {
case 0:
edge_port->uart_base = UMPMEM_BASE_UART1;
edge_port->dma_address = UMPD_OEDB1_ADDRESS;
break;
case 1:
edge_port->uart_base = UMPMEM_BASE_UART2;
edge_port->dma_address = UMPD_OEDB2_ADDRESS;
break;
default:
dev_err(&port->dev, "unknown port number\n");
ret = -ENODEV;
goto err;
}
dev_dbg(&port->dev,
"%s - port_number = %d, uart_base = %04x, dma_address = %04x\n",
__func__, port->port_number, edge_port->uart_base,
edge_port->dma_address);
usb_set_serial_port_data(port, edge_port);
ret = edge_create_sysfs_attrs(port);
if (ret)
goto err;
/*
* The LSR does not tell when the transmitter shift register has
* emptied so add a one-character drain delay.
*/
port->port.drain_delay = 1;
return 0;
err:
kfree(edge_port);
return ret;
}
static void edge_port_remove(struct usb_serial_port *port)
{
struct edgeport_port *edge_port;
edge_port = usb_get_serial_port_data(port);
edge_remove_sysfs_attrs(port);
kfree(edge_port);
}
/* Sysfs Attributes */
static ssize_t uart_mode_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct usb_serial_port *port = to_usb_serial_port(dev);
struct edgeport_port *edge_port = usb_get_serial_port_data(port);
return sprintf(buf, "%d\n", edge_port->bUartMode);
}
static ssize_t uart_mode_store(struct device *dev,
struct device_attribute *attr, const char *valbuf, size_t count)
{
struct usb_serial_port *port = to_usb_serial_port(dev);
struct edgeport_port *edge_port = usb_get_serial_port_data(port);
unsigned int v = simple_strtoul(valbuf, NULL, 0);
dev_dbg(dev, "%s: setting uart_mode = %d\n", __func__, v);
if (v < 256)
edge_port->bUartMode = v;
else
dev_err(dev, "%s - uart_mode %d is invalid\n", __func__, v);
return count;
}
static DEVICE_ATTR_RW(uart_mode);
static int edge_create_sysfs_attrs(struct usb_serial_port *port)
{
return device_create_file(&port->dev, &dev_attr_uart_mode);
}
static int edge_remove_sysfs_attrs(struct usb_serial_port *port)
{
device_remove_file(&port->dev, &dev_attr_uart_mode);
return 0;
}
#ifdef CONFIG_PM
static int edge_suspend(struct usb_serial *serial, pm_message_t message)
{
struct edgeport_serial *edge_serial = usb_get_serial_data(serial);
cancel_delayed_work_sync(&edge_serial->heartbeat_work);
return 0;
}
static int edge_resume(struct usb_serial *serial)
{
struct edgeport_serial *edge_serial = usb_get_serial_data(serial);
edge_heartbeat_schedule(edge_serial);
return 0;
}
#endif
static struct usb_serial_driver edgeport_1port_device = {
.driver = {
.owner = THIS_MODULE,
.name = "edgeport_ti_1",
},
.description = "Edgeport TI 1 port adapter",
.id_table = edgeport_1port_id_table,
.num_ports = 1,
.num_bulk_out = 1,
.open = edge_open,
.close = edge_close,
.throttle = edge_throttle,
.unthrottle = edge_unthrottle,
.attach = edge_startup,
.calc_num_ports = edge_calc_num_ports,
.disconnect = edge_disconnect,
.release = edge_release,
.port_probe = edge_port_probe,
.port_remove = edge_port_remove,
.set_termios = edge_set_termios,
.tiocmget = edge_tiocmget,
.tiocmset = edge_tiocmset,
.tiocmiwait = usb_serial_generic_tiocmiwait,
.get_icount = usb_serial_generic_get_icount,
.write = edge_write,
.write_room = edge_write_room,
.chars_in_buffer = edge_chars_in_buffer,
.tx_empty = edge_tx_empty,
.break_ctl = edge_break,
.read_int_callback = edge_interrupt_callback,
.read_bulk_callback = edge_bulk_in_callback,
.write_bulk_callback = edge_bulk_out_callback,
#ifdef CONFIG_PM
.suspend = edge_suspend,
.resume = edge_resume,
#endif
};
static struct usb_serial_driver edgeport_2port_device = {
.driver = {
.owner = THIS_MODULE,
.name = "edgeport_ti_2",
},
.description = "Edgeport TI 2 port adapter",
.id_table = edgeport_2port_id_table,
.num_ports = 2,
.num_bulk_out = 1,
.open = edge_open,
.close = edge_close,
.throttle = edge_throttle,
.unthrottle = edge_unthrottle,
.attach = edge_startup,
.calc_num_ports = edge_calc_num_ports,
.disconnect = edge_disconnect,
.release = edge_release,
.port_probe = edge_port_probe,
.port_remove = edge_port_remove,
.set_termios = edge_set_termios,
.tiocmget = edge_tiocmget,
.tiocmset = edge_tiocmset,
.tiocmiwait = usb_serial_generic_tiocmiwait,
.get_icount = usb_serial_generic_get_icount,
.write = edge_write,
.write_room = edge_write_room,
.chars_in_buffer = edge_chars_in_buffer,
.tx_empty = edge_tx_empty,
.break_ctl = edge_break,
.read_int_callback = edge_interrupt_callback,
.read_bulk_callback = edge_bulk_in_callback,
.write_bulk_callback = edge_bulk_out_callback,
#ifdef CONFIG_PM
.suspend = edge_suspend,
.resume = edge_resume,
#endif
};
static struct usb_serial_driver * const serial_drivers[] = {
&edgeport_1port_device, &edgeport_2port_device, NULL
};
module_usb_serial_driver(serial_drivers, id_table_combined);
MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL");
MODULE_FIRMWARE("edgeport/down3.bin");
module_param(ignore_cpu_rev, bool, 0644);
MODULE_PARM_DESC(ignore_cpu_rev,
"Ignore the cpu revision when connecting to a device");
module_param(default_uart_mode, int, 0644);
MODULE_PARM_DESC(default_uart_mode, "Default uart_mode, 0=RS232, ...");
| linux-master | drivers/usb/serial/io_ti.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* USB Compaq iPAQ driver
*
* Copyright (C) 2001 - 2002
* Ganesh Varadarajan <[email protected]>
*/
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/tty.h>
#include <linux/tty_driver.h>
#include <linux/tty_flip.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/uaccess.h>
#include <linux/usb.h>
#include <linux/usb/serial.h>
#define KP_RETRIES 100
#define DRIVER_AUTHOR "Ganesh Varadarajan <[email protected]>"
#define DRIVER_DESC "USB PocketPC PDA driver"
static int connect_retries = KP_RETRIES;
static int initial_wait;
/* Function prototypes for an ipaq */
static int ipaq_open(struct tty_struct *tty,
struct usb_serial_port *port);
static int ipaq_calc_num_ports(struct usb_serial *serial,
struct usb_serial_endpoints *epds);
static int ipaq_startup(struct usb_serial *serial);
static const struct usb_device_id ipaq_id_table[] = {
{ USB_DEVICE(0x0104, 0x00BE) }, /* Socket USB Sync */
{ USB_DEVICE(0x03F0, 0x1016) }, /* HP USB Sync */
{ USB_DEVICE(0x03F0, 0x1116) }, /* HP USB Sync 1611 */
{ USB_DEVICE(0x03F0, 0x1216) }, /* HP USB Sync 1612 */
{ USB_DEVICE(0x03F0, 0x2016) }, /* HP USB Sync 1620 */
{ USB_DEVICE(0x03F0, 0x2116) }, /* HP USB Sync 1621 */
{ USB_DEVICE(0x03F0, 0x2216) }, /* HP USB Sync 1622 */
{ USB_DEVICE(0x03F0, 0x3016) }, /* HP USB Sync 1630 */
{ USB_DEVICE(0x03F0, 0x3116) }, /* HP USB Sync 1631 */
{ USB_DEVICE(0x03F0, 0x3216) }, /* HP USB Sync 1632 */
{ USB_DEVICE(0x03F0, 0x4016) }, /* HP USB Sync 1640 */
{ USB_DEVICE(0x03F0, 0x4116) }, /* HP USB Sync 1641 */
{ USB_DEVICE(0x03F0, 0x4216) }, /* HP USB Sync 1642 */
{ USB_DEVICE(0x03F0, 0x5016) }, /* HP USB Sync 1650 */
{ USB_DEVICE(0x03F0, 0x5116) }, /* HP USB Sync 1651 */
{ USB_DEVICE(0x03F0, 0x5216) }, /* HP USB Sync 1652 */
{ USB_DEVICE(0x0409, 0x00D5) }, /* NEC USB Sync */
{ USB_DEVICE(0x0409, 0x00D6) }, /* NEC USB Sync */
{ USB_DEVICE(0x0409, 0x00D7) }, /* NEC USB Sync */
{ USB_DEVICE(0x0409, 0x8024) }, /* NEC USB Sync */
{ USB_DEVICE(0x0409, 0x8025) }, /* NEC USB Sync */
{ USB_DEVICE(0x043E, 0x9C01) }, /* LGE USB Sync */
{ USB_DEVICE(0x045E, 0x00CE) }, /* Microsoft USB Sync */
{ USB_DEVICE(0x045E, 0x0400) }, /* Windows Powered Pocket PC 2002 */
{ USB_DEVICE(0x045E, 0x0401) }, /* Windows Powered Pocket PC 2002 */
{ USB_DEVICE(0x045E, 0x0402) }, /* Windows Powered Pocket PC 2002 */
{ USB_DEVICE(0x045E, 0x0403) }, /* Windows Powered Pocket PC 2002 */
{ USB_DEVICE(0x045E, 0x0404) }, /* Windows Powered Pocket PC 2002 */
{ USB_DEVICE(0x045E, 0x0405) }, /* Windows Powered Pocket PC 2002 */
{ USB_DEVICE(0x045E, 0x0406) }, /* Windows Powered Pocket PC 2002 */
{ USB_DEVICE(0x045E, 0x0407) }, /* Windows Powered Pocket PC 2002 */
{ USB_DEVICE(0x045E, 0x0408) }, /* Windows Powered Pocket PC 2002 */
{ USB_DEVICE(0x045E, 0x0409) }, /* Windows Powered Pocket PC 2002 */
{ USB_DEVICE(0x045E, 0x040A) }, /* Windows Powered Pocket PC 2002 */
{ USB_DEVICE(0x045E, 0x040B) }, /* Windows Powered Pocket PC 2002 */
{ USB_DEVICE(0x045E, 0x040C) }, /* Windows Powered Pocket PC 2002 */
{ USB_DEVICE(0x045E, 0x040D) }, /* Windows Powered Pocket PC 2002 */
{ USB_DEVICE(0x045E, 0x040E) }, /* Windows Powered Pocket PC 2002 */
{ USB_DEVICE(0x045E, 0x040F) }, /* Windows Powered Pocket PC 2002 */
{ USB_DEVICE(0x045E, 0x0410) }, /* Windows Powered Pocket PC 2002 */
{ USB_DEVICE(0x045E, 0x0411) }, /* Windows Powered Pocket PC 2002 */
{ USB_DEVICE(0x045E, 0x0412) }, /* Windows Powered Pocket PC 2002 */
{ USB_DEVICE(0x045E, 0x0413) }, /* Windows Powered Pocket PC 2002 */
{ USB_DEVICE(0x045E, 0x0414) }, /* Windows Powered Pocket PC 2002 */
{ USB_DEVICE(0x045E, 0x0415) }, /* Windows Powered Pocket PC 2002 */
{ USB_DEVICE(0x045E, 0x0416) }, /* Windows Powered Pocket PC 2002 */
{ USB_DEVICE(0x045E, 0x0417) }, /* Windows Powered Pocket PC 2002 */
{ USB_DEVICE(0x045E, 0x0432) }, /* Windows Powered Pocket PC 2003 */
{ USB_DEVICE(0x045E, 0x0433) }, /* Windows Powered Pocket PC 2003 */
{ USB_DEVICE(0x045E, 0x0434) }, /* Windows Powered Pocket PC 2003 */
{ USB_DEVICE(0x045E, 0x0435) }, /* Windows Powered Pocket PC 2003 */
{ USB_DEVICE(0x045E, 0x0436) }, /* Windows Powered Pocket PC 2003 */
{ USB_DEVICE(0x045E, 0x0437) }, /* Windows Powered Pocket PC 2003 */
{ USB_DEVICE(0x045E, 0x0438) }, /* Windows Powered Pocket PC 2003 */
{ USB_DEVICE(0x045E, 0x0439) }, /* Windows Powered Pocket PC 2003 */
{ USB_DEVICE(0x045E, 0x043A) }, /* Windows Powered Pocket PC 2003 */
{ USB_DEVICE(0x045E, 0x043B) }, /* Windows Powered Pocket PC 2003 */
{ USB_DEVICE(0x045E, 0x043C) }, /* Windows Powered Pocket PC 2003 */
{ USB_DEVICE(0x045E, 0x043D) }, /* Windows Powered Pocket PC 2003 */
{ USB_DEVICE(0x045E, 0x043E) }, /* Windows Powered Pocket PC 2003 */
{ USB_DEVICE(0x045E, 0x043F) }, /* Windows Powered Pocket PC 2003 */
{ USB_DEVICE(0x045E, 0x0440) }, /* Windows Powered Pocket PC 2003 */
{ USB_DEVICE(0x045E, 0x0441) }, /* Windows Powered Pocket PC 2003 */
{ USB_DEVICE(0x045E, 0x0442) }, /* Windows Powered Pocket PC 2003 */
{ USB_DEVICE(0x045E, 0x0443) }, /* Windows Powered Pocket PC 2003 */
{ USB_DEVICE(0x045E, 0x0444) }, /* Windows Powered Pocket PC 2003 */
{ USB_DEVICE(0x045E, 0x0445) }, /* Windows Powered Pocket PC 2003 */
{ USB_DEVICE(0x045E, 0x0446) }, /* Windows Powered Pocket PC 2003 */
{ USB_DEVICE(0x045E, 0x0447) }, /* Windows Powered Pocket PC 2003 */
{ USB_DEVICE(0x045E, 0x0448) }, /* Windows Powered Pocket PC 2003 */
{ USB_DEVICE(0x045E, 0x0449) }, /* Windows Powered Pocket PC 2003 */
{ USB_DEVICE(0x045E, 0x044A) }, /* Windows Powered Pocket PC 2003 */
{ USB_DEVICE(0x045E, 0x044B) }, /* Windows Powered Pocket PC 2003 */
{ USB_DEVICE(0x045E, 0x044C) }, /* Windows Powered Pocket PC 2003 */
{ USB_DEVICE(0x045E, 0x044D) }, /* Windows Powered Pocket PC 2003 */
{ USB_DEVICE(0x045E, 0x044E) }, /* Windows Powered Pocket PC 2003 */
{ USB_DEVICE(0x045E, 0x044F) }, /* Windows Powered Pocket PC 2003 */
{ USB_DEVICE(0x045E, 0x0450) }, /* Windows Powered Pocket PC 2003 */
{ USB_DEVICE(0x045E, 0x0451) }, /* Windows Powered Pocket PC 2003 */
{ USB_DEVICE(0x045E, 0x0452) }, /* Windows Powered Pocket PC 2003 */
{ USB_DEVICE(0x045E, 0x0453) }, /* Windows Powered Pocket PC 2003 */
{ USB_DEVICE(0x045E, 0x0454) }, /* Windows Powered Pocket PC 2003 */
{ USB_DEVICE(0x045E, 0x0455) }, /* Windows Powered Pocket PC 2003 */
{ USB_DEVICE(0x045E, 0x0456) }, /* Windows Powered Pocket PC 2003 */
{ USB_DEVICE(0x045E, 0x0457) }, /* Windows Powered Pocket PC 2003 */
{ USB_DEVICE(0x045E, 0x0458) }, /* Windows Powered Pocket PC 2003 */
{ USB_DEVICE(0x045E, 0x0459) }, /* Windows Powered Pocket PC 2003 */
{ USB_DEVICE(0x045E, 0x045A) }, /* Windows Powered Pocket PC 2003 */
{ USB_DEVICE(0x045E, 0x045B) }, /* Windows Powered Pocket PC 2003 */
{ USB_DEVICE(0x045E, 0x045C) }, /* Windows Powered Pocket PC 2003 */
{ USB_DEVICE(0x045E, 0x045D) }, /* Windows Powered Pocket PC 2003 */
{ USB_DEVICE(0x045E, 0x045E) }, /* Windows Powered Pocket PC 2003 */
{ USB_DEVICE(0x045E, 0x045F) }, /* Windows Powered Pocket PC 2003 */
{ USB_DEVICE(0x045E, 0x0460) }, /* Windows Powered Pocket PC 2003 */
{ USB_DEVICE(0x045E, 0x0461) }, /* Windows Powered Pocket PC 2003 */
{ USB_DEVICE(0x045E, 0x0462) }, /* Windows Powered Pocket PC 2003 */
{ USB_DEVICE(0x045E, 0x0463) }, /* Windows Powered Pocket PC 2003 */
{ USB_DEVICE(0x045E, 0x0464) }, /* Windows Powered Pocket PC 2003 */
{ USB_DEVICE(0x045E, 0x0465) }, /* Windows Powered Pocket PC 2003 */
{ USB_DEVICE(0x045E, 0x0466) }, /* Windows Powered Pocket PC 2003 */
{ USB_DEVICE(0x045E, 0x0467) }, /* Windows Powered Pocket PC 2003 */
{ USB_DEVICE(0x045E, 0x0468) }, /* Windows Powered Pocket PC 2003 */
{ USB_DEVICE(0x045E, 0x0469) }, /* Windows Powered Pocket PC 2003 */
{ USB_DEVICE(0x045E, 0x046A) }, /* Windows Powered Pocket PC 2003 */
{ USB_DEVICE(0x045E, 0x046B) }, /* Windows Powered Pocket PC 2003 */
{ USB_DEVICE(0x045E, 0x046C) }, /* Windows Powered Pocket PC 2003 */
{ USB_DEVICE(0x045E, 0x046D) }, /* Windows Powered Pocket PC 2003 */
{ USB_DEVICE(0x045E, 0x046E) }, /* Windows Powered Pocket PC 2003 */
{ USB_DEVICE(0x045E, 0x046F) }, /* Windows Powered Pocket PC 2003 */
{ USB_DEVICE(0x045E, 0x0470) }, /* Windows Powered Pocket PC 2003 */
{ USB_DEVICE(0x045E, 0x0471) }, /* Windows Powered Pocket PC 2003 */
{ USB_DEVICE(0x045E, 0x0472) }, /* Windows Powered Pocket PC 2003 */
{ USB_DEVICE(0x045E, 0x0473) }, /* Windows Powered Pocket PC 2003 */
{ USB_DEVICE(0x045E, 0x0474) }, /* Windows Powered Pocket PC 2003 */
{ USB_DEVICE(0x045E, 0x0475) }, /* Windows Powered Pocket PC 2003 */
{ USB_DEVICE(0x045E, 0x0476) }, /* Windows Powered Pocket PC 2003 */
{ USB_DEVICE(0x045E, 0x0477) }, /* Windows Powered Pocket PC 2003 */
{ USB_DEVICE(0x045E, 0x0478) }, /* Windows Powered Pocket PC 2003 */
{ USB_DEVICE(0x045E, 0x0479) }, /* Windows Powered Pocket PC 2003 */
{ USB_DEVICE(0x045E, 0x047A) }, /* Windows Powered Pocket PC 2003 */
{ USB_DEVICE(0x045E, 0x047B) }, /* Windows Powered Pocket PC 2003 */
{ USB_DEVICE(0x045E, 0x04C8) }, /* Windows Powered Smartphone 2002 */
{ USB_DEVICE(0x045E, 0x04C9) }, /* Windows Powered Smartphone 2002 */
{ USB_DEVICE(0x045E, 0x04CA) }, /* Windows Powered Smartphone 2002 */
{ USB_DEVICE(0x045E, 0x04CB) }, /* Windows Powered Smartphone 2002 */
{ USB_DEVICE(0x045E, 0x04CC) }, /* Windows Powered Smartphone 2002 */
{ USB_DEVICE(0x045E, 0x04CD) }, /* Windows Powered Smartphone 2002 */
{ USB_DEVICE(0x045E, 0x04CE) }, /* Windows Powered Smartphone 2002 */
{ USB_DEVICE(0x045E, 0x04D7) }, /* Windows Powered Smartphone 2003 */
{ USB_DEVICE(0x045E, 0x04D8) }, /* Windows Powered Smartphone 2003 */
{ USB_DEVICE(0x045E, 0x04D9) }, /* Windows Powered Smartphone 2003 */
{ USB_DEVICE(0x045E, 0x04DA) }, /* Windows Powered Smartphone 2003 */
{ USB_DEVICE(0x045E, 0x04DB) }, /* Windows Powered Smartphone 2003 */
{ USB_DEVICE(0x045E, 0x04DC) }, /* Windows Powered Smartphone 2003 */
{ USB_DEVICE(0x045E, 0x04DD) }, /* Windows Powered Smartphone 2003 */
{ USB_DEVICE(0x045E, 0x04DE) }, /* Windows Powered Smartphone 2003 */
{ USB_DEVICE(0x045E, 0x04DF) }, /* Windows Powered Smartphone 2003 */
{ USB_DEVICE(0x045E, 0x04E0) }, /* Windows Powered Smartphone 2003 */
{ USB_DEVICE(0x045E, 0x04E1) }, /* Windows Powered Smartphone 2003 */
{ USB_DEVICE(0x045E, 0x04E2) }, /* Windows Powered Smartphone 2003 */
{ USB_DEVICE(0x045E, 0x04E3) }, /* Windows Powered Smartphone 2003 */
{ USB_DEVICE(0x045E, 0x04E4) }, /* Windows Powered Smartphone 2003 */
{ USB_DEVICE(0x045E, 0x04E5) }, /* Windows Powered Smartphone 2003 */
{ USB_DEVICE(0x045E, 0x04E6) }, /* Windows Powered Smartphone 2003 */
{ USB_DEVICE(0x045E, 0x04E7) }, /* Windows Powered Smartphone 2003 */
{ USB_DEVICE(0x045E, 0x04E8) }, /* Windows Powered Smartphone 2003 */
{ USB_DEVICE(0x045E, 0x04E9) }, /* Windows Powered Smartphone 2003 */
{ USB_DEVICE(0x045E, 0x04EA) }, /* Windows Powered Smartphone 2003 */
{ USB_DEVICE(0x049F, 0x0003) }, /* Compaq iPAQ USB Sync */
{ USB_DEVICE(0x049F, 0x0032) }, /* Compaq iPAQ USB Sync */
{ USB_DEVICE(0x04A4, 0x0014) }, /* Hitachi USB Sync */
{ USB_DEVICE(0x04AD, 0x0301) }, /* USB Sync 0301 */
{ USB_DEVICE(0x04AD, 0x0302) }, /* USB Sync 0302 */
{ USB_DEVICE(0x04AD, 0x0303) }, /* USB Sync 0303 */
{ USB_DEVICE(0x04AD, 0x0306) }, /* GPS Pocket PC USB Sync */
{ USB_DEVICE(0x04B7, 0x0531) }, /* MyGuide 7000 XL USB Sync */
{ USB_DEVICE(0x04C5, 0x1058) }, /* FUJITSU USB Sync */
{ USB_DEVICE(0x04C5, 0x1079) }, /* FUJITSU USB Sync */
{ USB_DEVICE(0x04DA, 0x2500) }, /* Panasonic USB Sync */
{ USB_DEVICE(0x04DD, 0x9102) }, /* SHARP WS003SH USB Modem */
{ USB_DEVICE(0x04DD, 0x9121) }, /* SHARP WS004SH USB Modem */
{ USB_DEVICE(0x04DD, 0x9123) }, /* SHARP WS007SH USB Modem */
{ USB_DEVICE(0x04DD, 0x9151) }, /* SHARP S01SH USB Modem */
{ USB_DEVICE(0x04DD, 0x91AC) }, /* SHARP WS011SH USB Modem */
{ USB_DEVICE(0x04E8, 0x5F00) }, /* Samsung NEXiO USB Sync */
{ USB_DEVICE(0x04E8, 0x5F01) }, /* Samsung NEXiO USB Sync */
{ USB_DEVICE(0x04E8, 0x5F02) }, /* Samsung NEXiO USB Sync */
{ USB_DEVICE(0x04E8, 0x5F03) }, /* Samsung NEXiO USB Sync */
{ USB_DEVICE(0x04E8, 0x5F04) }, /* Samsung NEXiO USB Sync */
{ USB_DEVICE(0x04E8, 0x6611) }, /* Samsung MITs USB Sync */
{ USB_DEVICE(0x04E8, 0x6613) }, /* Samsung MITs USB Sync */
{ USB_DEVICE(0x04E8, 0x6615) }, /* Samsung MITs USB Sync */
{ USB_DEVICE(0x04E8, 0x6617) }, /* Samsung MITs USB Sync */
{ USB_DEVICE(0x04E8, 0x6619) }, /* Samsung MITs USB Sync */
{ USB_DEVICE(0x04E8, 0x661B) }, /* Samsung MITs USB Sync */
{ USB_DEVICE(0x04E8, 0x662E) }, /* Samsung MITs USB Sync */
{ USB_DEVICE(0x04E8, 0x6630) }, /* Samsung MITs USB Sync */
{ USB_DEVICE(0x04E8, 0x6632) }, /* Samsung MITs USB Sync */
{ USB_DEVICE(0x04f1, 0x3011) }, /* JVC USB Sync */
{ USB_DEVICE(0x04F1, 0x3012) }, /* JVC USB Sync */
{ USB_DEVICE(0x0502, 0x1631) }, /* c10 Series */
{ USB_DEVICE(0x0502, 0x1632) }, /* c20 Series */
{ USB_DEVICE(0x0502, 0x16E1) }, /* Acer n10 Handheld USB Sync */
{ USB_DEVICE(0x0502, 0x16E2) }, /* Acer n20 Handheld USB Sync */
{ USB_DEVICE(0x0502, 0x16E3) }, /* Acer n30 Handheld USB Sync */
{ USB_DEVICE(0x0536, 0x01A0) }, /* HHP PDT */
{ USB_DEVICE(0x0543, 0x0ED9) }, /* ViewSonic Color Pocket PC V35 */
{ USB_DEVICE(0x0543, 0x1527) }, /* ViewSonic Color Pocket PC V36 */
{ USB_DEVICE(0x0543, 0x1529) }, /* ViewSonic Color Pocket PC V37 */
{ USB_DEVICE(0x0543, 0x152B) }, /* ViewSonic Color Pocket PC V38 */
{ USB_DEVICE(0x0543, 0x152E) }, /* ViewSonic Pocket PC */
{ USB_DEVICE(0x0543, 0x1921) }, /* ViewSonic Communicator Pocket PC */
{ USB_DEVICE(0x0543, 0x1922) }, /* ViewSonic Smartphone */
{ USB_DEVICE(0x0543, 0x1923) }, /* ViewSonic Pocket PC V30 */
{ USB_DEVICE(0x05E0, 0x2000) }, /* Symbol USB Sync */
{ USB_DEVICE(0x05E0, 0x2001) }, /* Symbol USB Sync 0x2001 */
{ USB_DEVICE(0x05E0, 0x2002) }, /* Symbol USB Sync 0x2002 */
{ USB_DEVICE(0x05E0, 0x2003) }, /* Symbol USB Sync 0x2003 */
{ USB_DEVICE(0x05E0, 0x2004) }, /* Symbol USB Sync 0x2004 */
{ USB_DEVICE(0x05E0, 0x2005) }, /* Symbol USB Sync 0x2005 */
{ USB_DEVICE(0x05E0, 0x2006) }, /* Symbol USB Sync 0x2006 */
{ USB_DEVICE(0x05E0, 0x2007) }, /* Symbol USB Sync 0x2007 */
{ USB_DEVICE(0x05E0, 0x2008) }, /* Symbol USB Sync 0x2008 */
{ USB_DEVICE(0x05E0, 0x2009) }, /* Symbol USB Sync 0x2009 */
{ USB_DEVICE(0x05E0, 0x200A) }, /* Symbol USB Sync 0x200A */
{ USB_DEVICE(0x067E, 0x1001) }, /* Intermec Mobile Computer */
{ USB_DEVICE(0x07CF, 0x2001) }, /* CASIO USB Sync 2001 */
{ USB_DEVICE(0x07CF, 0x2002) }, /* CASIO USB Sync 2002 */
{ USB_DEVICE(0x07CF, 0x2003) }, /* CASIO USB Sync 2003 */
{ USB_DEVICE(0x0930, 0x0700) }, /* TOSHIBA USB Sync 0700 */
{ USB_DEVICE(0x0930, 0x0705) }, /* TOSHIBA Pocket PC e310 */
{ USB_DEVICE(0x0930, 0x0706) }, /* TOSHIBA Pocket PC e740 */
{ USB_DEVICE(0x0930, 0x0707) }, /* TOSHIBA Pocket PC e330 Series */
{ USB_DEVICE(0x0930, 0x0708) }, /* TOSHIBA Pocket PC e350 Series */
{ USB_DEVICE(0x0930, 0x0709) }, /* TOSHIBA Pocket PC e750 Series */
{ USB_DEVICE(0x0930, 0x070A) }, /* TOSHIBA Pocket PC e400 Series */
{ USB_DEVICE(0x0930, 0x070B) }, /* TOSHIBA Pocket PC e800 Series */
{ USB_DEVICE(0x094B, 0x0001) }, /* Linkup Systems USB Sync */
{ USB_DEVICE(0x0960, 0x0065) }, /* BCOM USB Sync 0065 */
{ USB_DEVICE(0x0960, 0x0066) }, /* BCOM USB Sync 0066 */
{ USB_DEVICE(0x0960, 0x0067) }, /* BCOM USB Sync 0067 */
{ USB_DEVICE(0x0961, 0x0010) }, /* Portatec USB Sync */
{ USB_DEVICE(0x099E, 0x0052) }, /* Trimble GeoExplorer */
{ USB_DEVICE(0x099E, 0x4000) }, /* TDS Data Collector */
{ USB_DEVICE(0x0B05, 0x4200) }, /* ASUS USB Sync */
{ USB_DEVICE(0x0B05, 0x4201) }, /* ASUS USB Sync */
{ USB_DEVICE(0x0B05, 0x4202) }, /* ASUS USB Sync */
{ USB_DEVICE(0x0B05, 0x420F) }, /* ASUS USB Sync */
{ USB_DEVICE(0x0B05, 0x9200) }, /* ASUS USB Sync */
{ USB_DEVICE(0x0B05, 0x9202) }, /* ASUS USB Sync */
{ USB_DEVICE(0x0BB4, 0x00CE) }, /* HTC USB Sync */
{ USB_DEVICE(0x0BB4, 0x00CF) }, /* HTC USB Modem */
{ USB_DEVICE(0x0BB4, 0x0A01) }, /* PocketPC USB Sync */
{ USB_DEVICE(0x0BB4, 0x0A02) }, /* PocketPC USB Sync */
{ USB_DEVICE(0x0BB4, 0x0A03) }, /* PocketPC USB Sync */
{ USB_DEVICE(0x0BB4, 0x0A04) }, /* PocketPC USB Sync */
{ USB_DEVICE(0x0BB4, 0x0A05) }, /* PocketPC USB Sync */
{ USB_DEVICE(0x0BB4, 0x0A06) }, /* PocketPC USB Sync */
{ USB_DEVICE(0x0BB4, 0x0A07) }, /* PocketPC USB Sync */
{ USB_DEVICE(0x0BB4, 0x0A08) }, /* PocketPC USB Sync */
{ USB_DEVICE(0x0BB4, 0x0A09) }, /* PocketPC USB Sync */
{ USB_DEVICE(0x0BB4, 0x0A0A) }, /* PocketPC USB Sync */
{ USB_DEVICE(0x0BB4, 0x0A0B) }, /* PocketPC USB Sync */
{ USB_DEVICE(0x0BB4, 0x0A0C) }, /* PocketPC USB Sync */
{ USB_DEVICE(0x0BB4, 0x0A0D) }, /* PocketPC USB Sync */
{ USB_DEVICE(0x0BB4, 0x0A0E) }, /* PocketPC USB Sync */
{ USB_DEVICE(0x0BB4, 0x0A0F) }, /* PocketPC USB Sync */
{ USB_DEVICE(0x0BB4, 0x0A10) }, /* PocketPC USB Sync */
{ USB_DEVICE(0x0BB4, 0x0A11) }, /* PocketPC USB Sync */
{ USB_DEVICE(0x0BB4, 0x0A12) }, /* PocketPC USB Sync */
{ USB_DEVICE(0x0BB4, 0x0A13) }, /* PocketPC USB Sync */
{ USB_DEVICE(0x0BB4, 0x0A14) }, /* PocketPC USB Sync */
{ USB_DEVICE(0x0BB4, 0x0A15) }, /* PocketPC USB Sync */
{ USB_DEVICE(0x0BB4, 0x0A16) }, /* PocketPC USB Sync */
{ USB_DEVICE(0x0BB4, 0x0A17) }, /* PocketPC USB Sync */
{ USB_DEVICE(0x0BB4, 0x0A18) }, /* PocketPC USB Sync */
{ USB_DEVICE(0x0BB4, 0x0A19) }, /* PocketPC USB Sync */
{ USB_DEVICE(0x0BB4, 0x0A1A) }, /* PocketPC USB Sync */
{ USB_DEVICE(0x0BB4, 0x0A1B) }, /* PocketPC USB Sync */
{ USB_DEVICE(0x0BB4, 0x0A1C) }, /* PocketPC USB Sync */
{ USB_DEVICE(0x0BB4, 0x0A1D) }, /* PocketPC USB Sync */
{ USB_DEVICE(0x0BB4, 0x0A1E) }, /* PocketPC USB Sync */
{ USB_DEVICE(0x0BB4, 0x0A1F) }, /* PocketPC USB Sync */
{ USB_DEVICE(0x0BB4, 0x0A20) }, /* PocketPC USB Sync */
{ USB_DEVICE(0x0BB4, 0x0A21) }, /* PocketPC USB Sync */
{ USB_DEVICE(0x0BB4, 0x0A22) }, /* PocketPC USB Sync */
{ USB_DEVICE(0x0BB4, 0x0A23) }, /* PocketPC USB Sync */
{ USB_DEVICE(0x0BB4, 0x0A24) }, /* PocketPC USB Sync */
{ USB_DEVICE(0x0BB4, 0x0A25) }, /* PocketPC USB Sync */
{ USB_DEVICE(0x0BB4, 0x0A26) }, /* PocketPC USB Sync */
{ USB_DEVICE(0x0BB4, 0x0A27) }, /* PocketPC USB Sync */
{ USB_DEVICE(0x0BB4, 0x0A28) }, /* PocketPC USB Sync */
{ USB_DEVICE(0x0BB4, 0x0A29) }, /* PocketPC USB Sync */
{ USB_DEVICE(0x0BB4, 0x0A2A) }, /* PocketPC USB Sync */
{ USB_DEVICE(0x0BB4, 0x0A2B) }, /* PocketPC USB Sync */
{ USB_DEVICE(0x0BB4, 0x0A2C) }, /* PocketPC USB Sync */
{ USB_DEVICE(0x0BB4, 0x0A2D) }, /* PocketPC USB Sync */
{ USB_DEVICE(0x0BB4, 0x0A2E) }, /* PocketPC USB Sync */
{ USB_DEVICE(0x0BB4, 0x0A2F) }, /* PocketPC USB Sync */
{ USB_DEVICE(0x0BB4, 0x0A30) }, /* PocketPC USB Sync */
{ USB_DEVICE(0x0BB4, 0x0A31) }, /* PocketPC USB Sync */
{ USB_DEVICE(0x0BB4, 0x0A32) }, /* PocketPC USB Sync */
{ USB_DEVICE(0x0BB4, 0x0A33) }, /* PocketPC USB Sync */
{ USB_DEVICE(0x0BB4, 0x0A34) }, /* PocketPC USB Sync */
{ USB_DEVICE(0x0BB4, 0x0A35) }, /* PocketPC USB Sync */
{ USB_DEVICE(0x0BB4, 0x0A36) }, /* PocketPC USB Sync */
{ USB_DEVICE(0x0BB4, 0x0A37) }, /* PocketPC USB Sync */
{ USB_DEVICE(0x0BB4, 0x0A38) }, /* PocketPC USB Sync */
{ USB_DEVICE(0x0BB4, 0x0A39) }, /* PocketPC USB Sync */
{ USB_DEVICE(0x0BB4, 0x0A3A) }, /* PocketPC USB Sync */
{ USB_DEVICE(0x0BB4, 0x0A3B) }, /* PocketPC USB Sync */
{ USB_DEVICE(0x0BB4, 0x0A3C) }, /* PocketPC USB Sync */
{ USB_DEVICE(0x0BB4, 0x0A3D) }, /* PocketPC USB Sync */
{ USB_DEVICE(0x0BB4, 0x0A3E) }, /* PocketPC USB Sync */
{ USB_DEVICE(0x0BB4, 0x0A3F) }, /* PocketPC USB Sync */
{ USB_DEVICE(0x0BB4, 0x0A40) }, /* PocketPC USB Sync */
{ USB_DEVICE(0x0BB4, 0x0A41) }, /* PocketPC USB Sync */
{ USB_DEVICE(0x0BB4, 0x0A42) }, /* PocketPC USB Sync */
{ USB_DEVICE(0x0BB4, 0x0A43) }, /* PocketPC USB Sync */
{ USB_DEVICE(0x0BB4, 0x0A44) }, /* PocketPC USB Sync */
{ USB_DEVICE(0x0BB4, 0x0A45) }, /* PocketPC USB Sync */
{ USB_DEVICE(0x0BB4, 0x0A46) }, /* PocketPC USB Sync */
{ USB_DEVICE(0x0BB4, 0x0A47) }, /* PocketPC USB Sync */
{ USB_DEVICE(0x0BB4, 0x0A48) }, /* PocketPC USB Sync */
{ USB_DEVICE(0x0BB4, 0x0A49) }, /* PocketPC USB Sync */
{ USB_DEVICE(0x0BB4, 0x0A4A) }, /* PocketPC USB Sync */
{ USB_DEVICE(0x0BB4, 0x0A4B) }, /* PocketPC USB Sync */
{ USB_DEVICE(0x0BB4, 0x0A4C) }, /* PocketPC USB Sync */
{ USB_DEVICE(0x0BB4, 0x0A4D) }, /* PocketPC USB Sync */
{ USB_DEVICE(0x0BB4, 0x0A4E) }, /* PocketPC USB Sync */
{ USB_DEVICE(0x0BB4, 0x0A4F) }, /* PocketPC USB Sync */
{ USB_DEVICE(0x0BB4, 0x0A50) }, /* HTC SmartPhone USB Sync */
{ USB_DEVICE(0x0BB4, 0x0A51) }, /* SmartPhone USB Sync */
{ USB_DEVICE(0x0BB4, 0x0A52) }, /* SmartPhone USB Sync */
{ USB_DEVICE(0x0BB4, 0x0A53) }, /* SmartPhone USB Sync */
{ USB_DEVICE(0x0BB4, 0x0A54) }, /* SmartPhone USB Sync */
{ USB_DEVICE(0x0BB4, 0x0A55) }, /* SmartPhone USB Sync */
{ USB_DEVICE(0x0BB4, 0x0A56) }, /* SmartPhone USB Sync */
{ USB_DEVICE(0x0BB4, 0x0A57) }, /* SmartPhone USB Sync */
{ USB_DEVICE(0x0BB4, 0x0A58) }, /* SmartPhone USB Sync */
{ USB_DEVICE(0x0BB4, 0x0A59) }, /* SmartPhone USB Sync */
{ USB_DEVICE(0x0BB4, 0x0A5A) }, /* SmartPhone USB Sync */
{ USB_DEVICE(0x0BB4, 0x0A5B) }, /* SmartPhone USB Sync */
{ USB_DEVICE(0x0BB4, 0x0A5C) }, /* SmartPhone USB Sync */
{ USB_DEVICE(0x0BB4, 0x0A5D) }, /* SmartPhone USB Sync */
{ USB_DEVICE(0x0BB4, 0x0A5E) }, /* SmartPhone USB Sync */
{ USB_DEVICE(0x0BB4, 0x0A5F) }, /* SmartPhone USB Sync */
{ USB_DEVICE(0x0BB4, 0x0A60) }, /* SmartPhone USB Sync */
{ USB_DEVICE(0x0BB4, 0x0A61) }, /* SmartPhone USB Sync */
{ USB_DEVICE(0x0BB4, 0x0A62) }, /* SmartPhone USB Sync */
{ USB_DEVICE(0x0BB4, 0x0A63) }, /* SmartPhone USB Sync */
{ USB_DEVICE(0x0BB4, 0x0A64) }, /* SmartPhone USB Sync */
{ USB_DEVICE(0x0BB4, 0x0A65) }, /* SmartPhone USB Sync */
{ USB_DEVICE(0x0BB4, 0x0A66) }, /* SmartPhone USB Sync */
{ USB_DEVICE(0x0BB4, 0x0A67) }, /* SmartPhone USB Sync */
{ USB_DEVICE(0x0BB4, 0x0A68) }, /* SmartPhone USB Sync */
{ USB_DEVICE(0x0BB4, 0x0A69) }, /* SmartPhone USB Sync */
{ USB_DEVICE(0x0BB4, 0x0A6A) }, /* SmartPhone USB Sync */
{ USB_DEVICE(0x0BB4, 0x0A6B) }, /* SmartPhone USB Sync */
{ USB_DEVICE(0x0BB4, 0x0A6C) }, /* SmartPhone USB Sync */
{ USB_DEVICE(0x0BB4, 0x0A6D) }, /* SmartPhone USB Sync */
{ USB_DEVICE(0x0BB4, 0x0A6E) }, /* SmartPhone USB Sync */
{ USB_DEVICE(0x0BB4, 0x0A6F) }, /* SmartPhone USB Sync */
{ USB_DEVICE(0x0BB4, 0x0A70) }, /* SmartPhone USB Sync */
{ USB_DEVICE(0x0BB4, 0x0A71) }, /* SmartPhone USB Sync */
{ USB_DEVICE(0x0BB4, 0x0A72) }, /* SmartPhone USB Sync */
{ USB_DEVICE(0x0BB4, 0x0A73) }, /* SmartPhone USB Sync */
{ USB_DEVICE(0x0BB4, 0x0A74) }, /* SmartPhone USB Sync */
{ USB_DEVICE(0x0BB4, 0x0A75) }, /* SmartPhone USB Sync */
{ USB_DEVICE(0x0BB4, 0x0A76) }, /* SmartPhone USB Sync */
{ USB_DEVICE(0x0BB4, 0x0A77) }, /* SmartPhone USB Sync */
{ USB_DEVICE(0x0BB4, 0x0A78) }, /* SmartPhone USB Sync */
{ USB_DEVICE(0x0BB4, 0x0A79) }, /* SmartPhone USB Sync */
{ USB_DEVICE(0x0BB4, 0x0A7A) }, /* SmartPhone USB Sync */
{ USB_DEVICE(0x0BB4, 0x0A7B) }, /* SmartPhone USB Sync */
{ USB_DEVICE(0x0BB4, 0x0A7C) }, /* SmartPhone USB Sync */
{ USB_DEVICE(0x0BB4, 0x0A7D) }, /* SmartPhone USB Sync */
{ USB_DEVICE(0x0BB4, 0x0A7E) }, /* SmartPhone USB Sync */
{ USB_DEVICE(0x0BB4, 0x0A7F) }, /* SmartPhone USB Sync */
{ USB_DEVICE(0x0BB4, 0x0A80) }, /* SmartPhone USB Sync */
{ USB_DEVICE(0x0BB4, 0x0A81) }, /* SmartPhone USB Sync */
{ USB_DEVICE(0x0BB4, 0x0A82) }, /* SmartPhone USB Sync */
{ USB_DEVICE(0x0BB4, 0x0A83) }, /* SmartPhone USB Sync */
{ USB_DEVICE(0x0BB4, 0x0A84) }, /* SmartPhone USB Sync */
{ USB_DEVICE(0x0BB4, 0x0A85) }, /* SmartPhone USB Sync */
{ USB_DEVICE(0x0BB4, 0x0A86) }, /* SmartPhone USB Sync */
{ USB_DEVICE(0x0BB4, 0x0A87) }, /* SmartPhone USB Sync */
{ USB_DEVICE(0x0BB4, 0x0A88) }, /* SmartPhone USB Sync */
{ USB_DEVICE(0x0BB4, 0x0A89) }, /* SmartPhone USB Sync */
{ USB_DEVICE(0x0BB4, 0x0A8A) }, /* SmartPhone USB Sync */
{ USB_DEVICE(0x0BB4, 0x0A8B) }, /* SmartPhone USB Sync */
{ USB_DEVICE(0x0BB4, 0x0A8C) }, /* SmartPhone USB Sync */
{ USB_DEVICE(0x0BB4, 0x0A8D) }, /* SmartPhone USB Sync */
{ USB_DEVICE(0x0BB4, 0x0A8E) }, /* SmartPhone USB Sync */
{ USB_DEVICE(0x0BB4, 0x0A8F) }, /* SmartPhone USB Sync */
{ USB_DEVICE(0x0BB4, 0x0A90) }, /* SmartPhone USB Sync */
{ USB_DEVICE(0x0BB4, 0x0A91) }, /* SmartPhone USB Sync */
{ USB_DEVICE(0x0BB4, 0x0A92) }, /* SmartPhone USB Sync */
{ USB_DEVICE(0x0BB4, 0x0A93) }, /* SmartPhone USB Sync */
{ USB_DEVICE(0x0BB4, 0x0A94) }, /* SmartPhone USB Sync */
{ USB_DEVICE(0x0BB4, 0x0A95) }, /* SmartPhone USB Sync */
{ USB_DEVICE(0x0BB4, 0x0A96) }, /* SmartPhone USB Sync */
{ USB_DEVICE(0x0BB4, 0x0A97) }, /* SmartPhone USB Sync */
{ USB_DEVICE(0x0BB4, 0x0A98) }, /* SmartPhone USB Sync */
{ USB_DEVICE(0x0BB4, 0x0A99) }, /* SmartPhone USB Sync */
{ USB_DEVICE(0x0BB4, 0x0A9A) }, /* SmartPhone USB Sync */
{ USB_DEVICE(0x0BB4, 0x0A9B) }, /* SmartPhone USB Sync */
{ USB_DEVICE(0x0BB4, 0x0A9C) }, /* SmartPhone USB Sync */
{ USB_DEVICE(0x0BB4, 0x0A9D) }, /* SmartPhone USB Sync */
{ USB_DEVICE(0x0BB4, 0x0A9E) }, /* SmartPhone USB Sync */
{ USB_DEVICE(0x0BB4, 0x0A9F) }, /* SmartPhone USB Sync */
{ USB_DEVICE(0x0BB4, 0x0BCE) }, /* "High Tech Computer Corp" */
{ USB_DEVICE(0x0BF8, 0x1001) }, /* Fujitsu Siemens Computers USB Sync */
{ USB_DEVICE(0x0C44, 0x03A2) }, /* Motorola iDEN Smartphone */
{ USB_DEVICE(0x0C8E, 0x6000) }, /* Cesscom Luxian Series */
{ USB_DEVICE(0x0CAD, 0x9001) }, /* Motorola PowerPad Pocket PC Device */
{ USB_DEVICE(0x0F4E, 0x0200) }, /* Freedom Scientific USB Sync */
{ USB_DEVICE(0x0F98, 0x0201) }, /* Cyberbank USB Sync */
{ USB_DEVICE(0x0FB8, 0x3001) }, /* Wistron USB Sync */
{ USB_DEVICE(0x0FB8, 0x3002) }, /* Wistron USB Sync */
{ USB_DEVICE(0x0FB8, 0x3003) }, /* Wistron USB Sync */
{ USB_DEVICE(0x0FB8, 0x4001) }, /* Wistron USB Sync */
{ USB_DEVICE(0x1066, 0x00CE) }, /* E-TEN USB Sync */
{ USB_DEVICE(0x1066, 0x0300) }, /* E-TEN P3XX Pocket PC */
{ USB_DEVICE(0x1066, 0x0500) }, /* E-TEN P5XX Pocket PC */
{ USB_DEVICE(0x1066, 0x0600) }, /* E-TEN P6XX Pocket PC */
{ USB_DEVICE(0x1066, 0x0700) }, /* E-TEN P7XX Pocket PC */
{ USB_DEVICE(0x1114, 0x0001) }, /* Psion Teklogix Sync 753x */
{ USB_DEVICE(0x1114, 0x0004) }, /* Psion Teklogix Sync netBookPro */
{ USB_DEVICE(0x1114, 0x0006) }, /* Psion Teklogix Sync 7525 */
{ USB_DEVICE(0x1182, 0x1388) }, /* VES USB Sync */
{ USB_DEVICE(0x11D9, 0x1002) }, /* Rugged Pocket PC 2003 */
{ USB_DEVICE(0x11D9, 0x1003) }, /* Rugged Pocket PC 2003 */
{ USB_DEVICE(0x1231, 0xCE01) }, /* USB Sync 03 */
{ USB_DEVICE(0x1231, 0xCE02) }, /* USB Sync 03 */
{ USB_DEVICE(0x1690, 0x0601) }, /* Askey USB Sync */
{ USB_DEVICE(0x22B8, 0x4204) }, /* Motorola MPx200 Smartphone */
{ USB_DEVICE(0x22B8, 0x4214) }, /* Motorola MPc GSM */
{ USB_DEVICE(0x22B8, 0x4224) }, /* Motorola MPx220 Smartphone */
{ USB_DEVICE(0x22B8, 0x4234) }, /* Motorola MPc CDMA */
{ USB_DEVICE(0x22B8, 0x4244) }, /* Motorola MPx100 Smartphone */
{ USB_DEVICE(0x3340, 0x011C) }, /* Mio DigiWalker PPC StrongARM */
{ USB_DEVICE(0x3340, 0x0326) }, /* Mio DigiWalker 338 */
{ USB_DEVICE(0x3340, 0x0426) }, /* Mio DigiWalker 338 */
{ USB_DEVICE(0x3340, 0x043A) }, /* Mio DigiWalker USB Sync */
{ USB_DEVICE(0x3340, 0x051C) }, /* MiTAC USB Sync 528 */
{ USB_DEVICE(0x3340, 0x053A) }, /* Mio DigiWalker SmartPhone USB Sync */
{ USB_DEVICE(0x3340, 0x071C) }, /* MiTAC USB Sync */
{ USB_DEVICE(0x3340, 0x0B1C) }, /* Generic PPC StrongARM */
{ USB_DEVICE(0x3340, 0x0E3A) }, /* Generic PPC USB Sync */
{ USB_DEVICE(0x3340, 0x0F1C) }, /* Itautec USB Sync */
{ USB_DEVICE(0x3340, 0x0F3A) }, /* Generic SmartPhone USB Sync */
{ USB_DEVICE(0x3340, 0x1326) }, /* Itautec USB Sync */
{ USB_DEVICE(0x3340, 0x191C) }, /* YAKUMO USB Sync */
{ USB_DEVICE(0x3340, 0x2326) }, /* Vobis USB Sync */
{ USB_DEVICE(0x3340, 0x3326) }, /* MEDION Windows Mobile USB Sync */
{ USB_DEVICE(0x3708, 0x20CE) }, /* Legend USB Sync */
{ USB_DEVICE(0x3708, 0x21CE) }, /* Lenovo USB Sync */
{ USB_DEVICE(0x4113, 0x0210) }, /* Mobile Media Technology USB Sync */
{ USB_DEVICE(0x4113, 0x0211) }, /* Mobile Media Technology USB Sync */
{ USB_DEVICE(0x4113, 0x0400) }, /* Mobile Media Technology USB Sync */
{ USB_DEVICE(0x4113, 0x0410) }, /* Mobile Media Technology USB Sync */
{ USB_DEVICE(0x413C, 0x4001) }, /* Dell Axim USB Sync */
{ USB_DEVICE(0x413C, 0x4002) }, /* Dell Axim USB Sync */
{ USB_DEVICE(0x413C, 0x4003) }, /* Dell Axim USB Sync */
{ USB_DEVICE(0x413C, 0x4004) }, /* Dell Axim USB Sync */
{ USB_DEVICE(0x413C, 0x4005) }, /* Dell Axim USB Sync */
{ USB_DEVICE(0x413C, 0x4006) }, /* Dell Axim USB Sync */
{ USB_DEVICE(0x413C, 0x4007) }, /* Dell Axim USB Sync */
{ USB_DEVICE(0x413C, 0x4008) }, /* Dell Axim USB Sync */
{ USB_DEVICE(0x413C, 0x4009) }, /* Dell Axim USB Sync */
{ USB_DEVICE(0x4505, 0x0010) }, /* Smartphone */
{ USB_DEVICE(0x5E04, 0xCE00) }, /* SAGEM Wireless Assistant */
{ } /* Terminating entry */
};
MODULE_DEVICE_TABLE(usb, ipaq_id_table);
/* All of the device info needed for the Compaq iPAQ */
static struct usb_serial_driver ipaq_device = {
.driver = {
.owner = THIS_MODULE,
.name = "ipaq",
},
.description = "PocketPC PDA",
.id_table = ipaq_id_table,
.bulk_in_size = 256,
.bulk_out_size = 256,
.open = ipaq_open,
.attach = ipaq_startup,
.calc_num_ports = ipaq_calc_num_ports,
};
static struct usb_serial_driver * const serial_drivers[] = {
&ipaq_device, NULL
};
static int ipaq_open(struct tty_struct *tty,
struct usb_serial_port *port)
{
struct usb_serial *serial = port->serial;
int result = 0;
int retries = connect_retries;
msleep(1000*initial_wait);
/*
* Send out control message observed in win98 sniffs. Not sure what
* it does, but from empirical observations, it seems that the device
* will start the chat sequence once one of these messages gets
* through. Since this has a reasonably high failure rate, we retry
* several times.
*/
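/*
 * For reference: bRequestType 0x21 is a class request directed at the
 * interface, and bRequest 0x22 with wValue 0x1 matches the CDC
 * SET_CONTROL_LINE_STATE request with DTR asserted, which is presumably
 * what triggers the device's chat sequence.
 */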
while (retries) {
retries--;
result = usb_control_msg(serial->dev,
usb_sndctrlpipe(serial->dev, 0), 0x22, 0x21,
0x1, 0, NULL, 0, 100);
if (!result)
break;
msleep(1000);
}
if (!retries && result) {
dev_err(&port->dev, "%s - failed doing control urb, error %d\n",
__func__, result);
return result;
}
return usb_serial_generic_open(tty, port);
}
static int ipaq_calc_num_ports(struct usb_serial *serial,
struct usb_serial_endpoints *epds)
{
/*
* Some of the devices in ipaq_id_table[] are composite, and we
* shouldn't bind to all the interfaces. This test will rule out
* some obviously invalid possibilities.
*/
if (epds->num_bulk_in == 0 || epds->num_bulk_out == 0)
return -ENODEV;
/*
* A few devices have four endpoints, seemingly Yakumo devices, and
* we need the second pair.
*/
if (epds->num_bulk_in > 1 && epds->num_bulk_out > 1) {
epds->bulk_in[0] = epds->bulk_in[1];
epds->bulk_out[0] = epds->bulk_out[1];
}
/*
* Other devices have 3 endpoints, but we only use the first bulk in
* and out endpoints.
*/
epds->num_bulk_in = 1;
epds->num_bulk_out = 1;
return 1;
}
static int ipaq_startup(struct usb_serial *serial)
{
if (serial->dev->actconfig->desc.bConfigurationValue != 1) {
/*
* FIXME: HP iPaq rx3715, possibly others, have 1 config that
* is labeled as 2
*/
dev_err(&serial->dev->dev, "active config #%d != 1 ??\n",
serial->dev->actconfig->desc.bConfigurationValue);
return -ENODEV;
}
return usb_reset_configuration(serial->dev);
}
module_usb_serial_driver(serial_drivers, ipaq_id_table);
MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL");
module_param(connect_retries, int, 0644);
MODULE_PARM_DESC(connect_retries,
"Maximum number of connect retries (one second each)");
module_param(initial_wait, int, 0644);
MODULE_PARM_DESC(initial_wait,
"Time to wait before attempting a connection (in seconds)");
| linux-master | drivers/usb/serial/ipaq.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Renesas Electronics uPD78F0730 USB to serial converter driver
*
* Copyright (C) 2014,2016 Maksim Salau <[email protected]>
*
* Protocol of the adaptor is described in the application note U19660EJ1V0AN00
* μPD78F0730 8-bit Single-Chip Microcontroller
* USB-to-Serial Conversion Software
* <https://www.renesas.com/en-eu/doc/DocumentServer/026/U19660EJ1V0AN00.pdf>
*
* The adaptor functionality is limited to the following:
* - data bits: 7 or 8
* - stop bits: 1 or 2
* - parity: even, odd or none
* - flow control: none
* - baud rates: 0, 2400, 4800, 9600, 19200, 38400, 57600, 115200, 153600
* - signals: DTR, RTS and BREAK
*/
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/tty.h>
#include <linux/usb.h>
#include <linux/usb/serial.h>
#define DRIVER_DESC "Renesas uPD78F0730 USB to serial converter driver"
#define DRIVER_AUTHOR "Maksim Salau <[email protected]>"
static const struct usb_device_id id_table[] = {
{ USB_DEVICE(0x0409, 0x0063) }, /* V850ESJX3-STICK */
{ USB_DEVICE(0x045B, 0x0212) }, /* YRPBRL78G13, YRPBRL78G14 */
{ USB_DEVICE(0x064B, 0x7825) }, /* Analog Devices EVAL-ADXL362Z-DB */
{}
};
MODULE_DEVICE_TABLE(usb, id_table);
/*
* Each adaptor is associated with a private structure, that holds the current
* state of control signals (DTR, RTS and BREAK).
*/
struct upd78f0730_port_private {
struct mutex lock; /* mutex to protect line_signals */
u8 line_signals;
};
/* Op-codes of control commands */
#define UPD78F0730_CMD_LINE_CONTROL 0x00
#define UPD78F0730_CMD_SET_DTR_RTS 0x01
#define UPD78F0730_CMD_SET_XON_XOFF_CHR 0x02
#define UPD78F0730_CMD_OPEN_CLOSE 0x03
#define UPD78F0730_CMD_SET_ERR_CHR 0x04
/* Data sizes in UPD78F0730_CMD_LINE_CONTROL command */
#define UPD78F0730_DATA_SIZE_7_BITS 0x00
#define UPD78F0730_DATA_SIZE_8_BITS 0x01
#define UPD78F0730_DATA_SIZE_MASK 0x01
/* Stop-bit modes in UPD78F0730_CMD_LINE_CONTROL command */
#define UPD78F0730_STOP_BIT_1_BIT 0x00
#define UPD78F0730_STOP_BIT_2_BIT 0x02
#define UPD78F0730_STOP_BIT_MASK 0x02
/* Parity modes in UPD78F0730_CMD_LINE_CONTROL command */
#define UPD78F0730_PARITY_NONE 0x00
#define UPD78F0730_PARITY_EVEN 0x04
#define UPD78F0730_PARITY_ODD 0x08
#define UPD78F0730_PARITY_MASK 0x0C
/* Flow control modes in UPD78F0730_CMD_LINE_CONTROL command */
#define UPD78F0730_FLOW_CONTROL_NONE 0x00
#define UPD78F0730_FLOW_CONTROL_HW 0x10
#define UPD78F0730_FLOW_CONTROL_SW 0x20
#define UPD78F0730_FLOW_CONTROL_MASK 0x30
/* Control signal bits in UPD78F0730_CMD_SET_DTR_RTS command */
#define UPD78F0730_RTS 0x01
#define UPD78F0730_DTR 0x02
#define UPD78F0730_BREAK 0x04
/* Port modes in UPD78F0730_CMD_OPEN_CLOSE command */
#define UPD78F0730_PORT_CLOSE 0x00
#define UPD78F0730_PORT_OPEN 0x01
/* Error character substitution modes in UPD78F0730_CMD_SET_ERR_CHR command */
#define UPD78F0730_ERR_CHR_DISABLED 0x00
#define UPD78F0730_ERR_CHR_ENABLED 0x01
/*
* Declaration of command structures
*/
/* UPD78F0730_CMD_LINE_CONTROL command */
struct upd78f0730_line_control {
u8 opcode;
__le32 baud_rate;
u8 params;
} __packed;
/* UPD78F0730_CMD_SET_DTR_RTS command */
struct upd78f0730_set_dtr_rts {
u8 opcode;
u8 params;
};
/* UPD78F0730_CMD_SET_XON_XOFF_CHR command */
struct upd78f0730_set_xon_xoff_chr {
u8 opcode;
u8 xon;
u8 xoff;
};
/* UPD78F0730_CMD_OPEN_CLOSE command */
struct upd78f0730_open_close {
u8 opcode;
u8 state;
};
/* UPD78F0730_CMD_SET_ERR_CHR command */
struct upd78f0730_set_err_chr {
u8 opcode;
u8 state;
u8 err_char;
};
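/*
 * Illustrative wire format, derived from the definitions above rather than
 * taken from the application note: a LINE_CONTROL command selecting
 * 115200 baud, 8 data bits, 1 stop bit, no parity and no flow control is
 * the 6-byte sequence 00 00 C2 01 00 01 (opcode 0x00, baud rate
 * 0x0001C200 little-endian, params UPD78F0730_DATA_SIZE_8_BITS).
 */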
static int upd78f0730_send_ctl(struct usb_serial_port *port,
const void *data, int size)
{
struct usb_device *usbdev = port->serial->dev;
void *buf;
int res;
if (size <= 0 || !data)
return -EINVAL;
buf = kmemdup(data, size, GFP_KERNEL);
if (!buf)
return -ENOMEM;
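/*
 * All commands are sent as vendor request 0x00 to the device; the opcode
 * travels in the first byte of the data stage.
 */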
res = usb_control_msg(usbdev, usb_sndctrlpipe(usbdev, 0), 0x00,
USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_OUT,
0x0000, 0x0000, buf, size, USB_CTRL_SET_TIMEOUT);
kfree(buf);
if (res < 0) {
struct device *dev = &port->dev;
dev_err(dev, "failed to send control request %02x: %d\n",
*(u8 *)data, res);
return res;
}
return 0;
}
static int upd78f0730_port_probe(struct usb_serial_port *port)
{
struct upd78f0730_port_private *private;
private = kzalloc(sizeof(*private), GFP_KERNEL);
if (!private)
return -ENOMEM;
mutex_init(&private->lock);
usb_set_serial_port_data(port, private);
return 0;
}
static void upd78f0730_port_remove(struct usb_serial_port *port)
{
struct upd78f0730_port_private *private;
private = usb_get_serial_port_data(port);
mutex_destroy(&private->lock);
kfree(private);
}
static int upd78f0730_tiocmget(struct tty_struct *tty)
{
struct upd78f0730_port_private *private;
struct usb_serial_port *port = tty->driver_data;
int signals;
int res;
private = usb_get_serial_port_data(port);
mutex_lock(&private->lock);
signals = private->line_signals;
mutex_unlock(&private->lock);
res = ((signals & UPD78F0730_DTR) ? TIOCM_DTR : 0) |
((signals & UPD78F0730_RTS) ? TIOCM_RTS : 0);
dev_dbg(&port->dev, "%s - res = %x\n", __func__, res);
return res;
}
static int upd78f0730_tiocmset(struct tty_struct *tty,
unsigned int set, unsigned int clear)
{
struct usb_serial_port *port = tty->driver_data;
struct upd78f0730_port_private *private;
struct upd78f0730_set_dtr_rts request;
struct device *dev = &port->dev;
int res;
private = usb_get_serial_port_data(port);
mutex_lock(&private->lock);
if (set & TIOCM_DTR) {
private->line_signals |= UPD78F0730_DTR;
dev_dbg(dev, "%s - set DTR\n", __func__);
}
if (set & TIOCM_RTS) {
private->line_signals |= UPD78F0730_RTS;
dev_dbg(dev, "%s - set RTS\n", __func__);
}
if (clear & TIOCM_DTR) {
private->line_signals &= ~UPD78F0730_DTR;
dev_dbg(dev, "%s - clear DTR\n", __func__);
}
if (clear & TIOCM_RTS) {
private->line_signals &= ~UPD78F0730_RTS;
dev_dbg(dev, "%s - clear RTS\n", __func__);
}
request.opcode = UPD78F0730_CMD_SET_DTR_RTS;
request.params = private->line_signals;
res = upd78f0730_send_ctl(port, &request, sizeof(request));
mutex_unlock(&private->lock);
return res;
}
static int upd78f0730_break_ctl(struct tty_struct *tty, int break_state)
{
struct upd78f0730_port_private *private;
struct usb_serial_port *port = tty->driver_data;
struct upd78f0730_set_dtr_rts request;
struct device *dev = &port->dev;
int res;
private = usb_get_serial_port_data(port);
mutex_lock(&private->lock);
if (break_state) {
private->line_signals |= UPD78F0730_BREAK;
dev_dbg(dev, "%s - set BREAK\n", __func__);
} else {
private->line_signals &= ~UPD78F0730_BREAK;
dev_dbg(dev, "%s - clear BREAK\n", __func__);
}
request.opcode = UPD78F0730_CMD_SET_DTR_RTS;
request.params = private->line_signals;
res = upd78f0730_send_ctl(port, &request, sizeof(request));
mutex_unlock(&private->lock);
return res;
}
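/*
 * Called by the usb-serial core to raise or drop DTR/RTS when the port is
 * opened or shut down.
 */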
static void upd78f0730_dtr_rts(struct usb_serial_port *port, int on)
{
struct tty_struct *tty = port->port.tty;
unsigned int set = 0;
unsigned int clear = 0;
if (on)
set = TIOCM_DTR | TIOCM_RTS;
else
clear = TIOCM_DTR | TIOCM_RTS;
upd78f0730_tiocmset(tty, set, clear);
}
static speed_t upd78f0730_get_baud_rate(struct tty_struct *tty)
{
const speed_t baud_rate = tty_get_baud_rate(tty);
static const speed_t supported[] = {
0, 2400, 4800, 9600, 19200, 38400, 57600, 115200, 153600
};
int i;
for (i = ARRAY_SIZE(supported) - 1; i >= 0; i--) {
if (baud_rate == supported[i])
return baud_rate;
}
/* If the baud rate is not supported, switch to the default one */
tty_encode_baud_rate(tty, 9600, 9600);
return tty_get_baud_rate(tty);
}
static void upd78f0730_set_termios(struct tty_struct *tty,
struct usb_serial_port *port,
const struct ktermios *old_termios)
{
struct device *dev = &port->dev;
struct upd78f0730_line_control request;
speed_t baud_rate;
if (old_termios && !tty_termios_hw_change(&tty->termios, old_termios))
return;
if (C_BAUD(tty) == B0)
upd78f0730_dtr_rts(port, 0);
else if (old_termios && (old_termios->c_cflag & CBAUD) == B0)
upd78f0730_dtr_rts(port, 1);
baud_rate = upd78f0730_get_baud_rate(tty);
request.opcode = UPD78F0730_CMD_LINE_CONTROL;
request.baud_rate = cpu_to_le32(baud_rate);
request.params = 0;
dev_dbg(dev, "%s - baud rate = %d\n", __func__, baud_rate);
switch (C_CSIZE(tty)) {
case CS7:
request.params |= UPD78F0730_DATA_SIZE_7_BITS;
dev_dbg(dev, "%s - 7 data bits\n", __func__);
break;
default:
tty->termios.c_cflag &= ~CSIZE;
tty->termios.c_cflag |= CS8;
dev_warn(dev, "data size is not supported, using 8 bits\n");
fallthrough;
case CS8:
request.params |= UPD78F0730_DATA_SIZE_8_BITS;
dev_dbg(dev, "%s - 8 data bits\n", __func__);
break;
}
if (C_PARENB(tty)) {
if (C_PARODD(tty)) {
request.params |= UPD78F0730_PARITY_ODD;
dev_dbg(dev, "%s - odd parity\n", __func__);
} else {
request.params |= UPD78F0730_PARITY_EVEN;
dev_dbg(dev, "%s - even parity\n", __func__);
}
if (C_CMSPAR(tty)) {
tty->termios.c_cflag &= ~CMSPAR;
dev_warn(dev, "MARK/SPACE parity is not supported\n");
}
} else {
request.params |= UPD78F0730_PARITY_NONE;
dev_dbg(dev, "%s - no parity\n", __func__);
}
if (C_CSTOPB(tty)) {
request.params |= UPD78F0730_STOP_BIT_2_BIT;
dev_dbg(dev, "%s - 2 stop bits\n", __func__);
} else {
request.params |= UPD78F0730_STOP_BIT_1_BIT;
dev_dbg(dev, "%s - 1 stop bit\n", __func__);
}
if (C_CRTSCTS(tty)) {
tty->termios.c_cflag &= ~CRTSCTS;
dev_warn(dev, "RTSCTS flow control is not supported\n");
}
if (I_IXOFF(tty) || I_IXON(tty)) {
tty->termios.c_iflag &= ~(IXOFF | IXON);
dev_warn(dev, "XON/XOFF flow control is not supported\n");
}
request.params |= UPD78F0730_FLOW_CONTROL_NONE;
dev_dbg(dev, "%s - no flow control\n", __func__);
upd78f0730_send_ctl(port, &request, sizeof(request));
}
static int upd78f0730_open(struct tty_struct *tty, struct usb_serial_port *port)
{
static const struct upd78f0730_open_close request = {
.opcode = UPD78F0730_CMD_OPEN_CLOSE,
.state = UPD78F0730_PORT_OPEN
};
int res;
res = upd78f0730_send_ctl(port, &request, sizeof(request));
if (res)
return res;
if (tty)
upd78f0730_set_termios(tty, port, NULL);
return usb_serial_generic_open(tty, port);
}
static void upd78f0730_close(struct usb_serial_port *port)
{
static const struct upd78f0730_open_close request = {
.opcode = UPD78F0730_CMD_OPEN_CLOSE,
.state = UPD78F0730_PORT_CLOSE
};
usb_serial_generic_close(port);
upd78f0730_send_ctl(port, &request, sizeof(request));
}
static struct usb_serial_driver upd78f0730_device = {
.driver = {
.owner = THIS_MODULE,
.name = "upd78f0730",
},
.id_table = id_table,
.num_ports = 1,
.port_probe = upd78f0730_port_probe,
.port_remove = upd78f0730_port_remove,
.open = upd78f0730_open,
.close = upd78f0730_close,
.set_termios = upd78f0730_set_termios,
.tiocmget = upd78f0730_tiocmget,
.tiocmset = upd78f0730_tiocmset,
.dtr_rts = upd78f0730_dtr_rts,
.break_ctl = upd78f0730_break_ctl,
};
static struct usb_serial_driver * const serial_drivers[] = {
&upd78f0730_device,
NULL
};
module_usb_serial_driver(serial_drivers, id_table);
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_LICENSE("GPL v2");
| linux-master | drivers/usb/serial/upd78f0730.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* USB IR Dongle driver
*
* Copyright (C) 2001-2002 Greg Kroah-Hartman ([email protected])
* Copyright (C) 2002 Gary Brubaker ([email protected])
* Copyright (C) 2010 Johan Hovold ([email protected])
*
* This driver allows a USB IrDA device to be used as a "dumb" serial device.
* This can be useful if you do not have access to a full IrDA stack on the
* other side of the connection. If you do have an IrDA stack on both devices,
* please use the usb-irda driver, as it contains the proper error checking and
* other goodness of a full IrDA stack.
*
* Portions of this driver were taken from drivers/net/irda/irda-usb.c, which
* was written by Roman Weissgaerber <[email protected]>, Dag Brattli
* <[email protected]>, and Jean Tourrilhes <[email protected]>
*
* See Documentation/usb/usb-serial.rst for more information on using this
* driver
*/
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/tty.h>
#include <linux/tty_driver.h>
#include <linux/tty_flip.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/uaccess.h>
#include <linux/usb.h>
#include <linux/usb/serial.h>
#include <linux/usb/irda.h>
#define DRIVER_AUTHOR "Greg Kroah-Hartman <[email protected]>, Johan Hovold <[email protected]>"
#define DRIVER_DESC "USB IR Dongle driver"
/* if overridden by the user, then use their value for the size of the read and
* write urbs */
static int buffer_size;
/* if overridden by the user, then use the specified number of XBOFs */
static int xbof = -1;
static int ir_startup (struct usb_serial *serial);
static int ir_write(struct tty_struct *tty, struct usb_serial_port *port,
const unsigned char *buf, int count);
static unsigned int ir_write_room(struct tty_struct *tty);
static void ir_write_bulk_callback(struct urb *urb);
static void ir_process_read_urb(struct urb *urb);
static void ir_set_termios(struct tty_struct *tty,
struct usb_serial_port *port,
const struct ktermios *old_termios);
/* Note that this lot means you can only have one per system */
static u8 ir_baud;
static u8 ir_xbof;
static u8 ir_add_bof;
static const struct usb_device_id ir_id_table[] = {
{ USB_DEVICE(0x050f, 0x0180) }, /* KC Technology, KC-180 */
{ USB_DEVICE(0x08e9, 0x0100) }, /* XTNDAccess */
{ USB_DEVICE(0x09c4, 0x0011) }, /* ACTiSys ACT-IR2000U */
{ USB_INTERFACE_INFO(USB_CLASS_APP_SPEC, USB_SUBCLASS_IRDA, 0) },
{ } /* Terminating entry */
};
MODULE_DEVICE_TABLE(usb, ir_id_table);
static struct usb_serial_driver ir_device = {
.driver = {
.owner = THIS_MODULE,
.name = "ir-usb",
},
.description = "IR Dongle",
.id_table = ir_id_table,
.num_ports = 1,
.num_bulk_in = 1,
.num_bulk_out = 1,
.set_termios = ir_set_termios,
.attach = ir_startup,
.write = ir_write,
.write_room = ir_write_room,
.write_bulk_callback = ir_write_bulk_callback,
.process_read_urb = ir_process_read_urb,
};
static struct usb_serial_driver * const serial_drivers[] = {
&ir_device, NULL
};
static inline void irda_usb_dump_class_desc(struct usb_serial *serial,
struct usb_irda_cs_descriptor *desc)
{
struct device *dev = &serial->dev->dev;
dev_dbg(dev, "bLength=%x\n", desc->bLength);
dev_dbg(dev, "bDescriptorType=%x\n", desc->bDescriptorType);
dev_dbg(dev, "bcdSpecRevision=%x\n", __le16_to_cpu(desc->bcdSpecRevision));
dev_dbg(dev, "bmDataSize=%x\n", desc->bmDataSize);
dev_dbg(dev, "bmWindowSize=%x\n", desc->bmWindowSize);
dev_dbg(dev, "bmMinTurnaroundTime=%d\n", desc->bmMinTurnaroundTime);
dev_dbg(dev, "wBaudRate=%x\n", __le16_to_cpu(desc->wBaudRate));
dev_dbg(dev, "bmAdditionalBOFs=%x\n", desc->bmAdditionalBOFs);
dev_dbg(dev, "bIrdaRateSniff=%x\n", desc->bIrdaRateSniff);
dev_dbg(dev, "bMaxUnicastList=%x\n", desc->bMaxUnicastList);
}
/*------------------------------------------------------------------*/
/*
* Function irda_usb_find_class_desc(dev, ifnum)
*
* Returns instance of IrDA class descriptor, or NULL if not found
*
* The class descriptor is some extra info that IrDA USB devices will
* offer to us, describing their IrDA characteristics. We will use that in
* irda_usb_init_qos()
*
* Based on the same function in drivers/net/irda/irda-usb.c
*/
static struct usb_irda_cs_descriptor *
irda_usb_find_class_desc(struct usb_serial *serial, unsigned int ifnum)
{
struct usb_device *dev = serial->dev;
struct usb_irda_cs_descriptor *desc;
int ret;
desc = kzalloc(sizeof(*desc), GFP_KERNEL);
if (!desc)
return NULL;
ret = usb_control_msg(dev, usb_rcvctrlpipe(dev, 0),
USB_REQ_CS_IRDA_GET_CLASS_DESC,
USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE,
0, ifnum, desc, sizeof(*desc), 1000);
dev_dbg(&serial->dev->dev, "%s - ret=%d\n", __func__, ret);
if (ret < (int)sizeof(*desc)) {
dev_dbg(&serial->dev->dev,
"%s - class descriptor read %s (%d)\n", __func__,
(ret < 0) ? "failed" : "too short", ret);
goto error;
}
if (desc->bDescriptorType != USB_DT_CS_IRDA) {
dev_dbg(&serial->dev->dev, "%s - bad class descriptor type\n",
__func__);
goto error;
}
irda_usb_dump_class_desc(serial, desc);
return desc;
error:
kfree(desc);
return NULL;
}
static u8 ir_xbof_change(u8 xbof)
{
u8 result;
/* reference irda-usb.c */
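/*
 * Map the requested number of additional BOFs to the code used in the
 * upper nibble of the outbound header (see ir_write()); unrecognized
 * counts fall back to the 12-BOF encoding.
 */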
switch (xbof) {
case 48:
result = 0x10;
break;
case 28:
case 24:
result = 0x20;
break;
default:
case 12:
result = 0x30;
break;
case 5:
case 6:
result = 0x40;
break;
case 3:
result = 0x50;
break;
case 2:
result = 0x60;
break;
case 1:
result = 0x70;
break;
case 0:
result = 0x80;
break;
}
return result;
}
static int ir_startup(struct usb_serial *serial)
{
struct usb_irda_cs_descriptor *irda_desc;
int rates;
irda_desc = irda_usb_find_class_desc(serial, 0);
if (!irda_desc) {
dev_err(&serial->dev->dev,
"IRDA class descriptor not found, device not bound\n");
return -ENODEV;
}
rates = le16_to_cpu(irda_desc->wBaudRate);
dev_dbg(&serial->dev->dev,
"%s - Baud rates supported:%s%s%s%s%s%s%s%s%s\n",
__func__,
(rates & USB_IRDA_BR_2400) ? " 2400" : "",
(rates & USB_IRDA_BR_9600) ? " 9600" : "",
(rates & USB_IRDA_BR_19200) ? " 19200" : "",
(rates & USB_IRDA_BR_38400) ? " 38400" : "",
(rates & USB_IRDA_BR_57600) ? " 57600" : "",
(rates & USB_IRDA_BR_115200) ? " 115200" : "",
(rates & USB_IRDA_BR_576000) ? " 576000" : "",
(rates & USB_IRDA_BR_1152000) ? " 1152000" : "",
(rates & USB_IRDA_BR_4000000) ? " 4000000" : "");
switch (irda_desc->bmAdditionalBOFs) {
case USB_IRDA_AB_48:
ir_add_bof = 48;
break;
case USB_IRDA_AB_24:
ir_add_bof = 24;
break;
case USB_IRDA_AB_12:
ir_add_bof = 12;
break;
case USB_IRDA_AB_6:
ir_add_bof = 6;
break;
case USB_IRDA_AB_3:
ir_add_bof = 3;
break;
case USB_IRDA_AB_2:
ir_add_bof = 2;
break;
case USB_IRDA_AB_1:
ir_add_bof = 1;
break;
case USB_IRDA_AB_0:
ir_add_bof = 0;
break;
default:
break;
}
kfree(irda_desc);
return 0;
}
static int ir_write(struct tty_struct *tty, struct usb_serial_port *port,
const unsigned char *buf, int count)
{
struct urb *urb = NULL;
unsigned long flags;
int ret;
if (port->bulk_out_size == 0)
return -EINVAL;
if (count == 0)
return 0;
count = min(count, port->bulk_out_size - 1);
spin_lock_irqsave(&port->lock, flags);
if (__test_and_clear_bit(0, &port->write_urbs_free)) {
urb = port->write_urbs[0];
port->tx_bytes += count;
}
spin_unlock_irqrestore(&port->lock, flags);
if (!urb)
return 0;
/*
* The first byte of the packet we send to the device contains an
* outbound header which indicates an additional number of BOFs and
* a baud rate change.
*
* See section 5.4.2.2 of the USB IrDA spec.
*/
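/*
 * The header byte therefore carries the xBOF code in its upper nibble
 * and the link-speed code in its lower nibble.
 */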
*(u8 *)urb->transfer_buffer = ir_xbof | ir_baud;
memcpy(urb->transfer_buffer + 1, buf, count);
urb->transfer_buffer_length = count + 1;
urb->transfer_flags = URB_ZERO_PACKET;
ret = usb_submit_urb(urb, GFP_ATOMIC);
if (ret) {
dev_err(&port->dev, "failed to submit write urb: %d\n", ret);
spin_lock_irqsave(&port->lock, flags);
__set_bit(0, &port->write_urbs_free);
port->tx_bytes -= count;
spin_unlock_irqrestore(&port->lock, flags);
return ret;
}
return count;
}
static void ir_write_bulk_callback(struct urb *urb)
{
struct usb_serial_port *port = urb->context;
int status = urb->status;
unsigned long flags;
spin_lock_irqsave(&port->lock, flags);
__set_bit(0, &port->write_urbs_free);
port->tx_bytes -= urb->transfer_buffer_length - 1;
spin_unlock_irqrestore(&port->lock, flags);
switch (status) {
case 0:
break;
case -ENOENT:
case -ECONNRESET:
case -ESHUTDOWN:
dev_dbg(&port->dev, "write urb stopped: %d\n", status);
return;
case -EPIPE:
dev_err(&port->dev, "write urb stopped: %d\n", status);
return;
default:
dev_err(&port->dev, "nonzero write-urb status: %d\n", status);
break;
}
usb_serial_port_softint(port);
}
static unsigned int ir_write_room(struct tty_struct *tty)
{
struct usb_serial_port *port = tty->driver_data;
unsigned int count = 0;
if (port->bulk_out_size == 0)
return 0;
if (test_bit(0, &port->write_urbs_free))
count = port->bulk_out_size - 1;
return count;
}
static void ir_process_read_urb(struct urb *urb)
{
struct usb_serial_port *port = urb->context;
unsigned char *data = urb->transfer_buffer;
if (!urb->actual_length)
return;
/*
* The first byte of the packet we get from the device
* contains a busy indicator and baud rate change.
* See section 5.4.1.2 of the USB IrDA spec.
*/
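/*
 * The low nibble holds the current link-speed code; mirror it into
 * ir_baud so that subsequent writes use the same speed.
 */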
if (*data & 0x0f)
ir_baud = *data & 0x0f;
if (urb->actual_length == 1)
return;
tty_insert_flip_string(&port->port, data + 1, urb->actual_length - 1);
tty_flip_buffer_push(&port->port);
}
static void ir_set_termios(struct tty_struct *tty,
struct usb_serial_port *port,
const struct ktermios *old_termios)
{
struct usb_device *udev = port->serial->dev;
unsigned char *transfer_buffer;
int actual_length;
speed_t baud;
int ir_baud;
int ret;
baud = tty_get_baud_rate(tty);
/*
* FIXME, we should compare the baud request against the
* capability stated in the IR header that we got in the
* startup function.
*/
switch (baud) {
case 2400:
ir_baud = USB_IRDA_LS_2400;
break;
case 9600:
ir_baud = USB_IRDA_LS_9600;
break;
case 19200:
ir_baud = USB_IRDA_LS_19200;
break;
case 38400:
ir_baud = USB_IRDA_LS_38400;
break;
case 57600:
ir_baud = USB_IRDA_LS_57600;
break;
case 115200:
ir_baud = USB_IRDA_LS_115200;
break;
case 576000:
ir_baud = USB_IRDA_LS_576000;
break;
case 1152000:
ir_baud = USB_IRDA_LS_1152000;
break;
case 4000000:
ir_baud = USB_IRDA_LS_4000000;
break;
default:
ir_baud = USB_IRDA_LS_9600;
baud = 9600;
}
if (xbof == -1)
ir_xbof = ir_xbof_change(ir_add_bof);
else
ir_xbof = ir_xbof_change(xbof);
/* Only speed changes are supported */
tty_termios_copy_hw(&tty->termios, old_termios);
tty_encode_baud_rate(tty, baud, baud);
/*
* send the baud change out on an "empty" data packet
*/
transfer_buffer = kmalloc(1, GFP_KERNEL);
if (!transfer_buffer)
return;
*transfer_buffer = ir_xbof | ir_baud;
ret = usb_bulk_msg(udev,
usb_sndbulkpipe(udev, port->bulk_out_endpointAddress),
transfer_buffer, 1, &actual_length, 5000);
if (ret || actual_length != 1) {
if (!ret)
ret = -EIO;
dev_err(&port->dev, "failed to change line speed: %d\n", ret);
}
kfree(transfer_buffer);
}
static int __init ir_init(void)
{
if (buffer_size) {
ir_device.bulk_in_size = buffer_size;
ir_device.bulk_out_size = buffer_size;
}
return usb_serial_register_drivers(serial_drivers, KBUILD_MODNAME, ir_id_table);
}
static void __exit ir_exit(void)
{
usb_serial_deregister_drivers(serial_drivers);
}
module_init(ir_init);
module_exit(ir_exit);
MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL");
module_param(xbof, int, 0);
MODULE_PARM_DESC(xbof, "Force specific number of XBOFs");
module_param(buffer_size, int, 0);
MODULE_PARM_DESC(buffer_size, "Size of the transfer buffers");
| linux-master | drivers/usb/serial/ir-usb.c |
// SPDX-License-Identifier: GPL-2.0
/*
* mos7720.c
* Controls the Moschip 7720 usb to dual port serial converter
*
* Copyright 2006 Moschip Semiconductor Tech. Ltd.
*
* Developed by:
* Vijaya Kumar <[email protected]>
* Ajay Kumar <[email protected]>
* Gurudeva <[email protected]>
*
* Cleaned up from the original by:
* Greg Kroah-Hartman <[email protected]>
*
* Originally based on drivers/usb/serial/io_edgeport.c which is:
* Copyright (C) 2000 Inside Out Networks, All rights reserved.
* Copyright (C) 2001-2002 Greg Kroah-Hartman <[email protected]>
*/
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/tty.h>
#include <linux/tty_driver.h>
#include <linux/tty_flip.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/serial.h>
#include <linux/serial_reg.h>
#include <linux/usb.h>
#include <linux/usb/serial.h>
#include <linux/uaccess.h>
#include <linux/parport.h>
#define DRIVER_AUTHOR "Aspire Communications pvt Ltd."
#define DRIVER_DESC "Moschip USB Serial Driver"
/* default urb timeout */
#define MOS_WDR_TIMEOUT 5000
#define MOS_MAX_PORT 0x02
#define MOS_WRITE 0x0E
#define MOS_READ 0x0D
/* Interrupt Routines Defines */
#define SERIAL_IIR_RLS 0x06
#define SERIAL_IIR_RDA 0x04
#define SERIAL_IIR_CTI 0x0c
#define SERIAL_IIR_THR 0x02
#define SERIAL_IIR_MS 0x00
#define NUM_URBS 16 /* URB Count */
#define URB_TRANSFER_BUFFER_SIZE 32 /* URB Size */
/* This structure holds all of the local serial port information */
struct moschip_port {
__u8 shadowLCR; /* last LCR value received */
__u8 shadowMCR; /* last MCR value received */
__u8 shadowMSR; /* last MSR value received */
char open;
struct usb_serial_port *port; /* loop back to the owner */
struct urb *write_urb_pool[NUM_URBS];
};
#define USB_VENDOR_ID_MOSCHIP 0x9710
#define MOSCHIP_DEVICE_ID_7720 0x7720
#define MOSCHIP_DEVICE_ID_7715 0x7715
static const struct usb_device_id id_table[] = {
{ USB_DEVICE(USB_VENDOR_ID_MOSCHIP, MOSCHIP_DEVICE_ID_7720) },
{ USB_DEVICE(USB_VENDOR_ID_MOSCHIP, MOSCHIP_DEVICE_ID_7715) },
{ } /* terminating entry */
};
MODULE_DEVICE_TABLE(usb, id_table);
#ifdef CONFIG_USB_SERIAL_MOS7715_PARPORT
/* initial values for parport regs */
#define DCR_INIT_VAL 0x0c /* SLCTIN, nINIT */
#define ECR_INIT_VAL 0x00 /* SPP mode */
enum mos7715_pp_modes {
SPP = 0<<5,
PS2 = 1<<5, /* moschip calls this 'NIBBLE' mode */
PPF = 2<<5, /* moschip calls this 'CB-FIFO' mode */
};
struct mos7715_parport {
struct parport *pp; /* back to containing struct */
struct kref ref_count; /* to instance of this struct */
bool msg_pending; /* usb sync call pending */
struct completion syncmsg_compl; /* usb sync call completed */
struct work_struct work; /* restore deferred writes */
struct usb_serial *serial; /* back to containing struct */
__u8 shadowECR; /* parallel port regs... */
__u8 shadowDCR;
atomic_t shadowDSR; /* updated in int-in callback */
};
/* lock guards against dereferencing NULL ptr in parport ops callbacks */
static DEFINE_SPINLOCK(release_lock);
#endif /* CONFIG_USB_SERIAL_MOS7715_PARPORT */
static const unsigned int dummy; /* for clarity in register access fns */
enum mos_regs {
MOS7720_THR, /* serial port regs */
MOS7720_RHR,
MOS7720_IER,
MOS7720_FCR,
MOS7720_ISR,
MOS7720_LCR,
MOS7720_MCR,
MOS7720_LSR,
MOS7720_MSR,
MOS7720_SPR,
MOS7720_DLL,
MOS7720_DLM,
MOS7720_DPR, /* parallel port regs */
MOS7720_DSR,
MOS7720_DCR,
MOS7720_ECR,
MOS7720_SP1_REG, /* device control regs */
MOS7720_SP2_REG, /* serial port 2 (7720 only) */
MOS7720_PP_REG,
MOS7720_SP_CONTROL_REG,
};
/*
* Return the correct value for the wIndex field of the setup packet
* for a control endpoint message. See the 7715 datasheet.
*/
static inline __u16 get_reg_index(enum mos_regs reg)
{
static const __u16 mos7715_index_lookup_table[] = {
0x00, /* MOS7720_THR */
0x00, /* MOS7720_RHR */
0x01, /* MOS7720_IER */
0x02, /* MOS7720_FCR */
0x02, /* MOS7720_ISR */
0x03, /* MOS7720_LCR */
0x04, /* MOS7720_MCR */
0x05, /* MOS7720_LSR */
0x06, /* MOS7720_MSR */
0x07, /* MOS7720_SPR */
0x00, /* MOS7720_DLL */
0x01, /* MOS7720_DLM */
0x00, /* MOS7720_DPR */
0x01, /* MOS7720_DSR */
0x02, /* MOS7720_DCR */
0x0a, /* MOS7720_ECR */
0x01, /* MOS7720_SP1_REG */
0x02, /* MOS7720_SP2_REG (7720 only) */
0x04, /* MOS7720_PP_REG (7715 only) */
0x08, /* MOS7720_SP_CONTROL_REG */
};
return mos7715_index_lookup_table[reg];
}
/*
* Return the correct value for the upper byte of the wValue field of
* the setup packet for a control endpoint message.
*/
static inline __u16 get_reg_value(enum mos_regs reg,
unsigned int serial_portnum)
{
if (reg >= MOS7720_SP1_REG) /* control reg */
return 0x0000;
else if (reg >= MOS7720_DPR) /* parallel port reg (7715 only) */
return 0x0100;
else /* serial port reg */
return (serial_portnum + 2) << 8;
}
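/*
 * Worked example of the encoding used by write_mos_reg() below: writing
 * 0x03 to MOS7720_LCR on serial port 0 yields wIndex = 0x03 (from
 * get_reg_index) and wValue = ((0 + 2) << 8) + 0x03 = 0x0203 (from
 * get_reg_value plus the data byte).
 */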
/*
* Write data byte to the specified device register. The data is embedded in
* the value field of the setup packet. serial_portnum is ignored for registers
* not specific to a particular serial port.
*/
static int write_mos_reg(struct usb_serial *serial, unsigned int serial_portnum,
enum mos_regs reg, __u8 data)
{
struct usb_device *usbdev = serial->dev;
unsigned int pipe = usb_sndctrlpipe(usbdev, 0);
__u8 request = (__u8)0x0e;
__u8 requesttype = (__u8)0x40;
__u16 index = get_reg_index(reg);
__u16 value = get_reg_value(reg, serial_portnum) + data;
int status = usb_control_msg(usbdev, pipe, request, requesttype, value,
index, NULL, 0, MOS_WDR_TIMEOUT);
if (status < 0)
dev_err(&usbdev->dev,
"mos7720: usb_control_msg() failed: %d\n", status);
return status;
}
/*
* Read data byte from the specified device register. The data returned by the
* device is embedded in the value field of the setup packet. serial_portnum is
* ignored for registers that are not specific to a particular serial port.
*/
static int read_mos_reg(struct usb_serial *serial, unsigned int serial_portnum,
enum mos_regs reg, __u8 *data)
{
struct usb_device *usbdev = serial->dev;
unsigned int pipe = usb_rcvctrlpipe(usbdev, 0);
__u8 request = (__u8)0x0d;
__u8 requesttype = (__u8)0xc0;
__u16 index = get_reg_index(reg);
__u16 value = get_reg_value(reg, serial_portnum);
u8 *buf;
int status;
buf = kmalloc(1, GFP_KERNEL);
if (!buf) {
*data = 0;
return -ENOMEM;
}
status = usb_control_msg(usbdev, pipe, request, requesttype, value,
index, buf, 1, MOS_WDR_TIMEOUT);
if (status == 1) {
*data = *buf;
} else {
dev_err(&usbdev->dev,
"mos7720: usb_control_msg() failed: %d\n", status);
if (status >= 0)
status = -EIO;
*data = 0;
}
kfree(buf);
return status;
}
#ifdef CONFIG_USB_SERIAL_MOS7715_PARPORT
static inline int mos7715_change_mode(struct mos7715_parport *mos_parport,
enum mos7715_pp_modes mode)
{
mos_parport->shadowECR = mode;
write_mos_reg(mos_parport->serial, dummy, MOS7720_ECR,
mos_parport->shadowECR);
return 0;
}
static void destroy_mos_parport(struct kref *kref)
{
struct mos7715_parport *mos_parport =
container_of(kref, struct mos7715_parport, ref_count);
kfree(mos_parport);
}
/*
* This is the common top part of all parallel port callback operations that
* send synchronous messages to the device. This implements convoluted locking
* that avoids two scenarios: (1) a port operation is called after usbserial
* has called our release function, at which point struct mos7715_parport has
* been destroyed, and (2) the device has been disconnected, but usbserial has
* not called the release function yet because someone has a serial port open.
* The shared release_lock prevents the first, and the mutex and disconnected
* flag maintained by usbserial cover the second. We also use the msg_pending
* flag to ensure that all synchronous usb message calls have completed before
* our release function can return.
*/
static int parport_prologue(struct parport *pp)
{
struct mos7715_parport *mos_parport;
spin_lock(&release_lock);
mos_parport = pp->private_data;
if (unlikely(mos_parport == NULL)) {
/* release fn called, port struct destroyed */
spin_unlock(&release_lock);
return -1;
}
mos_parport->msg_pending = true; /* synch usb call pending */
reinit_completion(&mos_parport->syncmsg_compl);
spin_unlock(&release_lock);
/* ensure writes from restore are submitted before new requests */
if (work_pending(&mos_parport->work))
flush_work(&mos_parport->work);
mutex_lock(&mos_parport->serial->disc_mutex);
if (mos_parport->serial->disconnected) {
/* device disconnected */
mutex_unlock(&mos_parport->serial->disc_mutex);
mos_parport->msg_pending = false;
complete(&mos_parport->syncmsg_compl);
return -1;
}
return 0;
}
/*
* This is the common bottom part of all parallel port functions that send
* synchronous messages to the device.
*/
static inline void parport_epilogue(struct parport *pp)
{
struct mos7715_parport *mos_parport = pp->private_data;
mutex_unlock(&mos_parport->serial->disc_mutex);
mos_parport->msg_pending = false;
complete(&mos_parport->syncmsg_compl);
}
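/*
 * Most of the parport callbacks below follow the same pattern, sketched
 * here for reference:
 *
 *	if (parport_prologue(pp) < 0)
 *		return;
 *	... access device registers ...
 *	parport_epilogue(pp);
 */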
static void deferred_restore_writes(struct work_struct *work)
{
struct mos7715_parport *mos_parport;
mos_parport = container_of(work, struct mos7715_parport, work);
mutex_lock(&mos_parport->serial->disc_mutex);
/* if device disconnected, game over */
if (mos_parport->serial->disconnected)
goto done;
write_mos_reg(mos_parport->serial, dummy, MOS7720_DCR,
mos_parport->shadowDCR);
write_mos_reg(mos_parport->serial, dummy, MOS7720_ECR,
mos_parport->shadowECR);
done:
mutex_unlock(&mos_parport->serial->disc_mutex);
}
static void parport_mos7715_write_data(struct parport *pp, unsigned char d)
{
struct mos7715_parport *mos_parport = pp->private_data;
if (parport_prologue(pp) < 0)
return;
mos7715_change_mode(mos_parport, SPP);
write_mos_reg(mos_parport->serial, dummy, MOS7720_DPR, (__u8)d);
parport_epilogue(pp);
}
static unsigned char parport_mos7715_read_data(struct parport *pp)
{
struct mos7715_parport *mos_parport = pp->private_data;
unsigned char d;
if (parport_prologue(pp) < 0)
return 0;
read_mos_reg(mos_parport->serial, dummy, MOS7720_DPR, &d);
parport_epilogue(pp);
return d;
}
static void parport_mos7715_write_control(struct parport *pp, unsigned char d)
{
struct mos7715_parport *mos_parport = pp->private_data;
__u8 data;
if (parport_prologue(pp) < 0)
return;
data = ((__u8)d & 0x0f) | (mos_parport->shadowDCR & 0xf0);
write_mos_reg(mos_parport->serial, dummy, MOS7720_DCR, data);
mos_parport->shadowDCR = data;
parport_epilogue(pp);
}
static unsigned char parport_mos7715_read_control(struct parport *pp)
{
struct mos7715_parport *mos_parport;
__u8 dcr;
spin_lock(&release_lock);
mos_parport = pp->private_data;
if (unlikely(mos_parport == NULL)) {
spin_unlock(&release_lock);
return 0;
}
dcr = mos_parport->shadowDCR & 0x0f;
spin_unlock(&release_lock);
return dcr;
}
static unsigned char parport_mos7715_frob_control(struct parport *pp,
unsigned char mask,
unsigned char val)
{
struct mos7715_parport *mos_parport = pp->private_data;
__u8 dcr;
mask &= 0x0f;
val &= 0x0f;
if (parport_prologue(pp) < 0)
return 0;
mos_parport->shadowDCR = (mos_parport->shadowDCR & (~mask)) ^ val;
write_mos_reg(mos_parport->serial, dummy, MOS7720_DCR,
mos_parport->shadowDCR);
dcr = mos_parport->shadowDCR & 0x0f;
parport_epilogue(pp);
return dcr;
}
static unsigned char parport_mos7715_read_status(struct parport *pp)
{
unsigned char status;
struct mos7715_parport *mos_parport;
spin_lock(&release_lock);
mos_parport = pp->private_data;
if (unlikely(mos_parport == NULL)) { /* release called */
spin_unlock(&release_lock);
return 0;
}
status = atomic_read(&mos_parport->shadowDSR) & 0xf8;
spin_unlock(&release_lock);
return status;
}
static void parport_mos7715_enable_irq(struct parport *pp)
{
}
static void parport_mos7715_disable_irq(struct parport *pp)
{
}
static void parport_mos7715_data_forward(struct parport *pp)
{
struct mos7715_parport *mos_parport = pp->private_data;
if (parport_prologue(pp) < 0)
return;
mos7715_change_mode(mos_parport, PS2);
mos_parport->shadowDCR &= ~0x20;
write_mos_reg(mos_parport->serial, dummy, MOS7720_DCR,
mos_parport->shadowDCR);
parport_epilogue(pp);
}
static void parport_mos7715_data_reverse(struct parport *pp)
{
struct mos7715_parport *mos_parport = pp->private_data;
if (parport_prologue(pp) < 0)
return;
mos7715_change_mode(mos_parport, PS2);
mos_parport->shadowDCR |= 0x20;
write_mos_reg(mos_parport->serial, dummy, MOS7720_DCR,
mos_parport->shadowDCR);
parport_epilogue(pp);
}
static void parport_mos7715_init_state(struct pardevice *dev,
struct parport_state *s)
{
s->u.pc.ctr = DCR_INIT_VAL;
s->u.pc.ecr = ECR_INIT_VAL;
}
/* N.B. Parport core code requires that this function not block */
static void parport_mos7715_save_state(struct parport *pp,
struct parport_state *s)
{
struct mos7715_parport *mos_parport;
spin_lock(&release_lock);
mos_parport = pp->private_data;
if (unlikely(mos_parport == NULL)) { /* release called */
spin_unlock(&release_lock);
return;
}
s->u.pc.ctr = mos_parport->shadowDCR;
s->u.pc.ecr = mos_parport->shadowECR;
spin_unlock(&release_lock);
}
/* N.B. Parport core code requires that this function not block */
static void parport_mos7715_restore_state(struct parport *pp,
struct parport_state *s)
{
struct mos7715_parport *mos_parport;
spin_lock(&release_lock);
mos_parport = pp->private_data;
if (unlikely(mos_parport == NULL)) { /* release called */
spin_unlock(&release_lock);
return;
}
mos_parport->shadowDCR = s->u.pc.ctr;
mos_parport->shadowECR = s->u.pc.ecr;
schedule_work(&mos_parport->work);
spin_unlock(&release_lock);
}
static size_t parport_mos7715_write_compat(struct parport *pp,
const void *buffer,
size_t len, int flags)
{
int retval;
struct mos7715_parport *mos_parport = pp->private_data;
int actual_len;
if (parport_prologue(pp) < 0)
return 0;
mos7715_change_mode(mos_parport, PPF);
retval = usb_bulk_msg(mos_parport->serial->dev,
usb_sndbulkpipe(mos_parport->serial->dev, 2),
(void *)buffer, len, &actual_len,
MOS_WDR_TIMEOUT);
parport_epilogue(pp);
if (retval) {
dev_err(&mos_parport->serial->dev->dev,
"mos7720: usb_bulk_msg() failed: %d\n", retval);
return 0;
}
return actual_len;
}
static struct parport_operations parport_mos7715_ops = {
.owner = THIS_MODULE,
.write_data = parport_mos7715_write_data,
.read_data = parport_mos7715_read_data,
.write_control = parport_mos7715_write_control,
.read_control = parport_mos7715_read_control,
.frob_control = parport_mos7715_frob_control,
.read_status = parport_mos7715_read_status,
.enable_irq = parport_mos7715_enable_irq,
.disable_irq = parport_mos7715_disable_irq,
.data_forward = parport_mos7715_data_forward,
.data_reverse = parport_mos7715_data_reverse,
.init_state = parport_mos7715_init_state,
.save_state = parport_mos7715_save_state,
.restore_state = parport_mos7715_restore_state,
.compat_write_data = parport_mos7715_write_compat,
.nibble_read_data = parport_ieee1284_read_nibble,
.byte_read_data = parport_ieee1284_read_byte,
};
/*
* Allocate and initialize parallel port control struct, initialize
* the parallel port hardware device, and register with the parport subsystem.
*/
static int mos7715_parport_init(struct usb_serial *serial)
{
struct mos7715_parport *mos_parport;
/* allocate and initialize parallel port control struct */
mos_parport = kzalloc(sizeof(struct mos7715_parport), GFP_KERNEL);
if (!mos_parport)
return -ENOMEM;
mos_parport->msg_pending = false;
kref_init(&mos_parport->ref_count);
usb_set_serial_data(serial, mos_parport); /* hijack private pointer */
mos_parport->serial = serial;
INIT_WORK(&mos_parport->work, deferred_restore_writes);
init_completion(&mos_parport->syncmsg_compl);
/* cycle parallel port reset bit */
write_mos_reg(mos_parport->serial, dummy, MOS7720_PP_REG, (__u8)0x80);
write_mos_reg(mos_parport->serial, dummy, MOS7720_PP_REG, (__u8)0x00);
/* initialize device registers */
mos_parport->shadowDCR = DCR_INIT_VAL;
write_mos_reg(mos_parport->serial, dummy, MOS7720_DCR,
mos_parport->shadowDCR);
mos_parport->shadowECR = ECR_INIT_VAL;
write_mos_reg(mos_parport->serial, dummy, MOS7720_ECR,
mos_parport->shadowECR);
/* register with parport core */
mos_parport->pp = parport_register_port(0, PARPORT_IRQ_NONE,
PARPORT_DMA_NONE,
&parport_mos7715_ops);
if (mos_parport->pp == NULL) {
dev_err(&serial->interface->dev,
"Could not register parport\n");
kref_put(&mos_parport->ref_count, destroy_mos_parport);
return -EIO;
}
mos_parport->pp->private_data = mos_parport;
mos_parport->pp->modes = PARPORT_MODE_COMPAT | PARPORT_MODE_PCSPP;
mos_parport->pp->dev = &serial->interface->dev;
parport_announce_port(mos_parport->pp);
return 0;
}
#endif /* CONFIG_USB_SERIAL_MOS7715_PARPORT */
/*
* mos7720_interrupt_callback
* this is the callback function for when we have received data on the
* interrupt endpoint.
*/
static void mos7720_interrupt_callback(struct urb *urb)
{
int result;
int length;
int status = urb->status;
struct device *dev = &urb->dev->dev;
__u8 *data;
__u8 sp1;
__u8 sp2;
switch (status) {
case 0:
/* success */
break;
case -ECONNRESET:
case -ENOENT:
case -ESHUTDOWN:
/* this urb is terminated, clean up */
dev_dbg(dev, "%s - urb shutting down with status: %d\n", __func__, status);
return;
default:
dev_dbg(dev, "%s - nonzero urb status received: %d\n", __func__, status);
goto exit;
}
length = urb->actual_length;
data = urb->transfer_buffer;
/* The Moschip device sends 4 bytes:
 * Byte 1 IIR Port 1 (port.number is 0)
 * Byte 2 IIR Port 2 (port.number is 1)
 * Byte 3 --------------
 * Byte 4 FIFO status for both */
/* Note: the byte order above is inverted in practice; port 1 status is
 * read from data[3] and port 2 from data[2] below (oneukum 2007-03-14) */
if (unlikely(length != 4)) {
dev_dbg(dev, "Wrong data !!!\n");
return;
}
sp1 = data[3];
sp2 = data[2];
if ((sp1 | sp2) & 0x01) {
/* No Interrupt Pending in both the ports */
dev_dbg(dev, "No Interrupt !!!\n");
} else {
switch (sp1 & 0x0f) {
case SERIAL_IIR_RLS:
dev_dbg(dev, "Serial Port 1: Receiver status error or address bit detected in 9-bit mode\n");
break;
case SERIAL_IIR_CTI:
dev_dbg(dev, "Serial Port 1: Receiver time out\n");
break;
case SERIAL_IIR_MS:
/* dev_dbg(dev, "Serial Port 1: Modem status change\n"); */
break;
}
switch (sp2 & 0x0f) {
case SERIAL_IIR_RLS:
dev_dbg(dev, "Serial Port 2: Receiver status error or address bit detected in 9-bit mode\n");
break;
case SERIAL_IIR_CTI:
dev_dbg(dev, "Serial Port 2: Receiver time out\n");
break;
case SERIAL_IIR_MS:
/* dev_dbg(dev, "Serial Port 2: Modem status change\n"); */
break;
}
}
exit:
result = usb_submit_urb(urb, GFP_ATOMIC);
if (result)
dev_err(dev, "%s - Error %d submitting control urb\n", __func__, result);
}
/*
* mos7715_interrupt_callback
* this is the 7715's callback function for when we have received data on
* the interrupt endpoint.
*/
static void mos7715_interrupt_callback(struct urb *urb)
{
int result;
int length;
int status = urb->status;
struct device *dev = &urb->dev->dev;
__u8 *data;
__u8 iir;
switch (status) {
case 0:
/* success */
break;
case -ECONNRESET:
case -ENOENT:
case -ESHUTDOWN:
case -ENODEV:
/* this urb is terminated, clean up */
dev_dbg(dev, "%s - urb shutting down with status: %d\n", __func__, status);
return;
default:
dev_dbg(dev, "%s - nonzero urb status received: %d\n", __func__, status);
goto exit;
}
length = urb->actual_length;
data = urb->transfer_buffer;
/* Structure of data from 7715 device:
* Byte 1: IIR serial Port
* Byte 2: unused
* Byte 3: DSR parallel port
* Byte 4: FIFO status for both */
if (unlikely(length != 4)) {
dev_dbg(dev, "Wrong data !!!\n");
return;
}
iir = data[0];
if (!(iir & 0x01)) { /* serial port interrupt pending */
switch (iir & 0x0f) {
case SERIAL_IIR_RLS:
dev_dbg(dev, "Serial Port: Receiver status error or address bit detected in 9-bit mode\n");
break;
case SERIAL_IIR_CTI:
dev_dbg(dev, "Serial Port: Receiver time out\n");
break;
case SERIAL_IIR_MS:
/* dev_dbg(dev, "Serial Port: Modem status change\n"); */
break;
}
}
#ifdef CONFIG_USB_SERIAL_MOS7715_PARPORT
{ /* update local copy of DSR reg */
struct usb_serial_port *port = urb->context;
struct mos7715_parport *mos_parport = port->serial->private;
if (unlikely(mos_parport == NULL))
return;
atomic_set(&mos_parport->shadowDSR, data[2]);
}
#endif
exit:
result = usb_submit_urb(urb, GFP_ATOMIC);
if (result)
dev_err(dev, "%s - Error %d submitting control urb\n", __func__, result);
}
/*
* mos7720_bulk_in_callback
* this is the callback function for when we have received data on the
* bulk in endpoint.
*/
static void mos7720_bulk_in_callback(struct urb *urb)
{
int retval;
unsigned char *data;
struct usb_serial_port *port;
int status = urb->status;
if (status) {
dev_dbg(&urb->dev->dev, "nonzero read bulk status received: %d\n", status);
return;
}
port = urb->context;
dev_dbg(&port->dev, "Entering...%s\n", __func__);
data = urb->transfer_buffer;
if (urb->actual_length) {
tty_insert_flip_string(&port->port, data, urb->actual_length);
tty_flip_buffer_push(&port->port);
}
if (port->read_urb->status != -EINPROGRESS) {
retval = usb_submit_urb(port->read_urb, GFP_ATOMIC);
if (retval)
dev_dbg(&port->dev, "usb_submit_urb(read bulk) failed, retval = %d\n", retval);
}
}
/*
* mos7720_bulk_out_data_callback
* this is the callback function for when we have finished sending serial
* data on the bulk out endpoint.
*/
static void mos7720_bulk_out_data_callback(struct urb *urb)
{
struct moschip_port *mos7720_port;
int status = urb->status;
if (status) {
dev_dbg(&urb->dev->dev, "nonzero write bulk status received:%d\n", status);
return;
}
mos7720_port = urb->context;
if (!mos7720_port) {
dev_dbg(&urb->dev->dev, "NULL mos7720_port pointer\n");
return;
}
if (mos7720_port->open)
tty_port_tty_wakeup(&mos7720_port->port->port);
}
static int mos77xx_calc_num_ports(struct usb_serial *serial,
struct usb_serial_endpoints *epds)
{
u16 product = le16_to_cpu(serial->dev->descriptor.idProduct);
if (product == MOSCHIP_DEVICE_ID_7715) {
/*
* The 7715 uses the first bulk in/out endpoint pair for the
* parallel port, and the second for the serial port. We swap
* the endpoint descriptors here so that the first and
* only registered port structure uses the serial-port
* endpoints.
*/
swap(epds->bulk_in[0], epds->bulk_in[1]);
swap(epds->bulk_out[0], epds->bulk_out[1]);
return 1;
}
return 2;
}
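/*
 * mos7720_open
 * allocates the per-port pool of write urbs and their transfer buffers,
 * then brings the UART (FCR/LCR/MCR/IER) and the device control registers
 * to a known state before submitting the read urb.
 */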
static int mos7720_open(struct tty_struct *tty, struct usb_serial_port *port)
{
struct usb_serial *serial;
struct urb *urb;
struct moschip_port *mos7720_port;
int response;
int port_number;
__u8 data;
int allocated_urbs = 0;
int j;
serial = port->serial;
mos7720_port = usb_get_serial_port_data(port);
if (mos7720_port == NULL)
return -ENODEV;
usb_clear_halt(serial->dev, port->write_urb->pipe);
usb_clear_halt(serial->dev, port->read_urb->pipe);
/* Initialising the write urb pool */
for (j = 0; j < NUM_URBS; ++j) {
urb = usb_alloc_urb(0, GFP_KERNEL);
mos7720_port->write_urb_pool[j] = urb;
if (!urb)
continue;
urb->transfer_buffer = kmalloc(URB_TRANSFER_BUFFER_SIZE,
GFP_KERNEL);
if (!urb->transfer_buffer) {
usb_free_urb(mos7720_port->write_urb_pool[j]);
mos7720_port->write_urb_pool[j] = NULL;
continue;
}
allocated_urbs++;
}
if (!allocated_urbs)
return -ENOMEM;
/* Initialize MCS7720 -- Write Init values to corresponding Registers
*
* Register Index
* 0 : MOS7720_THR/MOS7720_RHR
* 1 : MOS7720_IER
* 2 : MOS7720_FCR
* 3 : MOS7720_LCR
* 4 : MOS7720_MCR
* 5 : MOS7720_LSR
* 6 : MOS7720_MSR
* 7 : MOS7720_SPR
*
* 0x08 : SP1/2 Control Reg
*/
port_number = port->port_number;
read_mos_reg(serial, port_number, MOS7720_LSR, &data);
dev_dbg(&port->dev, "SS::%p LSR:%x\n", mos7720_port, data);
write_mos_reg(serial, dummy, MOS7720_SP1_REG, 0x02);
write_mos_reg(serial, dummy, MOS7720_SP2_REG, 0x02);
write_mos_reg(serial, port_number, MOS7720_IER, 0x00);
write_mos_reg(serial, port_number, MOS7720_FCR, 0x00);
write_mos_reg(serial, port_number, MOS7720_FCR, 0xcf);
mos7720_port->shadowLCR = 0x03;
write_mos_reg(serial, port_number, MOS7720_LCR,
mos7720_port->shadowLCR);
mos7720_port->shadowMCR = 0x0b;
write_mos_reg(serial, port_number, MOS7720_MCR,
mos7720_port->shadowMCR);
write_mos_reg(serial, port_number, MOS7720_SP_CONTROL_REG, 0x00);
read_mos_reg(serial, dummy, MOS7720_SP_CONTROL_REG, &data);
data = data | (port->port_number + 1);
write_mos_reg(serial, dummy, MOS7720_SP_CONTROL_REG, data);
mos7720_port->shadowLCR = 0x83;
write_mos_reg(serial, port_number, MOS7720_LCR,
mos7720_port->shadowLCR);
write_mos_reg(serial, port_number, MOS7720_THR, 0x0c);
write_mos_reg(serial, port_number, MOS7720_IER, 0x00);
mos7720_port->shadowLCR = 0x03;
write_mos_reg(serial, port_number, MOS7720_LCR,
mos7720_port->shadowLCR);
write_mos_reg(serial, port_number, MOS7720_IER, 0x0c);
response = usb_submit_urb(port->read_urb, GFP_KERNEL);
if (response)
dev_err(&port->dev, "%s - Error %d submitting read urb\n",
__func__, response);
/* initialize our port settings */
mos7720_port->shadowMCR = UART_MCR_OUT2; /* Must set to enable ints! */
/* send an open port command */
mos7720_port->open = 1;
return 0;
}
/*
* mos7720_chars_in_buffer
* this function is called by the tty driver when it wants to know how many
* bytes of data we currently have outstanding in the port (data that has
* been written, but hasn't made it out the port yet)
*/
static unsigned int mos7720_chars_in_buffer(struct tty_struct *tty)
{
struct usb_serial_port *port = tty->driver_data;
struct moschip_port *mos7720_port = usb_get_serial_port_data(port);
int i;
unsigned int chars = 0;
for (i = 0; i < NUM_URBS; ++i) {
if (mos7720_port->write_urb_pool[i] &&
mos7720_port->write_urb_pool[i]->status == -EINPROGRESS)
chars += URB_TRANSFER_BUFFER_SIZE;
}
dev_dbg(&port->dev, "%s - returns %u\n", __func__, chars);
return chars;
}
static void mos7720_close(struct usb_serial_port *port)
{
struct usb_serial *serial;
struct moschip_port *mos7720_port;
int j;
serial = port->serial;
mos7720_port = usb_get_serial_port_data(port);
if (mos7720_port == NULL)
return;
for (j = 0; j < NUM_URBS; ++j)
usb_kill_urb(mos7720_port->write_urb_pool[j]);
/* Freeing Write URBs */
for (j = 0; j < NUM_URBS; ++j) {
if (mos7720_port->write_urb_pool[j]) {
kfree(mos7720_port->write_urb_pool[j]->transfer_buffer);
usb_free_urb(mos7720_port->write_urb_pool[j]);
}
}
/* While closing the port, shut down all bulk reads, writes *
 * and interrupt reads if they exist, otherwise nop */
usb_kill_urb(port->write_urb);
usb_kill_urb(port->read_urb);
write_mos_reg(serial, port->port_number, MOS7720_MCR, 0x00);
write_mos_reg(serial, port->port_number, MOS7720_IER, 0x00);
mos7720_port->open = 0;
}
static int mos7720_break(struct tty_struct *tty, int break_state)
{
struct usb_serial_port *port = tty->driver_data;
unsigned char data;
struct usb_serial *serial;
struct moschip_port *mos7720_port;
serial = port->serial;
mos7720_port = usb_get_serial_port_data(port);
if (mos7720_port == NULL)
return -ENODEV;
if (break_state == -1)
data = mos7720_port->shadowLCR | UART_LCR_SBC;
else
data = mos7720_port->shadowLCR & ~UART_LCR_SBC;
mos7720_port->shadowLCR = data;
return write_mos_reg(serial, port->port_number, MOS7720_LCR,
mos7720_port->shadowLCR);
}
/*
* mos7720_write_room
* this function is called by the tty driver when it wants to know how many
* bytes of data we can accept for a specific port.
*/
static unsigned int mos7720_write_room(struct tty_struct *tty)
{
struct usb_serial_port *port = tty->driver_data;
struct moschip_port *mos7720_port = usb_get_serial_port_data(port);
unsigned int room = 0;
int i;
/* FIXME: Locking */
for (i = 0; i < NUM_URBS; ++i) {
if (mos7720_port->write_urb_pool[i] &&
mos7720_port->write_urb_pool[i]->status != -EINPROGRESS)
room += URB_TRANSFER_BUFFER_SIZE;
}
dev_dbg(&port->dev, "%s - returns %u\n", __func__, room);
return room;
}
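/*
 * mos7720_write
 * copies at most URB_TRANSFER_BUFFER_SIZE bytes into the first free urb
 * of the pool and submits it; returns the number of bytes queued, zero
 * if no urb is free, or a negative errno on failure.
 */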
static int mos7720_write(struct tty_struct *tty, struct usb_serial_port *port,
const unsigned char *data, int count)
{
int status;
int i;
int bytes_sent = 0;
int transfer_size;
struct moschip_port *mos7720_port;
struct usb_serial *serial;
struct urb *urb;
const unsigned char *current_position = data;
serial = port->serial;
mos7720_port = usb_get_serial_port_data(port);
if (mos7720_port == NULL)
return -ENODEV;
/* try to find a free urb in the list */
urb = NULL;
for (i = 0; i < NUM_URBS; ++i) {
if (mos7720_port->write_urb_pool[i] &&
mos7720_port->write_urb_pool[i]->status != -EINPROGRESS) {
urb = mos7720_port->write_urb_pool[i];
dev_dbg(&port->dev, "URB:%d\n", i);
break;
}
}
if (urb == NULL) {
dev_dbg(&port->dev, "%s - no more free urbs\n", __func__);
goto exit;
}
if (urb->transfer_buffer == NULL) {
urb->transfer_buffer = kmalloc(URB_TRANSFER_BUFFER_SIZE,
GFP_ATOMIC);
if (!urb->transfer_buffer) {
bytes_sent = -ENOMEM;
goto exit;
}
}
transfer_size = min(count, URB_TRANSFER_BUFFER_SIZE);
memcpy(urb->transfer_buffer, current_position, transfer_size);
usb_serial_debug_data(&port->dev, __func__, transfer_size,
urb->transfer_buffer);
/* fill urb with data and submit */
usb_fill_bulk_urb(urb, serial->dev,
usb_sndbulkpipe(serial->dev,
port->bulk_out_endpointAddress),
urb->transfer_buffer, transfer_size,
mos7720_bulk_out_data_callback, mos7720_port);
/* send it down the pipe */
status = usb_submit_urb(urb, GFP_ATOMIC);
if (status) {
dev_err_console(port, "%s - usb_submit_urb(write bulk) failed "
"with status = %d\n", __func__, status);
bytes_sent = status;
goto exit;
}
bytes_sent = transfer_size;
exit:
return bytes_sent;
}
static void mos7720_throttle(struct tty_struct *tty)
{
struct usb_serial_port *port = tty->driver_data;
struct moschip_port *mos7720_port;
int status;
mos7720_port = usb_get_serial_port_data(port);
if (mos7720_port == NULL)
return;
if (!mos7720_port->open) {
dev_dbg(&port->dev, "%s - port not opened\n", __func__);
return;
}
/* if we are implementing XON/XOFF, send the stop character */
if (I_IXOFF(tty)) {
unsigned char stop_char = STOP_CHAR(tty);
status = mos7720_write(tty, port, &stop_char, 1);
if (status <= 0)
return;
}
/* if we are implementing RTS/CTS, toggle that line */
if (C_CRTSCTS(tty)) {
mos7720_port->shadowMCR &= ~UART_MCR_RTS;
write_mos_reg(port->serial, port->port_number, MOS7720_MCR,
mos7720_port->shadowMCR);
}
}
static void mos7720_unthrottle(struct tty_struct *tty)
{
struct usb_serial_port *port = tty->driver_data;
struct moschip_port *mos7720_port = usb_get_serial_port_data(port);
int status;
if (mos7720_port == NULL)
return;
if (!mos7720_port->open) {
dev_dbg(&port->dev, "%s - port not opened\n", __func__);
return;
}
/* if we are implementing XON/XOFF, send the start character */
if (I_IXOFF(tty)) {
unsigned char start_char = START_CHAR(tty);
status = mos7720_write(tty, port, &start_char, 1);
if (status <= 0)
return;
}
/* if we are implementing RTS/CTS, toggle that line */
if (C_CRTSCTS(tty)) {
mos7720_port->shadowMCR |= UART_MCR_RTS;
write_mos_reg(port->serial, port->port_number, MOS7720_MCR,
mos7720_port->shadowMCR);
}
}
/* FIXME: this function does not work */
static int set_higher_rates(struct moschip_port *mos7720_port,
unsigned int baud)
{
struct usb_serial_port *port;
struct usb_serial *serial;
int port_number;
enum mos_regs sp_reg;
if (mos7720_port == NULL)
return -EINVAL;
port = mos7720_port->port;
serial = port->serial;
/***********************************************
* Init Sequence for higher rates
***********************************************/
dev_dbg(&port->dev, "Sending Setting Commands ..........\n");
port_number = port->port_number;
write_mos_reg(serial, port_number, MOS7720_IER, 0x00);
write_mos_reg(serial, port_number, MOS7720_FCR, 0x00);
write_mos_reg(serial, port_number, MOS7720_FCR, 0xcf);
mos7720_port->shadowMCR = 0x0b;
write_mos_reg(serial, port_number, MOS7720_MCR,
mos7720_port->shadowMCR);
write_mos_reg(serial, dummy, MOS7720_SP_CONTROL_REG, 0x00);
/***********************************************
* Set for higher rates *
***********************************************/
/* writing baud rate verbatim into uart clock field clearly not right */
if (port_number == 0)
sp_reg = MOS7720_SP1_REG;
else
sp_reg = MOS7720_SP2_REG;
write_mos_reg(serial, dummy, sp_reg, baud * 0x10);
write_mos_reg(serial, dummy, MOS7720_SP_CONTROL_REG, 0x03);
mos7720_port->shadowMCR = 0x2b;
write_mos_reg(serial, port_number, MOS7720_MCR,
mos7720_port->shadowMCR);
/***********************************************
* Set DLL/DLM
***********************************************/
mos7720_port->shadowLCR = mos7720_port->shadowLCR | UART_LCR_DLAB;
write_mos_reg(serial, port_number, MOS7720_LCR,
mos7720_port->shadowLCR);
write_mos_reg(serial, port_number, MOS7720_DLL, 0x01);
write_mos_reg(serial, port_number, MOS7720_DLM, 0x00);
mos7720_port->shadowLCR = mos7720_port->shadowLCR & ~UART_LCR_DLAB;
write_mos_reg(serial, port_number, MOS7720_LCR,
mos7720_port->shadowLCR);
return 0;
}
/* baud rate information */
struct divisor_table_entry {
__u32 baudrate;
__u16 divisor;
};
/* Define table of divisors for moschip 7720 hardware *
* These assume a 3.6864MHz crystal, the standard /16, and *
* MCR.7 = 0. */
static const struct divisor_table_entry divisor_table[] = {
{ 50, 2304},
{ 110, 1047}, /* 2094.545455 => 230450 => .0217 % over */
{ 134, 857}, /* 1713.011152 => 230398.5 => .00065% under */
{ 150, 768},
{ 300, 384},
{ 600, 192},
{ 1200, 96},
{ 1800, 64},
{ 2400, 48},
{ 4800, 24},
{ 7200, 16},
{ 9600, 12},
{ 19200, 6},
{ 38400, 3},
{ 57600, 2},
{ 115200, 1},
};
/*****************************************************************************
* calc_baud_rate_divisor
* this function calculates the proper baud rate divisor for the specified
* baud rate.
*****************************************************************************/
static int calc_baud_rate_divisor(struct usb_serial_port *port, int baudrate, int *divisor)
{
int i;
__u16 custom;
__u16 round1;
__u16 round;
dev_dbg(&port->dev, "%s - %d\n", __func__, baudrate);
for (i = 0; i < ARRAY_SIZE(divisor_table); i++) {
if (divisor_table[i].baudrate == baudrate) {
*divisor = divisor_table[i].divisor;
return 0;
}
}
/* After trying all the standard baud rates, *
 * calculate the divisor for this baud rate */
if (baudrate > 75 && baudrate < 230400) {
/* get the divisor */
custom = (__u16)(230400L / baudrate);
/* Check for round off */
round1 = (__u16)(2304000L / baudrate);
round = (__u16)(round1 - (custom * 10));
if (round > 4)
custom++;
*divisor = custom;
dev_dbg(&port->dev, "Baud %d = %d\n", baudrate, custom);
return 0;
}
dev_dbg(&port->dev, "Baud calculation Failed...\n");
return -EINVAL;
}
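/*
 * Rounding example for the non-standard path above (a sketch of the
 * arithmetic only): for baudrate = 128000, 230400 / 128000 = 1 and
 * 2304000 / 128000 = 18, so round = 18 - 10 = 8 > 4 and the divisor
 * is rounded up to 2.
 */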
/*
* send_cmd_write_baud_rate
* this function sends the proper command to change the baud rate of the
* specified port.
*/
static int send_cmd_write_baud_rate(struct moschip_port *mos7720_port,
int baudrate)
{
struct usb_serial_port *port;
struct usb_serial *serial;
int divisor;
int status;
unsigned char number;
if (mos7720_port == NULL)
return -1;
port = mos7720_port->port;
serial = port->serial;
number = port->port_number;
dev_dbg(&port->dev, "%s - baud = %d\n", __func__, baudrate);
/* Calculate the Divisor */
status = calc_baud_rate_divisor(port, baudrate, &divisor);
if (status) {
dev_err(&port->dev, "%s - bad baud rate\n", __func__);
return status;
}
/* Enable access to divisor latch */
mos7720_port->shadowLCR = mos7720_port->shadowLCR | UART_LCR_DLAB;
write_mos_reg(serial, number, MOS7720_LCR, mos7720_port->shadowLCR);
/* Write the divisor */
write_mos_reg(serial, number, MOS7720_DLL, (__u8)(divisor & 0xff));
write_mos_reg(serial, number, MOS7720_DLM,
(__u8)((divisor & 0xff00) >> 8));
/* Disable access to divisor latch */
mos7720_port->shadowLCR = mos7720_port->shadowLCR & ~UART_LCR_DLAB;
write_mos_reg(serial, number, MOS7720_LCR, mos7720_port->shadowLCR);
return status;
}
/*
* change_port_settings
* This routine is called to set the UART on the device to match
* the specified new settings.
*/
static void change_port_settings(struct tty_struct *tty,
struct moschip_port *mos7720_port,
const struct ktermios *old_termios)
{
struct usb_serial_port *port;
struct usb_serial *serial;
int baud;
unsigned cflag;
__u8 lData;
__u8 lParity;
__u8 lStop;
int status;
int port_number;
if (mos7720_port == NULL)
return;
port = mos7720_port->port;
serial = port->serial;
port_number = port->port_number;
if (!mos7720_port->open) {
dev_dbg(&port->dev, "%s - port not opened\n", __func__);
return;
}
lStop = 0x00; /* 1 stop bit */
lParity = 0x00; /* No parity */
cflag = tty->termios.c_cflag;
lData = UART_LCR_WLEN(tty_get_char_size(cflag));
/* Change the Parity bit */
if (cflag & PARENB) {
if (cflag & PARODD) {
lParity = UART_LCR_PARITY;
dev_dbg(&port->dev, "%s - parity = odd\n", __func__);
} else {
lParity = (UART_LCR_EPAR | UART_LCR_PARITY);
dev_dbg(&port->dev, "%s - parity = even\n", __func__);
}
} else {
dev_dbg(&port->dev, "%s - parity = none\n", __func__);
}
if (cflag & CMSPAR)
lParity = lParity | 0x20;
/* Change the Stop bit */
if (cflag & CSTOPB) {
lStop = UART_LCR_STOP;
dev_dbg(&port->dev, "%s - stop bits = 2\n", __func__);
} else {
lStop = 0x00;
dev_dbg(&port->dev, "%s - stop bits = 1\n", __func__);
}
#define LCR_BITS_MASK 0x03 /* Mask for bits/char field */
#define LCR_STOP_MASK 0x04 /* Mask for stop bits field */
#define LCR_PAR_MASK 0x38 /* Mask for parity field */
/* Update the LCR with the correct value */
mos7720_port->shadowLCR &=
~(LCR_BITS_MASK | LCR_STOP_MASK | LCR_PAR_MASK);
mos7720_port->shadowLCR |= (lData | lParity | lStop);
/* Disable Interrupts */
write_mos_reg(serial, port_number, MOS7720_IER, 0x00);
write_mos_reg(serial, port_number, MOS7720_FCR, 0x00);
write_mos_reg(serial, port_number, MOS7720_FCR, 0xcf);
/* Send the updated LCR value to the mos7720 */
write_mos_reg(serial, port_number, MOS7720_LCR,
mos7720_port->shadowLCR);
mos7720_port->shadowMCR = 0x0b;
write_mos_reg(serial, port_number, MOS7720_MCR,
mos7720_port->shadowMCR);
/* set up the MCR register and send it to the mos7720 */
mos7720_port->shadowMCR = UART_MCR_OUT2;
if (cflag & CBAUD)
mos7720_port->shadowMCR |= (UART_MCR_DTR | UART_MCR_RTS);
if (cflag & CRTSCTS) {
mos7720_port->shadowMCR |= (UART_MCR_XONANY);
/* Enable hardware flow control for the specified *
 * serial port via SP1/2_CONTROL_REG */
if (port_number)
write_mos_reg(serial, dummy, MOS7720_SP_CONTROL_REG,
0x01);
else
write_mos_reg(serial, dummy, MOS7720_SP_CONTROL_REG,
0x02);
} else
mos7720_port->shadowMCR &= ~(UART_MCR_XONANY);
write_mos_reg(serial, port_number, MOS7720_MCR,
mos7720_port->shadowMCR);
/* Determine divisor based on baud rate */
baud = tty_get_baud_rate(tty);
if (!baud) {
/* pick a default, any default... */
dev_dbg(&port->dev, "Picked default baud...\n");
baud = 9600;
}
if (baud >= 230400) {
set_higher_rates(mos7720_port, baud);
/* Enable Interrupts */
write_mos_reg(serial, port_number, MOS7720_IER, 0x0c);
return;
}
dev_dbg(&port->dev, "%s - baud rate = %d\n", __func__, baud);
status = send_cmd_write_baud_rate(mos7720_port, baud);
/* FIXME: needs to write actual resulting baud back not just
blindly do so */
if (cflag & CBAUD)
tty_encode_baud_rate(tty, baud, baud);
/* Enable Interrupts */
write_mos_reg(serial, port_number, MOS7720_IER, 0x0c);
if (port->read_urb->status != -EINPROGRESS) {
status = usb_submit_urb(port->read_urb, GFP_KERNEL);
if (status)
dev_dbg(&port->dev, "usb_submit_urb(read bulk) failed, status = %d\n", status);
}
}
/*
* mos7720_set_termios
* this function is called by the tty driver when it wants to change the
* termios structure.
*/
static void mos7720_set_termios(struct tty_struct *tty,
struct usb_serial_port *port,
const struct ktermios *old_termios)
{
int status;
struct moschip_port *mos7720_port;
mos7720_port = usb_get_serial_port_data(port);
if (mos7720_port == NULL)
return;
if (!mos7720_port->open) {
dev_dbg(&port->dev, "%s - port not opened\n", __func__);
return;
}
/* change the port settings to the new ones specified */
change_port_settings(tty, mos7720_port, old_termios);
if (port->read_urb->status != -EINPROGRESS) {
status = usb_submit_urb(port->read_urb, GFP_KERNEL);
if (status)
dev_dbg(&port->dev, "usb_submit_urb(read bulk) failed, status = %d\n", status);
}
}
/*
* get_lsr_info - get line status register info
*
* Purpose: Let user call ioctl() to get info when the UART physically
* is emptied. On bus types like RS485, the transmitter must
* release the bus after transmitting. This must be done when
* the transmit shift register is empty, not when the
* transmit holding register is empty. This functionality
* allows an RS485 driver to be written in user space.
*/
static int get_lsr_info(struct tty_struct *tty,
struct moschip_port *mos7720_port, unsigned int __user *value)
{
struct usb_serial_port *port = tty->driver_data;
unsigned int result = 0;
unsigned char data = 0;
int port_number = port->port_number;
int count;
count = mos7720_chars_in_buffer(tty);
if (count == 0) {
read_mos_reg(port->serial, port_number, MOS7720_LSR, &data);
if ((data & (UART_LSR_TEMT | UART_LSR_THRE))
== (UART_LSR_TEMT | UART_LSR_THRE)) {
dev_dbg(&port->dev, "%s -- Empty\n", __func__);
result = TIOCSER_TEMT;
}
}
if (copy_to_user(value, &result, sizeof(int)))
return -EFAULT;
return 0;
}
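/*
 * mos7720_tiocmget
 * reports the modem control and status lines from the cached shadowMCR
 * and shadowMSR values rather than reading the device registers.
 */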
static int mos7720_tiocmget(struct tty_struct *tty)
{
struct usb_serial_port *port = tty->driver_data;
struct moschip_port *mos7720_port = usb_get_serial_port_data(port);
unsigned int result = 0;
unsigned int mcr;
unsigned int msr;
mcr = mos7720_port->shadowMCR;
msr = mos7720_port->shadowMSR;
result = ((mcr & UART_MCR_DTR) ? TIOCM_DTR : 0) /* 0x002 */
| ((mcr & UART_MCR_RTS) ? TIOCM_RTS : 0) /* 0x004 */
| ((msr & UART_MSR_CTS) ? TIOCM_CTS : 0) /* 0x020 */
| ((msr & UART_MSR_DCD) ? TIOCM_CAR : 0) /* 0x040 */
| ((msr & UART_MSR_RI) ? TIOCM_RI : 0) /* 0x080 */
| ((msr & UART_MSR_DSR) ? TIOCM_DSR : 0); /* 0x100 */
return result;
}
static int mos7720_tiocmset(struct tty_struct *tty,
unsigned int set, unsigned int clear)
{
struct usb_serial_port *port = tty->driver_data;
struct moschip_port *mos7720_port = usb_get_serial_port_data(port);
unsigned int mcr;
mcr = mos7720_port->shadowMCR;
if (set & TIOCM_RTS)
mcr |= UART_MCR_RTS;
if (set & TIOCM_DTR)
mcr |= UART_MCR_DTR;
if (set & TIOCM_LOOP)
mcr |= UART_MCR_LOOP;
if (clear & TIOCM_RTS)
mcr &= ~UART_MCR_RTS;
if (clear & TIOCM_DTR)
mcr &= ~UART_MCR_DTR;
if (clear & TIOCM_LOOP)
mcr &= ~UART_MCR_LOOP;
mos7720_port->shadowMCR = mcr;
write_mos_reg(port->serial, port->port_number, MOS7720_MCR,
mos7720_port->shadowMCR);
return 0;
}
static int mos7720_ioctl(struct tty_struct *tty,
unsigned int cmd, unsigned long arg)
{
struct usb_serial_port *port = tty->driver_data;
struct moschip_port *mos7720_port;
mos7720_port = usb_get_serial_port_data(port);
if (mos7720_port == NULL)
return -ENODEV;
switch (cmd) {
case TIOCSERGETLSR:
dev_dbg(&port->dev, "%s TIOCSERGETLSR\n", __func__);
return get_lsr_info(tty, mos7720_port,
(unsigned int __user *)arg);
}
return -ENOIOCTLCMD;
}
static int mos7720_startup(struct usb_serial *serial)
{
struct usb_device *dev;
char data;
u16 product;
int ret_val;
product = le16_to_cpu(serial->dev->descriptor.idProduct);
dev = serial->dev;
if (product == MOSCHIP_DEVICE_ID_7715) {
struct urb *urb = serial->port[0]->interrupt_in_urb;
urb->complete = mos7715_interrupt_callback;
#ifdef CONFIG_USB_SERIAL_MOS7715_PARPORT
ret_val = mos7715_parport_init(serial);
if (ret_val < 0)
return ret_val;
#endif
}
/* start the interrupt urb */
ret_val = usb_submit_urb(serial->port[0]->interrupt_in_urb, GFP_KERNEL);
if (ret_val) {
dev_err(&dev->dev, "failed to submit interrupt urb: %d\n",
ret_val);
}
/* LSR For Port 1 */
read_mos_reg(serial, 0, MOS7720_LSR, &data);
dev_dbg(&dev->dev, "LSR:%x\n", data);
return 0;
}
static void mos7720_release(struct usb_serial *serial)
{
usb_kill_urb(serial->port[0]->interrupt_in_urb);
#ifdef CONFIG_USB_SERIAL_MOS7715_PARPORT
/* close the parallel port */
if (le16_to_cpu(serial->dev->descriptor.idProduct)
== MOSCHIP_DEVICE_ID_7715) {
struct mos7715_parport *mos_parport =
usb_get_serial_data(serial);
/* prevent NULL ptr dereference in port callbacks */
spin_lock(&release_lock);
mos_parport->pp->private_data = NULL;
spin_unlock(&release_lock);
/* wait for synchronous usb calls to return */
if (mos_parport->msg_pending)
wait_for_completion_timeout(&mos_parport->syncmsg_compl,
msecs_to_jiffies(MOS_WDR_TIMEOUT));
/*
* If delayed work is currently scheduled, wait for it to
* complete. This also implies barriers that ensure the
* below serial clearing is not hoisted above the ->work.
*/
cancel_work_sync(&mos_parport->work);
parport_remove_port(mos_parport->pp);
usb_set_serial_data(serial, NULL);
mos_parport->serial = NULL;
parport_del_port(mos_parport->pp);
kref_put(&mos_parport->ref_count, destroy_mos_parport);
}
#endif
}
static int mos7720_port_probe(struct usb_serial_port *port)
{
struct moschip_port *mos7720_port;
mos7720_port = kzalloc(sizeof(*mos7720_port), GFP_KERNEL);
if (!mos7720_port)
return -ENOMEM;
mos7720_port->port = port;
usb_set_serial_port_data(port, mos7720_port);
return 0;
}
static void mos7720_port_remove(struct usb_serial_port *port)
{
struct moschip_port *mos7720_port;
mos7720_port = usb_get_serial_port_data(port);
kfree(mos7720_port);
}
static struct usb_serial_driver moschip7720_2port_driver = {
.driver = {
.owner = THIS_MODULE,
.name = "moschip7720",
},
.description = "Moschip 2 port adapter",
.id_table = id_table,
.num_bulk_in = 2,
.num_bulk_out = 2,
.num_interrupt_in = 1,
.calc_num_ports = mos77xx_calc_num_ports,
.open = mos7720_open,
.close = mos7720_close,
.throttle = mos7720_throttle,
.unthrottle = mos7720_unthrottle,
.attach = mos7720_startup,
.release = mos7720_release,
.port_probe = mos7720_port_probe,
.port_remove = mos7720_port_remove,
.ioctl = mos7720_ioctl,
.tiocmget = mos7720_tiocmget,
.tiocmset = mos7720_tiocmset,
.set_termios = mos7720_set_termios,
.write = mos7720_write,
.write_room = mos7720_write_room,
.chars_in_buffer = mos7720_chars_in_buffer,
.break_ctl = mos7720_break,
.read_bulk_callback = mos7720_bulk_in_callback,
.read_int_callback = mos7720_interrupt_callback,
};
static struct usb_serial_driver * const serial_drivers[] = {
&moschip7720_2port_driver, NULL
};
module_usb_serial_driver(serial_drivers, id_table);
MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL v2");
| linux-master | drivers/usb/serial/mos7720.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Silicon Laboratories CP210x USB to RS232 serial adaptor driver
*
* Copyright (C) 2005 Craig Shelley ([email protected])
* Copyright (C) 2010-2021 Johan Hovold ([email protected])
*
* Support to set flow control line levels using TIOCMGET and TIOCMSET
* thanks to Karl Hiramoto [email protected]. RTSCTS hardware flow
* control thanks to Munir Nassar [email protected]
*
*/
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/tty.h>
#include <linux/tty_flip.h>
#include <linux/module.h>
#include <linux/usb.h>
#include <linux/usb/serial.h>
#include <linux/gpio/driver.h>
#include <linux/bitops.h>
#include <linux/mutex.h>
#define DRIVER_DESC "Silicon Labs CP210x RS232 serial adaptor driver"
/*
* Function Prototypes
*/
static int cp210x_open(struct tty_struct *tty, struct usb_serial_port *);
static void cp210x_close(struct usb_serial_port *);
static void cp210x_change_speed(struct tty_struct *, struct usb_serial_port *,
const struct ktermios *);
static void cp210x_set_termios(struct tty_struct *, struct usb_serial_port *,
const struct ktermios *);
static bool cp210x_tx_empty(struct usb_serial_port *port);
static int cp210x_tiocmget(struct tty_struct *);
static int cp210x_tiocmset(struct tty_struct *, unsigned int, unsigned int);
static int cp210x_tiocmset_port(struct usb_serial_port *port,
unsigned int, unsigned int);
static int cp210x_break_ctl(struct tty_struct *, int);
static int cp210x_attach(struct usb_serial *);
static void cp210x_disconnect(struct usb_serial *);
static void cp210x_release(struct usb_serial *);
static int cp210x_port_probe(struct usb_serial_port *);
static void cp210x_port_remove(struct usb_serial_port *);
static void cp210x_dtr_rts(struct usb_serial_port *port, int on);
static void cp210x_process_read_urb(struct urb *urb);
static void cp210x_enable_event_mode(struct usb_serial_port *port);
static void cp210x_disable_event_mode(struct usb_serial_port *port);
static const struct usb_device_id id_table[] = {
{ USB_DEVICE(0x0404, 0x034C) }, /* NCR Retail IO Box */
{ USB_DEVICE(0x045B, 0x0053) }, /* Renesas RX610 RX-Stick */
{ USB_DEVICE(0x0471, 0x066A) }, /* AKTAKOM ACE-1001 cable */
{ USB_DEVICE(0x0489, 0xE000) }, /* Pirelli Broadband S.p.A, DP-L10 SIP/GSM Mobile */
{ USB_DEVICE(0x0489, 0xE003) }, /* Pirelli Broadband S.p.A, DP-L10 SIP/GSM Mobile */
{ USB_DEVICE(0x0745, 0x1000) }, /* CipherLab USB CCD Barcode Scanner 1000 */
{ USB_DEVICE(0x0846, 0x1100) }, /* NetGear Managed Switch M4100 series, M5300 series, M7100 series */
{ USB_DEVICE(0x08e6, 0x5501) }, /* Gemalto Prox-PU/CU contactless smartcard reader */
{ USB_DEVICE(0x08FD, 0x000A) }, /* Digianswer A/S , ZigBee/802.15.4 MAC Device */
{ USB_DEVICE(0x0908, 0x0070) }, /* Siemens SCALANCE LPE-9000 USB Serial Console */
{ USB_DEVICE(0x0908, 0x01FF) }, /* Siemens RUGGEDCOM USB Serial Console */
{ USB_DEVICE(0x0988, 0x0578) }, /* Teraoka AD2000 */
{ USB_DEVICE(0x0B00, 0x3070) }, /* Ingenico 3070 */
{ USB_DEVICE(0x0BED, 0x1100) }, /* MEI (TM) Cashflow-SC Bill/Voucher Acceptor */
{ USB_DEVICE(0x0BED, 0x1101) }, /* MEI series 2000 Combo Acceptor */
{ USB_DEVICE(0x0FCF, 0x1003) }, /* Dynastream ANT development board */
{ USB_DEVICE(0x0FCF, 0x1004) }, /* Dynastream ANT2USB */
{ USB_DEVICE(0x0FCF, 0x1006) }, /* Dynastream ANT development board */
{ USB_DEVICE(0x0FDE, 0xCA05) }, /* OWL Wireless Electricity Monitor CM-160 */
{ USB_DEVICE(0x106F, 0x0003) }, /* CPI / Money Controls Bulk Coin Recycler */
{ USB_DEVICE(0x10A6, 0xAA26) }, /* Knock-off DCU-11 cable */
{ USB_DEVICE(0x10AB, 0x10C5) }, /* Siemens MC60 Cable */
{ USB_DEVICE(0x10B5, 0xAC70) }, /* Nokia CA-42 USB */
{ USB_DEVICE(0x10C4, 0x0F91) }, /* Vstabi */
{ USB_DEVICE(0x10C4, 0x1101) }, /* Arkham Technology DS101 Bus Monitor */
{ USB_DEVICE(0x10C4, 0x1601) }, /* Arkham Technology DS101 Adapter */
{ USB_DEVICE(0x10C4, 0x800A) }, /* SPORTident BSM7-D-USB main station */
{ USB_DEVICE(0x10C4, 0x803B) }, /* Pololu USB-serial converter */
{ USB_DEVICE(0x10C4, 0x8044) }, /* Cygnal Debug Adapter */
{ USB_DEVICE(0x10C4, 0x804E) }, /* Software Bisque Paramount ME build-in converter */
{ USB_DEVICE(0x10C4, 0x8053) }, /* Enfora EDG1228 */
{ USB_DEVICE(0x10C4, 0x8054) }, /* Enfora GSM2228 */
{ USB_DEVICE(0x10C4, 0x8056) }, /* Lorenz Messtechnik devices */
{ USB_DEVICE(0x10C4, 0x8066) }, /* Argussoft In-System Programmer */
{ USB_DEVICE(0x10C4, 0x806F) }, /* IMS USB to RS422 Converter Cable */
{ USB_DEVICE(0x10C4, 0x807A) }, /* Crumb128 board */
{ USB_DEVICE(0x10C4, 0x80C4) }, /* Cygnal Integrated Products, Inc., Optris infrared thermometer */
{ USB_DEVICE(0x10C4, 0x80CA) }, /* Degree Controls Inc */
{ USB_DEVICE(0x10C4, 0x80DD) }, /* Tracient RFID */
{ USB_DEVICE(0x10C4, 0x80F6) }, /* Suunto sports instrument */
{ USB_DEVICE(0x10C4, 0x8115) }, /* Arygon NFC/Mifare Reader */
{ USB_DEVICE(0x10C4, 0x813D) }, /* Burnside Telecom Deskmobile */
{ USB_DEVICE(0x10C4, 0x813F) }, /* Tams Master Easy Control */
{ USB_DEVICE(0x10C4, 0x814A) }, /* West Mountain Radio RIGblaster P&P */
{ USB_DEVICE(0x10C4, 0x814B) }, /* West Mountain Radio RIGtalk */
{ USB_DEVICE(0x2405, 0x0003) }, /* West Mountain Radio RIGblaster Advantage */
{ USB_DEVICE(0x10C4, 0x8156) }, /* B&G H3000 link cable */
{ USB_DEVICE(0x10C4, 0x815E) }, /* Helicomm IP-Link 1220-DVM */
{ USB_DEVICE(0x10C4, 0x815F) }, /* Timewave HamLinkUSB */
{ USB_DEVICE(0x10C4, 0x817C) }, /* CESINEL MEDCAL N Power Quality Monitor */
{ USB_DEVICE(0x10C4, 0x817D) }, /* CESINEL MEDCAL NT Power Quality Monitor */
{ USB_DEVICE(0x10C4, 0x817E) }, /* CESINEL MEDCAL S Power Quality Monitor */
{ USB_DEVICE(0x10C4, 0x818B) }, /* AVIT Research USB to TTL */
{ USB_DEVICE(0x10C4, 0x819F) }, /* MJS USB Toslink Switcher */
{ USB_DEVICE(0x10C4, 0x81A6) }, /* ThinkOptics WavIt */
{ USB_DEVICE(0x10C4, 0x81A9) }, /* Multiplex RC Interface */
{ USB_DEVICE(0x10C4, 0x81AC) }, /* MSD Dash Hawk */
{ USB_DEVICE(0x10C4, 0x81AD) }, /* INSYS USB Modem */
{ USB_DEVICE(0x10C4, 0x81C8) }, /* Lipowsky Industrie Elektronik GmbH, Baby-JTAG */
{ USB_DEVICE(0x10C4, 0x81D7) }, /* IAI Corp. RCB-CV-USB USB to RS485 Adaptor */
{ USB_DEVICE(0x10C4, 0x81E2) }, /* Lipowsky Industrie Elektronik GmbH, Baby-LIN */
{ USB_DEVICE(0x10C4, 0x81E7) }, /* Aerocomm Radio */
{ USB_DEVICE(0x10C4, 0x81E8) }, /* Zephyr Bioharness */
{ USB_DEVICE(0x10C4, 0x81F2) }, /* C1007 HF band RFID controller */
{ USB_DEVICE(0x10C4, 0x8218) }, /* Lipowsky Industrie Elektronik GmbH, HARP-1 */
{ USB_DEVICE(0x10C4, 0x822B) }, /* Modem EDGE(GSM) Comander 2 */
{ USB_DEVICE(0x10C4, 0x826B) }, /* Cygnal Integrated Products, Inc., Fasttrax GPS demonstration module */
{ USB_DEVICE(0x10C4, 0x8281) }, /* Nanotec Plug & Drive */
{ USB_DEVICE(0x10C4, 0x8293) }, /* Telegesis ETRX2USB */
{ USB_DEVICE(0x10C4, 0x82AA) }, /* Silicon Labs IFS-USB-DATACABLE used with Quint UPS */
{ USB_DEVICE(0x10C4, 0x82EF) }, /* CESINEL FALCO 6105 AC Power Supply */
{ USB_DEVICE(0x10C4, 0x82F1) }, /* CESINEL MEDCAL EFD Earth Fault Detector */
{ USB_DEVICE(0x10C4, 0x82F2) }, /* CESINEL MEDCAL ST Network Analyzer */
{ USB_DEVICE(0x10C4, 0x82F4) }, /* Starizona MicroTouch */
{ USB_DEVICE(0x10C4, 0x82F9) }, /* Procyon AVS */
{ USB_DEVICE(0x10C4, 0x8341) }, /* Siemens MC35PU GPRS Modem */
{ USB_DEVICE(0x10C4, 0x8382) }, /* Cygnal Integrated Products, Inc. */
{ USB_DEVICE(0x10C4, 0x83A8) }, /* Amber Wireless AMB2560 */
{ USB_DEVICE(0x10C4, 0x83AA) }, /* Mark-10 Digital Force Gauge */
{ USB_DEVICE(0x10C4, 0x83D8) }, /* DekTec DTA Plus VHF/UHF Booster/Attenuator */
{ USB_DEVICE(0x10C4, 0x8411) }, /* Kyocera GPS Module */
{ USB_DEVICE(0x10C4, 0x8414) }, /* Decagon USB Cable Adapter */
{ USB_DEVICE(0x10C4, 0x8418) }, /* IRZ Automation Teleport SG-10 GSM/GPRS Modem */
{ USB_DEVICE(0x10C4, 0x846E) }, /* BEI USB Sensor Interface (VCP) */
{ USB_DEVICE(0x10C4, 0x8470) }, /* Juniper Networks BX Series System Console */
{ USB_DEVICE(0x10C4, 0x8477) }, /* Balluff RFID */
{ USB_DEVICE(0x10C4, 0x84B6) }, /* Starizona Hyperion */
{ USB_DEVICE(0x10C4, 0x851E) }, /* CESINEL MEDCAL PT Network Analyzer */
{ USB_DEVICE(0x10C4, 0x85A7) }, /* LifeScan OneTouch Verio IQ */
{ USB_DEVICE(0x10C4, 0x85B8) }, /* CESINEL ReCon T Energy Logger */
{ USB_DEVICE(0x10C4, 0x85EA) }, /* AC-Services IBUS-IF */
{ USB_DEVICE(0x10C4, 0x85EB) }, /* AC-Services CIS-IBUS */
{ USB_DEVICE(0x10C4, 0x85F8) }, /* Virtenio Preon32 */
{ USB_DEVICE(0x10C4, 0x8664) }, /* AC-Services CAN-IF */
{ USB_DEVICE(0x10C4, 0x8665) }, /* AC-Services OBD-IF */
{ USB_DEVICE(0x10C4, 0x8856) }, /* CEL EM357 ZigBee USB Stick - LR */
{ USB_DEVICE(0x10C4, 0x8857) }, /* CEL EM357 ZigBee USB Stick */
{ USB_DEVICE(0x10C4, 0x88A4) }, /* MMB Networks ZigBee USB Device */
{ USB_DEVICE(0x10C4, 0x88A5) }, /* Planet Innovation Ingeni ZigBee USB Device */
{ USB_DEVICE(0x10C4, 0x88D8) }, /* Acuity Brands nLight Air Adapter */
{ USB_DEVICE(0x10C4, 0x88FB) }, /* CESINEL MEDCAL STII Network Analyzer */
{ USB_DEVICE(0x10C4, 0x8938) }, /* CESINEL MEDCAL S II Network Analyzer */
{ USB_DEVICE(0x10C4, 0x8946) }, /* Ketra N1 Wireless Interface */
{ USB_DEVICE(0x10C4, 0x8962) }, /* Brim Brothers charging dock */
{ USB_DEVICE(0x10C4, 0x8977) }, /* CEL MeshWorks DevKit Device */
{ USB_DEVICE(0x10C4, 0x8998) }, /* KCF Technologies PRN */
{ USB_DEVICE(0x10C4, 0x89A4) }, /* CESINEL FTBC Flexible Thyristor Bridge Controller */
{ USB_DEVICE(0x10C4, 0x89FB) }, /* Qivicon ZigBee USB Radio Stick */
{ USB_DEVICE(0x10C4, 0x8A2A) }, /* HubZ dual ZigBee and Z-Wave dongle */
{ USB_DEVICE(0x10C4, 0x8A5B) }, /* CEL EM3588 ZigBee USB Stick */
{ USB_DEVICE(0x10C4, 0x8A5E) }, /* CEL EM3588 ZigBee USB Stick Long Range */
{ USB_DEVICE(0x10C4, 0x8B34) }, /* Qivicon ZigBee USB Radio Stick */
{ USB_DEVICE(0x10C4, 0xEA60) }, /* Silicon Labs factory default */
{ USB_DEVICE(0x10C4, 0xEA61) }, /* Silicon Labs factory default */
{ USB_DEVICE(0x10C4, 0xEA63) }, /* Silicon Labs Windows Update (CP2101-4/CP2102N) */
{ USB_DEVICE(0x10C4, 0xEA70) }, /* Silicon Labs factory default */
{ USB_DEVICE(0x10C4, 0xEA71) }, /* Infinity GPS-MIC-1 Radio Monophone */
{ USB_DEVICE(0x10C4, 0xEA7A) }, /* Silicon Labs Windows Update (CP2105) */
{ USB_DEVICE(0x10C4, 0xEA7B) }, /* Silicon Labs Windows Update (CP2108) */
{ USB_DEVICE(0x10C4, 0xF001) }, /* Elan Digital Systems USBscope50 */
{ USB_DEVICE(0x10C4, 0xF002) }, /* Elan Digital Systems USBwave12 */
{ USB_DEVICE(0x10C4, 0xF003) }, /* Elan Digital Systems USBpulse100 */
{ USB_DEVICE(0x10C4, 0xF004) }, /* Elan Digital Systems USBcount50 */
{ USB_DEVICE(0x10C5, 0xEA61) }, /* Silicon Labs MobiData GPRS USB Modem */
{ USB_DEVICE(0x10CE, 0xEA6A) }, /* Silicon Labs MobiData GPRS USB Modem 100EU */
{ USB_DEVICE(0x12B8, 0xEC60) }, /* Link G4 ECU */
{ USB_DEVICE(0x12B8, 0xEC62) }, /* Link G4+ ECU */
{ USB_DEVICE(0x13AD, 0x9999) }, /* Baltech card reader */
{ USB_DEVICE(0x1555, 0x0004) }, /* Owen AC4 USB-RS485 Converter */
{ USB_DEVICE(0x155A, 0x1006) }, /* ELDAT Easywave RX09 */
{ USB_DEVICE(0x166A, 0x0201) }, /* Clipsal 5500PACA C-Bus Pascal Automation Controller */
{ USB_DEVICE(0x166A, 0x0301) }, /* Clipsal 5800PC C-Bus Wireless PC Interface */
{ USB_DEVICE(0x166A, 0x0303) }, /* Clipsal 5500PCU C-Bus USB interface */
{ USB_DEVICE(0x166A, 0x0304) }, /* Clipsal 5000CT2 C-Bus Black and White Touchscreen */
{ USB_DEVICE(0x166A, 0x0305) }, /* Clipsal C-5000CT2 C-Bus Spectrum Colour Touchscreen */
{ USB_DEVICE(0x166A, 0x0401) }, /* Clipsal L51xx C-Bus Architectural Dimmer */
{ USB_DEVICE(0x166A, 0x0101) }, /* Clipsal 5560884 C-Bus Multi-room Audio Matrix Switcher */
{ USB_DEVICE(0x16C0, 0x09B0) }, /* Lunatico Seletek */
{ USB_DEVICE(0x16C0, 0x09B1) }, /* Lunatico Seletek */
{ USB_DEVICE(0x16D6, 0x0001) }, /* Jablotron serial interface */
{ USB_DEVICE(0x16DC, 0x0010) }, /* W-IE-NE-R Plein & Baus GmbH PL512 Power Supply */
{ USB_DEVICE(0x16DC, 0x0011) }, /* W-IE-NE-R Plein & Baus GmbH RCM Remote Control for MARATON Power Supply */
{ USB_DEVICE(0x16DC, 0x0012) }, /* W-IE-NE-R Plein & Baus GmbH MPOD Multi Channel Power Supply */
{ USB_DEVICE(0x16DC, 0x0015) }, /* W-IE-NE-R Plein & Baus GmbH CML Control, Monitoring and Data Logger */
{ USB_DEVICE(0x17A8, 0x0001) }, /* Kamstrup Optical Eye/3-wire */
{ USB_DEVICE(0x17A8, 0x0005) }, /* Kamstrup M-Bus Master MultiPort 250D */
{ USB_DEVICE(0x17A8, 0x0011) }, /* Kamstrup 444 MHz RF sniffer */
{ USB_DEVICE(0x17A8, 0x0013) }, /* Kamstrup 870 MHz RF sniffer */
{ USB_DEVICE(0x17A8, 0x0101) }, /* Kamstrup 868 MHz wM-Bus C-Mode Meter Reader (Int Ant) */
{ USB_DEVICE(0x17A8, 0x0102) }, /* Kamstrup 868 MHz wM-Bus C-Mode Meter Reader (Ext Ant) */
{ USB_DEVICE(0x17F4, 0xAAAA) }, /* Wavesense Jazz blood glucose meter */
{ USB_DEVICE(0x1843, 0x0200) }, /* Vaisala USB Instrument Cable */
{ USB_DEVICE(0x18EF, 0xE00F) }, /* ELV USB-I2C-Interface */
{ USB_DEVICE(0x18EF, 0xE025) }, /* ELV Marble Sound Board 1 */
{ USB_DEVICE(0x18EF, 0xE030) }, /* ELV ALC 8xxx Battery Charger */
{ USB_DEVICE(0x18EF, 0xE032) }, /* ELV TFD500 Data Logger */
{ USB_DEVICE(0x1901, 0x0190) }, /* GE B850 CP2105 Recorder interface */
{ USB_DEVICE(0x1901, 0x0193) }, /* GE B650 CP2104 PMC interface */
{ USB_DEVICE(0x1901, 0x0194) }, /* GE Healthcare Remote Alarm Box */
{ USB_DEVICE(0x1901, 0x0195) }, /* GE B850/B650/B450 CP2104 DP UART interface */
{ USB_DEVICE(0x1901, 0x0196) }, /* GE B850 CP2105 DP UART interface */
{ USB_DEVICE(0x1901, 0x0197) }, /* GE CS1000 M.2 Key E serial interface */
{ USB_DEVICE(0x1901, 0x0198) }, /* GE CS1000 Display serial interface */
{ USB_DEVICE(0x199B, 0xBA30) }, /* LORD WSDA-200-USB */
{ USB_DEVICE(0x19CF, 0x3000) }, /* Parrot NMEA GPS Flight Recorder */
{ USB_DEVICE(0x1ADB, 0x0001) }, /* Schweitzer Engineering C662 Cable */
{ USB_DEVICE(0x1B1C, 0x1C00) }, /* Corsair USB Dongle */
{ USB_DEVICE(0x1BA4, 0x0002) }, /* Silicon Labs 358x factory default */
{ USB_DEVICE(0x1BE3, 0x07A6) }, /* WAGO 750-923 USB Service Cable */
{ USB_DEVICE(0x1D6F, 0x0010) }, /* Seluxit ApS RF Dongle */
{ USB_DEVICE(0x1E29, 0x0102) }, /* Festo CPX-USB */
{ USB_DEVICE(0x1E29, 0x0501) }, /* Festo CMSP */
{ USB_DEVICE(0x1FB9, 0x0100) }, /* Lake Shore Model 121 Current Source */
{ USB_DEVICE(0x1FB9, 0x0200) }, /* Lake Shore Model 218A Temperature Monitor */
{ USB_DEVICE(0x1FB9, 0x0201) }, /* Lake Shore Model 219 Temperature Monitor */
{ USB_DEVICE(0x1FB9, 0x0202) }, /* Lake Shore Model 233 Temperature Transmitter */
{ USB_DEVICE(0x1FB9, 0x0203) }, /* Lake Shore Model 235 Temperature Transmitter */
{ USB_DEVICE(0x1FB9, 0x0300) }, /* Lake Shore Model 335 Temperature Controller */
{ USB_DEVICE(0x1FB9, 0x0301) }, /* Lake Shore Model 336 Temperature Controller */
{ USB_DEVICE(0x1FB9, 0x0302) }, /* Lake Shore Model 350 Temperature Controller */
{ USB_DEVICE(0x1FB9, 0x0303) }, /* Lake Shore Model 371 AC Bridge */
{ USB_DEVICE(0x1FB9, 0x0400) }, /* Lake Shore Model 411 Handheld Gaussmeter */
{ USB_DEVICE(0x1FB9, 0x0401) }, /* Lake Shore Model 425 Gaussmeter */
{ USB_DEVICE(0x1FB9, 0x0402) }, /* Lake Shore Model 455A Gaussmeter */
{ USB_DEVICE(0x1FB9, 0x0403) }, /* Lake Shore Model 475A Gaussmeter */
{ USB_DEVICE(0x1FB9, 0x0404) }, /* Lake Shore Model 465 Three Axis Gaussmeter */
{ USB_DEVICE(0x1FB9, 0x0600) }, /* Lake Shore Model 625A Superconducting MPS */
{ USB_DEVICE(0x1FB9, 0x0601) }, /* Lake Shore Model 642A Magnet Power Supply */
{ USB_DEVICE(0x1FB9, 0x0602) }, /* Lake Shore Model 648 Magnet Power Supply */
{ USB_DEVICE(0x1FB9, 0x0700) }, /* Lake Shore Model 737 VSM Controller */
{ USB_DEVICE(0x1FB9, 0x0701) }, /* Lake Shore Model 776 Hall Matrix */
{ USB_DEVICE(0x2184, 0x0030) }, /* GW Instek GDM-834x Digital Multimeter */
{ USB_DEVICE(0x2626, 0xEA60) }, /* Aruba Networks 7xxx USB Serial Console */
{ USB_DEVICE(0x3195, 0xF190) }, /* Link Instruments MSO-19 */
{ USB_DEVICE(0x3195, 0xF280) }, /* Link Instruments MSO-28 */
{ USB_DEVICE(0x3195, 0xF281) }, /* Link Instruments MSO-28 */
{ USB_DEVICE(0x3923, 0x7A0B) }, /* National Instruments USB Serial Console */
{ USB_DEVICE(0x413C, 0x9500) }, /* DW700 GPS USB interface */
{ } /* Terminating Entry */
};
MODULE_DEVICE_TABLE(usb, id_table);
struct cp210x_serial_private {
#ifdef CONFIG_GPIOLIB
struct gpio_chip gc;
bool gpio_registered;
u16 gpio_pushpull;
u16 gpio_altfunc;
u16 gpio_input;
#endif
u8 partnum;
u32 fw_version;
speed_t min_speed;
speed_t max_speed;
bool use_actual_rate;
bool no_flow_control;
bool no_event_mode;
};
enum cp210x_event_state {
ES_DATA,
ES_ESCAPE,
ES_LSR,
ES_LSR_DATA_0,
ES_LSR_DATA_1,
ES_MSR
};
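/*
 * Per-port state. The mutex serialises the dtr/rts/crtscts shadow state
 * against the flow-control register read-modify-write cycles in
 * cp210x_set_flow_control() and cp210x_tiocmset_port().
 */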
struct cp210x_port_private {
u8 bInterfaceNumber;
bool event_mode;
enum cp210x_event_state event_state;
u8 lsr;
struct mutex mutex;
bool crtscts;
bool dtr;
bool rts;
};
static struct usb_serial_driver cp210x_device = {
.driver = {
.owner = THIS_MODULE,
.name = "cp210x",
},
.id_table = id_table,
.num_ports = 1,
.bulk_in_size = 256,
.bulk_out_size = 256,
.open = cp210x_open,
.close = cp210x_close,
.break_ctl = cp210x_break_ctl,
.set_termios = cp210x_set_termios,
.tx_empty = cp210x_tx_empty,
.throttle = usb_serial_generic_throttle,
.unthrottle = usb_serial_generic_unthrottle,
.tiocmget = cp210x_tiocmget,
.tiocmset = cp210x_tiocmset,
.get_icount = usb_serial_generic_get_icount,
.attach = cp210x_attach,
.disconnect = cp210x_disconnect,
.release = cp210x_release,
.port_probe = cp210x_port_probe,
.port_remove = cp210x_port_remove,
.dtr_rts = cp210x_dtr_rts,
.process_read_urb = cp210x_process_read_urb,
};
static struct usb_serial_driver * const serial_drivers[] = {
&cp210x_device, NULL
};
/* Config request types */
#define REQTYPE_HOST_TO_INTERFACE 0x41
#define REQTYPE_INTERFACE_TO_HOST 0xc1
#define REQTYPE_HOST_TO_DEVICE 0x40
#define REQTYPE_DEVICE_TO_HOST 0xc0
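/*
 * The request types above are standard USB bmRequestType values: bit 7 is
 * the transfer direction (0x80 = device-to-host), bits 6:5 select a
 * vendor-specific request (0x40) and bits 4:0 the recipient (0x00 device,
 * 0x01 interface).
 */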
/* Config request codes */
#define CP210X_IFC_ENABLE 0x00
#define CP210X_SET_BAUDDIV 0x01
#define CP210X_GET_BAUDDIV 0x02
#define CP210X_SET_LINE_CTL 0x03
#define CP210X_GET_LINE_CTL 0x04
#define CP210X_SET_BREAK 0x05
#define CP210X_IMM_CHAR 0x06
#define CP210X_SET_MHS 0x07
#define CP210X_GET_MDMSTS 0x08
#define CP210X_SET_XON 0x09
#define CP210X_SET_XOFF 0x0A
#define CP210X_SET_EVENTMASK 0x0B
#define CP210X_GET_EVENTMASK 0x0C
#define CP210X_SET_CHAR 0x0D
#define CP210X_GET_CHARS 0x0E
#define CP210X_GET_PROPS 0x0F
#define CP210X_GET_COMM_STATUS 0x10
#define CP210X_RESET 0x11
#define CP210X_PURGE 0x12
#define CP210X_SET_FLOW 0x13
#define CP210X_GET_FLOW 0x14
#define CP210X_EMBED_EVENTS 0x15
#define CP210X_GET_EVENTSTATE 0x16
#define CP210X_SET_CHARS 0x19
#define CP210X_GET_BAUDRATE 0x1D
#define CP210X_SET_BAUDRATE 0x1E
#define CP210X_VENDOR_SPECIFIC 0xFF
/* CP210X_IFC_ENABLE */
#define UART_ENABLE 0x0001
#define UART_DISABLE 0x0000
/* CP210X_(SET|GET)_BAUDDIV */
#define BAUD_RATE_GEN_FREQ 0x384000
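/* 0x384000 = 3686400, i.e. a 3.6864 MHz baud rate generator frequency */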
/* CP210X_(SET|GET)_LINE_CTL */
#define BITS_DATA_MASK		0x0f00
#define BITS_DATA_5		0x0500
#define BITS_DATA_6		0x0600
#define BITS_DATA_7		0x0700
#define BITS_DATA_8		0x0800
#define BITS_DATA_9		0x0900
#define BITS_PARITY_MASK 0x00f0
#define BITS_PARITY_NONE 0x0000
#define BITS_PARITY_ODD 0x0010
#define BITS_PARITY_EVEN 0x0020
#define BITS_PARITY_MARK 0x0030
#define BITS_PARITY_SPACE 0x0040
#define BITS_STOP_MASK 0x000f
#define BITS_STOP_1 0x0000
#define BITS_STOP_1_5 0x0001
#define BITS_STOP_2 0x0002
/* CP210X_SET_BREAK */
#define BREAK_ON 0x0001
#define BREAK_OFF 0x0000
/* CP210X_(SET_MHS|GET_MDMSTS) */
#define CONTROL_DTR 0x0001
#define CONTROL_RTS 0x0002
#define CONTROL_CTS 0x0010
#define CONTROL_DSR 0x0020
#define CONTROL_RING 0x0040
#define CONTROL_DCD 0x0080
#define CONTROL_WRITE_DTR 0x0100
#define CONTROL_WRITE_RTS 0x0200
/* CP210X_(GET|SET)_CHARS */
struct cp210x_special_chars {
u8 bEofChar;
u8 bErrorChar;
u8 bBreakChar;
u8 bEventChar;
u8 bXonChar;
u8 bXoffChar;
};
/* CP210X_VENDOR_SPECIFIC values */
#define CP210X_GET_FW_VER 0x000E
#define CP210X_READ_2NCONFIG 0x000E
#define CP210X_GET_FW_VER_2N 0x0010
#define CP210X_READ_LATCH 0x00C2
#define CP210X_GET_PARTNUM 0x370B
#define CP210X_GET_PORTCONFIG 0x370C
#define CP210X_GET_DEVICEMODE 0x3711
#define CP210X_WRITE_LATCH 0x37E1
/* Part number definitions */
#define CP210X_PARTNUM_CP2101 0x01
#define CP210X_PARTNUM_CP2102 0x02
#define CP210X_PARTNUM_CP2103 0x03
#define CP210X_PARTNUM_CP2104 0x04
#define CP210X_PARTNUM_CP2105 0x05
#define CP210X_PARTNUM_CP2108 0x08
#define CP210X_PARTNUM_CP2102N_QFN28 0x20
#define CP210X_PARTNUM_CP2102N_QFN24 0x21
#define CP210X_PARTNUM_CP2102N_QFN20 0x22
#define CP210X_PARTNUM_UNKNOWN 0xFF
/* CP210X_GET_COMM_STATUS returns these 0x13 bytes */
struct cp210x_comm_status {
__le32 ulErrors;
__le32 ulHoldReasons;
__le32 ulAmountInInQueue;
__le32 ulAmountInOutQueue;
u8 bEofReceived;
u8 bWaitForImmediate;
u8 bReserved;
} __packed;
/*
* CP210X_PURGE - 16 bits passed in wValue of USB request.
* SiLabs app note AN571 gives a strange description of the 4 bits:
 * bit 0 or bit 2 clears the transmit queue and bit 1 or bit 3 clears the
 * receive queue. Writing 1 to all four bits, however, purges the cp2108
 * well enough to avoid the hang.
*/
#define PURGE_ALL 0x000f
/* CP210X_EMBED_EVENTS */
#define CP210X_ESCCHAR 0xec
#define CP210X_LSR_OVERRUN BIT(1)
#define CP210X_LSR_PARITY BIT(2)
#define CP210X_LSR_FRAME BIT(3)
#define CP210X_LSR_BREAK BIT(4)
/* CP210X_GET_FLOW/CP210X_SET_FLOW read/write these 0x10 bytes */
struct cp210x_flow_ctl {
__le32 ulControlHandshake;
__le32 ulFlowReplace;
__le32 ulXonLimit;
__le32 ulXoffLimit;
};
/* cp210x_flow_ctl::ulControlHandshake */
#define CP210X_SERIAL_DTR_MASK GENMASK(1, 0)
#define CP210X_SERIAL_DTR_INACTIVE (0 << 0)
#define CP210X_SERIAL_DTR_ACTIVE (1 << 0)
#define CP210X_SERIAL_DTR_FLOW_CTL (2 << 0)
#define CP210X_SERIAL_CTS_HANDSHAKE BIT(3)
#define CP210X_SERIAL_DSR_HANDSHAKE BIT(4)
#define CP210X_SERIAL_DCD_HANDSHAKE BIT(5)
#define CP210X_SERIAL_DSR_SENSITIVITY BIT(6)
/* cp210x_flow_ctl::ulFlowReplace */
#define CP210X_SERIAL_AUTO_TRANSMIT BIT(0)
#define CP210X_SERIAL_AUTO_RECEIVE BIT(1)
#define CP210X_SERIAL_ERROR_CHAR BIT(2)
#define CP210X_SERIAL_NULL_STRIPPING BIT(3)
#define CP210X_SERIAL_BREAK_CHAR BIT(4)
#define CP210X_SERIAL_RTS_MASK GENMASK(7, 6)
#define CP210X_SERIAL_RTS_INACTIVE (0 << 6)
#define CP210X_SERIAL_RTS_ACTIVE (1 << 6)
#define CP210X_SERIAL_RTS_FLOW_CTL (2 << 6)
#define CP210X_SERIAL_XOFF_CONTINUE BIT(31)
/* CP210X_VENDOR_SPECIFIC, CP210X_GET_DEVICEMODE call reads these 0x2 bytes. */
struct cp210x_pin_mode {
u8 eci;
u8 sci;
};
#define CP210X_PIN_MODE_MODEM 0
#define CP210X_PIN_MODE_GPIO BIT(0)
/*
* CP210X_VENDOR_SPECIFIC, CP210X_GET_PORTCONFIG call reads these 0xf bytes
* on a CP2105 chip. Structure needs padding due to unused/unspecified bytes.
*/
struct cp210x_dual_port_config {
__le16 gpio_mode;
u8 __pad0[2];
__le16 reset_state;
u8 __pad1[4];
__le16 suspend_state;
u8 sci_cfg;
u8 eci_cfg;
u8 device_cfg;
} __packed;
/*
* CP210X_VENDOR_SPECIFIC, CP210X_GET_PORTCONFIG call reads these 0xd bytes
* on a CP2104 chip. Structure needs padding due to unused/unspecified bytes.
*/
struct cp210x_single_port_config {
__le16 gpio_mode;
u8 __pad0[2];
__le16 reset_state;
u8 __pad1[4];
__le16 suspend_state;
u8 device_cfg;
} __packed;
/* GPIO modes */
#define CP210X_SCI_GPIO_MODE_OFFSET 9
#define CP210X_SCI_GPIO_MODE_MASK GENMASK(11, 9)
#define CP210X_ECI_GPIO_MODE_OFFSET 2
#define CP210X_ECI_GPIO_MODE_MASK GENMASK(3, 2)
#define CP210X_GPIO_MODE_OFFSET 8
#define CP210X_GPIO_MODE_MASK GENMASK(11, 8)
/* CP2105 port configuration values */
#define CP2105_GPIO0_TXLED_MODE BIT(0)
#define CP2105_GPIO1_RXLED_MODE BIT(1)
#define CP2105_GPIO1_RS485_MODE BIT(2)
/* CP2104 port configuration values */
#define CP2104_GPIO0_TXLED_MODE BIT(0)
#define CP2104_GPIO1_RXLED_MODE BIT(1)
#define CP2104_GPIO2_RS485_MODE BIT(2)
struct cp210x_quad_port_state {
__le16 gpio_mode_pb0;
__le16 gpio_mode_pb1;
__le16 gpio_mode_pb2;
__le16 gpio_mode_pb3;
__le16 gpio_mode_pb4;
__le16 gpio_lowpower_pb0;
__le16 gpio_lowpower_pb1;
__le16 gpio_lowpower_pb2;
__le16 gpio_lowpower_pb3;
__le16 gpio_lowpower_pb4;
__le16 gpio_latch_pb0;
__le16 gpio_latch_pb1;
__le16 gpio_latch_pb2;
__le16 gpio_latch_pb3;
__le16 gpio_latch_pb4;
};
/*
* CP210X_VENDOR_SPECIFIC, CP210X_GET_PORTCONFIG call reads these 0x49 bytes
* on a CP2108 chip.
*
* See https://www.silabs.com/documents/public/application-notes/an978-cp210x-usb-to-uart-api-specification.pdf
*/
struct cp210x_quad_port_config {
struct cp210x_quad_port_state reset_state;
struct cp210x_quad_port_state suspend_state;
u8 ipdelay_ifc[4];
u8 enhancedfxn_ifc[4];
u8 enhancedfxn_device;
u8 extclkfreq[4];
} __packed;
#define CP2108_EF_IFC_GPIO_TXLED 0x01
#define CP2108_EF_IFC_GPIO_RXLED 0x02
#define CP2108_EF_IFC_GPIO_RS485 0x04
#define CP2108_EF_IFC_GPIO_RS485_LOGIC 0x08
#define CP2108_EF_IFC_GPIO_CLOCK 0x10
#define CP2108_EF_IFC_DYNAMIC_SUSPEND 0x40
/* CP2102N configuration array indices */
#define CP210X_2NCONFIG_CONFIG_VERSION_IDX 2
#define CP210X_2NCONFIG_GPIO_MODE_IDX 581
#define CP210X_2NCONFIG_GPIO_RSTLATCH_IDX 587
#define CP210X_2NCONFIG_GPIO_CONTROL_IDX 600
/* CP2102N QFN20 port configuration values */
#define CP2102N_QFN20_GPIO2_TXLED_MODE BIT(2)
#define CP2102N_QFN20_GPIO3_RXLED_MODE BIT(3)
#define CP2102N_QFN20_GPIO1_RS485_MODE BIT(4)
#define CP2102N_QFN20_GPIO0_CLK_MODE BIT(6)
/*
* CP210X_VENDOR_SPECIFIC, CP210X_WRITE_LATCH call writes these 0x02 bytes
* for CP2102N, CP2103, CP2104 and CP2105.
*/
struct cp210x_gpio_write {
u8 mask;
u8 state;
};
/*
* CP210X_VENDOR_SPECIFIC, CP210X_WRITE_LATCH call writes these 0x04 bytes
* for CP2108.
*/
struct cp210x_gpio_write16 {
__le16 mask;
__le16 state;
};
/*
* Helper to get interface number when we only have struct usb_serial.
*/
static u8 cp210x_interface_num(struct usb_serial *serial)
{
struct usb_host_interface *cur_altsetting;
cur_altsetting = serial->interface->cur_altsetting;
return cur_altsetting->desc.bInterfaceNumber;
}
/*
* Reads a variable-sized block of CP210X_ registers, identified by req.
* Returns data into buf in native USB byte order.
*/
static int cp210x_read_reg_block(struct usb_serial_port *port, u8 req,
void *buf, int bufsize)
{
struct usb_serial *serial = port->serial;
struct cp210x_port_private *port_priv = usb_get_serial_port_data(port);
int result;
result = usb_control_msg_recv(serial->dev, 0, req,
REQTYPE_INTERFACE_TO_HOST, 0,
port_priv->bInterfaceNumber, buf, bufsize,
USB_CTRL_SET_TIMEOUT, GFP_KERNEL);
if (result) {
dev_err(&port->dev, "failed get req 0x%x size %d status: %d\n",
req, bufsize, result);
return result;
}
return 0;
}
/*
* Reads any 8-bit CP210X_ register identified by req.
*/
static int cp210x_read_u8_reg(struct usb_serial_port *port, u8 req, u8 *val)
{
return cp210x_read_reg_block(port, req, val, sizeof(*val));
}
/*
* Reads a variable-sized vendor block of CP210X_ registers, identified by val.
* Returns data into buf in native USB byte order.
*/
static int cp210x_read_vendor_block(struct usb_serial *serial, u8 type, u16 val,
void *buf, int bufsize)
{
int result;
result = usb_control_msg_recv(serial->dev, 0, CP210X_VENDOR_SPECIFIC,
type, val, cp210x_interface_num(serial), buf, bufsize,
USB_CTRL_GET_TIMEOUT, GFP_KERNEL);
if (result) {
dev_err(&serial->interface->dev,
"failed to get vendor val 0x%04x size %d: %d\n", val,
bufsize, result);
return result;
}
return 0;
}
/*
* Writes any 16-bit CP210X_ register (req) whose value is passed
* entirely in the wValue field of the USB request.
*/
static int cp210x_write_u16_reg(struct usb_serial_port *port, u8 req, u16 val)
{
struct usb_serial *serial = port->serial;
struct cp210x_port_private *port_priv = usb_get_serial_port_data(port);
int result;
result = usb_control_msg(serial->dev, usb_sndctrlpipe(serial->dev, 0),
req, REQTYPE_HOST_TO_INTERFACE, val,
port_priv->bInterfaceNumber, NULL, 0,
USB_CTRL_SET_TIMEOUT);
if (result < 0) {
dev_err(&port->dev, "failed set request 0x%x status: %d\n",
req, result);
}
return result;
}
/*
* Writes a variable-sized block of CP210X_ registers, identified by req.
* Data in buf must be in native USB byte order.
*/
static int cp210x_write_reg_block(struct usb_serial_port *port, u8 req,
void *buf, int bufsize)
{
struct usb_serial *serial = port->serial;
struct cp210x_port_private *port_priv = usb_get_serial_port_data(port);
int result;
result = usb_control_msg_send(serial->dev, 0, req,
REQTYPE_HOST_TO_INTERFACE, 0,
port_priv->bInterfaceNumber, buf, bufsize,
USB_CTRL_SET_TIMEOUT, GFP_KERNEL);
if (result) {
dev_err(&port->dev, "failed set req 0x%x size %d status: %d\n",
req, bufsize, result);
return result;
}
return 0;
}
/*
* Writes any 32-bit CP210X_ register identified by req.
*/
static int cp210x_write_u32_reg(struct usb_serial_port *port, u8 req, u32 val)
{
__le32 le32_val;
le32_val = cpu_to_le32(val);
return cp210x_write_reg_block(port, req, &le32_val, sizeof(le32_val));
}
#ifdef CONFIG_GPIOLIB
/*
* Writes a variable-sized vendor block of CP210X_ registers, identified by val.
* Data in buf must be in native USB byte order.
*/
static int cp210x_write_vendor_block(struct usb_serial *serial, u8 type,
u16 val, void *buf, int bufsize)
{
int result;
result = usb_control_msg_send(serial->dev, 0, CP210X_VENDOR_SPECIFIC,
type, val, cp210x_interface_num(serial), buf, bufsize,
USB_CTRL_SET_TIMEOUT, GFP_KERNEL);
if (result) {
dev_err(&serial->interface->dev,
"failed to set vendor val 0x%04x size %d: %d\n", val,
bufsize, result);
return result;
}
return 0;
}
#endif
static int cp210x_open(struct tty_struct *tty, struct usb_serial_port *port)
{
struct cp210x_port_private *port_priv = usb_get_serial_port_data(port);
int result;
result = cp210x_write_u16_reg(port, CP210X_IFC_ENABLE, UART_ENABLE);
if (result) {
dev_err(&port->dev, "%s - Unable to enable UART\n", __func__);
return result;
}
if (tty)
cp210x_set_termios(tty, port, NULL);
result = usb_serial_generic_open(tty, port);
if (result)
goto err_disable;
return 0;
err_disable:
cp210x_write_u16_reg(port, CP210X_IFC_ENABLE, UART_DISABLE);
port_priv->event_mode = false;
return result;
}
static void cp210x_close(struct usb_serial_port *port)
{
struct cp210x_port_private *port_priv = usb_get_serial_port_data(port);
usb_serial_generic_close(port);
/* Clear both queues; cp2108 needs this to avoid an occasional hang */
cp210x_write_u16_reg(port, CP210X_PURGE, PURGE_ALL);
cp210x_write_u16_reg(port, CP210X_IFC_ENABLE, UART_DISABLE);
/* Disabling the interface disables event-insertion mode. */
port_priv->event_mode = false;
}
static void cp210x_process_lsr(struct usb_serial_port *port, unsigned char lsr, char *flag)
{
if (lsr & CP210X_LSR_BREAK) {
port->icount.brk++;
*flag = TTY_BREAK;
} else if (lsr & CP210X_LSR_PARITY) {
port->icount.parity++;
*flag = TTY_PARITY;
} else if (lsr & CP210X_LSR_FRAME) {
port->icount.frame++;
*flag = TTY_FRAME;
}
if (lsr & CP210X_LSR_OVERRUN) {
port->icount.overrun++;
tty_insert_flip_char(&port->port, 0, TTY_OVERRUN);
}
}
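/*
 * Process one character of an event-insertion-mode stream. In this mode the
 * device inserts CP210X_ESCCHAR followed by an event code into the data
 * stream: 0 stands for a literal escape character, 1 for an LSR event with
 * an associated data byte, 2 for an LSR-only event and 3 for an MSR event.
 * Returns false if the (possibly rewritten) character should be forwarded to
 * the tty and true if it was consumed by the event state machine.
 */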
static bool cp210x_process_char(struct usb_serial_port *port, unsigned char *ch, char *flag)
{
struct cp210x_port_private *port_priv = usb_get_serial_port_data(port);
switch (port_priv->event_state) {
case ES_DATA:
if (*ch == CP210X_ESCCHAR) {
port_priv->event_state = ES_ESCAPE;
break;
}
return false;
case ES_ESCAPE:
switch (*ch) {
case 0:
dev_dbg(&port->dev, "%s - escape char\n", __func__);
*ch = CP210X_ESCCHAR;
port_priv->event_state = ES_DATA;
return false;
case 1:
port_priv->event_state = ES_LSR_DATA_0;
break;
case 2:
port_priv->event_state = ES_LSR;
break;
case 3:
port_priv->event_state = ES_MSR;
break;
default:
dev_err(&port->dev, "malformed event 0x%02x\n", *ch);
port_priv->event_state = ES_DATA;
break;
}
break;
case ES_LSR_DATA_0:
port_priv->lsr = *ch;
port_priv->event_state = ES_LSR_DATA_1;
break;
case ES_LSR_DATA_1:
dev_dbg(&port->dev, "%s - lsr = 0x%02x, data = 0x%02x\n",
__func__, port_priv->lsr, *ch);
cp210x_process_lsr(port, port_priv->lsr, flag);
port_priv->event_state = ES_DATA;
return false;
case ES_LSR:
dev_dbg(&port->dev, "%s - lsr = 0x%02x\n", __func__, *ch);
port_priv->lsr = *ch;
cp210x_process_lsr(port, port_priv->lsr, flag);
port_priv->event_state = ES_DATA;
break;
case ES_MSR:
dev_dbg(&port->dev, "%s - msr = 0x%02x\n", __func__, *ch);
/* unimplemented */
port_priv->event_state = ES_DATA;
break;
}
return true;
}
static void cp210x_process_read_urb(struct urb *urb)
{
struct usb_serial_port *port = urb->context;
struct cp210x_port_private *port_priv = usb_get_serial_port_data(port);
unsigned char *ch = urb->transfer_buffer;
char flag;
int i;
if (!urb->actual_length)
return;
if (port_priv->event_mode) {
for (i = 0; i < urb->actual_length; i++, ch++) {
flag = TTY_NORMAL;
if (cp210x_process_char(port, ch, &flag))
continue;
tty_insert_flip_char(&port->port, *ch, flag);
}
} else {
tty_insert_flip_string(&port->port, ch, urb->actual_length);
}
tty_flip_buffer_push(&port->port);
}
/*
* Read how many bytes are waiting in the TX queue.
*/
static int cp210x_get_tx_queue_byte_count(struct usb_serial_port *port,
u32 *count)
{
struct usb_serial *serial = port->serial;
struct cp210x_port_private *port_priv = usb_get_serial_port_data(port);
struct cp210x_comm_status sts;
int result;
result = usb_control_msg_recv(serial->dev, 0, CP210X_GET_COMM_STATUS,
REQTYPE_INTERFACE_TO_HOST, 0,
port_priv->bInterfaceNumber, &sts, sizeof(sts),
USB_CTRL_GET_TIMEOUT, GFP_KERNEL);
if (result) {
dev_err(&port->dev, "failed to get comm status: %d\n", result);
return result;
}
*count = le32_to_cpu(sts.ulAmountInOutQueue);
return 0;
}
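/*
 * If the comm-status request fails, report the transmit queue as empty
 * rather than keeping callers blocked waiting for the transmitter to drain.
 */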
static bool cp210x_tx_empty(struct usb_serial_port *port)
{
int err;
u32 count;
err = cp210x_get_tx_queue_byte_count(port, &count);
if (err)
return true;
return !count;
}
struct cp210x_rate {
speed_t rate;
speed_t high;
};
static const struct cp210x_rate cp210x_an205_table1[] = {
{ 300, 300 },
{ 600, 600 },
{ 1200, 1200 },
{ 1800, 1800 },
{ 2400, 2400 },
{ 4000, 4000 },
{ 4800, 4803 },
{ 7200, 7207 },
{ 9600, 9612 },
{ 14400, 14428 },
{ 16000, 16062 },
{ 19200, 19250 },
{ 28800, 28912 },
{ 38400, 38601 },
{ 51200, 51558 },
{ 56000, 56280 },
{ 57600, 58053 },
{ 64000, 64111 },
{ 76800, 77608 },
{ 115200, 117028 },
{ 128000, 129347 },
{ 153600, 156868 },
{ 230400, 237832 },
{ 250000, 254234 },
{ 256000, 273066 },
{ 460800, 491520 },
{ 500000, 567138 },
{ 576000, 670254 },
{ 921600, UINT_MAX }
};
/*
* Quantises the baud rate as per AN205 Table 1
*/
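/*
 * For example, a requested rate of 115000 falls within the 115200 row of
 * the table above (high limit 117028), so it is quantised to 115200.
 */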
static speed_t cp210x_get_an205_rate(speed_t baud)
{
int i;
for (i = 0; i < ARRAY_SIZE(cp210x_an205_table1); ++i) {
if (baud <= cp210x_an205_table1[i].high)
break;
}
return cp210x_an205_table1[i].rate;
}
static speed_t cp210x_get_actual_rate(speed_t baud)
{
unsigned int prescale = 1;
unsigned int div;
if (baud <= 365)
prescale = 4;
div = DIV_ROUND_CLOSEST(48000000, 2 * prescale * baud);
baud = 48000000 / (2 * prescale * div);
return baud;
}
/*
* CP2101 supports the following baud rates:
*
* 300, 600, 1200, 1800, 2400, 4800, 7200, 9600, 14400, 19200, 28800,
* 38400, 56000, 57600, 115200, 128000, 230400, 460800, 921600
*
* CP2102 and CP2103 support the following additional rates:
*
* 4000, 16000, 51200, 64000, 76800, 153600, 250000, 256000, 500000,
* 576000
*
* The device will map a requested rate to a supported one, but the result
* of requests for rates greater than 1053257 is undefined (see AN205).
*
* CP2104, CP2105 and CP2110 support most rates up to 2M, 921k and 1M baud,
* respectively, with an error less than 1%. The actual rates are determined
* by
*
* div = round(freq / (2 x prescale x request))
* actual = freq / (2 x prescale x div)
*
* For CP2104 and CP2105 freq is 48Mhz and prescale is 4 for request <= 365bps
* or 1 otherwise.
* For CP2110 freq is 24Mhz and prescale is 4 for request <= 300bps or 1
* otherwise.
*/
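/*
 * As a worked example of the formula above: on a CP2104 (48 MHz, prescale 1)
 * a request for 115200 baud gives div = round(48000000 / (2 * 115200)) = 208
 * and an actual rate of 48000000 / (2 * 208) = 115384 baud, well within 1%.
 */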
static void cp210x_change_speed(struct tty_struct *tty,
struct usb_serial_port *port,
const struct ktermios *old_termios)
{
struct usb_serial *serial = port->serial;
struct cp210x_serial_private *priv = usb_get_serial_data(serial);
u32 baud;
if (tty->termios.c_ospeed == 0)
return;
/*
* This maps the requested rate to the actual rate, a valid rate on
* cp2102 or cp2103, or to an arbitrary rate in [1M, max_speed].
*/
baud = clamp(tty->termios.c_ospeed, priv->min_speed, priv->max_speed);
if (priv->use_actual_rate)
baud = cp210x_get_actual_rate(baud);
else if (baud < 1000000)
baud = cp210x_get_an205_rate(baud);
dev_dbg(&port->dev, "%s - setting baud rate to %u\n", __func__, baud);
if (cp210x_write_u32_reg(port, CP210X_SET_BAUDRATE, baud)) {
dev_warn(&port->dev, "failed to set baud rate to %u\n", baud);
if (old_termios)
baud = old_termios->c_ospeed;
else
baud = 9600;
}
tty_encode_baud_rate(tty, baud, baud);
}
static void cp210x_enable_event_mode(struct usb_serial_port *port)
{
struct cp210x_serial_private *priv = usb_get_serial_data(port->serial);
struct cp210x_port_private *port_priv = usb_get_serial_port_data(port);
int ret;
if (port_priv->event_mode)
return;
if (priv->no_event_mode)
return;
port_priv->event_state = ES_DATA;
port_priv->event_mode = true;
ret = cp210x_write_u16_reg(port, CP210X_EMBED_EVENTS, CP210X_ESCCHAR);
if (ret) {
dev_err(&port->dev, "failed to enable events: %d\n", ret);
port_priv->event_mode = false;
}
}
static void cp210x_disable_event_mode(struct usb_serial_port *port)
{
struct cp210x_port_private *port_priv = usb_get_serial_port_data(port);
int ret;
if (!port_priv->event_mode)
return;
ret = cp210x_write_u16_reg(port, CP210X_EMBED_EVENTS, 0);
if (ret) {
dev_err(&port->dev, "failed to disable events: %d\n", ret);
return;
}
port_priv->event_mode = false;
}
static bool cp210x_termios_change(const struct ktermios *a, const struct ktermios *b)
{
bool iflag_change, cc_change;
iflag_change = ((a->c_iflag ^ b->c_iflag) & (INPCK | IXON | IXOFF));
cc_change = a->c_cc[VSTART] != b->c_cc[VSTART] ||
a->c_cc[VSTOP] != b->c_cc[VSTOP];
return tty_termios_hw_change(a, b) || iflag_change || cc_change;
}
static void cp210x_set_flow_control(struct tty_struct *tty,
struct usb_serial_port *port,
const struct ktermios *old_termios)
{
struct cp210x_serial_private *priv = usb_get_serial_data(port->serial);
struct cp210x_port_private *port_priv = usb_get_serial_port_data(port);
struct cp210x_special_chars chars;
struct cp210x_flow_ctl flow_ctl;
u32 flow_repl;
u32 ctl_hs;
bool crtscts;
int ret;
/*
* Some CP2102N interpret ulXonLimit as ulFlowReplace (erratum
* CP2102N_E104). Report back that flow control is not supported.
*/
if (priv->no_flow_control) {
tty->termios.c_cflag &= ~CRTSCTS;
tty->termios.c_iflag &= ~(IXON | IXOFF);
}
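	/*
	 * Nothing to do if the line stays up at a non-zero speed and none of
	 * the flow-control related settings (CRTSCTS, IXON/IXOFF and the
	 * start/stop characters) have changed.
	 */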
if (tty->termios.c_ospeed != 0 &&
old_termios && old_termios->c_ospeed != 0 &&
C_CRTSCTS(tty) == (old_termios->c_cflag & CRTSCTS) &&
I_IXON(tty) == (old_termios->c_iflag & IXON) &&
I_IXOFF(tty) == (old_termios->c_iflag & IXOFF) &&
START_CHAR(tty) == old_termios->c_cc[VSTART] &&
STOP_CHAR(tty) == old_termios->c_cc[VSTOP]) {
return;
}
if (I_IXON(tty) || I_IXOFF(tty)) {
memset(&chars, 0, sizeof(chars));
chars.bXonChar = START_CHAR(tty);
chars.bXoffChar = STOP_CHAR(tty);
ret = cp210x_write_reg_block(port, CP210X_SET_CHARS, &chars,
sizeof(chars));
if (ret) {
dev_err(&port->dev, "failed to set special chars: %d\n",
ret);
}
}
mutex_lock(&port_priv->mutex);
if (tty->termios.c_ospeed == 0) {
port_priv->dtr = false;
port_priv->rts = false;
} else if (old_termios && old_termios->c_ospeed == 0) {
port_priv->dtr = true;
port_priv->rts = true;
}
ret = cp210x_read_reg_block(port, CP210X_GET_FLOW, &flow_ctl,
sizeof(flow_ctl));
if (ret)
goto out_unlock;
ctl_hs = le32_to_cpu(flow_ctl.ulControlHandshake);
flow_repl = le32_to_cpu(flow_ctl.ulFlowReplace);
ctl_hs &= ~CP210X_SERIAL_DSR_HANDSHAKE;
ctl_hs &= ~CP210X_SERIAL_DCD_HANDSHAKE;
ctl_hs &= ~CP210X_SERIAL_DSR_SENSITIVITY;
ctl_hs &= ~CP210X_SERIAL_DTR_MASK;
if (port_priv->dtr)
ctl_hs |= CP210X_SERIAL_DTR_ACTIVE;
else
ctl_hs |= CP210X_SERIAL_DTR_INACTIVE;
flow_repl &= ~CP210X_SERIAL_RTS_MASK;
if (C_CRTSCTS(tty)) {
ctl_hs |= CP210X_SERIAL_CTS_HANDSHAKE;
if (port_priv->rts)
flow_repl |= CP210X_SERIAL_RTS_FLOW_CTL;
else
flow_repl |= CP210X_SERIAL_RTS_INACTIVE;
crtscts = true;
} else {
ctl_hs &= ~CP210X_SERIAL_CTS_HANDSHAKE;
if (port_priv->rts)
flow_repl |= CP210X_SERIAL_RTS_ACTIVE;
else
flow_repl |= CP210X_SERIAL_RTS_INACTIVE;
crtscts = false;
}
if (I_IXOFF(tty)) {
flow_repl |= CP210X_SERIAL_AUTO_RECEIVE;
flow_ctl.ulXonLimit = cpu_to_le32(128);
flow_ctl.ulXoffLimit = cpu_to_le32(128);
} else {
flow_repl &= ~CP210X_SERIAL_AUTO_RECEIVE;
}
if (I_IXON(tty))
flow_repl |= CP210X_SERIAL_AUTO_TRANSMIT;
else
flow_repl &= ~CP210X_SERIAL_AUTO_TRANSMIT;
dev_dbg(&port->dev, "%s - ctrl = 0x%02x, flow = 0x%02x\n", __func__,
ctl_hs, flow_repl);
flow_ctl.ulControlHandshake = cpu_to_le32(ctl_hs);
flow_ctl.ulFlowReplace = cpu_to_le32(flow_repl);
ret = cp210x_write_reg_block(port, CP210X_SET_FLOW, &flow_ctl,
sizeof(flow_ctl));
if (ret)
goto out_unlock;
port_priv->crtscts = crtscts;
out_unlock:
mutex_unlock(&port_priv->mutex);
}
static void cp210x_set_termios(struct tty_struct *tty,
struct usb_serial_port *port,
const struct ktermios *old_termios)
{
struct cp210x_serial_private *priv = usb_get_serial_data(port->serial);
u16 bits;
int ret;
if (old_termios && !cp210x_termios_change(&tty->termios, old_termios) &&
tty->termios.c_ospeed != 0)
return;
if (!old_termios || tty->termios.c_ospeed != old_termios->c_ospeed)
cp210x_change_speed(tty, port, old_termios);
/* CP2101 only supports CS8, 1 stop bit and non-stick parity. */
if (priv->partnum == CP210X_PARTNUM_CP2101) {
tty->termios.c_cflag &= ~(CSIZE | CSTOPB | CMSPAR);
tty->termios.c_cflag |= CS8;
}
bits = 0;
switch (C_CSIZE(tty)) {
case CS5:
bits |= BITS_DATA_5;
break;
case CS6:
bits |= BITS_DATA_6;
break;
case CS7:
bits |= BITS_DATA_7;
break;
case CS8:
default:
bits |= BITS_DATA_8;
break;
}
if (C_PARENB(tty)) {
if (C_CMSPAR(tty)) {
if (C_PARODD(tty))
bits |= BITS_PARITY_MARK;
else
bits |= BITS_PARITY_SPACE;
} else {
if (C_PARODD(tty))
bits |= BITS_PARITY_ODD;
else
bits |= BITS_PARITY_EVEN;
}
}
if (C_CSTOPB(tty))
bits |= BITS_STOP_2;
else
bits |= BITS_STOP_1;
ret = cp210x_write_u16_reg(port, CP210X_SET_LINE_CTL, bits);
if (ret)
dev_err(&port->dev, "failed to set line control: %d\n", ret);
cp210x_set_flow_control(tty, port, old_termios);
/*
 * For now, only enable event-insertion mode if input parity checking
 * is enabled.
*/
if (I_INPCK(tty))
cp210x_enable_event_mode(port);
else
cp210x_disable_event_mode(port);
}
static int cp210x_tiocmset(struct tty_struct *tty,
unsigned int set, unsigned int clear)
{
struct usb_serial_port *port = tty->driver_data;
return cp210x_tiocmset_port(port, set, clear);
}
static int cp210x_tiocmset_port(struct usb_serial_port *port,
unsigned int set, unsigned int clear)
{
struct cp210x_port_private *port_priv = usb_get_serial_port_data(port);
struct cp210x_flow_ctl flow_ctl;
u32 ctl_hs, flow_repl;
u16 control = 0;
int ret;
mutex_lock(&port_priv->mutex);
if (set & TIOCM_RTS) {
port_priv->rts = true;
control |= CONTROL_RTS;
control |= CONTROL_WRITE_RTS;
}
if (set & TIOCM_DTR) {
port_priv->dtr = true;
control |= CONTROL_DTR;
control |= CONTROL_WRITE_DTR;
}
if (clear & TIOCM_RTS) {
port_priv->rts = false;
control &= ~CONTROL_RTS;
control |= CONTROL_WRITE_RTS;
}
if (clear & TIOCM_DTR) {
port_priv->dtr = false;
control &= ~CONTROL_DTR;
control |= CONTROL_WRITE_DTR;
}
/*
* Use SET_FLOW to set DTR and enable/disable auto-RTS when hardware
* flow control is enabled.
*/
if (port_priv->crtscts && control & CONTROL_WRITE_RTS) {
ret = cp210x_read_reg_block(port, CP210X_GET_FLOW, &flow_ctl,
sizeof(flow_ctl));
if (ret)
goto out_unlock;
ctl_hs = le32_to_cpu(flow_ctl.ulControlHandshake);
flow_repl = le32_to_cpu(flow_ctl.ulFlowReplace);
ctl_hs &= ~CP210X_SERIAL_DTR_MASK;
if (port_priv->dtr)
ctl_hs |= CP210X_SERIAL_DTR_ACTIVE;
else
ctl_hs |= CP210X_SERIAL_DTR_INACTIVE;
flow_repl &= ~CP210X_SERIAL_RTS_MASK;
if (port_priv->rts)
flow_repl |= CP210X_SERIAL_RTS_FLOW_CTL;
else
flow_repl |= CP210X_SERIAL_RTS_INACTIVE;
flow_ctl.ulControlHandshake = cpu_to_le32(ctl_hs);
flow_ctl.ulFlowReplace = cpu_to_le32(flow_repl);
dev_dbg(&port->dev, "%s - ctrl = 0x%02x, flow = 0x%02x\n",
__func__, ctl_hs, flow_repl);
ret = cp210x_write_reg_block(port, CP210X_SET_FLOW, &flow_ctl,
sizeof(flow_ctl));
} else {
dev_dbg(&port->dev, "%s - control = 0x%04x\n", __func__, control);
ret = cp210x_write_u16_reg(port, CP210X_SET_MHS, control);
}
out_unlock:
mutex_unlock(&port_priv->mutex);
return ret;
}
static void cp210x_dtr_rts(struct usb_serial_port *port, int on)
{
if (on)
cp210x_tiocmset_port(port, TIOCM_DTR | TIOCM_RTS, 0);
else
cp210x_tiocmset_port(port, 0, TIOCM_DTR | TIOCM_RTS);
}
static int cp210x_tiocmget(struct tty_struct *tty)
{
struct usb_serial_port *port = tty->driver_data;
u8 control;
int result;
result = cp210x_read_u8_reg(port, CP210X_GET_MDMSTS, &control);
if (result)
return result;
result = ((control & CONTROL_DTR) ? TIOCM_DTR : 0)
|((control & CONTROL_RTS) ? TIOCM_RTS : 0)
|((control & CONTROL_CTS) ? TIOCM_CTS : 0)
|((control & CONTROL_DSR) ? TIOCM_DSR : 0)
		|((control & CONTROL_RING) ? TIOCM_RI : 0)
|((control & CONTROL_DCD) ? TIOCM_CD : 0);
dev_dbg(&port->dev, "%s - control = 0x%02x\n", __func__, control);
return result;
}
static int cp210x_break_ctl(struct tty_struct *tty, int break_state)
{
struct usb_serial_port *port = tty->driver_data;
struct cp210x_serial_private *priv = usb_get_serial_data(port->serial);
u16 state;
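	/* The CP2105 SCI (second) interface does not support break signalling. */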
if (priv->partnum == CP210X_PARTNUM_CP2105) {
if (cp210x_interface_num(port->serial) == 1)
return -ENOTTY;
}
if (break_state == 0)
state = BREAK_OFF;
else
state = BREAK_ON;
dev_dbg(&port->dev, "%s - turning break %s\n", __func__,
state == BREAK_OFF ? "off" : "on");
return cp210x_write_u16_reg(port, CP210X_SET_BREAK, state);
}
#ifdef CONFIG_GPIOLIB
static int cp210x_gpio_get(struct gpio_chip *gc, unsigned int gpio)
{
struct usb_serial *serial = gpiochip_get_data(gc);
struct cp210x_serial_private *priv = usb_get_serial_data(serial);
u8 req_type;
u16 mask;
int result;
int len;
result = usb_autopm_get_interface(serial->interface);
if (result)
return result;
switch (priv->partnum) {
case CP210X_PARTNUM_CP2105:
req_type = REQTYPE_INTERFACE_TO_HOST;
len = 1;
break;
case CP210X_PARTNUM_CP2108:
req_type = REQTYPE_INTERFACE_TO_HOST;
len = 2;
break;
default:
req_type = REQTYPE_DEVICE_TO_HOST;
len = 1;
break;
}
mask = 0;
result = cp210x_read_vendor_block(serial, req_type, CP210X_READ_LATCH,
&mask, len);
usb_autopm_put_interface(serial->interface);
if (result < 0)
return result;
le16_to_cpus(&mask);
return !!(mask & BIT(gpio));
}
static void cp210x_gpio_set(struct gpio_chip *gc, unsigned int gpio, int value)
{
struct usb_serial *serial = gpiochip_get_data(gc);
struct cp210x_serial_private *priv = usb_get_serial_data(serial);
struct cp210x_gpio_write16 buf16;
struct cp210x_gpio_write buf;
u16 mask, state;
u16 wIndex;
int result;
if (value == 1)
state = BIT(gpio);
else
state = 0;
mask = BIT(gpio);
result = usb_autopm_get_interface(serial->interface);
if (result)
goto out;
switch (priv->partnum) {
case CP210X_PARTNUM_CP2105:
buf.mask = (u8)mask;
buf.state = (u8)state;
result = cp210x_write_vendor_block(serial,
REQTYPE_HOST_TO_INTERFACE,
CP210X_WRITE_LATCH, &buf,
sizeof(buf));
break;
case CP210X_PARTNUM_CP2108:
buf16.mask = cpu_to_le16(mask);
buf16.state = cpu_to_le16(state);
result = cp210x_write_vendor_block(serial,
REQTYPE_HOST_TO_INTERFACE,
CP210X_WRITE_LATCH, &buf16,
sizeof(buf16));
break;
default:
wIndex = state << 8 | mask;
result = usb_control_msg(serial->dev,
usb_sndctrlpipe(serial->dev, 0),
CP210X_VENDOR_SPECIFIC,
REQTYPE_HOST_TO_DEVICE,
CP210X_WRITE_LATCH,
wIndex,
NULL, 0, USB_CTRL_SET_TIMEOUT);
break;
}
usb_autopm_put_interface(serial->interface);
out:
if (result < 0) {
dev_err(&serial->interface->dev, "failed to set GPIO value: %d\n",
result);
}
}
static int cp210x_gpio_direction_get(struct gpio_chip *gc, unsigned int gpio)
{
struct usb_serial *serial = gpiochip_get_data(gc);
struct cp210x_serial_private *priv = usb_get_serial_data(serial);
return priv->gpio_input & BIT(gpio);
}
static int cp210x_gpio_direction_input(struct gpio_chip *gc, unsigned int gpio)
{
struct usb_serial *serial = gpiochip_get_data(gc);
struct cp210x_serial_private *priv = usb_get_serial_data(serial);
if (priv->partnum == CP210X_PARTNUM_CP2105) {
/* hardware does not support an input mode */
return -ENOTSUPP;
}
/* push-pull pins cannot be changed to be inputs */
if (priv->gpio_pushpull & BIT(gpio))
return -EINVAL;
/* make sure to release pin if it is being driven low */
cp210x_gpio_set(gc, gpio, 1);
priv->gpio_input |= BIT(gpio);
return 0;
}
static int cp210x_gpio_direction_output(struct gpio_chip *gc, unsigned int gpio,
int value)
{
struct usb_serial *serial = gpiochip_get_data(gc);
struct cp210x_serial_private *priv = usb_get_serial_data(serial);
priv->gpio_input &= ~BIT(gpio);
cp210x_gpio_set(gc, gpio, value);
return 0;
}
static int cp210x_gpio_set_config(struct gpio_chip *gc, unsigned int gpio,
unsigned long config)
{
struct usb_serial *serial = gpiochip_get_data(gc);
struct cp210x_serial_private *priv = usb_get_serial_data(serial);
enum pin_config_param param = pinconf_to_config_param(config);
/* Succeed only if in correct mode (this can't be set at runtime) */
if ((param == PIN_CONFIG_DRIVE_PUSH_PULL) &&
(priv->gpio_pushpull & BIT(gpio)))
return 0;
if ((param == PIN_CONFIG_DRIVE_OPEN_DRAIN) &&
!(priv->gpio_pushpull & BIT(gpio)))
return 0;
return -ENOTSUPP;
}
static int cp210x_gpio_init_valid_mask(struct gpio_chip *gc,
unsigned long *valid_mask, unsigned int ngpios)
{
struct usb_serial *serial = gpiochip_get_data(gc);
struct cp210x_serial_private *priv = usb_get_serial_data(serial);
struct device *dev = &serial->interface->dev;
unsigned long altfunc_mask = priv->gpio_altfunc;
bitmap_complement(valid_mask, &altfunc_mask, ngpios);
if (bitmap_empty(valid_mask, ngpios))
dev_dbg(dev, "no pin configured for GPIO\n");
else
dev_dbg(dev, "GPIO.%*pbl configured for GPIO\n", ngpios,
valid_mask);
return 0;
}
/*
* This function is for configuring GPIO using shared pins, where other signals
* are made unavailable by configuring the use of GPIO. This is believed to be
 * only applicable to the cp2105 at this point; the other devices supported by
* this driver that provide GPIO do so in a way that does not impact other
* signals and are thus expected to have very different initialisation.
*/
static int cp2105_gpioconf_init(struct usb_serial *serial)
{
struct cp210x_serial_private *priv = usb_get_serial_data(serial);
struct cp210x_pin_mode mode;
struct cp210x_dual_port_config config;
u8 intf_num = cp210x_interface_num(serial);
u8 iface_config;
int result;
result = cp210x_read_vendor_block(serial, REQTYPE_DEVICE_TO_HOST,
CP210X_GET_DEVICEMODE, &mode,
sizeof(mode));
if (result < 0)
return result;
result = cp210x_read_vendor_block(serial, REQTYPE_DEVICE_TO_HOST,
CP210X_GET_PORTCONFIG, &config,
sizeof(config));
if (result < 0)
return result;
/* 2 banks of GPIO - One for the pins taken from each serial port */
if (intf_num == 0) {
priv->gc.ngpio = 2;
if (mode.eci == CP210X_PIN_MODE_MODEM) {
/* mark all GPIOs of this interface as reserved */
priv->gpio_altfunc = 0xff;
return 0;
}
iface_config = config.eci_cfg;
priv->gpio_pushpull = (u8)((le16_to_cpu(config.gpio_mode) &
CP210X_ECI_GPIO_MODE_MASK) >>
CP210X_ECI_GPIO_MODE_OFFSET);
} else if (intf_num == 1) {
priv->gc.ngpio = 3;
if (mode.sci == CP210X_PIN_MODE_MODEM) {
/* mark all GPIOs of this interface as reserved */
priv->gpio_altfunc = 0xff;
return 0;
}
iface_config = config.sci_cfg;
priv->gpio_pushpull = (u8)((le16_to_cpu(config.gpio_mode) &
CP210X_SCI_GPIO_MODE_MASK) >>
CP210X_SCI_GPIO_MODE_OFFSET);
} else {
return -ENODEV;
}
/* mark all pins which are not in GPIO mode */
if (iface_config & CP2105_GPIO0_TXLED_MODE) /* GPIO 0 */
priv->gpio_altfunc |= BIT(0);
if (iface_config & (CP2105_GPIO1_RXLED_MODE | /* GPIO 1 */
CP2105_GPIO1_RS485_MODE))
priv->gpio_altfunc |= BIT(1);
/* driver implementation for CP2105 only supports outputs */
priv->gpio_input = 0;
return 0;
}
static int cp2104_gpioconf_init(struct usb_serial *serial)
{
struct cp210x_serial_private *priv = usb_get_serial_data(serial);
struct cp210x_single_port_config config;
u8 iface_config;
u8 gpio_latch;
int result;
u8 i;
result = cp210x_read_vendor_block(serial, REQTYPE_DEVICE_TO_HOST,
CP210X_GET_PORTCONFIG, &config,
sizeof(config));
if (result < 0)
return result;
priv->gc.ngpio = 4;
iface_config = config.device_cfg;
priv->gpio_pushpull = (u8)((le16_to_cpu(config.gpio_mode) &
CP210X_GPIO_MODE_MASK) >>
CP210X_GPIO_MODE_OFFSET);
gpio_latch = (u8)((le16_to_cpu(config.reset_state) &
CP210X_GPIO_MODE_MASK) >>
CP210X_GPIO_MODE_OFFSET);
/* mark all pins which are not in GPIO mode */
if (iface_config & CP2104_GPIO0_TXLED_MODE) /* GPIO 0 */
priv->gpio_altfunc |= BIT(0);
if (iface_config & CP2104_GPIO1_RXLED_MODE) /* GPIO 1 */
priv->gpio_altfunc |= BIT(1);
if (iface_config & CP2104_GPIO2_RS485_MODE) /* GPIO 2 */
priv->gpio_altfunc |= BIT(2);
/*
 * Like the CP2102N, the CP2104 has no strict input and output pin
 * modes, so do the same input-mode emulation as for the CP2102N.
*/
for (i = 0; i < priv->gc.ngpio; ++i) {
/*
* Set direction to "input" iff pin is open-drain and reset
* value is 1.
*/
if (!(priv->gpio_pushpull & BIT(i)) && (gpio_latch & BIT(i)))
priv->gpio_input |= BIT(i);
}
return 0;
}
static int cp2108_gpio_init(struct usb_serial *serial)
{
struct cp210x_serial_private *priv = usb_get_serial_data(serial);
struct cp210x_quad_port_config config;
u16 gpio_latch;
int result;
u8 i;
result = cp210x_read_vendor_block(serial, REQTYPE_DEVICE_TO_HOST,
CP210X_GET_PORTCONFIG, &config,
sizeof(config));
if (result < 0)
return result;
priv->gc.ngpio = 16;
priv->gpio_pushpull = le16_to_cpu(config.reset_state.gpio_mode_pb1);
gpio_latch = le16_to_cpu(config.reset_state.gpio_latch_pb1);
/*
* Mark all pins which are not in GPIO mode.
*
* Refer to table 9.1 "GPIO Mode alternate Functions" in the datasheet:
* https://www.silabs.com/documents/public/data-sheets/cp2108-datasheet.pdf
*
 * Alternate functions of GPIO0 to GPIO3 are determined by enhancedfxn_ifc[0],
 * and similarly for the other pins: enhancedfxn_ifc[1] covers GPIO4 to GPIO7,
 * enhancedfxn_ifc[2] GPIO8 to GPIO11 and enhancedfxn_ifc[3] GPIO12 to GPIO15.
*/
for (i = 0; i < 4; i++) {
if (config.enhancedfxn_ifc[i] & CP2108_EF_IFC_GPIO_TXLED)
priv->gpio_altfunc |= BIT(i * 4);
if (config.enhancedfxn_ifc[i] & CP2108_EF_IFC_GPIO_RXLED)
priv->gpio_altfunc |= BIT((i * 4) + 1);
if (config.enhancedfxn_ifc[i] & CP2108_EF_IFC_GPIO_RS485)
priv->gpio_altfunc |= BIT((i * 4) + 2);
if (config.enhancedfxn_ifc[i] & CP2108_EF_IFC_GPIO_CLOCK)
priv->gpio_altfunc |= BIT((i * 4) + 3);
}
/*
 * Like the CP2102N, the CP2108 has no strict input and output pin
 * modes, so do the same input-mode emulation as for the CP2102N.
*/
for (i = 0; i < priv->gc.ngpio; ++i) {
/*
* Set direction to "input" iff pin is open-drain and reset
* value is 1.
*/
if (!(priv->gpio_pushpull & BIT(i)) && (gpio_latch & BIT(i)))
priv->gpio_input |= BIT(i);
}
return 0;
}
static int cp2102n_gpioconf_init(struct usb_serial *serial)
{
struct cp210x_serial_private *priv = usb_get_serial_data(serial);
const u16 config_size = 0x02a6;
u8 gpio_rst_latch;
u8 config_version;
u8 gpio_pushpull;
u8 *config_buf;
u8 gpio_latch;
u8 gpio_ctrl;
int result;
u8 i;
/*
* Retrieve device configuration from the device.
* The array received contains all customization settings done at the
* factory/manufacturer. Format of the array is documented at the
* time of writing at:
* https://www.silabs.com/community/interface/knowledge-base.entry.html/2017/03/31/cp2102n_setconfig-xsfa
*/
config_buf = kmalloc(config_size, GFP_KERNEL);
if (!config_buf)
return -ENOMEM;
result = cp210x_read_vendor_block(serial,
REQTYPE_DEVICE_TO_HOST,
CP210X_READ_2NCONFIG,
config_buf,
config_size);
if (result < 0) {
kfree(config_buf);
return result;
}
config_version = config_buf[CP210X_2NCONFIG_CONFIG_VERSION_IDX];
gpio_pushpull = config_buf[CP210X_2NCONFIG_GPIO_MODE_IDX];
gpio_ctrl = config_buf[CP210X_2NCONFIG_GPIO_CONTROL_IDX];
gpio_rst_latch = config_buf[CP210X_2NCONFIG_GPIO_RSTLATCH_IDX];
kfree(config_buf);
/* Make sure this is a config format we understand. */
if (config_version != 0x01)
return -ENOTSUPP;
priv->gc.ngpio = 4;
/*
* Get default pin states after reset. Needed so we can determine
* the direction of an open-drain pin.
*/
gpio_latch = (gpio_rst_latch >> 3) & 0x0f;
/* 0 indicates open-drain mode, 1 is push-pull */
priv->gpio_pushpull = (gpio_pushpull >> 3) & 0x0f;
/* 0 indicates GPIO mode, 1 is alternate function */
if (priv->partnum == CP210X_PARTNUM_CP2102N_QFN20) {
/* QFN20 is special... */
if (gpio_ctrl & CP2102N_QFN20_GPIO0_CLK_MODE) /* GPIO 0 */
priv->gpio_altfunc |= BIT(0);
if (gpio_ctrl & CP2102N_QFN20_GPIO1_RS485_MODE) /* GPIO 1 */
priv->gpio_altfunc |= BIT(1);
if (gpio_ctrl & CP2102N_QFN20_GPIO2_TXLED_MODE) /* GPIO 2 */
priv->gpio_altfunc |= BIT(2);
if (gpio_ctrl & CP2102N_QFN20_GPIO3_RXLED_MODE) /* GPIO 3 */
priv->gpio_altfunc |= BIT(3);
} else {
priv->gpio_altfunc = (gpio_ctrl >> 2) & 0x0f;
}
if (priv->partnum == CP210X_PARTNUM_CP2102N_QFN28) {
/*
* For the QFN28 package, GPIO4-6 are controlled by
* the low three bits of the mode/latch fields.
* Contrary to the document linked above, the bits for
* the SUSPEND pins are elsewhere. No alternate
* function is available for these pins.
*/
priv->gc.ngpio = 7;
gpio_latch |= (gpio_rst_latch & 7) << 4;
priv->gpio_pushpull |= (gpio_pushpull & 7) << 4;
}
/*
 * The CP2102N does not have strict input and output pin modes; it
 * only knows open-drain and push-pull modes, which are set at the
 * factory. An open-drain pin can function as either an input or an
 * output. We emulate input mode for open-drain pins by making sure
 * they are not driven low, and we do not allow push-pull pins to be
 * set as inputs.
*/
for (i = 0; i < priv->gc.ngpio; ++i) {
/*
* Set direction to "input" iff pin is open-drain and reset
* value is 1.
*/
if (!(priv->gpio_pushpull & BIT(i)) && (gpio_latch & BIT(i)))
priv->gpio_input |= BIT(i);
}
return 0;
}
static int cp210x_gpio_init(struct usb_serial *serial)
{
struct cp210x_serial_private *priv = usb_get_serial_data(serial);
int result;
switch (priv->partnum) {
case CP210X_PARTNUM_CP2104:
result = cp2104_gpioconf_init(serial);
break;
case CP210X_PARTNUM_CP2105:
result = cp2105_gpioconf_init(serial);
break;
case CP210X_PARTNUM_CP2108:
/*
* The GPIOs are not tied to any specific port so only register
* once for interface 0.
*/
if (cp210x_interface_num(serial) != 0)
return 0;
result = cp2108_gpio_init(serial);
break;
case CP210X_PARTNUM_CP2102N_QFN28:
case CP210X_PARTNUM_CP2102N_QFN24:
case CP210X_PARTNUM_CP2102N_QFN20:
result = cp2102n_gpioconf_init(serial);
break;
default:
return 0;
}
if (result < 0)
return result;
priv->gc.label = "cp210x";
priv->gc.get_direction = cp210x_gpio_direction_get;
priv->gc.direction_input = cp210x_gpio_direction_input;
priv->gc.direction_output = cp210x_gpio_direction_output;
priv->gc.get = cp210x_gpio_get;
priv->gc.set = cp210x_gpio_set;
priv->gc.set_config = cp210x_gpio_set_config;
priv->gc.init_valid_mask = cp210x_gpio_init_valid_mask;
priv->gc.owner = THIS_MODULE;
priv->gc.parent = &serial->interface->dev;
priv->gc.base = -1;
priv->gc.can_sleep = true;
result = gpiochip_add_data(&priv->gc, serial);
if (!result)
priv->gpio_registered = true;
return result;
}
static void cp210x_gpio_remove(struct usb_serial *serial)
{
struct cp210x_serial_private *priv = usb_get_serial_data(serial);
if (priv->gpio_registered) {
gpiochip_remove(&priv->gc);
priv->gpio_registered = false;
}
}
#else
static int cp210x_gpio_init(struct usb_serial *serial)
{
return 0;
}
static void cp210x_gpio_remove(struct usb_serial *serial)
{
/* Nothing to do */
}
#endif
static int cp210x_port_probe(struct usb_serial_port *port)
{
struct usb_serial *serial = port->serial;
struct cp210x_port_private *port_priv;
port_priv = kzalloc(sizeof(*port_priv), GFP_KERNEL);
if (!port_priv)
return -ENOMEM;
port_priv->bInterfaceNumber = cp210x_interface_num(serial);
mutex_init(&port_priv->mutex);
usb_set_serial_port_data(port, port_priv);
return 0;
}
static void cp210x_port_remove(struct usb_serial_port *port)
{
struct cp210x_port_private *port_priv;
port_priv = usb_get_serial_port_data(port);
kfree(port_priv);
}
static void cp210x_init_max_speed(struct usb_serial *serial)
{
struct cp210x_serial_private *priv = usb_get_serial_data(serial);
bool use_actual_rate = false;
speed_t min = 300;
speed_t max;
switch (priv->partnum) {
case CP210X_PARTNUM_CP2101:
max = 921600;
break;
case CP210X_PARTNUM_CP2102:
case CP210X_PARTNUM_CP2103:
max = 1000000;
break;
case CP210X_PARTNUM_CP2104:
use_actual_rate = true;
max = 2000000;
break;
case CP210X_PARTNUM_CP2108:
max = 2000000;
break;
case CP210X_PARTNUM_CP2105:
if (cp210x_interface_num(serial) == 0) {
use_actual_rate = true;
max = 2000000; /* ECI */
} else {
min = 2400;
max = 921600; /* SCI */
}
break;
case CP210X_PARTNUM_CP2102N_QFN28:
case CP210X_PARTNUM_CP2102N_QFN24:
case CP210X_PARTNUM_CP2102N_QFN20:
use_actual_rate = true;
max = 3000000;
break;
default:
max = 2000000;
break;
}
priv->min_speed = min;
priv->max_speed = max;
priv->use_actual_rate = use_actual_rate;
}
static void cp2102_determine_quirks(struct usb_serial *serial)
{
struct cp210x_serial_private *priv = usb_get_serial_data(serial);
u8 *buf;
int ret;
buf = kmalloc(2, GFP_KERNEL);
if (!buf)
return;
/*
* Some (possibly counterfeit) CP2102 do not support event-insertion
* mode and respond differently to malformed vendor requests.
* Specifically, they return one instead of two bytes when sent a
* two-byte part-number request.
*/
ret = usb_control_msg(serial->dev, usb_rcvctrlpipe(serial->dev, 0),
CP210X_VENDOR_SPECIFIC, REQTYPE_DEVICE_TO_HOST,
CP210X_GET_PARTNUM, 0, buf, 2, USB_CTRL_GET_TIMEOUT);
if (ret == 1) {
dev_dbg(&serial->interface->dev,
"device does not support event-insertion mode\n");
priv->no_event_mode = true;
}
kfree(buf);
}
static int cp210x_get_fw_version(struct usb_serial *serial, u16 value)
{
struct cp210x_serial_private *priv = usb_get_serial_data(serial);
u8 ver[3];
int ret;
ret = cp210x_read_vendor_block(serial, REQTYPE_DEVICE_TO_HOST, value,
ver, sizeof(ver));
if (ret)
return ret;
dev_dbg(&serial->interface->dev, "%s - %d.%d.%d\n", __func__,
ver[0], ver[1], ver[2]);
priv->fw_version = ver[0] << 16 | ver[1] << 8 | ver[2];
return 0;
}
static void cp210x_determine_type(struct usb_serial *serial)
{
struct cp210x_serial_private *priv = usb_get_serial_data(serial);
int ret;
ret = cp210x_read_vendor_block(serial, REQTYPE_DEVICE_TO_HOST,
CP210X_GET_PARTNUM, &priv->partnum,
sizeof(priv->partnum));
if (ret < 0) {
dev_warn(&serial->interface->dev,
"querying part number failed\n");
priv->partnum = CP210X_PARTNUM_UNKNOWN;
return;
}
dev_dbg(&serial->interface->dev, "partnum = 0x%02x\n", priv->partnum);
switch (priv->partnum) {
case CP210X_PARTNUM_CP2102:
cp2102_determine_quirks(serial);
break;
case CP210X_PARTNUM_CP2105:
case CP210X_PARTNUM_CP2108:
cp210x_get_fw_version(serial, CP210X_GET_FW_VER);
break;
case CP210X_PARTNUM_CP2102N_QFN28:
case CP210X_PARTNUM_CP2102N_QFN24:
case CP210X_PARTNUM_CP2102N_QFN20:
ret = cp210x_get_fw_version(serial, CP210X_GET_FW_VER_2N);
if (ret)
break;
if (priv->fw_version <= 0x10004)
priv->no_flow_control = true;
break;
default:
break;
}
}
static int cp210x_attach(struct usb_serial *serial)
{
int result;
struct cp210x_serial_private *priv;
priv = kzalloc(sizeof(*priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
usb_set_serial_data(serial, priv);
cp210x_determine_type(serial);
cp210x_init_max_speed(serial);
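	/* GPIO support is best effort; failure here does not prevent serial use. */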
result = cp210x_gpio_init(serial);
if (result < 0) {
dev_err(&serial->interface->dev, "GPIO initialisation failed: %d\n",
result);
}
return 0;
}
static void cp210x_disconnect(struct usb_serial *serial)
{
cp210x_gpio_remove(serial);
}
static void cp210x_release(struct usb_serial *serial)
{
struct cp210x_serial_private *priv = usb_get_serial_data(serial);
cp210x_gpio_remove(serial);
kfree(priv);
}
module_usb_serial_driver(serial_drivers, id_table);
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL v2");
| linux-master | drivers/usb/serial/cp210x.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* Garmin GPS driver
*
* Copyright (C) 2006-2011 Hermann Kneissel [email protected]
*
* The latest version of the driver can be found at
* http://sourceforge.net/projects/garmin-gps/
*
* This driver has been derived from v2.1 of the visor driver.
*/
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/timer.h>
#include <linux/tty.h>
#include <linux/tty_driver.h>
#include <linux/tty_flip.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/uaccess.h>
#include <linux/atomic.h>
#include <linux/usb.h>
#include <linux/usb/serial.h>
/* the mode to be set when the port is opened */
static int initial_mode = 1;
#define GARMIN_VENDOR_ID 0x091E
/*
* Version Information
*/
#define VERSION_MAJOR 0
#define VERSION_MINOR 36
#define _STR(s) #s
#define _DRIVER_VERSION(a, b) "v" _STR(a) "." _STR(b)
#define DRIVER_VERSION _DRIVER_VERSION(VERSION_MAJOR, VERSION_MINOR)
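/*
 * The two-level _STR()/_DRIVER_VERSION() indirection makes the preprocessor
 * expand VERSION_MAJOR/VERSION_MINOR before stringifying them, so
 * DRIVER_VERSION evaluates to the string "v0.36".
 */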
#define DRIVER_AUTHOR "hermann kneissel"
#define DRIVER_DESC "garmin gps driver"
/* error codes returned by the driver */
#define EINVPKT 1000 /* invalid packet structure */
/* size of the header of a packet using the usb protocol */
#define GARMIN_PKTHDR_LENGTH 12
/* max. possible size of a packet using the serial protocol */
#define MAX_SERIAL_PKT_SIZ (3 + 255 + 3)
/* max. possible size of a packet with worst case stuffing */
#define MAX_SERIAL_PKT_SIZ_STUFFED (MAX_SERIAL_PKT_SIZ + 256)
/* size of a buffer able to hold a complete (no stuffing) packet
 * (the documented protocol does not contain packets with a larger
 * size, but in theory a packet may be 64k+12 bytes - if larger
 * packet sizes occur in later protocol versions, this value
 * should be increased accordingly, so the input buffer is always
 * large enough to store a complete packet including the header) */
#define GPS_IN_BUFSIZ (GARMIN_PKTHDR_LENGTH+MAX_SERIAL_PKT_SIZ)
/* size of a buffer able to hold a complete (incl. stuffing) packet */
#define GPS_OUT_BUFSIZ (GARMIN_PKTHDR_LENGTH+MAX_SERIAL_PKT_SIZ_STUFFED)
/* where to place the packet id of a serial packet, so we can
* prepend the usb-packet header without the need to move the
 * packet's data */
#define GSP_INITIAL_OFFSET (GARMIN_PKTHDR_LENGTH-2)
/* max. size of incoming private packets (header+1 param) */
#define PRIVPKTSIZ (GARMIN_PKTHDR_LENGTH+4)
#define GARMIN_LAYERID_TRANSPORT 0
#define GARMIN_LAYERID_APPL 20
/* our own layer-id to use for some control mechanisms */
#define GARMIN_LAYERID_PRIVATE 0x01106E4B
#define GARMIN_PKTID_PVT_DATA 51
#define GARMIN_PKTID_L001_COMMAND_DATA 10
#define CMND_ABORT_TRANSFER 0
/* packet ids used in private layer */
#define PRIV_PKTID_SET_DEBUG 1
#define PRIV_PKTID_SET_MODE 2
#define PRIV_PKTID_INFO_REQ 3
#define PRIV_PKTID_INFO_RESP 4
#define PRIV_PKTID_RESET_REQ 5
#define PRIV_PKTID_SET_DEF_MODE 6
#define ETX 0x03
#define DLE 0x10
#define ACK 0x06
#define NAK 0x15
/* structure used to queue incoming packets */
struct garmin_packet {
struct list_head list;
int seq;
/* the real size of the data array, always > 0 */
int size;
__u8 data[];
};
/* structure used to keep the current state of the driver */
struct garmin_data {
__u8 state;
__u16 flags;
__u8 mode;
__u8 count;
__u8 pkt_id;
__u32 serial_num;
struct timer_list timer;
struct usb_serial_port *port;
int seq_counter;
int insize;
int outsize;
__u8 inbuffer [GPS_IN_BUFSIZ]; /* tty -> usb */
__u8 outbuffer[GPS_OUT_BUFSIZ]; /* usb -> tty */
__u8 privpkt[4*6];
spinlock_t lock;
struct list_head pktlist;
struct usb_anchor write_urbs;
};
#define STATE_NEW 0
#define STATE_INITIAL_DELAY 1
#define STATE_TIMEOUT 2
#define STATE_SESSION_REQ1 3
#define STATE_SESSION_REQ2 4
#define STATE_ACTIVE 5
#define STATE_RESET 8
#define STATE_DISCONNECTED 9
#define STATE_WAIT_TTY_ACK 10
#define STATE_GSP_WAIT_DATA 11
#define MODE_NATIVE 0
#define MODE_GARMIN_SERIAL 1
/* Flags used in garmin_data.flags: */
#define FLAGS_SESSION_REPLY_MASK 0x00C0
#define FLAGS_SESSION_REPLY1_SEEN 0x0080
#define FLAGS_SESSION_REPLY2_SEEN 0x0040
#define FLAGS_BULK_IN_ACTIVE 0x0020
#define FLAGS_BULK_IN_RESTART 0x0010
#define FLAGS_THROTTLED 0x0008
#define APP_REQ_SEEN 0x0004
#define APP_RESP_SEEN 0x0002
#define CLEAR_HALT_REQUIRED 0x0001
#define FLAGS_QUEUING 0x0100
#define FLAGS_DROP_DATA 0x0800
#define FLAGS_GSP_SKIP 0x1000
#define FLAGS_GSP_DLESEEN 0x2000
/* function prototypes */
static int gsp_next_packet(struct garmin_data *garmin_data_p);
static int garmin_write_bulk(struct usb_serial_port *port,
const unsigned char *buf, int count,
int dismiss_ack);
/* some special packets to be sent or received */
static unsigned char const GARMIN_START_SESSION_REQ[]
= { 0, 0, 0, 0, 5, 0, 0, 0, 0, 0, 0, 0 };
static unsigned char const GARMIN_START_SESSION_REPLY[]
= { 0, 0, 0, 0, 6, 0, 0, 0, 4, 0, 0, 0 };
static unsigned char const GARMIN_BULK_IN_AVAIL_REPLY[]
= { 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0 };
static unsigned char const GARMIN_STOP_TRANSFER_REQ[]
= { 20, 0, 0, 0, 10, 0, 0, 0, 2, 0, 0, 0, 0, 0 };
static unsigned char const GARMIN_STOP_TRANSFER_REQ_V2[]
= { 20, 0, 0, 0, 10, 0, 0, 0, 1, 0, 0, 0, 0 };
/* packets currently unused, left as documentation */
#if 0
static unsigned char const GARMIN_APP_LAYER_REPLY[]
= { 0x14, 0, 0, 0 };
static unsigned char const GARMIN_START_PVT_REQ[]
= { 20, 0, 0, 0, 10, 0, 0, 0, 2, 0, 0, 0, 49, 0 };
static unsigned char const GARMIN_STOP_PVT_REQ[]
= { 20, 0, 0, 0, 10, 0, 0, 0, 2, 0, 0, 0, 50, 0 };
static unsigned char const PRIVATE_REQ[]
= { 0x4B, 0x6E, 0x10, 0x01, 0xFF, 0, 0, 0, 0xFF, 0, 0, 0 };
#endif
static const struct usb_device_id id_table[] = {
/* the same device id seems to be used by all
usb enabled GPS devices */
{ USB_DEVICE(GARMIN_VENDOR_ID, 3) },
{ } /* Terminating entry */
};
MODULE_DEVICE_TABLE(usb, id_table);
static inline int getLayerId(const __u8 *usbPacket)
{
return __le32_to_cpup((__le32 *)(usbPacket));
}
static inline int getPacketId(const __u8 *usbPacket)
{
return __le32_to_cpup((__le32 *)(usbPacket+4));
}
static inline int getDataLength(const __u8 *usbPacket)
{
return __le32_to_cpup((__le32 *)(usbPacket+8));
}
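/*
 * Illustrative sketch only (not used by the driver): decoding the 12-byte
 * usb packet header with the helpers above.  The function name is made up
 * and the block is kept in the same documentation-only style as the
 * #if 0 packet arrays above.
 */
#if 0
static void example_decode_header(const __u8 *usbPacket)
{
	int layer = getLayerId(usbPacket);	/* bytes 0..3, le32  */
	int id = getPacketId(usbPacket);	/* bytes 4..7, le32  */
	int len = getDataLength(usbPacket);	/* bytes 8..11, le32 */

	/* e.g. GARMIN_START_SESSION_REPLY decodes to layer 0, id 6, len 4 */
	pr_info("layer %d, packet-id %d, %d data bytes\n", layer, id, len);
}
#endif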
/*
* check if the usb-packet in buf contains an abort-transfer command.
* (if yes, all queued data will be dropped)
*/
static inline int isAbortTrfCmnd(const unsigned char *buf)
{
if (memcmp(buf, GARMIN_STOP_TRANSFER_REQ,
sizeof(GARMIN_STOP_TRANSFER_REQ)) == 0 ||
memcmp(buf, GARMIN_STOP_TRANSFER_REQ_V2,
sizeof(GARMIN_STOP_TRANSFER_REQ_V2)) == 0)
return 1;
else
return 0;
}
static void send_to_tty(struct usb_serial_port *port,
char *data, unsigned int actual_length)
{
if (actual_length) {
usb_serial_debug_data(&port->dev, __func__, actual_length, data);
tty_insert_flip_string(&port->port, data, actual_length);
tty_flip_buffer_push(&port->port);
}
}
/******************************************************************************
* packet queue handling
******************************************************************************/
/*
* queue a received (usb-)packet for later processing
*/
static int pkt_add(struct garmin_data *garmin_data_p,
unsigned char *data, unsigned int data_length)
{
int state = 0;
int result = 0;
unsigned long flags;
struct garmin_packet *pkt;
/* process only packets containing data ... */
if (data_length) {
pkt = kmalloc(sizeof(struct garmin_packet)+data_length,
GFP_ATOMIC);
if (!pkt)
return 0;
pkt->size = data_length;
memcpy(pkt->data, data, data_length);
spin_lock_irqsave(&garmin_data_p->lock, flags);
garmin_data_p->flags |= FLAGS_QUEUING;
result = list_empty(&garmin_data_p->pktlist);
pkt->seq = garmin_data_p->seq_counter++;
list_add_tail(&pkt->list, &garmin_data_p->pktlist);
state = garmin_data_p->state;
spin_unlock_irqrestore(&garmin_data_p->lock, flags);
dev_dbg(&garmin_data_p->port->dev,
"%s - added: pkt: %d - %d bytes\n", __func__,
pkt->seq, data_length);
/* in serial mode, if someone is waiting for data from
the device, convert and send the next packet to tty. */
if (result && (state == STATE_GSP_WAIT_DATA))
gsp_next_packet(garmin_data_p);
}
return result;
}
/* get the next pending packet */
static struct garmin_packet *pkt_pop(struct garmin_data *garmin_data_p)
{
unsigned long flags;
struct garmin_packet *result = NULL;
spin_lock_irqsave(&garmin_data_p->lock, flags);
if (!list_empty(&garmin_data_p->pktlist)) {
result = (struct garmin_packet *)garmin_data_p->pktlist.next;
list_del(&result->list);
}
spin_unlock_irqrestore(&garmin_data_p->lock, flags);
return result;
}
/* free up all queued data */
static void pkt_clear(struct garmin_data *garmin_data_p)
{
unsigned long flags;
struct garmin_packet *result = NULL;
spin_lock_irqsave(&garmin_data_p->lock, flags);
while (!list_empty(&garmin_data_p->pktlist)) {
result = (struct garmin_packet *)garmin_data_p->pktlist.next;
list_del(&result->list);
kfree(result);
}
spin_unlock_irqrestore(&garmin_data_p->lock, flags);
}
/******************************************************************************
 * garmin serial protocol handling
******************************************************************************/
/* send an ack packet back to the tty */
static int gsp_send_ack(struct garmin_data *garmin_data_p, __u8 pkt_id)
{
__u8 pkt[10];
__u8 cksum = 0;
__u8 *ptr = pkt;
unsigned l = 0;
dev_dbg(&garmin_data_p->port->dev, "%s - pkt-id: 0x%X.\n", __func__,
pkt_id);
*ptr++ = DLE;
*ptr++ = ACK;
cksum += ACK;
*ptr++ = 2;
cksum += 2;
*ptr++ = pkt_id;
cksum += pkt_id;
if (pkt_id == DLE)
*ptr++ = DLE;
*ptr++ = 0;
*ptr++ = (-cksum) & 0xFF;
*ptr++ = DLE;
*ptr++ = ETX;
l = ptr-pkt;
send_to_tty(garmin_data_p->port, pkt, l);
return 0;
}
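/*
 * Worked example (illustrative only, not extra driver logic): for
 * pkt_id 0x33 the ack frame built above is
 *
 *	10 06 02 33 00 C5 10 03
 *
 * i.e. DLE, ACK, size 2, pkt-id, 0, checksum, DLE, ETX, where
 * 0xC5 == (-(ACK + 2 + 0x33)) & 0xFF.
 */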
/*
 * called for a complete packet received from the tty layer
 *
 * the complete packet (pktid ... cksum) is in garmin_data_p->inbuffer,
 * starting at GSP_INITIAL_OFFSET.
 *
 * count - number of bytes in the input buffer including space reserved for
 * the usb header: GSP_INITIAL_OFFSET + number of bytes in the packet
 * (including pkt-id, data-length and cksum)
*/
static int gsp_rec_packet(struct garmin_data *garmin_data_p, int count)
{
struct device *dev = &garmin_data_p->port->dev;
unsigned long flags;
const __u8 *recpkt = garmin_data_p->inbuffer+GSP_INITIAL_OFFSET;
__le32 *usbdata = (__le32 *) garmin_data_p->inbuffer;
int cksum = 0;
int n = 0;
int pktid = recpkt[0];
int size = recpkt[1];
usb_serial_debug_data(&garmin_data_p->port->dev, __func__,
count-GSP_INITIAL_OFFSET, recpkt);
if (size != (count-GSP_INITIAL_OFFSET-3)) {
dev_dbg(dev, "%s - invalid size, expected %d bytes, got %d\n",
__func__, size, (count-GSP_INITIAL_OFFSET-3));
return -EINVPKT;
}
cksum += *recpkt++;
cksum += *recpkt++;
/* sanity check, remove after test ... */
if ((__u8 *)&(usbdata[3]) != recpkt) {
dev_dbg(dev, "%s - ptr mismatch %p - %p\n", __func__,
			&(usbdata[3]), recpkt);
return -EINVPKT;
}
while (n < size) {
cksum += *recpkt++;
n++;
}
if (((cksum + *recpkt) & 0xff) != 0) {
dev_dbg(dev, "%s - invalid checksum, expected %02x, got %02x\n",
__func__, -cksum & 0xff, *recpkt);
return -EINVPKT;
}
usbdata[0] = __cpu_to_le32(GARMIN_LAYERID_APPL);
usbdata[1] = __cpu_to_le32(pktid);
usbdata[2] = __cpu_to_le32(size);
garmin_write_bulk(garmin_data_p->port, garmin_data_p->inbuffer,
GARMIN_PKTHDR_LENGTH+size, 0);
/* if this was an abort-transfer command, flush all
queued data. */
if (isAbortTrfCmnd(garmin_data_p->inbuffer)) {
spin_lock_irqsave(&garmin_data_p->lock, flags);
garmin_data_p->flags |= FLAGS_DROP_DATA;
spin_unlock_irqrestore(&garmin_data_p->lock, flags);
pkt_clear(garmin_data_p);
}
return count;
}
/*
* Called for data received from tty
*
* buf contains the data read, it may span more than one packet or even
* incomplete packets
*
 * The input record should be a serial record, but it may not be complete.
 * Copy it into our local buffer until an ETX is seen (or an error
 * occurs).
 * Once the record is complete, convert it into a usb packet, send it
 * to the bulk pipe and send an ack back to the tty.
*
* If the input is an ack, just send the last queued packet to the
* tty layer.
*
* if the input is an abort command, drop all queued data.
*/
static int gsp_receive(struct garmin_data *garmin_data_p,
const unsigned char *buf, int count)
{
struct device *dev = &garmin_data_p->port->dev;
unsigned long flags;
int offs = 0;
int ack_or_nak_seen = 0;
__u8 *dest;
int size;
/* dleSeen: set if last byte read was a DLE */
int dleSeen;
/* skip: if set, skip incoming data until possible start of
* new packet
*/
int skip;
__u8 data;
spin_lock_irqsave(&garmin_data_p->lock, flags);
dest = garmin_data_p->inbuffer;
size = garmin_data_p->insize;
dleSeen = garmin_data_p->flags & FLAGS_GSP_DLESEEN;
skip = garmin_data_p->flags & FLAGS_GSP_SKIP;
spin_unlock_irqrestore(&garmin_data_p->lock, flags);
/* dev_dbg(dev, "%s - dle=%d skip=%d size=%d count=%d\n",
__func__, dleSeen, skip, size, count); */
if (size == 0)
size = GSP_INITIAL_OFFSET;
while (offs < count) {
data = *(buf+offs);
offs++;
if (data == DLE) {
if (skip) { /* start of a new pkt */
skip = 0;
size = GSP_INITIAL_OFFSET;
dleSeen = 1;
} else if (dleSeen) {
dest[size++] = data;
dleSeen = 0;
} else {
dleSeen = 1;
}
} else if (data == ETX) {
if (dleSeen) {
/* packet complete */
data = dest[GSP_INITIAL_OFFSET];
if (data == ACK) {
ack_or_nak_seen = ACK;
dev_dbg(dev, "ACK packet complete.\n");
} else if (data == NAK) {
ack_or_nak_seen = NAK;
dev_dbg(dev, "NAK packet complete.\n");
} else {
dev_dbg(dev, "packet complete - id=0x%X.\n",
data);
gsp_rec_packet(garmin_data_p, size);
}
skip = 1;
size = GSP_INITIAL_OFFSET;
dleSeen = 0;
} else {
dest[size++] = data;
}
} else if (!skip) {
if (dleSeen) {
size = GSP_INITIAL_OFFSET;
dleSeen = 0;
}
dest[size++] = data;
}
if (size >= GPS_IN_BUFSIZ) {
dev_dbg(dev, "%s - packet too large.\n", __func__);
skip = 1;
size = GSP_INITIAL_OFFSET;
dleSeen = 0;
}
}
spin_lock_irqsave(&garmin_data_p->lock, flags);
garmin_data_p->insize = size;
/* copy flags back to structure */
if (skip)
garmin_data_p->flags |= FLAGS_GSP_SKIP;
else
garmin_data_p->flags &= ~FLAGS_GSP_SKIP;
if (dleSeen)
garmin_data_p->flags |= FLAGS_GSP_DLESEEN;
else
garmin_data_p->flags &= ~FLAGS_GSP_DLESEEN;
spin_unlock_irqrestore(&garmin_data_p->lock, flags);
if (ack_or_nak_seen) {
if (gsp_next_packet(garmin_data_p) > 0)
garmin_data_p->state = STATE_ACTIVE;
else
garmin_data_p->state = STATE_GSP_WAIT_DATA;
}
return count;
}
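/*
 * Illustrative sketch of the de-framing done above (not driver code):
 * the tty byte stream
 *
 *	10 0A 02 10 10 00 E4 10 03
 *
 * carries packet-id 0x0A with the two payload bytes 0x10 0x00; the second
 * 0x10 after the payload byte 0x10 is the byte stuffing that the dleSeen
 * logic removes, and 0xE4 == (-(0x0A + 0x02 + 0x10 + 0x00)) & 0xFF is the
 * checksum verified in gsp_rec_packet().
 */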
/*
* Sends a usb packet to the tty
*
 * Assumes that all packets end at a usb-packet boundary.
*
* return <0 on error, 0 if packet is incomplete or > 0 if packet was sent
*/
static int gsp_send(struct garmin_data *garmin_data_p,
const unsigned char *buf, int count)
{
struct device *dev = &garmin_data_p->port->dev;
const unsigned char *src;
unsigned char *dst;
int pktid = 0;
int datalen = 0;
int cksum = 0;
int i = 0;
int k;
dev_dbg(dev, "%s - state %d - %d bytes.\n", __func__,
garmin_data_p->state, count);
k = garmin_data_p->outsize;
if ((k+count) > GPS_OUT_BUFSIZ) {
dev_dbg(dev, "packet too large\n");
garmin_data_p->outsize = 0;
return -4;
}
memcpy(garmin_data_p->outbuffer+k, buf, count);
k += count;
garmin_data_p->outsize = k;
if (k >= GARMIN_PKTHDR_LENGTH) {
pktid = getPacketId(garmin_data_p->outbuffer);
datalen = getDataLength(garmin_data_p->outbuffer);
i = GARMIN_PKTHDR_LENGTH + datalen;
if (k < i)
return 0;
} else {
return 0;
}
dev_dbg(dev, "%s - %d bytes in buffer, %d bytes in pkt.\n", __func__, k, i);
/* garmin_data_p->outbuffer now contains a complete packet */
usb_serial_debug_data(&garmin_data_p->port->dev, __func__, k,
garmin_data_p->outbuffer);
garmin_data_p->outsize = 0;
if (getLayerId(garmin_data_p->outbuffer) != GARMIN_LAYERID_APPL) {
dev_dbg(dev, "not an application packet (%d)\n",
getLayerId(garmin_data_p->outbuffer));
return -1;
}
if (pktid > 255) {
dev_dbg(dev, "packet-id %d too large\n", pktid);
return -2;
}
if (datalen > 255) {
dev_dbg(dev, "packet-size %d too large\n", datalen);
return -3;
}
/* the serial protocol should be able to handle this packet */
k = 0;
src = garmin_data_p->outbuffer+GARMIN_PKTHDR_LENGTH;
for (i = 0; i < datalen; i++) {
if (*src++ == DLE)
k++;
}
src = garmin_data_p->outbuffer+GARMIN_PKTHDR_LENGTH;
if (k > (GARMIN_PKTHDR_LENGTH-2)) {
/* can't add stuffing DLEs in place, move data to end
of buffer ... */
dst = garmin_data_p->outbuffer+GPS_OUT_BUFSIZ-datalen;
memcpy(dst, src, datalen);
src = dst;
}
dst = garmin_data_p->outbuffer;
*dst++ = DLE;
*dst++ = pktid;
cksum += pktid;
*dst++ = datalen;
cksum += datalen;
if (datalen == DLE)
*dst++ = DLE;
for (i = 0; i < datalen; i++) {
__u8 c = *src++;
*dst++ = c;
cksum += c;
if (c == DLE)
*dst++ = DLE;
}
cksum = -cksum & 0xFF;
*dst++ = cksum;
if (cksum == DLE)
*dst++ = DLE;
*dst++ = DLE;
*dst++ = ETX;
i = dst-garmin_data_p->outbuffer;
send_to_tty(garmin_data_p->port, garmin_data_p->outbuffer, i);
garmin_data_p->pkt_id = pktid;
garmin_data_p->state = STATE_WAIT_TTY_ACK;
return i;
}
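/*
 * Worked example (illustrative only): an application packet with
 * packet-id 0x33 and no payload leaves gsp_send() as the serial frame
 *
 *	10 33 00 CD 10 03
 *
 * i.e. DLE, pkt-id, size, checksum, DLE, ETX, where
 * 0xCD == (-(0x33 + 0x00)) & 0xFF; any payload byte equal to DLE would
 * additionally be doubled by the loop above.
 */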
/*
* Process the next pending data packet - if there is one
*/
static int gsp_next_packet(struct garmin_data *garmin_data_p)
{
int result = 0;
struct garmin_packet *pkt = NULL;
while ((pkt = pkt_pop(garmin_data_p)) != NULL) {
dev_dbg(&garmin_data_p->port->dev, "%s - next pkt: %d\n", __func__, pkt->seq);
result = gsp_send(garmin_data_p, pkt->data, pkt->size);
if (result > 0) {
kfree(pkt);
return result;
}
kfree(pkt);
}
return result;
}
/******************************************************************************
* garmin native mode
******************************************************************************/
/*
* Called for data received from tty
*
* The input data is expected to be in garmin usb-packet format.
*
* buf contains the data read, it may span more than one packet
* or even incomplete packets
*/
static int nat_receive(struct garmin_data *garmin_data_p,
const unsigned char *buf, int count)
{
unsigned long flags;
__u8 *dest;
int offs = 0;
int result = count;
int len;
while (offs < count) {
/* if buffer contains header, copy rest of data */
if (garmin_data_p->insize >= GARMIN_PKTHDR_LENGTH)
len = GARMIN_PKTHDR_LENGTH
+getDataLength(garmin_data_p->inbuffer);
else
len = GARMIN_PKTHDR_LENGTH;
if (len >= GPS_IN_BUFSIZ) {
/* seems to be an invalid packet, ignore rest
of input */
dev_dbg(&garmin_data_p->port->dev,
"%s - packet size too large: %d\n",
__func__, len);
garmin_data_p->insize = 0;
count = 0;
result = -EINVPKT;
} else {
len -= garmin_data_p->insize;
if (len > (count-offs))
len = (count-offs);
if (len > 0) {
dest = garmin_data_p->inbuffer
+ garmin_data_p->insize;
memcpy(dest, buf+offs, len);
garmin_data_p->insize += len;
offs += len;
}
}
/* do we have a complete packet ? */
if (garmin_data_p->insize >= GARMIN_PKTHDR_LENGTH) {
len = GARMIN_PKTHDR_LENGTH+
getDataLength(garmin_data_p->inbuffer);
if (garmin_data_p->insize >= len) {
garmin_write_bulk(garmin_data_p->port,
garmin_data_p->inbuffer,
len, 0);
garmin_data_p->insize = 0;
/* if this was an abort-transfer command,
flush all queued data. */
if (isAbortTrfCmnd(garmin_data_p->inbuffer)) {
spin_lock_irqsave(&garmin_data_p->lock,
flags);
garmin_data_p->flags |= FLAGS_DROP_DATA;
spin_unlock_irqrestore(
&garmin_data_p->lock, flags);
pkt_clear(garmin_data_p);
}
}
}
}
return result;
}
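/*
 * Illustrative example (not driver code): in native mode user space writes
 * complete usb packets, e.g. the 14-byte sequence
 *
 *	14 00 00 00 0a 00 00 00 02 00 00 00 31 00
 *
 * (layer-id 20, packet-id 10, 2 data bytes; cf. GARMIN_START_PVT_REQ in
 * the #if 0 block above).  nat_receive() only reassembles such packets
 * across write() calls and forwards them to the bulk-out endpoint.
 */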
/******************************************************************************
* private packets
******************************************************************************/
static void priv_status_resp(struct usb_serial_port *port)
{
struct garmin_data *garmin_data_p = usb_get_serial_port_data(port);
__le32 *pkt = (__le32 *)garmin_data_p->privpkt;
pkt[0] = __cpu_to_le32(GARMIN_LAYERID_PRIVATE);
pkt[1] = __cpu_to_le32(PRIV_PKTID_INFO_RESP);
pkt[2] = __cpu_to_le32(12);
pkt[3] = __cpu_to_le32(VERSION_MAJOR << 16 | VERSION_MINOR);
pkt[4] = __cpu_to_le32(garmin_data_p->mode);
pkt[5] = __cpu_to_le32(garmin_data_p->serial_num);
send_to_tty(port, (__u8 *)pkt, 6 * 4);
}
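/*
 * Illustrative only (same documentation-only style as the #if 0 arrays
 * near the top of this file): a private info request as user space could
 * write it to trigger the response above.  The array name is made up and
 * the driver itself never sends this packet.
 */
#if 0
static unsigned char const PRIVATE_INFO_REQ[]
	= { 0x4B, 0x6E, 0x10, 0x01, 3, 0, 0, 0, 0, 0, 0, 0 };
#endif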
/******************************************************************************
* Garmin specific driver functions
******************************************************************************/
static int process_resetdev_request(struct usb_serial_port *port)
{
unsigned long flags;
int status;
struct garmin_data *garmin_data_p = usb_get_serial_port_data(port);
spin_lock_irqsave(&garmin_data_p->lock, flags);
garmin_data_p->flags &= ~(CLEAR_HALT_REQUIRED);
garmin_data_p->state = STATE_RESET;
garmin_data_p->serial_num = 0;
spin_unlock_irqrestore(&garmin_data_p->lock, flags);
usb_kill_urb(port->interrupt_in_urb);
dev_dbg(&port->dev, "%s - usb_reset_device\n", __func__);
status = usb_reset_device(port->serial->dev);
if (status)
dev_dbg(&port->dev, "%s - usb_reset_device failed: %d\n",
__func__, status);
return status;
}
/*
* clear all cached data
*/
static int garmin_clear(struct garmin_data *garmin_data_p)
{
unsigned long flags;
/* flush all queued data */
pkt_clear(garmin_data_p);
spin_lock_irqsave(&garmin_data_p->lock, flags);
garmin_data_p->insize = 0;
garmin_data_p->outsize = 0;
spin_unlock_irqrestore(&garmin_data_p->lock, flags);
return 0;
}
static int garmin_init_session(struct usb_serial_port *port)
{
struct garmin_data *garmin_data_p = usb_get_serial_port_data(port);
int status;
int i;
usb_kill_urb(port->interrupt_in_urb);
status = usb_submit_urb(port->interrupt_in_urb, GFP_KERNEL);
if (status) {
dev_err(&port->dev, "failed to submit interrupt urb: %d\n",
status);
return status;
}
/*
* using the initialization method from gpsbabel. See comments in
* gpsbabel/jeeps/gpslibusb.c gusb_reset_toggles()
*/
dev_dbg(&port->dev, "%s - starting session ...\n", __func__);
garmin_data_p->state = STATE_ACTIVE;
for (i = 0; i < 3; i++) {
status = garmin_write_bulk(port, GARMIN_START_SESSION_REQ,
sizeof(GARMIN_START_SESSION_REQ), 0);
if (status < 0)
goto err_kill_urbs;
}
return 0;
err_kill_urbs:
usb_kill_anchored_urbs(&garmin_data_p->write_urbs);
usb_kill_urb(port->interrupt_in_urb);
return status;
}
static int garmin_open(struct tty_struct *tty, struct usb_serial_port *port)
{
unsigned long flags;
int status = 0;
struct garmin_data *garmin_data_p = usb_get_serial_port_data(port);
spin_lock_irqsave(&garmin_data_p->lock, flags);
garmin_data_p->mode = initial_mode;
garmin_data_p->count = 0;
garmin_data_p->flags &= FLAGS_SESSION_REPLY1_SEEN;
spin_unlock_irqrestore(&garmin_data_p->lock, flags);
/* shutdown any bulk reads that might be going on */
usb_kill_urb(port->read_urb);
if (garmin_data_p->state == STATE_RESET)
status = garmin_init_session(port);
garmin_data_p->state = STATE_ACTIVE;
return status;
}
static void garmin_close(struct usb_serial_port *port)
{
struct garmin_data *garmin_data_p = usb_get_serial_port_data(port);
dev_dbg(&port->dev, "%s - mode=%d state=%d flags=0x%X\n",
__func__, garmin_data_p->mode, garmin_data_p->state,
garmin_data_p->flags);
garmin_clear(garmin_data_p);
/* shutdown our urbs */
usb_kill_urb(port->read_urb);
usb_kill_anchored_urbs(&garmin_data_p->write_urbs);
/* keep reset state so we know that we must start a new session */
if (garmin_data_p->state != STATE_RESET)
garmin_data_p->state = STATE_DISCONNECTED;
}
static void garmin_write_bulk_callback(struct urb *urb)
{
struct usb_serial_port *port = urb->context;
if (port) {
struct garmin_data *garmin_data_p =
usb_get_serial_port_data(port);
if (getLayerId(urb->transfer_buffer) == GARMIN_LAYERID_APPL) {
if (garmin_data_p->mode == MODE_GARMIN_SERIAL) {
gsp_send_ack(garmin_data_p,
((__u8 *)urb->transfer_buffer)[4]);
}
}
usb_serial_port_softint(port);
}
/* Ignore errors that resulted from garmin_write_bulk with
dismiss_ack = 1 */
/* free up the transfer buffer, as usb_free_urb() does not do this */
kfree(urb->transfer_buffer);
}
static int garmin_write_bulk(struct usb_serial_port *port,
const unsigned char *buf, int count,
int dismiss_ack)
{
unsigned long flags;
struct usb_serial *serial = port->serial;
struct garmin_data *garmin_data_p = usb_get_serial_port_data(port);
struct urb *urb;
unsigned char *buffer;
int status;
spin_lock_irqsave(&garmin_data_p->lock, flags);
garmin_data_p->flags &= ~FLAGS_DROP_DATA;
spin_unlock_irqrestore(&garmin_data_p->lock, flags);
buffer = kmemdup(buf, count, GFP_ATOMIC);
if (!buffer)
return -ENOMEM;
urb = usb_alloc_urb(0, GFP_ATOMIC);
if (!urb) {
kfree(buffer);
return -ENOMEM;
}
usb_serial_debug_data(&port->dev, __func__, count, buffer);
usb_fill_bulk_urb(urb, serial->dev,
usb_sndbulkpipe(serial->dev,
port->bulk_out_endpointAddress),
buffer, count,
garmin_write_bulk_callback,
dismiss_ack ? NULL : port);
urb->transfer_flags |= URB_ZERO_PACKET;
if (getLayerId(buffer) == GARMIN_LAYERID_APPL) {
spin_lock_irqsave(&garmin_data_p->lock, flags);
garmin_data_p->flags |= APP_REQ_SEEN;
spin_unlock_irqrestore(&garmin_data_p->lock, flags);
if (garmin_data_p->mode == MODE_GARMIN_SERIAL) {
pkt_clear(garmin_data_p);
garmin_data_p->state = STATE_GSP_WAIT_DATA;
}
}
/* send it down the pipe */
usb_anchor_urb(urb, &garmin_data_p->write_urbs);
status = usb_submit_urb(urb, GFP_ATOMIC);
if (status) {
dev_err(&port->dev,
"%s - usb_submit_urb(write bulk) failed with status = %d\n",
__func__, status);
count = status;
usb_unanchor_urb(urb);
kfree(buffer);
}
/* we are done with this urb, so let the host driver
* really free it when it is finished with it */
usb_free_urb(urb);
return count;
}
static int garmin_write(struct tty_struct *tty, struct usb_serial_port *port,
const unsigned char *buf, int count)
{
struct device *dev = &port->dev;
int pktid, pktsiz, len;
struct garmin_data *garmin_data_p = usb_get_serial_port_data(port);
__le32 *privpkt = (__le32 *)garmin_data_p->privpkt;
usb_serial_debug_data(dev, __func__, count, buf);
if (garmin_data_p->state == STATE_RESET)
return -EIO;
/* check for our private packets */
if (count >= GARMIN_PKTHDR_LENGTH) {
len = PRIVPKTSIZ;
if (count < len)
len = count;
memcpy(garmin_data_p->privpkt, buf, len);
pktsiz = getDataLength(garmin_data_p->privpkt);
pktid = getPacketId(garmin_data_p->privpkt);
if (count == (GARMIN_PKTHDR_LENGTH + pktsiz) &&
getLayerId(garmin_data_p->privpkt) ==
GARMIN_LAYERID_PRIVATE) {
dev_dbg(dev, "%s - processing private request %d\n",
__func__, pktid);
/* drop all unfinished transfers */
garmin_clear(garmin_data_p);
switch (pktid) {
case PRIV_PKTID_SET_MODE:
if (pktsiz != 4)
return -EINVPKT;
garmin_data_p->mode = __le32_to_cpu(privpkt[3]);
dev_dbg(dev, "%s - mode set to %d\n",
__func__, garmin_data_p->mode);
break;
case PRIV_PKTID_INFO_REQ:
priv_status_resp(port);
break;
case PRIV_PKTID_RESET_REQ:
process_resetdev_request(port);
break;
case PRIV_PKTID_SET_DEF_MODE:
if (pktsiz != 4)
return -EINVPKT;
initial_mode = __le32_to_cpu(privpkt[3]);
dev_dbg(dev, "%s - initial_mode set to %d\n",
					__func__, initial_mode);
break;
}
return count;
}
}
if (garmin_data_p->mode == MODE_GARMIN_SERIAL) {
return gsp_receive(garmin_data_p, buf, count);
} else { /* MODE_NATIVE */
return nat_receive(garmin_data_p, buf, count);
}
}
static unsigned int garmin_write_room(struct tty_struct *tty)
{
struct usb_serial_port *port = tty->driver_data;
/*
* Report back the bytes currently available in the output buffer.
*/
struct garmin_data *garmin_data_p = usb_get_serial_port_data(port);
return GPS_OUT_BUFSIZ-garmin_data_p->outsize;
}
static void garmin_read_process(struct garmin_data *garmin_data_p,
unsigned char *data, unsigned data_length,
int bulk_data)
{
unsigned long flags;
if (garmin_data_p->flags & FLAGS_DROP_DATA) {
/* abort-transfer cmd is active */
dev_dbg(&garmin_data_p->port->dev, "%s - pkt dropped\n", __func__);
} else if (garmin_data_p->state != STATE_DISCONNECTED &&
garmin_data_p->state != STATE_RESET) {
		/* if throttling is active or postprocessing is required
put the received data in the input queue, otherwise
send it directly to the tty port */
if (garmin_data_p->flags & FLAGS_QUEUING) {
pkt_add(garmin_data_p, data, data_length);
} else if (bulk_data || (data_length >= sizeof(u32) &&
getLayerId(data) == GARMIN_LAYERID_APPL)) {
spin_lock_irqsave(&garmin_data_p->lock, flags);
garmin_data_p->flags |= APP_RESP_SEEN;
spin_unlock_irqrestore(&garmin_data_p->lock, flags);
if (garmin_data_p->mode == MODE_GARMIN_SERIAL) {
pkt_add(garmin_data_p, data, data_length);
} else {
send_to_tty(garmin_data_p->port, data,
data_length);
}
}
/* ignore system layer packets ... */
}
}
static void garmin_read_bulk_callback(struct urb *urb)
{
unsigned long flags;
struct usb_serial_port *port = urb->context;
struct garmin_data *garmin_data_p = usb_get_serial_port_data(port);
unsigned char *data = urb->transfer_buffer;
int status = urb->status;
int retval;
if (status) {
dev_dbg(&urb->dev->dev, "%s - nonzero read bulk status received: %d\n",
__func__, status);
return;
}
usb_serial_debug_data(&port->dev, __func__, urb->actual_length, data);
garmin_read_process(garmin_data_p, data, urb->actual_length, 1);
if (urb->actual_length == 0 &&
(garmin_data_p->flags & FLAGS_BULK_IN_RESTART) != 0) {
spin_lock_irqsave(&garmin_data_p->lock, flags);
garmin_data_p->flags &= ~FLAGS_BULK_IN_RESTART;
spin_unlock_irqrestore(&garmin_data_p->lock, flags);
retval = usb_submit_urb(port->read_urb, GFP_ATOMIC);
if (retval)
dev_err(&port->dev,
"%s - failed resubmitting read urb, error %d\n",
__func__, retval);
} else if (urb->actual_length > 0) {
/* Continue trying to read until nothing more is received */
if ((garmin_data_p->flags & FLAGS_THROTTLED) == 0) {
retval = usb_submit_urb(port->read_urb, GFP_ATOMIC);
if (retval)
dev_err(&port->dev,
"%s - failed resubmitting read urb, error %d\n",
__func__, retval);
}
} else {
dev_dbg(&port->dev, "%s - end of bulk data\n", __func__);
spin_lock_irqsave(&garmin_data_p->lock, flags);
garmin_data_p->flags &= ~FLAGS_BULK_IN_ACTIVE;
spin_unlock_irqrestore(&garmin_data_p->lock, flags);
}
}
static void garmin_read_int_callback(struct urb *urb)
{
unsigned long flags;
int retval;
struct usb_serial_port *port = urb->context;
struct garmin_data *garmin_data_p = usb_get_serial_port_data(port);
unsigned char *data = urb->transfer_buffer;
int status = urb->status;
switch (status) {
case 0:
/* success */
break;
case -ECONNRESET:
case -ENOENT:
case -ESHUTDOWN:
/* this urb is terminated, clean up */
dev_dbg(&urb->dev->dev, "%s - urb shutting down with status: %d\n",
__func__, status);
return;
default:
dev_dbg(&urb->dev->dev, "%s - nonzero urb status received: %d\n",
__func__, status);
return;
}
usb_serial_debug_data(&port->dev, __func__, urb->actual_length,
urb->transfer_buffer);
if (urb->actual_length == sizeof(GARMIN_BULK_IN_AVAIL_REPLY) &&
memcmp(data, GARMIN_BULK_IN_AVAIL_REPLY,
sizeof(GARMIN_BULK_IN_AVAIL_REPLY)) == 0) {
dev_dbg(&port->dev, "%s - bulk data available.\n", __func__);
if ((garmin_data_p->flags & FLAGS_BULK_IN_ACTIVE) == 0) {
/* bulk data available */
retval = usb_submit_urb(port->read_urb, GFP_ATOMIC);
if (retval) {
dev_err(&port->dev,
"%s - failed submitting read urb, error %d\n",
__func__, retval);
} else {
spin_lock_irqsave(&garmin_data_p->lock, flags);
garmin_data_p->flags |= FLAGS_BULK_IN_ACTIVE;
spin_unlock_irqrestore(&garmin_data_p->lock,
flags);
}
} else {
/* bulk-in transfer still active */
spin_lock_irqsave(&garmin_data_p->lock, flags);
garmin_data_p->flags |= FLAGS_BULK_IN_RESTART;
spin_unlock_irqrestore(&garmin_data_p->lock, flags);
}
} else if (urb->actual_length == (4+sizeof(GARMIN_START_SESSION_REPLY))
&& memcmp(data, GARMIN_START_SESSION_REPLY,
sizeof(GARMIN_START_SESSION_REPLY)) == 0) {
spin_lock_irqsave(&garmin_data_p->lock, flags);
garmin_data_p->flags |= FLAGS_SESSION_REPLY1_SEEN;
spin_unlock_irqrestore(&garmin_data_p->lock, flags);
/* save the serial number */
garmin_data_p->serial_num = __le32_to_cpup(
(__le32 *)(data+GARMIN_PKTHDR_LENGTH));
dev_dbg(&port->dev, "%s - start-of-session reply seen - serial %u.\n",
__func__, garmin_data_p->serial_num);
}
garmin_read_process(garmin_data_p, data, urb->actual_length, 0);
retval = usb_submit_urb(urb, GFP_ATOMIC);
if (retval)
dev_err(&urb->dev->dev,
"%s - Error %d submitting interrupt urb\n",
__func__, retval);
}
/*
 * Sends the next queued packet to the tty port (garmin native mode only)
* and then sets a timer to call itself again until all queued data
* is sent.
*/
static int garmin_flush_queue(struct garmin_data *garmin_data_p)
{
unsigned long flags;
struct garmin_packet *pkt;
if ((garmin_data_p->flags & FLAGS_THROTTLED) == 0) {
pkt = pkt_pop(garmin_data_p);
if (pkt != NULL) {
send_to_tty(garmin_data_p->port, pkt->data, pkt->size);
kfree(pkt);
mod_timer(&garmin_data_p->timer, (1)+jiffies);
} else {
spin_lock_irqsave(&garmin_data_p->lock, flags);
garmin_data_p->flags &= ~FLAGS_QUEUING;
spin_unlock_irqrestore(&garmin_data_p->lock, flags);
}
}
return 0;
}
static void garmin_throttle(struct tty_struct *tty)
{
struct usb_serial_port *port = tty->driver_data;
struct garmin_data *garmin_data_p = usb_get_serial_port_data(port);
/* set flag, data received will be put into a queue
for later processing */
spin_lock_irq(&garmin_data_p->lock);
garmin_data_p->flags |= FLAGS_QUEUING|FLAGS_THROTTLED;
spin_unlock_irq(&garmin_data_p->lock);
}
static void garmin_unthrottle(struct tty_struct *tty)
{
struct usb_serial_port *port = tty->driver_data;
struct garmin_data *garmin_data_p = usb_get_serial_port_data(port);
int status;
spin_lock_irq(&garmin_data_p->lock);
garmin_data_p->flags &= ~FLAGS_THROTTLED;
spin_unlock_irq(&garmin_data_p->lock);
/* in native mode send queued data to tty, in
serial mode nothing needs to be done here */
if (garmin_data_p->mode == MODE_NATIVE)
garmin_flush_queue(garmin_data_p);
if ((garmin_data_p->flags & FLAGS_BULK_IN_ACTIVE) != 0) {
status = usb_submit_urb(port->read_urb, GFP_KERNEL);
if (status)
dev_err(&port->dev,
"%s - failed resubmitting read urb, error %d\n",
__func__, status);
}
}
/*
* The timer is currently only used to send queued packets to
* the tty in cases where the protocol provides no own handshaking
* to initiate the transfer.
*/
static void timeout_handler(struct timer_list *t)
{
struct garmin_data *garmin_data_p = from_timer(garmin_data_p, t, timer);
/* send the next queued packet to the tty port */
if (garmin_data_p->mode == MODE_NATIVE)
if (garmin_data_p->flags & FLAGS_QUEUING)
garmin_flush_queue(garmin_data_p);
}
static int garmin_port_probe(struct usb_serial_port *port)
{
int status;
struct garmin_data *garmin_data_p;
garmin_data_p = kzalloc(sizeof(struct garmin_data), GFP_KERNEL);
if (!garmin_data_p)
return -ENOMEM;
timer_setup(&garmin_data_p->timer, timeout_handler, 0);
spin_lock_init(&garmin_data_p->lock);
INIT_LIST_HEAD(&garmin_data_p->pktlist);
garmin_data_p->port = port;
garmin_data_p->state = 0;
garmin_data_p->flags = 0;
garmin_data_p->count = 0;
init_usb_anchor(&garmin_data_p->write_urbs);
usb_set_serial_port_data(port, garmin_data_p);
status = garmin_init_session(port);
if (status)
goto err_free;
return 0;
err_free:
kfree(garmin_data_p);
return status;
}
static void garmin_port_remove(struct usb_serial_port *port)
{
struct garmin_data *garmin_data_p = usb_get_serial_port_data(port);
usb_kill_anchored_urbs(&garmin_data_p->write_urbs);
usb_kill_urb(port->interrupt_in_urb);
timer_shutdown_sync(&garmin_data_p->timer);
kfree(garmin_data_p);
}
/* All of the device info needed */
static struct usb_serial_driver garmin_device = {
.driver = {
.owner = THIS_MODULE,
.name = "garmin_gps",
},
.description = "Garmin GPS usb/tty",
.id_table = id_table,
.num_ports = 1,
.open = garmin_open,
.close = garmin_close,
.throttle = garmin_throttle,
.unthrottle = garmin_unthrottle,
.port_probe = garmin_port_probe,
.port_remove = garmin_port_remove,
.write = garmin_write,
.write_room = garmin_write_room,
.write_bulk_callback = garmin_write_bulk_callback,
.read_bulk_callback = garmin_read_bulk_callback,
.read_int_callback = garmin_read_int_callback,
};
static struct usb_serial_driver * const serial_drivers[] = {
&garmin_device, NULL
};
module_usb_serial_driver(serial_drivers, id_table);
MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL");
module_param(initial_mode, int, 0444);
MODULE_PARM_DESC(initial_mode, "Initial mode");
| linux-master | drivers/usb/serial/garmin_gps.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Opticon USB barcode to serial driver
*
* Copyright (C) 2011 - 2012 Johan Hovold <[email protected]>
* Copyright (C) 2011 Martin Jansen <[email protected]>
* Copyright (C) 2008 - 2009 Greg Kroah-Hartman <[email protected]>
* Copyright (C) 2008 - 2009 Novell Inc.
*/
#include <linux/kernel.h>
#include <linux/tty.h>
#include <linux/tty_driver.h>
#include <linux/slab.h>
#include <linux/tty_flip.h>
#include <linux/serial.h>
#include <linux/module.h>
#include <linux/usb.h>
#include <linux/usb/serial.h>
#include <linux/uaccess.h>
#define CONTROL_RTS 0x02
#define RESEND_CTS_STATE 0x03
/* max number of write urbs in flight */
#define URB_UPPER_LIMIT 8
/* This driver works for the Opticon 1D barcode reader;
 * examples of 1D barcode types are EAN, UPC, Code39, IATA, etc. */
#define DRIVER_DESC "Opticon USB barcode to serial driver (1D)"
static const struct usb_device_id id_table[] = {
{ USB_DEVICE(0x065a, 0x0009) },
{ },
};
MODULE_DEVICE_TABLE(usb, id_table);
/* This structure holds all of the individual device information */
struct opticon_private {
spinlock_t lock; /* protects the following flags */
bool rts;
bool cts;
int outstanding_urbs;
int outstanding_bytes;
struct usb_anchor anchor;
};
static void opticon_process_data_packet(struct usb_serial_port *port,
const unsigned char *buf, size_t len)
{
tty_insert_flip_string(&port->port, buf, len);
tty_flip_buffer_push(&port->port);
}
static void opticon_process_status_packet(struct usb_serial_port *port,
const unsigned char *buf, size_t len)
{
struct opticon_private *priv = usb_get_serial_port_data(port);
unsigned long flags;
spin_lock_irqsave(&priv->lock, flags);
if (buf[0] == 0x00)
priv->cts = false;
else
priv->cts = true;
spin_unlock_irqrestore(&priv->lock, flags);
}
static void opticon_process_read_urb(struct urb *urb)
{
struct usb_serial_port *port = urb->context;
const unsigned char *hdr = urb->transfer_buffer;
const unsigned char *data = hdr + 2;
size_t data_len = urb->actual_length - 2;
if (urb->actual_length <= 2) {
dev_dbg(&port->dev, "malformed packet received: %d bytes\n",
urb->actual_length);
return;
}
/*
* Data from the device comes with a 2 byte header:
*
* <0x00><0x00>data...
* This is real data to be sent to the tty layer
* <0x00><0x01>level
* This is a CTS level change, the third byte is the CTS
* value (0 for low, 1 for high).
*/
if ((hdr[0] == 0x00) && (hdr[1] == 0x00)) {
opticon_process_data_packet(port, data, data_len);
} else if ((hdr[0] == 0x00) && (hdr[1] == 0x01)) {
opticon_process_status_packet(port, data, data_len);
} else {
dev_dbg(&port->dev, "unknown packet received: %02x %02x\n",
hdr[0], hdr[1]);
}
}
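/*
 * Worked example (illustrative only): the urb payload 00 00 41 42 43 is a
 * data packet and pushes the three bytes "ABC" to the tty, while 00 01 01
 * is a status packet reporting that CTS went high.
 */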
static int send_control_msg(struct usb_serial_port *port, u8 requesttype,
u8 val)
{
struct usb_serial *serial = port->serial;
int retval;
u8 *buffer;
buffer = kzalloc(1, GFP_KERNEL);
if (!buffer)
return -ENOMEM;
buffer[0] = val;
/* Send the message to the vendor control endpoint
* of the connected device */
retval = usb_control_msg(serial->dev, usb_sndctrlpipe(serial->dev, 0),
requesttype,
USB_DIR_OUT|USB_TYPE_VENDOR|USB_RECIP_INTERFACE,
0, 0, buffer, 1, USB_CTRL_SET_TIMEOUT);
kfree(buffer);
if (retval < 0)
return retval;
return 0;
}
static int opticon_open(struct tty_struct *tty, struct usb_serial_port *port)
{
struct opticon_private *priv = usb_get_serial_port_data(port);
unsigned long flags;
int res;
spin_lock_irqsave(&priv->lock, flags);
priv->rts = false;
spin_unlock_irqrestore(&priv->lock, flags);
/* Clear RTS line */
send_control_msg(port, CONTROL_RTS, 0);
/* clear the halt status of the endpoint */
usb_clear_halt(port->serial->dev, port->read_urb->pipe);
res = usb_serial_generic_open(tty, port);
if (res)
return res;
	/* Request the CTS line state; during open the current CTS state
	 * can otherwise be missed. */
send_control_msg(port, RESEND_CTS_STATE, 1);
return res;
}
static void opticon_close(struct usb_serial_port *port)
{
struct opticon_private *priv = usb_get_serial_port_data(port);
usb_kill_anchored_urbs(&priv->anchor);
usb_serial_generic_close(port);
}
static void opticon_write_control_callback(struct urb *urb)
{
struct usb_serial_port *port = urb->context;
struct opticon_private *priv = usb_get_serial_port_data(port);
int status = urb->status;
unsigned long flags;
/* free up the transfer buffer, as usb_free_urb() does not do this */
kfree(urb->transfer_buffer);
/* setup packet may be set if we're using it for writing */
kfree(urb->setup_packet);
if (status)
dev_dbg(&port->dev,
"%s - non-zero urb status received: %d\n",
__func__, status);
spin_lock_irqsave(&priv->lock, flags);
--priv->outstanding_urbs;
priv->outstanding_bytes -= urb->transfer_buffer_length;
spin_unlock_irqrestore(&priv->lock, flags);
usb_serial_port_softint(port);
}
static int opticon_write(struct tty_struct *tty, struct usb_serial_port *port,
const unsigned char *buf, int count)
{
struct opticon_private *priv = usb_get_serial_port_data(port);
struct usb_serial *serial = port->serial;
struct urb *urb;
unsigned char *buffer;
unsigned long flags;
struct usb_ctrlrequest *dr;
int ret = -ENOMEM;
spin_lock_irqsave(&priv->lock, flags);
if (priv->outstanding_urbs > URB_UPPER_LIMIT) {
spin_unlock_irqrestore(&priv->lock, flags);
dev_dbg(&port->dev, "%s - write limit hit\n", __func__);
return 0;
}
priv->outstanding_urbs++;
priv->outstanding_bytes += count;
spin_unlock_irqrestore(&priv->lock, flags);
buffer = kmemdup(buf, count, GFP_ATOMIC);
if (!buffer)
goto error_no_buffer;
urb = usb_alloc_urb(0, GFP_ATOMIC);
if (!urb)
goto error_no_urb;
usb_serial_debug_data(&port->dev, __func__, count, buffer);
	/* The connected devices do not have a bulk write endpoint;
	 * the control endpoint is used to transmit data to the barcode device */
dr = kmalloc(sizeof(struct usb_ctrlrequest), GFP_ATOMIC);
if (!dr)
goto error_no_dr;
dr->bRequestType = USB_TYPE_VENDOR | USB_RECIP_INTERFACE | USB_DIR_OUT;
dr->bRequest = 0x01;
dr->wValue = 0;
dr->wIndex = 0;
dr->wLength = cpu_to_le16(count);
usb_fill_control_urb(urb, serial->dev,
usb_sndctrlpipe(serial->dev, 0),
(unsigned char *)dr, buffer, count,
opticon_write_control_callback, port);
usb_anchor_urb(urb, &priv->anchor);
/* send it down the pipe */
ret = usb_submit_urb(urb, GFP_ATOMIC);
if (ret) {
dev_err(&port->dev, "failed to submit write urb: %d\n", ret);
usb_unanchor_urb(urb);
goto error;
}
/* we are done with this urb, so let the host driver
* really free it when it is finished with it */
usb_free_urb(urb);
return count;
error:
kfree(dr);
error_no_dr:
usb_free_urb(urb);
error_no_urb:
kfree(buffer);
error_no_buffer:
spin_lock_irqsave(&priv->lock, flags);
--priv->outstanding_urbs;
priv->outstanding_bytes -= count;
spin_unlock_irqrestore(&priv->lock, flags);
return ret;
}
static unsigned int opticon_write_room(struct tty_struct *tty)
{
struct usb_serial_port *port = tty->driver_data;
struct opticon_private *priv = usb_get_serial_port_data(port);
unsigned long flags;
/*
* We really can take almost anything the user throws at us
* but let's pick a nice big number to tell the tty
* layer that we have lots of free space, unless we don't.
*/
spin_lock_irqsave(&priv->lock, flags);
if (priv->outstanding_urbs > URB_UPPER_LIMIT * 2 / 3) {
spin_unlock_irqrestore(&priv->lock, flags);
dev_dbg(&port->dev, "%s - write limit hit\n", __func__);
return 0;
}
spin_unlock_irqrestore(&priv->lock, flags);
return 2048;
}
static unsigned int opticon_chars_in_buffer(struct tty_struct *tty)
{
struct usb_serial_port *port = tty->driver_data;
struct opticon_private *priv = usb_get_serial_port_data(port);
unsigned long flags;
unsigned int count;
spin_lock_irqsave(&priv->lock, flags);
count = priv->outstanding_bytes;
spin_unlock_irqrestore(&priv->lock, flags);
return count;
}
static int opticon_tiocmget(struct tty_struct *tty)
{
struct usb_serial_port *port = tty->driver_data;
struct opticon_private *priv = usb_get_serial_port_data(port);
unsigned long flags;
int result = 0;
spin_lock_irqsave(&priv->lock, flags);
if (priv->rts)
result |= TIOCM_RTS;
if (priv->cts)
result |= TIOCM_CTS;
spin_unlock_irqrestore(&priv->lock, flags);
dev_dbg(&port->dev, "%s - %x\n", __func__, result);
return result;
}
static int opticon_tiocmset(struct tty_struct *tty,
unsigned int set, unsigned int clear)
{
struct usb_serial_port *port = tty->driver_data;
struct opticon_private *priv = usb_get_serial_port_data(port);
unsigned long flags;
bool rts;
bool changed = false;
int ret;
/* We only support RTS so we only handle that */
spin_lock_irqsave(&priv->lock, flags);
rts = priv->rts;
if (set & TIOCM_RTS)
priv->rts = true;
if (clear & TIOCM_RTS)
priv->rts = false;
changed = rts ^ priv->rts;
spin_unlock_irqrestore(&priv->lock, flags);
if (!changed)
return 0;
ret = send_control_msg(port, CONTROL_RTS, !rts);
if (ret)
return usb_translate_errors(ret);
return 0;
}
static int opticon_port_probe(struct usb_serial_port *port)
{
struct opticon_private *priv;
priv = kzalloc(sizeof(*priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
spin_lock_init(&priv->lock);
init_usb_anchor(&priv->anchor);
usb_set_serial_port_data(port, priv);
return 0;
}
static void opticon_port_remove(struct usb_serial_port *port)
{
struct opticon_private *priv = usb_get_serial_port_data(port);
kfree(priv);
}
static struct usb_serial_driver opticon_device = {
.driver = {
.owner = THIS_MODULE,
.name = "opticon",
},
.id_table = id_table,
.num_ports = 1,
.num_bulk_in = 1,
.bulk_in_size = 256,
.port_probe = opticon_port_probe,
.port_remove = opticon_port_remove,
.open = opticon_open,
.close = opticon_close,
.write = opticon_write,
.write_room = opticon_write_room,
.chars_in_buffer = opticon_chars_in_buffer,
.throttle = usb_serial_generic_throttle,
.unthrottle = usb_serial_generic_unthrottle,
.tiocmget = opticon_tiocmget,
.tiocmset = opticon_tiocmset,
.process_read_urb = opticon_process_read_urb,
};
static struct usb_serial_driver * const serial_drivers[] = {
&opticon_device, NULL
};
module_usb_serial_driver(serial_drivers, id_table);
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL v2");
| linux-master | drivers/usb/serial/opticon.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* KLSI KL5KUSB105 chip RS232 converter driver
*
* Copyright (C) 2010 Johan Hovold <[email protected]>
* Copyright (C) 2001 Utz-Uwe Haus <[email protected]>
*
 * All information about the device was acquired using SniffUSB and snoopUSB
 * on Windows 98.
* It was written out of frustration with the PalmConnect USB Serial adapter
* sold by Palm Inc.
* Neither Palm, nor their contractor (MCCI) or their supplier (KLSI) provided
* information that was not already available.
*
* It seems that KLSI bought some silicon-design information from ScanLogic,
* whose SL11R processor is at the core of the KL5KUSB chipset from KLSI.
* KLSI has firmware available for their devices; it is probable that the
* firmware differs from that used by KLSI in their products. If you have an
* original KLSI device and can provide some information on it, I would be
* most interested in adding support for it here. If you have any information
* on the protocol used (or find errors in my reverse-engineered stuff), please
* let me know.
*
* The code was only tested with a PalmConnect USB adapter; if you
* are adventurous, try it with any KLSI-based device and let me know how it
* breaks so that I can fix it!
*/
/* TODO:
* check modem line signals
* implement handshaking or decide that we do not support it
*/
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/tty.h>
#include <linux/tty_driver.h>
#include <linux/tty_flip.h>
#include <linux/module.h>
#include <linux/uaccess.h>
#include <asm/unaligned.h>
#include <linux/usb.h>
#include <linux/usb/serial.h>
#include "kl5kusb105.h"
#define DRIVER_AUTHOR "Utz-Uwe Haus <[email protected]>, Johan Hovold <[email protected]>"
#define DRIVER_DESC "KLSI KL5KUSB105 chipset USB->Serial Converter driver"
/*
* Function prototypes
*/
static int klsi_105_port_probe(struct usb_serial_port *port);
static void klsi_105_port_remove(struct usb_serial_port *port);
static int klsi_105_open(struct tty_struct *tty, struct usb_serial_port *port);
static void klsi_105_close(struct usb_serial_port *port);
static void klsi_105_set_termios(struct tty_struct *tty,
struct usb_serial_port *port,
const struct ktermios *old_termios);
static int klsi_105_tiocmget(struct tty_struct *tty);
static void klsi_105_process_read_urb(struct urb *urb);
static int klsi_105_prepare_write_buffer(struct usb_serial_port *port,
void *dest, size_t size);
/*
* All of the device info needed for the KLSI converters.
*/
static const struct usb_device_id id_table[] = {
{ USB_DEVICE(PALMCONNECT_VID, PALMCONNECT_PID) },
{ } /* Terminating entry */
};
MODULE_DEVICE_TABLE(usb, id_table);
static struct usb_serial_driver kl5kusb105d_device = {
.driver = {
.owner = THIS_MODULE,
.name = "kl5kusb105d",
},
.description = "KL5KUSB105D / PalmConnect",
.id_table = id_table,
.num_ports = 1,
.bulk_out_size = 64,
.open = klsi_105_open,
.close = klsi_105_close,
.set_termios = klsi_105_set_termios,
.tiocmget = klsi_105_tiocmget,
.port_probe = klsi_105_port_probe,
.port_remove = klsi_105_port_remove,
.throttle = usb_serial_generic_throttle,
.unthrottle = usb_serial_generic_unthrottle,
.process_read_urb = klsi_105_process_read_urb,
.prepare_write_buffer = klsi_105_prepare_write_buffer,
};
static struct usb_serial_driver * const serial_drivers[] = {
&kl5kusb105d_device, NULL
};
struct klsi_105_port_settings {
u8 pktlen; /* always 5, it seems */
u8 baudrate;
u8 databits;
u8 unknown1;
u8 unknown2;
};
struct klsi_105_private {
struct klsi_105_port_settings cfg;
unsigned long line_state; /* modem line settings */
spinlock_t lock;
};
/*
* Handle vendor specific USB requests
*/
#define KLSI_TIMEOUT 5000 /* default urb timeout */
static int klsi_105_chg_port_settings(struct usb_serial_port *port,
struct klsi_105_port_settings *settings)
{
int rc;
rc = usb_control_msg_send(port->serial->dev,
0,
KL5KUSB105A_SIO_SET_DATA,
USB_TYPE_VENDOR | USB_DIR_OUT |
USB_RECIP_INTERFACE,
0, /* value */
0, /* index */
settings,
sizeof(struct klsi_105_port_settings),
KLSI_TIMEOUT,
GFP_KERNEL);
if (rc)
dev_err(&port->dev,
"Change port settings failed (error = %d)\n", rc);
dev_dbg(&port->dev,
"pktlen %u, baudrate 0x%02x, databits %u, u1 %u, u2 %u\n",
settings->pktlen, settings->baudrate, settings->databits,
settings->unknown1, settings->unknown2);
return rc;
}
/*
* Read line control via vendor command and return result through
* the state pointer.
*/
static int klsi_105_get_line_state(struct usb_serial_port *port,
unsigned long *state)
{
u16 status;
int rc;
rc = usb_control_msg_recv(port->serial->dev, 0,
KL5KUSB105A_SIO_POLL,
USB_TYPE_VENDOR | USB_DIR_IN,
0, /* value */
0, /* index */
&status, sizeof(status),
10000,
GFP_KERNEL);
if (rc) {
dev_err(&port->dev, "reading line status failed: %d\n", rc);
return rc;
}
le16_to_cpus(&status);
dev_dbg(&port->dev, "read status %04x\n", status);
*state = ((status & KL5KUSB105A_DSR) ? TIOCM_DSR : 0) |
((status & KL5KUSB105A_CTS) ? TIOCM_CTS : 0);
return 0;
}
/*
* Driver's tty interface functions
*/
static int klsi_105_port_probe(struct usb_serial_port *port)
{
struct klsi_105_private *priv;
priv = kmalloc(sizeof(*priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
/* set initial values for control structures */
priv->cfg.pktlen = 5;
priv->cfg.baudrate = kl5kusb105a_sio_b9600;
priv->cfg.databits = kl5kusb105a_dtb_8;
priv->cfg.unknown1 = 0;
priv->cfg.unknown2 = 1;
priv->line_state = 0;
spin_lock_init(&priv->lock);
usb_set_serial_port_data(port, priv);
return 0;
}
static void klsi_105_port_remove(struct usb_serial_port *port)
{
struct klsi_105_private *priv;
priv = usb_get_serial_port_data(port);
kfree(priv);
}
static int klsi_105_open(struct tty_struct *tty, struct usb_serial_port *port)
{
struct klsi_105_private *priv = usb_get_serial_port_data(port);
int retval = 0;
int rc;
unsigned long line_state;
struct klsi_105_port_settings cfg;
unsigned long flags;
/* Do a defined restart:
* Set up sane default baud rate and send the 'READ_ON'
* vendor command.
* FIXME: set modem line control (how?)
* Then read the modem line control and store values in
* priv->line_state.
*/
cfg.pktlen = 5;
cfg.baudrate = kl5kusb105a_sio_b9600;
cfg.databits = kl5kusb105a_dtb_8;
cfg.unknown1 = 0;
cfg.unknown2 = 1;
klsi_105_chg_port_settings(port, &cfg);
spin_lock_irqsave(&priv->lock, flags);
priv->cfg.pktlen = cfg.pktlen;
priv->cfg.baudrate = cfg.baudrate;
priv->cfg.databits = cfg.databits;
priv->cfg.unknown1 = cfg.unknown1;
priv->cfg.unknown2 = cfg.unknown2;
spin_unlock_irqrestore(&priv->lock, flags);
/* READ_ON and urb submission */
rc = usb_serial_generic_open(tty, port);
if (rc)
return rc;
rc = usb_control_msg(port->serial->dev,
usb_sndctrlpipe(port->serial->dev, 0),
KL5KUSB105A_SIO_CONFIGURE,
USB_TYPE_VENDOR|USB_DIR_OUT|USB_RECIP_INTERFACE,
KL5KUSB105A_SIO_CONFIGURE_READ_ON,
0, /* index */
NULL,
0,
KLSI_TIMEOUT);
if (rc < 0) {
dev_err(&port->dev, "Enabling read failed (error = %d)\n", rc);
retval = rc;
goto err_generic_close;
} else
dev_dbg(&port->dev, "%s - enabled reading\n", __func__);
rc = klsi_105_get_line_state(port, &line_state);
if (rc < 0) {
retval = rc;
goto err_disable_read;
}
spin_lock_irqsave(&priv->lock, flags);
priv->line_state = line_state;
spin_unlock_irqrestore(&priv->lock, flags);
dev_dbg(&port->dev, "%s - read line state 0x%lx\n", __func__,
line_state);
return 0;
err_disable_read:
usb_control_msg(port->serial->dev,
usb_sndctrlpipe(port->serial->dev, 0),
KL5KUSB105A_SIO_CONFIGURE,
USB_TYPE_VENDOR | USB_DIR_OUT,
KL5KUSB105A_SIO_CONFIGURE_READ_OFF,
0, /* index */
NULL, 0,
KLSI_TIMEOUT);
err_generic_close:
usb_serial_generic_close(port);
return retval;
}
static void klsi_105_close(struct usb_serial_port *port)
{
int rc;
/* send READ_OFF */
rc = usb_control_msg(port->serial->dev,
usb_sndctrlpipe(port->serial->dev, 0),
KL5KUSB105A_SIO_CONFIGURE,
USB_TYPE_VENDOR | USB_DIR_OUT,
KL5KUSB105A_SIO_CONFIGURE_READ_OFF,
0, /* index */
NULL, 0,
KLSI_TIMEOUT);
if (rc < 0)
dev_err(&port->dev, "failed to disable read: %d\n", rc);
/* shutdown our bulk reads and writes */
usb_serial_generic_close(port);
}
/* We need to write a complete 64-byte data block and encode the
* number actually sent in the first double-byte, LSB-order. That
* leaves at most 62 bytes of payload.
*/
#define KLSI_HDR_LEN 2
static int klsi_105_prepare_write_buffer(struct usb_serial_port *port,
void *dest, size_t size)
{
unsigned char *buf = dest;
int count;
count = kfifo_out_locked(&port->write_fifo, buf + KLSI_HDR_LEN, size,
&port->lock);
put_unaligned_le16(count, buf);
return count + KLSI_HDR_LEN;
}
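/*
 * Worked example (illustrative only): writing the three bytes "AT\r"
 * produces a transfer that starts 03 00 41 54 0d - the first two bytes
 * are the payload length in LSB-first order, added by
 * put_unaligned_le16() above.
 */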
/* The data received is preceded by a length double-byte in LSB-first order.
*/
static void klsi_105_process_read_urb(struct urb *urb)
{
struct usb_serial_port *port = urb->context;
unsigned char *data = urb->transfer_buffer;
unsigned len;
/* empty urbs seem to happen, we ignore them */
if (!urb->actual_length)
return;
if (urb->actual_length <= KLSI_HDR_LEN) {
dev_dbg(&port->dev, "%s - malformed packet\n", __func__);
return;
}
len = get_unaligned_le16(data);
if (len > urb->actual_length - KLSI_HDR_LEN) {
dev_dbg(&port->dev, "%s - packet length mismatch\n", __func__);
len = urb->actual_length - KLSI_HDR_LEN;
}
tty_insert_flip_string(&port->port, data + KLSI_HDR_LEN, len);
tty_flip_buffer_push(&port->port);
}
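/*
 * Worked example (illustrative only): a received block starting
 * 05 00 48 65 6c 6c 6f carries the 5 payload bytes "Hello"; the leading
 * 05 00 is the little-endian length header that is stripped before the
 * data is pushed to the tty.
 */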
static void klsi_105_set_termios(struct tty_struct *tty,
struct usb_serial_port *port,
const struct ktermios *old_termios)
{
struct klsi_105_private *priv = usb_get_serial_port_data(port);
struct device *dev = &port->dev;
unsigned int iflag = tty->termios.c_iflag;
unsigned int old_iflag = old_termios->c_iflag;
unsigned int cflag = tty->termios.c_cflag;
unsigned int old_cflag = old_termios->c_cflag;
struct klsi_105_port_settings *cfg;
unsigned long flags;
speed_t baud;
cfg = kmalloc(sizeof(*cfg), GFP_KERNEL);
if (!cfg)
return;
/* lock while we are modifying the settings */
spin_lock_irqsave(&priv->lock, flags);
/*
* Update baud rate
*/
baud = tty_get_baud_rate(tty);
switch (baud) {
case 0: /* handled below */
break;
case 1200:
priv->cfg.baudrate = kl5kusb105a_sio_b1200;
break;
case 2400:
priv->cfg.baudrate = kl5kusb105a_sio_b2400;
break;
case 4800:
priv->cfg.baudrate = kl5kusb105a_sio_b4800;
break;
case 9600:
priv->cfg.baudrate = kl5kusb105a_sio_b9600;
break;
case 19200:
priv->cfg.baudrate = kl5kusb105a_sio_b19200;
break;
case 38400:
priv->cfg.baudrate = kl5kusb105a_sio_b38400;
break;
case 57600:
priv->cfg.baudrate = kl5kusb105a_sio_b57600;
break;
case 115200:
priv->cfg.baudrate = kl5kusb105a_sio_b115200;
break;
default:
dev_dbg(dev, "unsupported baudrate, using 9600\n");
priv->cfg.baudrate = kl5kusb105a_sio_b9600;
baud = 9600;
break;
}
/*
* FIXME: implement B0 handling
*
* Maybe this should be simulated by sending read disable and read
* enable messages?
*/
tty_encode_baud_rate(tty, baud, baud);
if ((cflag & CSIZE) != (old_cflag & CSIZE)) {
/* set the number of data bits */
switch (cflag & CSIZE) {
case CS5:
dev_dbg(dev, "%s - 5 bits/byte not supported\n", __func__);
spin_unlock_irqrestore(&priv->lock, flags);
goto err;
case CS6:
dev_dbg(dev, "%s - 6 bits/byte not supported\n", __func__);
spin_unlock_irqrestore(&priv->lock, flags);
goto err;
case CS7:
priv->cfg.databits = kl5kusb105a_dtb_7;
break;
case CS8:
priv->cfg.databits = kl5kusb105a_dtb_8;
break;
default:
dev_err(dev, "CSIZE was not CS5-CS8, using default of 8\n");
priv->cfg.databits = kl5kusb105a_dtb_8;
break;
}
}
/*
* Update line control register (LCR)
*/
if ((cflag & (PARENB|PARODD)) != (old_cflag & (PARENB|PARODD))
|| (cflag & CSTOPB) != (old_cflag & CSTOPB)) {
/* Not currently supported */
tty->termios.c_cflag &= ~(PARENB|PARODD|CSTOPB);
}
/*
	 * Set flow control: well, I do not really know how to handle DTR/RTS.
* Just do what we have seen with SniffUSB on Win98.
*/
if ((iflag & IXOFF) != (old_iflag & IXOFF)
|| (iflag & IXON) != (old_iflag & IXON)
|| (cflag & CRTSCTS) != (old_cflag & CRTSCTS)) {
/* Not currently supported */
tty->termios.c_cflag &= ~CRTSCTS;
}
memcpy(cfg, &priv->cfg, sizeof(*cfg));
spin_unlock_irqrestore(&priv->lock, flags);
/* now commit changes to device */
klsi_105_chg_port_settings(port, cfg);
err:
kfree(cfg);
}
static int klsi_105_tiocmget(struct tty_struct *tty)
{
struct usb_serial_port *port = tty->driver_data;
struct klsi_105_private *priv = usb_get_serial_port_data(port);
unsigned long flags;
int rc;
unsigned long line_state;
rc = klsi_105_get_line_state(port, &line_state);
if (rc < 0) {
dev_err(&port->dev,
"Reading line control failed (error = %d)\n", rc);
/* better return value? EAGAIN? */
return rc;
}
spin_lock_irqsave(&priv->lock, flags);
priv->line_state = line_state;
spin_unlock_irqrestore(&priv->lock, flags);
dev_dbg(&port->dev, "%s - read line state 0x%lx\n", __func__, line_state);
return (int)line_state;
}
module_usb_serial_driver(serial_drivers, id_table);
MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL");
| linux-master | drivers/usb/serial/kl5kusb105.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* mxuport.c - MOXA UPort series driver
*
* Copyright (c) 2006 Moxa Technologies Co., Ltd.
* Copyright (c) 2013 Andrew Lunn <[email protected]>
*
* Supports the following Moxa USB to serial converters:
* 2 ports : UPort 1250, UPort 1250I
* 4 ports : UPort 1410, UPort 1450, UPort 1450I
* 8 ports : UPort 1610-8, UPort 1650-8
* 16 ports : UPort 1610-16, UPort 1650-16
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/firmware.h>
#include <linux/jiffies.h>
#include <linux/serial.h>
#include <linux/serial_reg.h>
#include <linux/slab.h>
#include <linux/tty.h>
#include <linux/tty_driver.h>
#include <linux/tty_flip.h>
#include <linux/uaccess.h>
#include <linux/usb.h>
#include <linux/usb/serial.h>
#include <asm/unaligned.h>
/* Definitions for the vendor ID and device ID */
#define MX_USBSERIAL_VID 0x110A
#define MX_UPORT1250_PID 0x1250
#define MX_UPORT1251_PID 0x1251
#define MX_UPORT1410_PID 0x1410
#define MX_UPORT1450_PID 0x1450
#define MX_UPORT1451_PID 0x1451
#define MX_UPORT1618_PID 0x1618
#define MX_UPORT1658_PID 0x1658
#define MX_UPORT1613_PID 0x1613
#define MX_UPORT1653_PID 0x1653
/* Definitions for USB info */
#define HEADER_SIZE 4
#define EVENT_LENGTH 8
#define DOWN_BLOCK_SIZE 64
/* Definitions for firmware info */
#define VER_ADDR_1 0x20
#define VER_ADDR_2 0x24
#define VER_ADDR_3 0x28
/* Definitions for USB vendor request */
#define RQ_VENDOR_NONE 0x00
#define RQ_VENDOR_SET_BAUD 0x01 /* Set baud rate */
#define RQ_VENDOR_SET_LINE 0x02 /* Set line status */
#define RQ_VENDOR_SET_CHARS 0x03 /* Set Xon/Xoff chars */
#define RQ_VENDOR_SET_RTS 0x04 /* Set RTS */
#define RQ_VENDOR_SET_DTR 0x05 /* Set DTR */
#define RQ_VENDOR_SET_XONXOFF 0x06 /* Set auto Xon/Xoff */
#define RQ_VENDOR_SET_RX_HOST_EN 0x07 /* Set RX host enable */
#define RQ_VENDOR_SET_OPEN 0x08 /* Set open/close port */
#define RQ_VENDOR_PURGE 0x09 /* Purge Rx/Tx buffer */
#define RQ_VENDOR_SET_MCR 0x0A /* Set MCR register */
#define RQ_VENDOR_SET_BREAK 0x0B /* Set Break signal */
#define RQ_VENDOR_START_FW_DOWN 0x0C /* Start firmware download */
#define RQ_VENDOR_STOP_FW_DOWN 0x0D /* Stop firmware download */
#define RQ_VENDOR_QUERY_FW_READY 0x0E /* Query if new firmware ready */
#define RQ_VENDOR_SET_FIFO_DISABLE 0x0F /* Set fifo disable */
#define RQ_VENDOR_SET_INTERFACE 0x10 /* Set interface */
#define RQ_VENDOR_SET_HIGH_PERFOR 0x11 /* Set hi-performance */
#define RQ_VENDOR_ERASE_BLOCK 0x12 /* Erase flash block */
#define RQ_VENDOR_WRITE_PAGE 0x13 /* Write flash page */
#define RQ_VENDOR_PREPARE_WRITE 0x14 /* Prepare write flash */
#define RQ_VENDOR_CONFIRM_WRITE 0x15 /* Confirm write flash */
#define RQ_VENDOR_LOCATE 0x16 /* Locate the device */
#define RQ_VENDOR_START_ROM_DOWN 0x17 /* Start firmware download */
#define RQ_VENDOR_ROM_DATA 0x18 /* Rom file data */
#define RQ_VENDOR_STOP_ROM_DOWN 0x19 /* Stop firmware download */
#define RQ_VENDOR_FW_DATA 0x20 /* Firmware data */
#define RQ_VENDOR_RESET_DEVICE 0x23 /* Try to reset the device */
#define RQ_VENDOR_QUERY_FW_CONFIG 0x24
#define RQ_VENDOR_GET_VERSION 0x81 /* Get firmware version */
#define RQ_VENDOR_GET_PAGE 0x82 /* Read flash page */
#define RQ_VENDOR_GET_ROM_PROC 0x83 /* Get ROM process state */
#define RQ_VENDOR_GET_INQUEUE 0x84 /* Data in input buffer */
#define RQ_VENDOR_GET_OUTQUEUE 0x85 /* Data in output buffer */
#define RQ_VENDOR_GET_MSR 0x86 /* Get modem status register */
/* Definitions for UPort event type */
#define UPORT_EVENT_NONE 0 /* None */
#define UPORT_EVENT_TXBUF_THRESHOLD 1 /* Tx buffer threshold */
#define UPORT_EVENT_SEND_NEXT 2 /* Send next */
#define UPORT_EVENT_MSR 3 /* Modem status */
#define UPORT_EVENT_LSR 4 /* Line status */
#define UPORT_EVENT_MCR 5 /* Modem control */
/* Definitions for serial event type */
#define SERIAL_EV_CTS 0x0008 /* CTS changed state */
#define SERIAL_EV_DSR 0x0010 /* DSR changed state */
#define SERIAL_EV_RLSD 0x0020 /* RLSD changed state */
/* Definitions for modem control event type */
#define SERIAL_EV_XOFF 0x40 /* XOFF received */
/* Definitions for line control of communication */
#define MX_WORDLENGTH_5 5
#define MX_WORDLENGTH_6 6
#define MX_WORDLENGTH_7 7
#define MX_WORDLENGTH_8 8
#define MX_PARITY_NONE 0
#define MX_PARITY_ODD 1
#define MX_PARITY_EVEN 2
#define MX_PARITY_MARK 3
#define MX_PARITY_SPACE 4
#define MX_STOP_BITS_1 0
#define MX_STOP_BITS_1_5 1
#define MX_STOP_BITS_2 2
#define MX_RTS_DISABLE 0x0
#define MX_RTS_ENABLE 0x1
#define MX_RTS_HW 0x2
#define MX_RTS_NO_CHANGE 0x3 /* Flag, not valid register value*/
#define MX_INT_RS232 0
#define MX_INT_2W_RS485 1
#define MX_INT_RS422 2
#define MX_INT_4W_RS485 3
/* Definitions for holding reason */
#define MX_WAIT_FOR_CTS 0x0001
#define MX_WAIT_FOR_DSR 0x0002
#define MX_WAIT_FOR_DCD 0x0004
#define MX_WAIT_FOR_XON 0x0008
#define MX_WAIT_FOR_START_TX 0x0010
#define MX_WAIT_FOR_UNTHROTTLE 0x0020
#define MX_WAIT_FOR_LOW_WATER 0x0040
#define MX_WAIT_FOR_SEND_NEXT 0x0080
#define MX_UPORT_2_PORT BIT(0)
#define MX_UPORT_4_PORT BIT(1)
#define MX_UPORT_8_PORT BIT(2)
#define MX_UPORT_16_PORT BIT(3)
/* This structure holds all of the local port information */
struct mxuport_port {
u8 mcr_state; /* Last MCR state */
u8 msr_state; /* Last MSR state */
struct mutex mutex; /* Protects mcr_state */
spinlock_t spinlock; /* Protects msr_state */
};
/* Table of devices that work with this driver */
static const struct usb_device_id mxuport_idtable[] = {
{ USB_DEVICE(MX_USBSERIAL_VID, MX_UPORT1250_PID),
.driver_info = MX_UPORT_2_PORT },
{ USB_DEVICE(MX_USBSERIAL_VID, MX_UPORT1251_PID),
.driver_info = MX_UPORT_2_PORT },
{ USB_DEVICE(MX_USBSERIAL_VID, MX_UPORT1410_PID),
.driver_info = MX_UPORT_4_PORT },
{ USB_DEVICE(MX_USBSERIAL_VID, MX_UPORT1450_PID),
.driver_info = MX_UPORT_4_PORT },
{ USB_DEVICE(MX_USBSERIAL_VID, MX_UPORT1451_PID),
.driver_info = MX_UPORT_4_PORT },
{ USB_DEVICE(MX_USBSERIAL_VID, MX_UPORT1618_PID),
.driver_info = MX_UPORT_8_PORT },
{ USB_DEVICE(MX_USBSERIAL_VID, MX_UPORT1658_PID),
.driver_info = MX_UPORT_8_PORT },
{ USB_DEVICE(MX_USBSERIAL_VID, MX_UPORT1613_PID),
.driver_info = MX_UPORT_16_PORT },
{ USB_DEVICE(MX_USBSERIAL_VID, MX_UPORT1653_PID),
.driver_info = MX_UPORT_16_PORT },
{} /* Terminating entry */
};
MODULE_DEVICE_TABLE(usb, mxuport_idtable);
/*
* Add a four byte header containing the port number and the number of
* bytes of data in the message. Return the number of bytes in the
* buffer.
*/
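/*
 * Resulting wire format (header fields are big endian):
 *   bytes 0-1 : port number
 *   bytes 2-3 : number of payload bytes
 *   bytes 4.. : payload taken from the write fifo
 */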
static int mxuport_prepare_write_buffer(struct usb_serial_port *port,
void *dest, size_t size)
{
u8 *buf = dest;
int count;
count = kfifo_out_locked(&port->write_fifo, buf + HEADER_SIZE,
size - HEADER_SIZE,
&port->lock);
put_unaligned_be16(port->port_number, buf);
put_unaligned_be16(count, buf + 2);
dev_dbg(&port->dev, "%s - size %zd count %d\n", __func__,
size, count);
return count + HEADER_SIZE;
}
/* Read the given buffer in from the control pipe. */
static int mxuport_recv_ctrl_urb(struct usb_serial *serial,
u8 request, u16 value, u16 index,
u8 *data, size_t size)
{
int status;
status = usb_control_msg(serial->dev,
usb_rcvctrlpipe(serial->dev, 0),
request,
(USB_DIR_IN | USB_TYPE_VENDOR |
USB_RECIP_DEVICE), value, index,
data, size,
USB_CTRL_GET_TIMEOUT);
if (status < 0) {
dev_err(&serial->interface->dev,
"%s - usb_control_msg failed (%d)\n",
__func__, status);
return status;
}
if (status != size) {
dev_err(&serial->interface->dev,
"%s - short read (%d / %zd)\n",
__func__, status, size);
return -EIO;
}
return status;
}
/* Write the given buffer out to the control pipe. */
static int mxuport_send_ctrl_data_urb(struct usb_serial *serial,
u8 request,
u16 value, u16 index,
u8 *data, size_t size)
{
int status;
status = usb_control_msg(serial->dev,
usb_sndctrlpipe(serial->dev, 0),
request,
(USB_DIR_OUT | USB_TYPE_VENDOR |
USB_RECIP_DEVICE), value, index,
data, size,
USB_CTRL_SET_TIMEOUT);
if (status < 0) {
dev_err(&serial->interface->dev,
"%s - usb_control_msg failed (%d)\n",
__func__, status);
return status;
}
return 0;
}
/* Send a vendor request without any data */
static int mxuport_send_ctrl_urb(struct usb_serial *serial,
u8 request, u16 value, u16 index)
{
return mxuport_send_ctrl_data_urb(serial, request, value, index,
NULL, 0);
}
/*
* mxuport_throttle - throttle function of driver
*
* This function is called by the tty driver when it wants to stop the
* data being read from the port. Since all the data comes over one
* bulk in endpoint, we cannot stop submitting urbs by setting
* port->throttle. Instead tell the device to stop sending us data for
* the port.
*/
static void mxuport_throttle(struct tty_struct *tty)
{
struct usb_serial_port *port = tty->driver_data;
struct usb_serial *serial = port->serial;
dev_dbg(&port->dev, "%s\n", __func__);
mxuport_send_ctrl_urb(serial, RQ_VENDOR_SET_RX_HOST_EN,
0, port->port_number);
}
/*
* mxuport_unthrottle - unthrottle function of driver
*
* This function is called by the tty driver when it wants to resume
* the data being read from the port. Tell the device it can resume
* sending us received data from the port.
*/
static void mxuport_unthrottle(struct tty_struct *tty)
{
struct usb_serial_port *port = tty->driver_data;
struct usb_serial *serial = port->serial;
dev_dbg(&port->dev, "%s\n", __func__);
mxuport_send_ctrl_urb(serial, RQ_VENDOR_SET_RX_HOST_EN,
1, port->port_number);
}
/*
* Processes one chunk of data received for a port. Mostly a copy of
* usb_serial_generic_process_read_urb().
*/
static void mxuport_process_read_urb_data(struct usb_serial_port *port,
char *data, int size)
{
int i;
if (port->sysrq) {
for (i = 0; i < size; i++, data++) {
if (!usb_serial_handle_sysrq_char(port, *data))
tty_insert_flip_char(&port->port, *data,
TTY_NORMAL);
}
} else {
tty_insert_flip_string(&port->port, data, size);
}
tty_flip_buffer_push(&port->port);
}
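/*
 * Handle a modem status (MSR) event: update the cached MSR state and wake
 * up any TIOCMIWAIT waiters when CTS, DSR or DCD changed.
 */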
static void mxuport_msr_event(struct usb_serial_port *port, u8 buf[4])
{
struct mxuport_port *mxport = usb_get_serial_port_data(port);
u8 rcv_msr_hold = buf[2] & 0xF0;
u16 rcv_msr_event = get_unaligned_be16(buf);
unsigned long flags;
if (rcv_msr_event == 0)
return;
/* Update MSR status */
spin_lock_irqsave(&mxport->spinlock, flags);
dev_dbg(&port->dev, "%s - current MSR status = 0x%x\n",
__func__, mxport->msr_state);
if (rcv_msr_hold & UART_MSR_CTS) {
mxport->msr_state |= UART_MSR_CTS;
dev_dbg(&port->dev, "%s - CTS high\n", __func__);
} else {
mxport->msr_state &= ~UART_MSR_CTS;
dev_dbg(&port->dev, "%s - CTS low\n", __func__);
}
if (rcv_msr_hold & UART_MSR_DSR) {
mxport->msr_state |= UART_MSR_DSR;
dev_dbg(&port->dev, "%s - DSR high\n", __func__);
} else {
mxport->msr_state &= ~UART_MSR_DSR;
dev_dbg(&port->dev, "%s - DSR low\n", __func__);
}
if (rcv_msr_hold & UART_MSR_DCD) {
mxport->msr_state |= UART_MSR_DCD;
dev_dbg(&port->dev, "%s - DCD high\n", __func__);
} else {
mxport->msr_state &= ~UART_MSR_DCD;
dev_dbg(&port->dev, "%s - DCD low\n", __func__);
}
spin_unlock_irqrestore(&mxport->spinlock, flags);
if (rcv_msr_event &
(SERIAL_EV_CTS | SERIAL_EV_DSR | SERIAL_EV_RLSD)) {
if (rcv_msr_event & SERIAL_EV_CTS) {
port->icount.cts++;
dev_dbg(&port->dev, "%s - CTS change\n", __func__);
}
if (rcv_msr_event & SERIAL_EV_DSR) {
port->icount.dsr++;
dev_dbg(&port->dev, "%s - DSR change\n", __func__);
}
if (rcv_msr_event & SERIAL_EV_RLSD) {
port->icount.dcd++;
dev_dbg(&port->dev, "%s - DCD change\n", __func__);
}
wake_up_interruptible(&port->port.delta_msr_wait);
}
}
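/* Handle a line status (LSR) event by updating the error counters. */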
static void mxuport_lsr_event(struct usb_serial_port *port, u8 buf[4])
{
u8 lsr_event = buf[2];
if (lsr_event & UART_LSR_BI) {
port->icount.brk++;
dev_dbg(&port->dev, "%s - break error\n", __func__);
}
if (lsr_event & UART_LSR_FE) {
port->icount.frame++;
dev_dbg(&port->dev, "%s - frame error\n", __func__);
}
if (lsr_event & UART_LSR_PE) {
port->icount.parity++;
dev_dbg(&port->dev, "%s - parity error\n", __func__);
}
if (lsr_event & UART_LSR_OE) {
port->icount.overrun++;
dev_dbg(&port->dev, "%s - overrun error\n", __func__);
}
}
/*
* When something interesting happens, modem control lines XON/XOFF
* etc, the device sends an event. Process these events.
*/
static void mxuport_process_read_urb_event(struct usb_serial_port *port,
u8 buf[4], u32 event)
{
dev_dbg(&port->dev, "%s - receive event : %04x\n", __func__, event);
switch (event) {
case UPORT_EVENT_SEND_NEXT:
/*
* Sent as part of the flow control on device buffers.
* Not currently used.
*/
break;
case UPORT_EVENT_MSR:
mxuport_msr_event(port, buf);
break;
case UPORT_EVENT_LSR:
mxuport_lsr_event(port, buf);
break;
case UPORT_EVENT_MCR:
/*
* Event to indicate a change in XON/XOFF from the
		 * peer. Currently not used; we just keep sending
		 * data to the device, which buffers it if
* needed. This event could be used for flow control
* between the host and the device.
*/
break;
default:
dev_dbg(&port->dev, "Unexpected event\n");
break;
}
}
/*
* One URB can contain data for multiple ports. Demultiplex the data,
* checking the port exists, is opened and the message is valid.
*/
static void mxuport_process_read_urb_demux_data(struct urb *urb)
{
struct usb_serial_port *port = urb->context;
struct usb_serial *serial = port->serial;
u8 *data = urb->transfer_buffer;
u8 *end = data + urb->actual_length;
struct usb_serial_port *demux_port;
u8 *ch;
u16 rcv_port;
u16 rcv_len;
while (data < end) {
if (data + HEADER_SIZE > end) {
dev_warn(&port->dev, "%s - message with short header\n",
__func__);
return;
}
rcv_port = get_unaligned_be16(data);
if (rcv_port >= serial->num_ports) {
dev_warn(&port->dev, "%s - message for invalid port\n",
__func__);
return;
}
demux_port = serial->port[rcv_port];
rcv_len = get_unaligned_be16(data + 2);
if (!rcv_len || data + HEADER_SIZE + rcv_len > end) {
dev_warn(&port->dev, "%s - short data\n", __func__);
return;
}
if (tty_port_initialized(&demux_port->port)) {
ch = data + HEADER_SIZE;
mxuport_process_read_urb_data(demux_port, ch, rcv_len);
} else {
dev_dbg(&demux_port->dev, "%s - data for closed port\n",
__func__);
}
data += HEADER_SIZE + rcv_len;
}
}
/*
* One URB can contain events for multiple ports. Demultiplex the event,
* checking the port exists, and is opened.
*/
static void mxuport_process_read_urb_demux_event(struct urb *urb)
{
struct usb_serial_port *port = urb->context;
struct usb_serial *serial = port->serial;
u8 *data = urb->transfer_buffer;
u8 *end = data + urb->actual_length;
struct usb_serial_port *demux_port;
u8 *ch;
u16 rcv_port;
u16 rcv_event;
while (data < end) {
if (data + EVENT_LENGTH > end) {
dev_warn(&port->dev, "%s - message with short event\n",
__func__);
return;
}
rcv_port = get_unaligned_be16(data);
if (rcv_port >= serial->num_ports) {
dev_warn(&port->dev, "%s - message for invalid port\n",
__func__);
return;
}
demux_port = serial->port[rcv_port];
if (tty_port_initialized(&demux_port->port)) {
ch = data + HEADER_SIZE;
rcv_event = get_unaligned_be16(data + 2);
mxuport_process_read_urb_event(demux_port, ch,
rcv_event);
} else {
dev_dbg(&demux_port->dev,
"%s - event for closed port\n", __func__);
}
data += EVENT_LENGTH;
}
}
/*
* This is called when we have received data on the bulk in
* endpoint. Depending on which port it was received on, it can
* contain serial data or events.
*/
static void mxuport_process_read_urb(struct urb *urb)
{
struct usb_serial_port *port = urb->context;
struct usb_serial *serial = port->serial;
if (port == serial->port[0])
mxuport_process_read_urb_demux_data(urb);
if (port == serial->port[1])
mxuport_process_read_urb_demux_event(urb);
}
/*
* Ask the device how many bytes it has queued to be sent out. If
* there are none, return true.
*/
static bool mxuport_tx_empty(struct usb_serial_port *port)
{
struct usb_serial *serial = port->serial;
bool is_empty = true;
u32 txlen;
u8 *len_buf;
int err;
len_buf = kzalloc(4, GFP_KERNEL);
if (!len_buf)
goto out;
err = mxuport_recv_ctrl_urb(serial, RQ_VENDOR_GET_OUTQUEUE, 0,
port->port_number, len_buf, 4);
if (err < 0)
goto out;
txlen = get_unaligned_be32(len_buf);
dev_dbg(&port->dev, "%s - tx len = %u\n", __func__, txlen);
if (txlen != 0)
is_empty = false;
out:
kfree(len_buf);
return is_empty;
}
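/* Send a new MCR value to the device. Callers hold mxport->mutex. */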
static int mxuport_set_mcr(struct usb_serial_port *port, u8 mcr_state)
{
struct usb_serial *serial = port->serial;
int err;
dev_dbg(&port->dev, "%s - %02x\n", __func__, mcr_state);
err = mxuport_send_ctrl_urb(serial, RQ_VENDOR_SET_MCR,
mcr_state, port->port_number);
if (err)
dev_err(&port->dev, "%s - failed to change MCR\n", __func__);
return err;
}
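/* Raise or drop DTR and keep the cached MCR state in sync on success. */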
static int mxuport_set_dtr(struct usb_serial_port *port, int on)
{
struct mxuport_port *mxport = usb_get_serial_port_data(port);
struct usb_serial *serial = port->serial;
int err;
mutex_lock(&mxport->mutex);
err = mxuport_send_ctrl_urb(serial, RQ_VENDOR_SET_DTR,
!!on, port->port_number);
if (!err) {
if (on)
mxport->mcr_state |= UART_MCR_DTR;
else
mxport->mcr_state &= ~UART_MCR_DTR;
}
mutex_unlock(&mxport->mutex);
return err;
}
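/*
 * Set the RTS mode (disabled, asserted or hardware flow control) and
 * update the cached MCR state where applicable.
 */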
static int mxuport_set_rts(struct usb_serial_port *port, u8 state)
{
struct mxuport_port *mxport = usb_get_serial_port_data(port);
struct usb_serial *serial = port->serial;
int err;
u8 mcr_state;
mutex_lock(&mxport->mutex);
mcr_state = mxport->mcr_state;
switch (state) {
case MX_RTS_DISABLE:
mcr_state &= ~UART_MCR_RTS;
break;
case MX_RTS_ENABLE:
mcr_state |= UART_MCR_RTS;
break;
case MX_RTS_HW:
/*
* Do not update mxport->mcr_state when doing hardware
* flow control.
*/
break;
default:
/*
* Should not happen, but somebody might try passing
* MX_RTS_NO_CHANGE, which is not valid.
*/
err = -EINVAL;
goto out;
}
err = mxuport_send_ctrl_urb(serial, RQ_VENDOR_SET_RTS,
state, port->port_number);
if (!err)
mxport->mcr_state = mcr_state;
out:
mutex_unlock(&mxport->mutex);
return err;
}
static void mxuport_dtr_rts(struct usb_serial_port *port, int on)
{
struct mxuport_port *mxport = usb_get_serial_port_data(port);
u8 mcr_state;
int err;
mutex_lock(&mxport->mutex);
mcr_state = mxport->mcr_state;
if (on)
mcr_state |= (UART_MCR_RTS | UART_MCR_DTR);
else
mcr_state &= ~(UART_MCR_RTS | UART_MCR_DTR);
err = mxuport_set_mcr(port, mcr_state);
if (!err)
mxport->mcr_state = mcr_state;
mutex_unlock(&mxport->mutex);
}
static int mxuport_tiocmset(struct tty_struct *tty, unsigned int set,
unsigned int clear)
{
struct usb_serial_port *port = tty->driver_data;
struct mxuport_port *mxport = usb_get_serial_port_data(port);
int err;
u8 mcr_state;
mutex_lock(&mxport->mutex);
mcr_state = mxport->mcr_state;
if (set & TIOCM_RTS)
mcr_state |= UART_MCR_RTS;
if (set & TIOCM_DTR)
mcr_state |= UART_MCR_DTR;
if (clear & TIOCM_RTS)
mcr_state &= ~UART_MCR_RTS;
if (clear & TIOCM_DTR)
mcr_state &= ~UART_MCR_DTR;
err = mxuport_set_mcr(port, mcr_state);
if (!err)
mxport->mcr_state = mcr_state;
mutex_unlock(&mxport->mutex);
return err;
}
static int mxuport_tiocmget(struct tty_struct *tty)
{
struct mxuport_port *mxport;
struct usb_serial_port *port = tty->driver_data;
unsigned int result;
unsigned long flags;
unsigned int msr;
unsigned int mcr;
mxport = usb_get_serial_port_data(port);
mutex_lock(&mxport->mutex);
spin_lock_irqsave(&mxport->spinlock, flags);
msr = mxport->msr_state;
mcr = mxport->mcr_state;
spin_unlock_irqrestore(&mxport->spinlock, flags);
mutex_unlock(&mxport->mutex);
result = (((mcr & UART_MCR_DTR) ? TIOCM_DTR : 0) | /* 0x002 */
((mcr & UART_MCR_RTS) ? TIOCM_RTS : 0) | /* 0x004 */
((msr & UART_MSR_CTS) ? TIOCM_CTS : 0) | /* 0x020 */
((msr & UART_MSR_DCD) ? TIOCM_CAR : 0) | /* 0x040 */
((msr & UART_MSR_RI) ? TIOCM_RI : 0) | /* 0x080 */
((msr & UART_MSR_DSR) ? TIOCM_DSR : 0)); /* 0x100 */
dev_dbg(&port->dev, "%s - 0x%04x\n", __func__, result);
return result;
}
static int mxuport_set_termios_flow(struct tty_struct *tty,
const struct ktermios *old_termios,
struct usb_serial_port *port,
struct usb_serial *serial)
{
u8 xon = START_CHAR(tty);
u8 xoff = STOP_CHAR(tty);
int enable;
int err;
u8 *buf;
u8 rts;
buf = kmalloc(2, GFP_KERNEL);
if (!buf)
return -ENOMEM;
/* S/W flow control settings */
if (I_IXOFF(tty) || I_IXON(tty)) {
enable = 1;
buf[0] = xon;
buf[1] = xoff;
err = mxuport_send_ctrl_data_urb(serial, RQ_VENDOR_SET_CHARS,
0, port->port_number,
buf, 2);
if (err)
goto out;
dev_dbg(&port->dev, "%s - XON = 0x%02x, XOFF = 0x%02x\n",
__func__, xon, xoff);
} else {
enable = 0;
}
err = mxuport_send_ctrl_urb(serial, RQ_VENDOR_SET_XONXOFF,
enable, port->port_number);
if (err)
goto out;
rts = MX_RTS_NO_CHANGE;
/* H/W flow control settings */
if (!old_termios ||
C_CRTSCTS(tty) != (old_termios->c_cflag & CRTSCTS)) {
if (C_CRTSCTS(tty))
rts = MX_RTS_HW;
else
rts = MX_RTS_ENABLE;
}
if (C_BAUD(tty)) {
if (old_termios && (old_termios->c_cflag & CBAUD) == B0) {
/* Raise DTR and RTS */
if (C_CRTSCTS(tty))
rts = MX_RTS_HW;
else
rts = MX_RTS_ENABLE;
mxuport_set_dtr(port, 1);
}
} else {
/* Drop DTR and RTS */
rts = MX_RTS_DISABLE;
mxuport_set_dtr(port, 0);
}
if (rts != MX_RTS_NO_CHANGE)
err = mxuport_set_rts(port, rts);
out:
kfree(buf);
return err;
}
static void mxuport_set_termios(struct tty_struct *tty,
struct usb_serial_port *port,
const struct ktermios *old_termios)
{
struct usb_serial *serial = port->serial;
u8 *buf;
u8 data_bits;
u8 stop_bits;
u8 parity;
int baud;
int err;
if (old_termios &&
!tty_termios_hw_change(&tty->termios, old_termios) &&
tty->termios.c_iflag == old_termios->c_iflag) {
dev_dbg(&port->dev, "%s - nothing to change\n", __func__);
return;
}
buf = kmalloc(4, GFP_KERNEL);
if (!buf)
return;
/* Set data bit of termios */
switch (C_CSIZE(tty)) {
case CS5:
data_bits = MX_WORDLENGTH_5;
break;
case CS6:
data_bits = MX_WORDLENGTH_6;
break;
case CS7:
data_bits = MX_WORDLENGTH_7;
break;
case CS8:
default:
data_bits = MX_WORDLENGTH_8;
break;
}
/* Set parity of termios */
if (C_PARENB(tty)) {
if (C_CMSPAR(tty)) {
if (C_PARODD(tty))
parity = MX_PARITY_MARK;
else
parity = MX_PARITY_SPACE;
} else {
if (C_PARODD(tty))
parity = MX_PARITY_ODD;
else
parity = MX_PARITY_EVEN;
}
} else {
parity = MX_PARITY_NONE;
}
/* Set stop bit of termios */
if (C_CSTOPB(tty))
stop_bits = MX_STOP_BITS_2;
else
stop_bits = MX_STOP_BITS_1;
buf[0] = data_bits;
buf[1] = parity;
buf[2] = stop_bits;
buf[3] = 0;
err = mxuport_send_ctrl_data_urb(serial, RQ_VENDOR_SET_LINE,
0, port->port_number, buf, 4);
if (err)
goto out;
err = mxuport_set_termios_flow(tty, old_termios, port, serial);
if (err)
goto out;
baud = tty_get_baud_rate(tty);
if (!baud)
baud = 9600;
/* Note: Little Endian */
put_unaligned_le32(baud, buf);
err = mxuport_send_ctrl_data_urb(serial, RQ_VENDOR_SET_BAUD,
0, port->port_number,
buf, 4);
if (err)
goto out;
dev_dbg(&port->dev, "baud_rate : %d\n", baud);
dev_dbg(&port->dev, "data_bits : %d\n", data_bits);
dev_dbg(&port->dev, "parity : %d\n", parity);
dev_dbg(&port->dev, "stop_bits : %d\n", stop_bits);
out:
kfree(buf);
}
/*
* Determine how many ports this device has dynamically. It will be
* called after the probe() callback is called, but before attach().
*/
static int mxuport_calc_num_ports(struct usb_serial *serial,
struct usb_serial_endpoints *epds)
{
unsigned long features = (unsigned long)usb_get_serial_data(serial);
int num_ports;
int i;
if (features & MX_UPORT_2_PORT) {
num_ports = 2;
} else if (features & MX_UPORT_4_PORT) {
num_ports = 4;
} else if (features & MX_UPORT_8_PORT) {
num_ports = 8;
} else if (features & MX_UPORT_16_PORT) {
num_ports = 16;
} else {
dev_warn(&serial->interface->dev,
"unknown device, assuming two ports\n");
num_ports = 2;
}
/*
	 * Set up bulk-out endpoint multiplexing. All ports share the same
* bulk-out endpoint.
*/
BUILD_BUG_ON(ARRAY_SIZE(epds->bulk_out) < 16);
for (i = 1; i < num_ports; ++i)
epds->bulk_out[i] = epds->bulk_out[0];
epds->num_bulk_out = num_ports;
return num_ports;
}
/* Get the version of the firmware currently running. */
static int mxuport_get_fw_version(struct usb_serial *serial, u32 *version)
{
u8 *ver_buf;
int err;
ver_buf = kzalloc(4, GFP_KERNEL);
if (!ver_buf)
return -ENOMEM;
/* Get firmware version from SDRAM */
err = mxuport_recv_ctrl_urb(serial, RQ_VENDOR_GET_VERSION, 0, 0,
ver_buf, 4);
if (err != 4) {
err = -EIO;
goto out;
}
*version = (ver_buf[0] << 16) | (ver_buf[1] << 8) | ver_buf[2];
err = 0;
out:
kfree(ver_buf);
return err;
}
/* Given a firmware blob, download it to the device. */
static int mxuport_download_fw(struct usb_serial *serial,
const struct firmware *fw_p)
{
u8 *fw_buf;
size_t txlen;
size_t fwidx;
int err;
fw_buf = kmalloc(DOWN_BLOCK_SIZE, GFP_KERNEL);
if (!fw_buf)
return -ENOMEM;
dev_dbg(&serial->interface->dev, "Starting firmware download...\n");
err = mxuport_send_ctrl_urb(serial, RQ_VENDOR_START_FW_DOWN, 0, 0);
if (err)
goto out;
fwidx = 0;
do {
txlen = min_t(size_t, (fw_p->size - fwidx), DOWN_BLOCK_SIZE);
memcpy(fw_buf, &fw_p->data[fwidx], txlen);
err = mxuport_send_ctrl_data_urb(serial, RQ_VENDOR_FW_DATA,
0, 0, fw_buf, txlen);
if (err) {
mxuport_send_ctrl_urb(serial, RQ_VENDOR_STOP_FW_DOWN,
0, 0);
goto out;
}
fwidx += txlen;
usleep_range(1000, 2000);
} while (fwidx < fw_p->size);
msleep(1000);
err = mxuport_send_ctrl_urb(serial, RQ_VENDOR_STOP_FW_DOWN, 0, 0);
if (err)
goto out;
msleep(1000);
err = mxuport_send_ctrl_urb(serial, RQ_VENDOR_QUERY_FW_READY, 0, 0);
out:
kfree(fw_buf);
return err;
}
static int mxuport_probe(struct usb_serial *serial,
const struct usb_device_id *id)
{
u16 productid = le16_to_cpu(serial->dev->descriptor.idProduct);
const struct firmware *fw_p = NULL;
u32 version;
int local_ver;
char buf[32];
int err;
/* Load our firmware */
err = mxuport_send_ctrl_urb(serial, RQ_VENDOR_QUERY_FW_CONFIG, 0, 0);
if (err) {
mxuport_send_ctrl_urb(serial, RQ_VENDOR_RESET_DEVICE, 0, 0);
return err;
}
err = mxuport_get_fw_version(serial, &version);
if (err < 0)
return err;
dev_dbg(&serial->interface->dev, "Device firmware version v%x.%x.%x\n",
(version & 0xff0000) >> 16,
(version & 0xff00) >> 8,
(version & 0xff));
snprintf(buf, sizeof(buf) - 1, "moxa/moxa-%04x.fw", productid);
err = request_firmware(&fw_p, buf, &serial->interface->dev);
if (err) {
dev_warn(&serial->interface->dev, "Firmware %s not found\n",
buf);
/* Use the firmware already in the device */
err = 0;
} else {
local_ver = ((fw_p->data[VER_ADDR_1] << 16) |
(fw_p->data[VER_ADDR_2] << 8) |
fw_p->data[VER_ADDR_3]);
dev_dbg(&serial->interface->dev,
"Available firmware version v%x.%x.%x\n",
fw_p->data[VER_ADDR_1], fw_p->data[VER_ADDR_2],
fw_p->data[VER_ADDR_3]);
if (local_ver > version) {
err = mxuport_download_fw(serial, fw_p);
if (err)
goto out;
err = mxuport_get_fw_version(serial, &version);
if (err < 0)
goto out;
}
}
dev_info(&serial->interface->dev,
"Using device firmware version v%x.%x.%x\n",
(version & 0xff0000) >> 16,
(version & 0xff00) >> 8,
(version & 0xff));
/*
* Contains the features of this hardware. Store away for
	 * later use, e.g. the number of ports.
*/
usb_set_serial_data(serial, (void *)id->driver_info);
out:
if (fw_p)
release_firmware(fw_p);
return err;
}
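/*
 * Per-port setup: allocate the private data and configure the FIFO,
 * transmission mode and interface type.
 */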
static int mxuport_port_probe(struct usb_serial_port *port)
{
struct usb_serial *serial = port->serial;
struct mxuport_port *mxport;
int err;
mxport = devm_kzalloc(&port->dev, sizeof(struct mxuport_port),
GFP_KERNEL);
if (!mxport)
return -ENOMEM;
mutex_init(&mxport->mutex);
spin_lock_init(&mxport->spinlock);
/* Set the port private data */
usb_set_serial_port_data(port, mxport);
/* Set FIFO (Enable) */
err = mxuport_send_ctrl_urb(serial, RQ_VENDOR_SET_FIFO_DISABLE,
0, port->port_number);
if (err)
return err;
/* Set transmission mode (Hi-Performance) */
err = mxuport_send_ctrl_urb(serial, RQ_VENDOR_SET_HIGH_PERFOR,
0, port->port_number);
if (err)
return err;
/* Set interface (RS-232) */
return mxuport_send_ctrl_urb(serial, RQ_VENDOR_SET_INTERFACE,
MX_INT_RS232,
port->port_number);
}
static int mxuport_attach(struct usb_serial *serial)
{
struct usb_serial_port *port0 = serial->port[0];
struct usb_serial_port *port1 = serial->port[1];
int err;
/*
* All data from the ports is received on the first bulk in
* endpoint, with a multiplex header. The second bulk in is
* used for events.
*
* Start to read from the device.
*/
err = usb_serial_generic_submit_read_urbs(port0, GFP_KERNEL);
if (err)
return err;
err = usb_serial_generic_submit_read_urbs(port1, GFP_KERNEL);
if (err) {
usb_serial_generic_close(port0);
return err;
}
return 0;
}
static void mxuport_release(struct usb_serial *serial)
{
struct usb_serial_port *port0 = serial->port[0];
struct usb_serial_port *port1 = serial->port[1];
usb_serial_generic_close(port1);
usb_serial_generic_close(port0);
}
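/*
 * Open a port: enable receive on the device side, mark the port as open
 * and apply the initial termios.
 */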
static int mxuport_open(struct tty_struct *tty, struct usb_serial_port *port)
{
struct mxuport_port *mxport = usb_get_serial_port_data(port);
struct usb_serial *serial = port->serial;
int err;
/* Set receive host (enable) */
err = mxuport_send_ctrl_urb(serial, RQ_VENDOR_SET_RX_HOST_EN,
1, port->port_number);
if (err)
return err;
err = mxuport_send_ctrl_urb(serial, RQ_VENDOR_SET_OPEN,
1, port->port_number);
if (err) {
mxuport_send_ctrl_urb(serial, RQ_VENDOR_SET_RX_HOST_EN,
0, port->port_number);
return err;
}
	/* Set the initial port termios */
if (tty)
mxuport_set_termios(tty, port, NULL);
/*
* TODO: use RQ_VENDOR_GET_MSR, once we know what it
* returns.
*/
mxport->msr_state = 0;
return err;
}
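/* Close a port: mark it as closed on the device and disable receive. */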
static void mxuport_close(struct usb_serial_port *port)
{
struct usb_serial *serial = port->serial;
mxuport_send_ctrl_urb(serial, RQ_VENDOR_SET_OPEN, 0,
port->port_number);
mxuport_send_ctrl_urb(serial, RQ_VENDOR_SET_RX_HOST_EN, 0,
port->port_number);
}
/* Send a break to the port. */
static int mxuport_break_ctl(struct tty_struct *tty, int break_state)
{
struct usb_serial_port *port = tty->driver_data;
struct usb_serial *serial = port->serial;
int enable;
if (break_state == -1) {
enable = 1;
dev_dbg(&port->dev, "%s - sending break\n", __func__);
} else {
enable = 0;
dev_dbg(&port->dev, "%s - clearing break\n", __func__);
}
return mxuport_send_ctrl_urb(serial, RQ_VENDOR_SET_BREAK,
enable, port->port_number);
}
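/*
 * Resume: resubmit the data and event read urbs and restart writes on
 * any ports that are still open.
 */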
static int mxuport_resume(struct usb_serial *serial)
{
struct usb_serial_port *port;
int c = 0;
int i;
int r;
for (i = 0; i < 2; i++) {
port = serial->port[i];
r = usb_serial_generic_submit_read_urbs(port, GFP_NOIO);
if (r < 0)
c++;
}
for (i = 0; i < serial->num_ports; i++) {
port = serial->port[i];
if (!tty_port_initialized(&port->port))
continue;
r = usb_serial_generic_write_start(port, GFP_NOIO);
if (r < 0)
c++;
}
return c ? -EIO : 0;
}
static struct usb_serial_driver mxuport_device = {
.driver = {
.owner = THIS_MODULE,
.name = "mxuport",
},
.description = "MOXA UPort",
.id_table = mxuport_idtable,
.num_bulk_in = 2,
.num_bulk_out = 1,
.probe = mxuport_probe,
.port_probe = mxuport_port_probe,
.attach = mxuport_attach,
.release = mxuport_release,
.calc_num_ports = mxuport_calc_num_ports,
.open = mxuport_open,
.close = mxuport_close,
.set_termios = mxuport_set_termios,
.break_ctl = mxuport_break_ctl,
.tx_empty = mxuport_tx_empty,
.tiocmiwait = usb_serial_generic_tiocmiwait,
.get_icount = usb_serial_generic_get_icount,
.throttle = mxuport_throttle,
.unthrottle = mxuport_unthrottle,
.tiocmget = mxuport_tiocmget,
.tiocmset = mxuport_tiocmset,
.dtr_rts = mxuport_dtr_rts,
.process_read_urb = mxuport_process_read_urb,
.prepare_write_buffer = mxuport_prepare_write_buffer,
.resume = mxuport_resume,
};
static struct usb_serial_driver *const serial_drivers[] = {
&mxuport_device, NULL
};
module_usb_serial_driver(serial_drivers, mxuport_idtable);
MODULE_AUTHOR("Andrew Lunn <[email protected]>");
MODULE_AUTHOR("<[email protected]>");
MODULE_LICENSE("GPL");
| linux-master | drivers/usb/serial/mxuport.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Ours Technology Inc. OTi-6858 USB to serial adapter driver.
*
* Copyleft (C) 2007 Kees Lemmens (adapted for kernel 2.6.20)
* Copyright (C) 2006 Tomasz Michal Lukaszewski (FIXME: add e-mail)
* Copyright (C) 2001-2004 Greg Kroah-Hartman ([email protected])
* Copyright (C) 2003 IBM Corp.
*
 * Many thanks to the authors of the pl2303 driver: all functions in this
 * file are heavily based on pl2303 code, and the buffering code is a
 * 1-to-1 copy.
*
 * Warning! You use this driver at your own risk! The only official
 * description of this device I have is the manufacturer's datasheet,
 * and it contains almost none of the information needed to write a driver.
 * Almost all knowledge used while writing this driver was gathered by:
* - analyzing traffic between device and the M$ Windows 2000 driver,
* - trying different bit combinations and checking pin states
* with a voltmeter,
* - receiving malformed frames and producing buffer overflows
* to learn how errors are reported,
 * So, THIS CODE CAN DESTROY OTi-6858 AND ANY OTHER DEVICES THAT ARE
 * CONNECTED TO IT!
*
* See Documentation/usb/usb-serial.rst for more information on using this
* driver
*
* TODO:
* - implement correct flushing for ioctls and oti6858_close()
* - check how errors (rx overflow, parity error, framing error) are reported
* - implement oti6858_break_ctl()
* - implement more ioctls
* - test/implement flow control
* - allow setting custom baud rates
*/
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/tty.h>
#include <linux/tty_driver.h>
#include <linux/tty_flip.h>
#include <linux/serial.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/spinlock.h>
#include <linux/usb.h>
#include <linux/usb/serial.h>
#include <linux/uaccess.h>
#include <linux/kfifo.h>
#include "oti6858.h"
#define OTI6858_DESCRIPTION \
"Ours Technology Inc. OTi-6858 USB to serial adapter driver"
#define OTI6858_AUTHOR "Tomasz Michal Lukaszewski <FIXME@FIXME>"
static const struct usb_device_id id_table[] = {
{ USB_DEVICE(OTI6858_VENDOR_ID, OTI6858_PRODUCT_ID) },
{ }
};
MODULE_DEVICE_TABLE(usb, id_table);
/* requests */
#define OTI6858_REQ_GET_STATUS (USB_DIR_IN | USB_TYPE_VENDOR | 0x00)
#define OTI6858_REQ_T_GET_STATUS 0x01
#define OTI6858_REQ_SET_LINE (USB_DIR_OUT | USB_TYPE_VENDOR | 0x00)
#define OTI6858_REQ_T_SET_LINE 0x00
#define OTI6858_REQ_CHECK_TXBUFF (USB_DIR_IN | USB_TYPE_VENDOR | 0x01)
#define OTI6858_REQ_T_CHECK_TXBUFF 0x00
/* format of the control packet */
struct oti6858_control_pkt {
__le16 divisor; /* baud rate = 96000000 / (16 * divisor), LE */
#define OTI6858_MAX_BAUD_RATE 3000000
u8 frame_fmt;
#define FMT_STOP_BITS_MASK 0xc0
#define FMT_STOP_BITS_1 0x00
#define FMT_STOP_BITS_2 0x40 /* 1.5 stop bits if FMT_DATA_BITS_5 */
#define FMT_PARITY_MASK 0x38
#define FMT_PARITY_NONE 0x00
#define FMT_PARITY_ODD 0x08
#define FMT_PARITY_EVEN 0x18
#define FMT_PARITY_MARK 0x28
#define FMT_PARITY_SPACE 0x38
#define FMT_DATA_BITS_MASK 0x03
#define FMT_DATA_BITS_5 0x00
#define FMT_DATA_BITS_6 0x01
#define FMT_DATA_BITS_7 0x02
#define FMT_DATA_BITS_8 0x03
u8 something; /* always equals 0x43 */
u8 control; /* settings of flow control lines */
#define CONTROL_MASK 0x0c
#define CONTROL_DTR_HIGH 0x08
#define CONTROL_RTS_HIGH 0x04
u8 tx_status;
#define TX_BUFFER_EMPTIED 0x09
u8 pin_state;
#define PIN_MASK 0x3f
#define PIN_MSR_MASK 0x1b
#define PIN_RTS 0x20 /* output pin */
#define PIN_CTS 0x10 /* input pin, active low */
#define PIN_DSR 0x08 /* input pin, active low */
#define PIN_DTR 0x04 /* output pin */
#define PIN_RI 0x02 /* input pin, active low */
#define PIN_DCD 0x01 /* input pin, active low */
	u8 rx_bytes_avail; /* number of bytes in rx buffer */
};
#define OTI6858_CTRL_PKT_SIZE sizeof(struct oti6858_control_pkt)
#define OTI6858_CTRL_EQUALS_PENDING(a, priv) \
(((a)->divisor == (priv)->pending_setup.divisor) \
&& ((a)->control == (priv)->pending_setup.control) \
&& ((a)->frame_fmt == (priv)->pending_setup.frame_fmt))
/* function prototypes */
static int oti6858_open(struct tty_struct *tty, struct usb_serial_port *port);
static void oti6858_close(struct usb_serial_port *port);
static void oti6858_set_termios(struct tty_struct *tty,
struct usb_serial_port *port,
const struct ktermios *old_termios);
static void oti6858_init_termios(struct tty_struct *tty);
static void oti6858_read_int_callback(struct urb *urb);
static void oti6858_read_bulk_callback(struct urb *urb);
static void oti6858_write_bulk_callback(struct urb *urb);
static int oti6858_write(struct tty_struct *tty, struct usb_serial_port *port,
const unsigned char *buf, int count);
static unsigned int oti6858_write_room(struct tty_struct *tty);
static unsigned int oti6858_chars_in_buffer(struct tty_struct *tty);
static int oti6858_tiocmget(struct tty_struct *tty);
static int oti6858_tiocmset(struct tty_struct *tty,
unsigned int set, unsigned int clear);
static int oti6858_port_probe(struct usb_serial_port *port);
static void oti6858_port_remove(struct usb_serial_port *port);
/* device info */
static struct usb_serial_driver oti6858_device = {
.driver = {
.owner = THIS_MODULE,
.name = "oti6858",
},
.id_table = id_table,
.num_ports = 1,
.num_bulk_in = 1,
.num_bulk_out = 1,
.num_interrupt_in = 1,
.open = oti6858_open,
.close = oti6858_close,
.write = oti6858_write,
.set_termios = oti6858_set_termios,
.init_termios = oti6858_init_termios,
.tiocmget = oti6858_tiocmget,
.tiocmset = oti6858_tiocmset,
.tiocmiwait = usb_serial_generic_tiocmiwait,
.read_bulk_callback = oti6858_read_bulk_callback,
.read_int_callback = oti6858_read_int_callback,
.write_bulk_callback = oti6858_write_bulk_callback,
.write_room = oti6858_write_room,
.chars_in_buffer = oti6858_chars_in_buffer,
.port_probe = oti6858_port_probe,
.port_remove = oti6858_port_remove,
};
static struct usb_serial_driver * const serial_drivers[] = {
&oti6858_device, NULL
};
struct oti6858_private {
spinlock_t lock;
struct oti6858_control_pkt status;
struct {
u8 read_urb_in_use;
u8 write_urb_in_use;
} flags;
struct delayed_work delayed_write_work;
struct {
__le16 divisor;
u8 frame_fmt;
u8 control;
} pending_setup;
u8 transient;
u8 setup_done;
struct delayed_work delayed_setup_work;
struct usb_serial_port *port; /* USB port with which associated */
};
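/*
 * Deferred work used to apply a pending line configuration: read the
 * current settings from the device, write the pending ones if they
 * differ and finally resubmit the interrupt urb.
 */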
static void setup_line(struct work_struct *work)
{
struct oti6858_private *priv = container_of(work,
struct oti6858_private, delayed_setup_work.work);
struct usb_serial_port *port = priv->port;
struct oti6858_control_pkt *new_setup;
unsigned long flags;
int result;
new_setup = kmalloc(OTI6858_CTRL_PKT_SIZE, GFP_KERNEL);
if (!new_setup) {
/* we will try again */
schedule_delayed_work(&priv->delayed_setup_work,
msecs_to_jiffies(2));
return;
}
result = usb_control_msg(port->serial->dev,
usb_rcvctrlpipe(port->serial->dev, 0),
OTI6858_REQ_T_GET_STATUS,
OTI6858_REQ_GET_STATUS,
0, 0,
new_setup, OTI6858_CTRL_PKT_SIZE,
100);
if (result != OTI6858_CTRL_PKT_SIZE) {
dev_err(&port->dev, "%s(): error reading status\n", __func__);
kfree(new_setup);
/* we will try again */
schedule_delayed_work(&priv->delayed_setup_work,
msecs_to_jiffies(2));
return;
}
spin_lock_irqsave(&priv->lock, flags);
if (!OTI6858_CTRL_EQUALS_PENDING(new_setup, priv)) {
new_setup->divisor = priv->pending_setup.divisor;
new_setup->control = priv->pending_setup.control;
new_setup->frame_fmt = priv->pending_setup.frame_fmt;
spin_unlock_irqrestore(&priv->lock, flags);
result = usb_control_msg(port->serial->dev,
usb_sndctrlpipe(port->serial->dev, 0),
OTI6858_REQ_T_SET_LINE,
OTI6858_REQ_SET_LINE,
0, 0,
new_setup, OTI6858_CTRL_PKT_SIZE,
100);
} else {
spin_unlock_irqrestore(&priv->lock, flags);
result = 0;
}
kfree(new_setup);
spin_lock_irqsave(&priv->lock, flags);
if (result != OTI6858_CTRL_PKT_SIZE)
priv->transient = 0;
priv->setup_done = 1;
spin_unlock_irqrestore(&priv->lock, flags);
dev_dbg(&port->dev, "%s(): submitting interrupt urb\n", __func__);
result = usb_submit_urb(port->interrupt_in_urb, GFP_KERNEL);
if (result != 0) {
dev_err(&port->dev, "%s(): usb_submit_urb() failed with error %d\n",
__func__, result);
}
}
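/*
 * Deferred work used to push data from the write fifo: check that the
 * device can accept the data, then submit a single bulk-out urb (or
 * resubmit the interrupt urb if there is nothing to send).
 */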
static void send_data(struct work_struct *work)
{
struct oti6858_private *priv = container_of(work,
struct oti6858_private, delayed_write_work.work);
struct usb_serial_port *port = priv->port;
int count = 0, result;
unsigned long flags;
u8 *allow;
spin_lock_irqsave(&priv->lock, flags);
if (priv->flags.write_urb_in_use) {
spin_unlock_irqrestore(&priv->lock, flags);
schedule_delayed_work(&priv->delayed_write_work,
msecs_to_jiffies(2));
return;
}
priv->flags.write_urb_in_use = 1;
spin_unlock_irqrestore(&priv->lock, flags);
spin_lock_irqsave(&port->lock, flags);
count = kfifo_len(&port->write_fifo);
spin_unlock_irqrestore(&port->lock, flags);
if (count > port->bulk_out_size)
count = port->bulk_out_size;
if (count != 0) {
allow = kmalloc(1, GFP_KERNEL);
if (!allow)
return;
result = usb_control_msg(port->serial->dev,
usb_rcvctrlpipe(port->serial->dev, 0),
OTI6858_REQ_T_CHECK_TXBUFF,
OTI6858_REQ_CHECK_TXBUFF,
count, 0, allow, 1, 100);
if (result != 1 || *allow != 0)
count = 0;
kfree(allow);
}
if (count == 0) {
priv->flags.write_urb_in_use = 0;
dev_dbg(&port->dev, "%s(): submitting interrupt urb\n", __func__);
result = usb_submit_urb(port->interrupt_in_urb, GFP_NOIO);
if (result != 0) {
dev_err(&port->dev, "%s(): usb_submit_urb() failed with error %d\n",
__func__, result);
}
return;
}
count = kfifo_out_locked(&port->write_fifo,
port->write_urb->transfer_buffer,
count, &port->lock);
port->write_urb->transfer_buffer_length = count;
result = usb_submit_urb(port->write_urb, GFP_NOIO);
if (result != 0) {
dev_err_console(port, "%s(): usb_submit_urb() failed with error %d\n",
__func__, result);
priv->flags.write_urb_in_use = 0;
}
usb_serial_port_softint(port);
}
static int oti6858_port_probe(struct usb_serial_port *port)
{
struct oti6858_private *priv;
priv = kzalloc(sizeof(*priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
spin_lock_init(&priv->lock);
priv->port = port;
INIT_DELAYED_WORK(&priv->delayed_setup_work, setup_line);
INIT_DELAYED_WORK(&priv->delayed_write_work, send_data);
usb_set_serial_port_data(port, priv);
port->port.drain_delay = 256; /* FIXME: check the FIFO length */
return 0;
}
static void oti6858_port_remove(struct usb_serial_port *port)
{
struct oti6858_private *priv;
priv = usb_get_serial_port_data(port);
kfree(priv);
}
static int oti6858_write(struct tty_struct *tty, struct usb_serial_port *port,
const unsigned char *buf, int count)
{
if (!count)
return count;
count = kfifo_in_locked(&port->write_fifo, buf, count, &port->lock);
return count;
}
static unsigned int oti6858_write_room(struct tty_struct *tty)
{
struct usb_serial_port *port = tty->driver_data;
unsigned int room;
unsigned long flags;
spin_lock_irqsave(&port->lock, flags);
room = kfifo_avail(&port->write_fifo);
spin_unlock_irqrestore(&port->lock, flags);
return room;
}
static unsigned int oti6858_chars_in_buffer(struct tty_struct *tty)
{
struct usb_serial_port *port = tty->driver_data;
unsigned int chars;
unsigned long flags;
spin_lock_irqsave(&port->lock, flags);
chars = kfifo_len(&port->write_fifo);
spin_unlock_irqrestore(&port->lock, flags);
return chars;
}
static void oti6858_init_termios(struct tty_struct *tty)
{
tty_encode_baud_rate(tty, 38400, 38400);
}
static void oti6858_set_termios(struct tty_struct *tty,
struct usb_serial_port *port,
const struct ktermios *old_termios)
{
struct oti6858_private *priv = usb_get_serial_port_data(port);
unsigned long flags;
unsigned int cflag;
u8 frame_fmt, control;
__le16 divisor;
int br;
cflag = tty->termios.c_cflag;
spin_lock_irqsave(&priv->lock, flags);
divisor = priv->pending_setup.divisor;
frame_fmt = priv->pending_setup.frame_fmt;
control = priv->pending_setup.control;
spin_unlock_irqrestore(&priv->lock, flags);
frame_fmt &= ~FMT_DATA_BITS_MASK;
switch (cflag & CSIZE) {
case CS5:
frame_fmt |= FMT_DATA_BITS_5;
break;
case CS6:
frame_fmt |= FMT_DATA_BITS_6;
break;
case CS7:
frame_fmt |= FMT_DATA_BITS_7;
break;
default:
case CS8:
frame_fmt |= FMT_DATA_BITS_8;
break;
}
	/* The manufacturer claims that this device can work with baud rates
	 * up to 3 Mbps; I've tested it only at 115200 bps, so I can't
	 * guarantee that any other baud rate will work (especially
	 * the higher ones).
	 */
br = tty_get_baud_rate(tty);
if (br == 0) {
divisor = 0;
} else {
int real_br;
int new_divisor;
br = min(br, OTI6858_MAX_BAUD_RATE);
new_divisor = (96000000 + 8 * br) / (16 * br);
real_br = 96000000 / (16 * new_divisor);
divisor = cpu_to_le16(new_divisor);
tty_encode_baud_rate(tty, real_br, real_br);
}
frame_fmt &= ~FMT_STOP_BITS_MASK;
if ((cflag & CSTOPB) != 0)
frame_fmt |= FMT_STOP_BITS_2;
else
frame_fmt |= FMT_STOP_BITS_1;
frame_fmt &= ~FMT_PARITY_MASK;
if ((cflag & PARENB) != 0) {
if ((cflag & PARODD) != 0)
frame_fmt |= FMT_PARITY_ODD;
else
frame_fmt |= FMT_PARITY_EVEN;
} else {
frame_fmt |= FMT_PARITY_NONE;
}
control &= ~CONTROL_MASK;
if ((cflag & CRTSCTS) != 0)
control |= (CONTROL_DTR_HIGH | CONTROL_RTS_HIGH);
/* change control lines if we are switching to or from B0 */
/* FIXME:
spin_lock_irqsave(&priv->lock, flags);
control = priv->line_control;
if ((cflag & CBAUD) == B0)
priv->line_control &= ~(CONTROL_DTR | CONTROL_RTS);
else
priv->line_control |= (CONTROL_DTR | CONTROL_RTS);
if (control != priv->line_control) {
control = priv->line_control;
spin_unlock_irqrestore(&priv->lock, flags);
set_control_lines(serial->dev, control);
} else {
spin_unlock_irqrestore(&priv->lock, flags);
}
*/
spin_lock_irqsave(&priv->lock, flags);
if (divisor != priv->pending_setup.divisor
|| control != priv->pending_setup.control
|| frame_fmt != priv->pending_setup.frame_fmt) {
priv->pending_setup.divisor = divisor;
priv->pending_setup.control = control;
priv->pending_setup.frame_fmt = frame_fmt;
}
spin_unlock_irqrestore(&priv->lock, flags);
}
static int oti6858_open(struct tty_struct *tty, struct usb_serial_port *port)
{
struct oti6858_private *priv = usb_get_serial_port_data(port);
struct usb_serial *serial = port->serial;
struct oti6858_control_pkt *buf;
unsigned long flags;
int result;
usb_clear_halt(serial->dev, port->write_urb->pipe);
usb_clear_halt(serial->dev, port->read_urb->pipe);
buf = kmalloc(OTI6858_CTRL_PKT_SIZE, GFP_KERNEL);
if (!buf)
return -ENOMEM;
result = usb_control_msg(serial->dev, usb_rcvctrlpipe(serial->dev, 0),
OTI6858_REQ_T_GET_STATUS,
OTI6858_REQ_GET_STATUS,
0, 0,
buf, OTI6858_CTRL_PKT_SIZE,
100);
if (result != OTI6858_CTRL_PKT_SIZE) {
/* assume default (after power-on reset) values */
buf->divisor = cpu_to_le16(0x009c); /* 38400 bps */
buf->frame_fmt = 0x03; /* 8N1 */
buf->something = 0x43;
buf->control = 0x4c; /* DTR, RTS */
buf->tx_status = 0x00;
buf->pin_state = 0x5b; /* RTS, CTS, DSR, DTR, RI, DCD */
buf->rx_bytes_avail = 0x00;
}
spin_lock_irqsave(&priv->lock, flags);
memcpy(&priv->status, buf, OTI6858_CTRL_PKT_SIZE);
priv->pending_setup.divisor = buf->divisor;
priv->pending_setup.frame_fmt = buf->frame_fmt;
priv->pending_setup.control = buf->control;
spin_unlock_irqrestore(&priv->lock, flags);
kfree(buf);
dev_dbg(&port->dev, "%s(): submitting interrupt urb\n", __func__);
result = usb_submit_urb(port->interrupt_in_urb, GFP_KERNEL);
if (result != 0) {
dev_err(&port->dev, "%s(): usb_submit_urb() failed with error %d\n",
__func__, result);
oti6858_close(port);
return result;
}
/* setup termios */
if (tty)
oti6858_set_termios(tty, port, NULL);
return 0;
}
static void oti6858_close(struct usb_serial_port *port)
{
struct oti6858_private *priv = usb_get_serial_port_data(port);
unsigned long flags;
spin_lock_irqsave(&port->lock, flags);
/* clear out any remaining data in the buffer */
kfifo_reset_out(&port->write_fifo);
spin_unlock_irqrestore(&port->lock, flags);
dev_dbg(&port->dev, "%s(): after buf_clear()\n", __func__);
/* cancel scheduled setup */
cancel_delayed_work_sync(&priv->delayed_setup_work);
cancel_delayed_work_sync(&priv->delayed_write_work);
/* shutdown our urbs */
dev_dbg(&port->dev, "%s(): shutting down urbs\n", __func__);
usb_kill_urb(port->write_urb);
usb_kill_urb(port->read_urb);
usb_kill_urb(port->interrupt_in_urb);
}
static int oti6858_tiocmset(struct tty_struct *tty,
unsigned int set, unsigned int clear)
{
struct usb_serial_port *port = tty->driver_data;
struct oti6858_private *priv = usb_get_serial_port_data(port);
unsigned long flags;
u8 control;
dev_dbg(&port->dev, "%s(set = 0x%08x, clear = 0x%08x)\n",
__func__, set, clear);
/* FIXME: check if this is correct (active high/low) */
spin_lock_irqsave(&priv->lock, flags);
control = priv->pending_setup.control;
if ((set & TIOCM_RTS) != 0)
control |= CONTROL_RTS_HIGH;
if ((set & TIOCM_DTR) != 0)
control |= CONTROL_DTR_HIGH;
if ((clear & TIOCM_RTS) != 0)
control &= ~CONTROL_RTS_HIGH;
if ((clear & TIOCM_DTR) != 0)
control &= ~CONTROL_DTR_HIGH;
if (control != priv->pending_setup.control)
priv->pending_setup.control = control;
spin_unlock_irqrestore(&priv->lock, flags);
return 0;
}
static int oti6858_tiocmget(struct tty_struct *tty)
{
struct usb_serial_port *port = tty->driver_data;
struct oti6858_private *priv = usb_get_serial_port_data(port);
unsigned long flags;
unsigned pin_state;
unsigned result = 0;
spin_lock_irqsave(&priv->lock, flags);
pin_state = priv->status.pin_state & PIN_MASK;
spin_unlock_irqrestore(&priv->lock, flags);
/* FIXME: check if this is correct (active high/low) */
if ((pin_state & PIN_RTS) != 0)
result |= TIOCM_RTS;
if ((pin_state & PIN_CTS) != 0)
result |= TIOCM_CTS;
if ((pin_state & PIN_DSR) != 0)
result |= TIOCM_DSR;
if ((pin_state & PIN_DTR) != 0)
result |= TIOCM_DTR;
if ((pin_state & PIN_RI) != 0)
result |= TIOCM_RI;
if ((pin_state & PIN_DCD) != 0)
result |= TIOCM_CD;
dev_dbg(&port->dev, "%s() = 0x%08x\n", __func__, result);
return result;
}
static void oti6858_read_int_callback(struct urb *urb)
{
struct usb_serial_port *port = urb->context;
struct oti6858_private *priv = usb_get_serial_port_data(port);
int transient = 0, can_recv = 0, resubmit = 1;
int status = urb->status;
switch (status) {
case 0:
/* success */
break;
case -ECONNRESET:
case -ENOENT:
case -ESHUTDOWN:
/* this urb is terminated, clean up */
dev_dbg(&urb->dev->dev, "%s(): urb shutting down with status: %d\n",
__func__, status);
return;
default:
dev_dbg(&urb->dev->dev, "%s(): nonzero urb status received: %d\n",
__func__, status);
break;
}
if (status == 0 && urb->actual_length == OTI6858_CTRL_PKT_SIZE) {
struct oti6858_control_pkt *xs = urb->transfer_buffer;
unsigned long flags;
spin_lock_irqsave(&priv->lock, flags);
if (!priv->transient) {
if (!OTI6858_CTRL_EQUALS_PENDING(xs, priv)) {
if (xs->rx_bytes_avail == 0) {
priv->transient = 4;
priv->setup_done = 0;
resubmit = 0;
dev_dbg(&port->dev, "%s(): scheduling setup_line()\n", __func__);
schedule_delayed_work(&priv->delayed_setup_work, 0);
}
}
} else {
if (OTI6858_CTRL_EQUALS_PENDING(xs, priv)) {
priv->transient = 0;
} else if (!priv->setup_done) {
resubmit = 0;
} else if (--priv->transient == 0) {
if (xs->rx_bytes_avail == 0) {
priv->transient = 4;
priv->setup_done = 0;
resubmit = 0;
dev_dbg(&port->dev, "%s(): scheduling setup_line()\n", __func__);
schedule_delayed_work(&priv->delayed_setup_work, 0);
}
}
}
if (!priv->transient) {
u8 delta = xs->pin_state ^ priv->status.pin_state;
if (delta & PIN_MSR_MASK) {
if (delta & PIN_CTS)
port->icount.cts++;
if (delta & PIN_DSR)
port->icount.dsr++;
if (delta & PIN_RI)
port->icount.rng++;
if (delta & PIN_DCD)
port->icount.dcd++;
wake_up_interruptible(&port->port.delta_msr_wait);
}
memcpy(&priv->status, xs, OTI6858_CTRL_PKT_SIZE);
}
if (!priv->transient && xs->rx_bytes_avail != 0) {
can_recv = xs->rx_bytes_avail;
priv->flags.read_urb_in_use = 1;
}
transient = priv->transient;
spin_unlock_irqrestore(&priv->lock, flags);
}
if (can_recv) {
int result;
result = usb_submit_urb(port->read_urb, GFP_ATOMIC);
if (result != 0) {
priv->flags.read_urb_in_use = 0;
dev_err(&port->dev, "%s(): usb_submit_urb() failed,"
" error %d\n", __func__, result);
} else {
resubmit = 0;
}
} else if (!transient) {
unsigned long flags;
int count;
spin_lock_irqsave(&port->lock, flags);
count = kfifo_len(&port->write_fifo);
spin_unlock_irqrestore(&port->lock, flags);
spin_lock_irqsave(&priv->lock, flags);
if (priv->flags.write_urb_in_use == 0 && count != 0) {
schedule_delayed_work(&priv->delayed_write_work, 0);
resubmit = 0;
}
spin_unlock_irqrestore(&priv->lock, flags);
}
if (resubmit) {
int result;
/* dev_dbg(&urb->dev->dev, "%s(): submitting interrupt urb\n", __func__); */
result = usb_submit_urb(urb, GFP_ATOMIC);
if (result != 0) {
dev_err(&urb->dev->dev,
"%s(): usb_submit_urb() failed with"
" error %d\n", __func__, result);
}
}
}
static void oti6858_read_bulk_callback(struct urb *urb)
{
struct usb_serial_port *port = urb->context;
struct oti6858_private *priv = usb_get_serial_port_data(port);
unsigned char *data = urb->transfer_buffer;
unsigned long flags;
int status = urb->status;
int result;
spin_lock_irqsave(&priv->lock, flags);
priv->flags.read_urb_in_use = 0;
spin_unlock_irqrestore(&priv->lock, flags);
if (status != 0) {
dev_dbg(&urb->dev->dev, "%s(): unable to handle the error, exiting\n", __func__);
return;
}
if (urb->actual_length > 0) {
tty_insert_flip_string(&port->port, data, urb->actual_length);
tty_flip_buffer_push(&port->port);
}
/* schedule the interrupt urb */
result = usb_submit_urb(port->interrupt_in_urb, GFP_ATOMIC);
if (result != 0 && result != -EPERM) {
dev_err(&port->dev, "%s(): usb_submit_urb() failed,"
" error %d\n", __func__, result);
}
}
static void oti6858_write_bulk_callback(struct urb *urb)
{
struct usb_serial_port *port = urb->context;
struct oti6858_private *priv = usb_get_serial_port_data(port);
int status = urb->status;
int result;
switch (status) {
case 0:
/* success */
break;
case -ECONNRESET:
case -ENOENT:
case -ESHUTDOWN:
/* this urb is terminated, clean up */
dev_dbg(&urb->dev->dev, "%s(): urb shutting down with status: %d\n", __func__, status);
priv->flags.write_urb_in_use = 0;
return;
default:
/* error in the urb, so we have to resubmit it */
dev_dbg(&urb->dev->dev, "%s(): nonzero write bulk status received: %d\n", __func__, status);
dev_dbg(&urb->dev->dev, "%s(): overflow in write\n", __func__);
port->write_urb->transfer_buffer_length = 1;
result = usb_submit_urb(port->write_urb, GFP_ATOMIC);
if (result) {
dev_err_console(port, "%s(): usb_submit_urb() failed,"
" error %d\n", __func__, result);
} else {
return;
}
}
priv->flags.write_urb_in_use = 0;
/* schedule the interrupt urb if we are still open */
dev_dbg(&port->dev, "%s(): submitting interrupt urb\n", __func__);
result = usb_submit_urb(port->interrupt_in_urb, GFP_ATOMIC);
if (result != 0) {
dev_err(&port->dev, "%s(): failed submitting int urb,"
" error %d\n", __func__, result);
}
}
module_usb_serial_driver(serial_drivers, id_table);
MODULE_DESCRIPTION(OTI6858_DESCRIPTION);
MODULE_AUTHOR(OTI6858_AUTHOR);
MODULE_LICENSE("GPL v2");
| linux-master | drivers/usb/serial/oti6858.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Qualcomm Serial USB driver
*
* Copyright (c) 2008 QUALCOMM Incorporated.
* Copyright (c) 2009 Greg Kroah-Hartman <[email protected]>
* Copyright (c) 2009 Novell Inc.
*/
#include <linux/tty.h>
#include <linux/tty_flip.h>
#include <linux/module.h>
#include <linux/usb.h>
#include <linux/usb/serial.h>
#include <linux/slab.h>
#include "usb-wwan.h"
#define DRIVER_AUTHOR "Qualcomm Inc"
#define DRIVER_DESC "Qualcomm USB Serial driver"
#define QUECTEL_EC20_PID 0x9215
/* standard device layouts supported by this driver */
enum qcserial_layouts {
QCSERIAL_G2K = 0, /* Gobi 2000 */
QCSERIAL_G1K = 1, /* Gobi 1000 */
QCSERIAL_SWI = 2, /* Sierra Wireless */
QCSERIAL_HWI = 3, /* Huawei */
};
#define DEVICE_G1K(v, p) \
USB_DEVICE(v, p), .driver_info = QCSERIAL_G1K
#define DEVICE_SWI(v, p) \
USB_DEVICE(v, p), .driver_info = QCSERIAL_SWI
#define DEVICE_HWI(v, p) \
USB_DEVICE(v, p), .driver_info = QCSERIAL_HWI
static const struct usb_device_id id_table[] = {
/* Gobi 1000 devices */
{DEVICE_G1K(0x05c6, 0x9211)}, /* Acer Gobi QDL device */
{DEVICE_G1K(0x05c6, 0x9212)}, /* Acer Gobi Modem Device */
{DEVICE_G1K(0x03f0, 0x1f1d)}, /* HP un2400 Gobi Modem Device */
{DEVICE_G1K(0x03f0, 0x201d)}, /* HP un2400 Gobi QDL Device */
{DEVICE_G1K(0x04da, 0x250d)}, /* Panasonic Gobi Modem device */
{DEVICE_G1K(0x04da, 0x250c)}, /* Panasonic Gobi QDL device */
{DEVICE_G1K(0x413c, 0x8172)}, /* Dell Gobi Modem device */
{DEVICE_G1K(0x413c, 0x8171)}, /* Dell Gobi QDL device */
{DEVICE_G1K(0x1410, 0xa001)}, /* Novatel/Verizon USB-1000 */
{DEVICE_G1K(0x1410, 0xa002)}, /* Novatel Gobi Modem device */
{DEVICE_G1K(0x1410, 0xa003)}, /* Novatel Gobi Modem device */
{DEVICE_G1K(0x1410, 0xa004)}, /* Novatel Gobi Modem device */
{DEVICE_G1K(0x1410, 0xa005)}, /* Novatel Gobi Modem device */
{DEVICE_G1K(0x1410, 0xa006)}, /* Novatel Gobi Modem device */
{DEVICE_G1K(0x1410, 0xa007)}, /* Novatel Gobi Modem device */
{DEVICE_G1K(0x1410, 0xa008)}, /* Novatel Gobi QDL device */
{DEVICE_G1K(0x0b05, 0x1776)}, /* Asus Gobi Modem device */
{DEVICE_G1K(0x0b05, 0x1774)}, /* Asus Gobi QDL device */
{DEVICE_G1K(0x19d2, 0xfff3)}, /* ONDA Gobi Modem device */
{DEVICE_G1K(0x19d2, 0xfff2)}, /* ONDA Gobi QDL device */
{DEVICE_G1K(0x1557, 0x0a80)}, /* OQO Gobi QDL device */
{DEVICE_G1K(0x05c6, 0x9001)}, /* Generic Gobi Modem device */
{DEVICE_G1K(0x05c6, 0x9002)}, /* Generic Gobi Modem device */
{DEVICE_G1K(0x05c6, 0x9202)}, /* Generic Gobi Modem device */
{DEVICE_G1K(0x05c6, 0x9203)}, /* Generic Gobi Modem device */
{DEVICE_G1K(0x05c6, 0x9222)}, /* Generic Gobi Modem device */
{DEVICE_G1K(0x05c6, 0x9008)}, /* Generic Gobi QDL device */
{DEVICE_G1K(0x05c6, 0x9009)}, /* Generic Gobi Modem device */
{DEVICE_G1K(0x05c6, 0x9201)}, /* Generic Gobi QDL device */
{DEVICE_G1K(0x05c6, 0x9221)}, /* Generic Gobi QDL device */
{DEVICE_G1K(0x05c6, 0x9231)}, /* Generic Gobi QDL device */
{DEVICE_G1K(0x1f45, 0x0001)}, /* Unknown Gobi QDL device */
{DEVICE_G1K(0x1bc7, 0x900e)}, /* Telit Gobi QDL device */
/* Gobi 2000 devices */
{USB_DEVICE(0x1410, 0xa010)}, /* Novatel Gobi 2000 QDL device */
{USB_DEVICE(0x1410, 0xa011)}, /* Novatel Gobi 2000 QDL device */
{USB_DEVICE(0x1410, 0xa012)}, /* Novatel Gobi 2000 QDL device */
{USB_DEVICE(0x1410, 0xa013)}, /* Novatel Gobi 2000 QDL device */
{USB_DEVICE(0x1410, 0xa014)}, /* Novatel Gobi 2000 QDL device */
{USB_DEVICE(0x413c, 0x8185)}, /* Dell Gobi 2000 QDL device (N0218, VU936) */
{USB_DEVICE(0x413c, 0x8186)}, /* Dell Gobi 2000 Modem device (N0218, VU936) */
{USB_DEVICE(0x05c6, 0x9208)}, /* Generic Gobi 2000 QDL device */
{USB_DEVICE(0x05c6, 0x920b)}, /* Generic Gobi 2000 Modem device */
{USB_DEVICE(0x05c6, 0x9224)}, /* Sony Gobi 2000 QDL device (N0279, VU730) */
{USB_DEVICE(0x05c6, 0x9225)}, /* Sony Gobi 2000 Modem device (N0279, VU730) */
{USB_DEVICE(0x05c6, 0x9244)}, /* Samsung Gobi 2000 QDL device (VL176) */
{USB_DEVICE(0x05c6, 0x9245)}, /* Samsung Gobi 2000 Modem device (VL176) */
{USB_DEVICE(0x03f0, 0x241d)}, /* HP Gobi 2000 QDL device (VP412) */
{USB_DEVICE(0x03f0, 0x251d)}, /* HP Gobi 2000 Modem device (VP412) */
{USB_DEVICE(0x05c6, 0x9214)}, /* Acer Gobi 2000 QDL device (VP413) */
{USB_DEVICE(0x05c6, 0x9215)}, /* Acer Gobi 2000 Modem device (VP413) */
{USB_DEVICE(0x05c6, 0x9264)}, /* Asus Gobi 2000 QDL device (VR305) */
{USB_DEVICE(0x05c6, 0x9265)}, /* Asus Gobi 2000 Modem device (VR305) */
{USB_DEVICE(0x05c6, 0x9234)}, /* Top Global Gobi 2000 QDL device (VR306) */
{USB_DEVICE(0x05c6, 0x9235)}, /* Top Global Gobi 2000 Modem device (VR306) */
{USB_DEVICE(0x05c6, 0x9274)}, /* iRex Technologies Gobi 2000 QDL device (VR307) */
{USB_DEVICE(0x05c6, 0x9275)}, /* iRex Technologies Gobi 2000 Modem device (VR307) */
{USB_DEVICE(0x1199, 0x9000)}, /* Sierra Wireless Gobi 2000 QDL device (VT773) */
{USB_DEVICE(0x1199, 0x9001)}, /* Sierra Wireless Gobi 2000 Modem device (VT773) */
{USB_DEVICE(0x1199, 0x9002)}, /* Sierra Wireless Gobi 2000 Modem device (VT773) */
{USB_DEVICE(0x1199, 0x9003)}, /* Sierra Wireless Gobi 2000 Modem device (VT773) */
{USB_DEVICE(0x1199, 0x9004)}, /* Sierra Wireless Gobi 2000 Modem device (VT773) */
{USB_DEVICE(0x1199, 0x9005)}, /* Sierra Wireless Gobi 2000 Modem device (VT773) */
{USB_DEVICE(0x1199, 0x9006)}, /* Sierra Wireless Gobi 2000 Modem device (VT773) */
{USB_DEVICE(0x1199, 0x9007)}, /* Sierra Wireless Gobi 2000 Modem device (VT773) */
{USB_DEVICE(0x1199, 0x9008)}, /* Sierra Wireless Gobi 2000 Modem device (VT773) */
{USB_DEVICE(0x1199, 0x9009)}, /* Sierra Wireless Gobi 2000 Modem device (VT773) */
{USB_DEVICE(0x1199, 0x900a)}, /* Sierra Wireless Gobi 2000 Modem device (VT773) */
{USB_DEVICE(0x1199, 0x9011)}, /* Sierra Wireless Gobi 2000 Modem device (MC8305) */
{USB_DEVICE(0x16d8, 0x8001)}, /* CMDTech Gobi 2000 QDL device (VU922) */
{USB_DEVICE(0x16d8, 0x8002)}, /* CMDTech Gobi 2000 Modem device (VU922) */
{USB_DEVICE(0x05c6, 0x9204)}, /* Gobi 2000 QDL device */
{USB_DEVICE(0x05c6, 0x9205)}, /* Gobi 2000 Modem device */
/* Gobi 3000 devices */
{USB_DEVICE(0x03f0, 0x371d)}, /* HP un2430 Gobi 3000 QDL */
{USB_DEVICE(0x05c6, 0x920c)}, /* Gobi 3000 QDL */
{USB_DEVICE(0x05c6, 0x920d)}, /* Gobi 3000 Composite */
{USB_DEVICE(0x1410, 0xa020)}, /* Novatel Gobi 3000 QDL */
{USB_DEVICE(0x1410, 0xa021)}, /* Novatel Gobi 3000 Composite */
{USB_DEVICE(0x413c, 0x8193)}, /* Dell Gobi 3000 QDL */
{USB_DEVICE(0x413c, 0x8194)}, /* Dell Gobi 3000 Composite */
{USB_DEVICE(0x413c, 0x81a6)}, /* Dell DW5570 QDL (MC8805) */
{USB_DEVICE(0x1199, 0x68a4)}, /* Sierra Wireless QDL */
{USB_DEVICE(0x1199, 0x68a5)}, /* Sierra Wireless Modem */
{USB_DEVICE(0x1199, 0x68a8)}, /* Sierra Wireless QDL */
{USB_DEVICE(0x1199, 0x68a9)}, /* Sierra Wireless Modem */
{USB_DEVICE(0x1199, 0x9010)}, /* Sierra Wireless Gobi 3000 QDL */
{USB_DEVICE(0x1199, 0x9012)}, /* Sierra Wireless Gobi 3000 QDL */
{USB_DEVICE(0x1199, 0x9013)}, /* Sierra Wireless Gobi 3000 Modem device (MC8355) */
{USB_DEVICE(0x1199, 0x9014)}, /* Sierra Wireless Gobi 3000 QDL */
{USB_DEVICE(0x1199, 0x9015)}, /* Sierra Wireless Gobi 3000 Modem device */
{USB_DEVICE(0x1199, 0x9018)}, /* Sierra Wireless Gobi 3000 QDL */
{USB_DEVICE(0x1199, 0x9019)}, /* Sierra Wireless Gobi 3000 Modem device */
{USB_DEVICE(0x1199, 0x901b)}, /* Sierra Wireless MC7770 */
{USB_DEVICE(0x12D1, 0x14F0)}, /* Sony Gobi 3000 QDL */
{USB_DEVICE(0x12D1, 0x14F1)}, /* Sony Gobi 3000 Composite */
{USB_DEVICE(0x0AF0, 0x8120)}, /* Option GTM681W */
/* non-Gobi Sierra Wireless devices */
{DEVICE_SWI(0x03f0, 0x4e1d)}, /* HP lt4111 LTE/EV-DO/HSPA+ Gobi 4G Module */
{DEVICE_SWI(0x0f3d, 0x68a2)}, /* Sierra Wireless MC7700 */
{DEVICE_SWI(0x114f, 0x68a2)}, /* Sierra Wireless MC7750 */
{DEVICE_SWI(0x1199, 0x68a2)}, /* Sierra Wireless MC7710 */
{DEVICE_SWI(0x1199, 0x68c0)}, /* Sierra Wireless MC7304/MC7354 */
{DEVICE_SWI(0x1199, 0x901c)}, /* Sierra Wireless EM7700 */
{DEVICE_SWI(0x1199, 0x901e)}, /* Sierra Wireless EM7355 QDL */
{DEVICE_SWI(0x1199, 0x901f)}, /* Sierra Wireless EM7355 */
{DEVICE_SWI(0x1199, 0x9040)}, /* Sierra Wireless Modem */
{DEVICE_SWI(0x1199, 0x9041)}, /* Sierra Wireless MC7305/MC7355 */
{DEVICE_SWI(0x1199, 0x9051)}, /* Netgear AirCard 340U */
{DEVICE_SWI(0x1199, 0x9053)}, /* Sierra Wireless Modem */
{DEVICE_SWI(0x1199, 0x9054)}, /* Sierra Wireless Modem */
{DEVICE_SWI(0x1199, 0x9055)}, /* Netgear AirCard 341U */
{DEVICE_SWI(0x1199, 0x9056)}, /* Sierra Wireless Modem */
{DEVICE_SWI(0x1199, 0x9060)}, /* Sierra Wireless Modem */
{DEVICE_SWI(0x1199, 0x9061)}, /* Sierra Wireless Modem */
{DEVICE_SWI(0x1199, 0x9062)}, /* Sierra Wireless EM7305 QDL */
{DEVICE_SWI(0x1199, 0x9063)}, /* Sierra Wireless EM7305 */
{DEVICE_SWI(0x1199, 0x9070)}, /* Sierra Wireless MC74xx */
{DEVICE_SWI(0x1199, 0x9071)}, /* Sierra Wireless MC74xx */
{DEVICE_SWI(0x1199, 0x9078)}, /* Sierra Wireless EM74xx */
{DEVICE_SWI(0x1199, 0x9079)}, /* Sierra Wireless EM74xx */
{DEVICE_SWI(0x1199, 0x907a)}, /* Sierra Wireless EM74xx QDL */
{DEVICE_SWI(0x1199, 0x907b)}, /* Sierra Wireless EM74xx */
{DEVICE_SWI(0x1199, 0x9090)}, /* Sierra Wireless EM7565 QDL */
{DEVICE_SWI(0x1199, 0x9091)}, /* Sierra Wireless EM7565 */
{DEVICE_SWI(0x1199, 0x90d2)}, /* Sierra Wireless EM9191 QDL */
{DEVICE_SWI(0x1199, 0xc080)}, /* Sierra Wireless EM7590 QDL */
{DEVICE_SWI(0x1199, 0xc081)}, /* Sierra Wireless EM7590 */
{DEVICE_SWI(0x413c, 0x81a2)}, /* Dell Wireless 5806 Gobi(TM) 4G LTE Mobile Broadband Card */
{DEVICE_SWI(0x413c, 0x81a3)}, /* Dell Wireless 5570 HSPA+ (42Mbps) Mobile Broadband Card */
{DEVICE_SWI(0x413c, 0x81a4)}, /* Dell Wireless 5570e HSPA+ (42Mbps) Mobile Broadband Card */
{DEVICE_SWI(0x413c, 0x81a8)}, /* Dell Wireless 5808 Gobi(TM) 4G LTE Mobile Broadband Card */
{DEVICE_SWI(0x413c, 0x81a9)}, /* Dell Wireless 5808e Gobi(TM) 4G LTE Mobile Broadband Card */
{DEVICE_SWI(0x413c, 0x81b1)}, /* Dell Wireless 5809e Gobi(TM) 4G LTE Mobile Broadband Card */
{DEVICE_SWI(0x413c, 0x81b3)}, /* Dell Wireless 5809e Gobi(TM) 4G LTE Mobile Broadband Card (rev3) */
{DEVICE_SWI(0x413c, 0x81b5)}, /* Dell Wireless 5811e QDL */
{DEVICE_SWI(0x413c, 0x81b6)}, /* Dell Wireless 5811e QDL */
{DEVICE_SWI(0x413c, 0x81c2)}, /* Dell Wireless 5811e */
{DEVICE_SWI(0x413c, 0x81cb)}, /* Dell Wireless 5816e QDL */
{DEVICE_SWI(0x413c, 0x81cc)}, /* Dell Wireless 5816e */
{DEVICE_SWI(0x413c, 0x81cf)}, /* Dell Wireless 5819 */
{DEVICE_SWI(0x413c, 0x81d0)}, /* Dell Wireless 5819 */
{DEVICE_SWI(0x413c, 0x81d1)}, /* Dell Wireless 5818 */
{DEVICE_SWI(0x413c, 0x81d2)}, /* Dell Wireless 5818 */
/* Huawei devices */
{DEVICE_HWI(0x03f0, 0x581d)}, /* HP lt4112 LTE/HSPA+ Gobi 4G Modem (Huawei me906e) */
{ } /* Terminating entry */
};
MODULE_DEVICE_TABLE(usb, id_table);
static int handle_quectel_ec20(struct device *dev, int ifnum)
{
int altsetting = 0;
/*
* Quectel EC20 Mini PCIe LTE module layout:
* 0: DM/DIAG (use libqcdm from ModemManager for communication)
* 1: NMEA
* 2: AT-capable modem port
* 3: Modem interface
* 4: NDIS
*/
switch (ifnum) {
case 0:
dev_dbg(dev, "Quectel EC20 DM/DIAG interface found\n");
break;
case 1:
dev_dbg(dev, "Quectel EC20 NMEA GPS interface found\n");
break;
case 2:
case 3:
dev_dbg(dev, "Quectel EC20 Modem port found\n");
break;
case 4:
/* Don't claim the QMI/net interface */
altsetting = -1;
break;
}
return altsetting;
}
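/*
 * A return value of -1 means "do not claim this interface"; qcprobe() below
 * only calls usb_set_interface() when the chosen altsetting is >= 0.
 */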
static int qcprobe(struct usb_serial *serial, const struct usb_device_id *id)
{
struct usb_host_interface *intf = serial->interface->cur_altsetting;
struct device *dev = &serial->dev->dev;
int retval = -ENODEV;
__u8 nintf;
__u8 ifnum;
int altsetting = -1;
bool sendsetup = false;
/* we only support vendor specific functions */
if (intf->desc.bInterfaceClass != USB_CLASS_VENDOR_SPEC)
goto done;
nintf = serial->dev->actconfig->desc.bNumInterfaces;
dev_dbg(dev, "Num Interfaces = %d\n", nintf);
ifnum = intf->desc.bInterfaceNumber;
dev_dbg(dev, "This Interface = %d\n", ifnum);
if (nintf == 1) {
/* QDL mode */
/* Gobi 2000 has a single altsetting, older ones have two */
if (serial->interface->num_altsetting == 2)
intf = usb_altnum_to_altsetting(serial->interface, 1);
else if (serial->interface->num_altsetting > 2)
goto done;
if (intf && intf->desc.bNumEndpoints == 2 &&
usb_endpoint_is_bulk_in(&intf->endpoint[0].desc) &&
usb_endpoint_is_bulk_out(&intf->endpoint[1].desc)) {
dev_dbg(dev, "QDL port found\n");
if (serial->interface->num_altsetting == 1)
retval = 0; /* Success */
else
altsetting = 1;
}
goto done;
}
/* default to enabling interface */
altsetting = 0;
/*
* Composite mode; don't bind to the QMI/net interface as that
* gets handled by other drivers.
*/
switch (id->driver_info) {
case QCSERIAL_G1K:
/*
* Gobi 1K USB layout:
* 0: DM/DIAG (use libqcdm from ModemManager for communication)
* 1: serial port (doesn't respond)
* 2: AT-capable modem port
* 3: QMI/net
*/
if (nintf < 3 || nintf > 4) {
dev_err(dev, "unknown number of interfaces: %d\n", nintf);
altsetting = -1;
goto done;
}
if (ifnum == 0) {
dev_dbg(dev, "Gobi 1K DM/DIAG interface found\n");
altsetting = 1;
} else if (ifnum == 2)
dev_dbg(dev, "Modem port found\n");
else
altsetting = -1;
break;
case QCSERIAL_G2K:
/* handle non-standard layouts */
if (nintf == 5 && id->idProduct == QUECTEL_EC20_PID) {
altsetting = handle_quectel_ec20(dev, ifnum);
goto done;
}
/*
* Gobi 2K+ USB layout:
* 0: QMI/net
* 1: DM/DIAG (use libqcdm from ModemManager for communication)
* 2: AT-capable modem port
* 3: NMEA
*/
if (nintf < 3 || nintf > 4) {
dev_err(dev, "unknown number of interfaces: %d\n", nintf);
altsetting = -1;
goto done;
}
switch (ifnum) {
case 0:
/* Don't claim the QMI/net interface */
altsetting = -1;
break;
case 1:
dev_dbg(dev, "Gobi 2K+ DM/DIAG interface found\n");
break;
case 2:
dev_dbg(dev, "Modem port found\n");
break;
case 3:
/*
* NMEA (serial line 9600 8N1)
* # echo "\$GPS_START" > /dev/ttyUSBx
* # echo "\$GPS_STOP" > /dev/ttyUSBx
*/
dev_dbg(dev, "Gobi 2K+ NMEA GPS interface found\n");
break;
}
break;
case QCSERIAL_SWI:
/*
* Sierra Wireless layout:
* 0: DM/DIAG (use libqcdm from ModemManager for communication)
* 2: NMEA
* 3: AT-capable modem port
* 8: QMI/net
*/
switch (ifnum) {
case 0:
dev_dbg(dev, "DM/DIAG interface found\n");
break;
case 2:
dev_dbg(dev, "NMEA GPS interface found\n");
sendsetup = true;
break;
case 3:
dev_dbg(dev, "Modem port found\n");
sendsetup = true;
break;
default:
/* don't claim any unsupported interface */
altsetting = -1;
break;
}
break;
case QCSERIAL_HWI:
/*
* Huawei devices map functions by subclass + protocol
		 * instead of interface numbers. The protocol identifies
		 * a specific function, while the subclass indicates a
		 * specific firmware source.
		 *
		 * This is a list of functions known to be non-serial. The rest
		 * are assumed to be serial and will be handled by this driver.
		 */
switch (intf->desc.bInterfaceProtocol) {
/* QMI combined (qmi_wwan) */
case 0x07:
case 0x37:
case 0x67:
/* QMI data (qmi_wwan) */
case 0x08:
case 0x38:
case 0x68:
/* QMI control (qmi_wwan) */
case 0x09:
case 0x39:
case 0x69:
/* NCM like (huawei_cdc_ncm) */
case 0x16:
case 0x46:
case 0x76:
altsetting = -1;
break;
default:
dev_dbg(dev, "Huawei type serial port found (%02x/%02x/%02x)\n",
intf->desc.bInterfaceClass,
intf->desc.bInterfaceSubClass,
intf->desc.bInterfaceProtocol);
}
break;
default:
dev_err(dev, "unsupported device layout type: %lu\n",
id->driver_info);
break;
}
done:
if (altsetting >= 0) {
retval = usb_set_interface(serial->dev, ifnum, altsetting);
if (retval < 0) {
dev_err(dev,
"Could not set interface, error %d\n",
retval);
retval = -ENODEV;
}
}
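	/*
	 * Until qc_attach() allocates the real private data, the serial data
	 * pointer is used to carry the sendsetup flag as a plain integer;
	 * qc_attach() reads it back the same way.
	 */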
if (!retval)
usb_set_serial_data(serial, (void *)(unsigned long)sendsetup);
return retval;
}
static int qc_attach(struct usb_serial *serial)
{
struct usb_wwan_intf_private *data;
bool sendsetup;
data = kzalloc(sizeof(*data), GFP_KERNEL);
if (!data)
return -ENOMEM;
sendsetup = !!(unsigned long)(usb_get_serial_data(serial));
if (sendsetup)
data->use_send_setup = 1;
spin_lock_init(&data->susp_lock);
usb_set_serial_data(serial, data);
return 0;
}
static void qc_release(struct usb_serial *serial)
{
struct usb_wwan_intf_private *priv = usb_get_serial_data(serial);
usb_set_serial_data(serial, NULL);
kfree(priv);
}
static struct usb_serial_driver qcdevice = {
.driver = {
.owner = THIS_MODULE,
.name = "qcserial",
},
.description = "Qualcomm USB modem",
.id_table = id_table,
.num_ports = 1,
.probe = qcprobe,
.open = usb_wwan_open,
.close = usb_wwan_close,
.dtr_rts = usb_wwan_dtr_rts,
.write = usb_wwan_write,
.write_room = usb_wwan_write_room,
.chars_in_buffer = usb_wwan_chars_in_buffer,
.tiocmget = usb_wwan_tiocmget,
.tiocmset = usb_wwan_tiocmset,
.attach = qc_attach,
.release = qc_release,
.port_probe = usb_wwan_port_probe,
.port_remove = usb_wwan_port_remove,
#ifdef CONFIG_PM
.suspend = usb_wwan_suspend,
.resume = usb_wwan_resume,
#endif
};
static struct usb_serial_driver * const serial_drivers[] = {
&qcdevice, NULL
};
module_usb_serial_driver(serial_drivers, id_table);
MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL v2");
| linux-master | drivers/usb/serial/qcserial.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* Clean ups from Moschip version and a few ioctl implementations by:
* Paul B Schroeder <pschroeder "at" uplogix "dot" com>
*
* Originally based on drivers/usb/serial/io_edgeport.c which is:
* Copyright (C) 2000 Inside Out Networks, All rights reserved.
* Copyright (C) 2001-2002 Greg Kroah-Hartman <[email protected]>
*
*/
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/tty.h>
#include <linux/tty_driver.h>
#include <linux/tty_flip.h>
#include <linux/module.h>
#include <linux/serial.h>
#include <linux/usb.h>
#include <linux/usb/serial.h>
#include <linux/uaccess.h>
#define DRIVER_DESC "Moschip 7840/7820 USB Serial Driver"
/*
* 16C50 UART register defines
*/
#define LCR_BITS_5 0x00 /* 5 bits/char */
#define LCR_BITS_6 0x01 /* 6 bits/char */
#define LCR_BITS_7 0x02 /* 7 bits/char */
#define LCR_BITS_8 0x03 /* 8 bits/char */
#define LCR_BITS_MASK 0x03 /* Mask for bits/char field */
#define LCR_STOP_1 0x00 /* 1 stop bit */
#define LCR_STOP_1_5 0x04 /* 1.5 stop bits (if 5 bits/char) */
#define LCR_STOP_2 0x04 /* 2 stop bits (if 6-8 bits/char) */
#define LCR_STOP_MASK 0x04 /* Mask for stop bits field */
#define LCR_PAR_NONE 0x00 /* No parity */
#define LCR_PAR_ODD 0x08 /* Odd parity */
#define LCR_PAR_EVEN 0x18 /* Even parity */
#define LCR_PAR_MARK 0x28 /* Force parity bit to 1 */
#define LCR_PAR_SPACE 0x38 /* Force parity bit to 0 */
#define LCR_PAR_MASK 0x38 /* Mask for parity field */
#define LCR_SET_BREAK 0x40 /* Set Break condition */
#define LCR_DL_ENABLE 0x80 /* Enable access to divisor latch */
#define MCR_DTR 0x01 /* Assert DTR */
#define MCR_RTS 0x02 /* Assert RTS */
#define MCR_OUT1 0x04 /* Loopback only: Sets state of RI */
#define MCR_MASTER_IE 0x08 /* Enable interrupt outputs */
#define MCR_LOOPBACK 0x10 /* Set internal (digital) loopback mode */
#define MCR_XON_ANY 0x20 /* Enable any char to exit XOFF mode */
#define MOS7840_MSR_CTS 0x10 /* Current state of CTS */
#define MOS7840_MSR_DSR 0x20 /* Current state of DSR */
#define MOS7840_MSR_RI 0x40 /* Current state of RI */
#define MOS7840_MSR_CD 0x80 /* Current state of CD */
/*
* Defines used for sending commands to port
*/
#define MOS_WDR_TIMEOUT 5000 /* default urb timeout */
#define MOS_PORT1 0x0200
#define MOS_PORT2 0x0300
#define MOS_VENREG 0x0000
#define MOS_MAX_PORT 0x02
#define MOS_WRITE 0x0E
#define MOS_READ 0x0D
/* Requests */
#define MCS_RD_RTYPE 0xC0
#define MCS_WR_RTYPE 0x40
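/*
 * MCS_RD_RTYPE and MCS_WR_RTYPE are bmRequestType values: 0xC0 is a vendor,
 * device-to-host (read) request and 0x40 is a vendor, host-to-device (write)
 * request.
 */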
#define MCS_RDREQ 0x0D
#define MCS_WRREQ 0x0E
#define MCS_CTRL_TIMEOUT 500
#define VENDOR_READ_LENGTH (0x01)
#define MAX_NAME_LEN 64
#define ZLP_REG1 0x3A /* Zero_Flag_Reg1 58 */
#define ZLP_REG5 0x3E /* Zero_Flag_Reg5 62 */
/* For higher baud Rates use TIOCEXBAUD */
#define TIOCEXBAUD 0x5462
/*
* Vendor id and device id defines
*
* NOTE: Do not add new defines, add entries directly to the id_table instead.
*/
#define USB_VENDOR_ID_BANDB 0x0856
#define BANDB_DEVICE_ID_USO9ML2_2 0xAC22
#define BANDB_DEVICE_ID_USO9ML2_2P 0xBC00
#define BANDB_DEVICE_ID_USO9ML2_4 0xAC24
#define BANDB_DEVICE_ID_USO9ML2_4P 0xBC01
#define BANDB_DEVICE_ID_US9ML2_2 0xAC29
#define BANDB_DEVICE_ID_US9ML2_4 0xAC30
#define BANDB_DEVICE_ID_USPTL4_2 0xAC31
#define BANDB_DEVICE_ID_USPTL4_4 0xAC32
#define BANDB_DEVICE_ID_USOPTL4_2 0xAC42
#define BANDB_DEVICE_ID_USOPTL4_2P 0xBC02
#define BANDB_DEVICE_ID_USOPTL4_4 0xAC44
#define BANDB_DEVICE_ID_USOPTL4_4P 0xBC03
/* Interrupt Routine Defines */
#define SERIAL_IIR_RLS 0x06
#define SERIAL_IIR_MS 0x00
/*
* Emulation of the bit mask on the LINE STATUS REGISTER.
*/
#define SERIAL_LSR_DR 0x0001
#define SERIAL_LSR_OE 0x0002
#define SERIAL_LSR_PE 0x0004
#define SERIAL_LSR_FE 0x0008
#define SERIAL_LSR_BI 0x0010
#define MOS_MSR_DELTA_CTS 0x10
#define MOS_MSR_DELTA_DSR 0x20
#define MOS_MSR_DELTA_RI 0x40
#define MOS_MSR_DELTA_CD 0x80
/* Serial Port register Address */
#define INTERRUPT_ENABLE_REGISTER ((__u16)(0x01))
#define FIFO_CONTROL_REGISTER ((__u16)(0x02))
#define LINE_CONTROL_REGISTER ((__u16)(0x03))
#define MODEM_CONTROL_REGISTER ((__u16)(0x04))
#define LINE_STATUS_REGISTER ((__u16)(0x05))
#define MODEM_STATUS_REGISTER ((__u16)(0x06))
#define SCRATCH_PAD_REGISTER ((__u16)(0x07))
#define DIVISOR_LATCH_LSB ((__u16)(0x00))
#define DIVISOR_LATCH_MSB ((__u16)(0x01))
#define CLK_MULTI_REGISTER ((__u16)(0x02))
#define CLK_START_VALUE_REGISTER ((__u16)(0x03))
#define GPIO_REGISTER ((__u16)(0x07))
#define SERIAL_LCR_DLAB ((__u16)(0x0080))
/*
* URB POOL related defines
*/
#define NUM_URBS 16 /* URB Count */
#define URB_TRANSFER_BUFFER_SIZE 32 /* URB Size */
/* LED on/off milliseconds*/
#define LED_ON_MS 500
#define LED_OFF_MS 500
enum mos7840_flag {
MOS7840_FLAG_LED_BUSY,
};
#define MCS_PORT_MASK GENMASK(2, 0)
#define MCS_PORTS(nr) ((nr) & MCS_PORT_MASK)
#define MCS_LED BIT(3)
#define MCS_DEVICE(vid, pid, flags) \
USB_DEVICE((vid), (pid)), .driver_info = (flags)
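/*
 * Illustrative example only (the vendor/product IDs here are made up): a
 * hypothetical four-port device with an activity LED would be listed as
 *
 *	{ MCS_DEVICE(0x1234, 0x5678, MCS_PORTS(4) | MCS_LED) }
 *
 * i.e. bits 2..0 of driver_info carry the port count and bit 3 marks the LED.
 */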
static const struct usb_device_id id_table[] = {
{ MCS_DEVICE(0x0557, 0x2011, MCS_PORTS(4)) }, /* ATEN UC2324 */
{ MCS_DEVICE(0x0557, 0x7820, MCS_PORTS(2)) }, /* ATEN UC2322 */
{ MCS_DEVICE(0x110a, 0x2210, MCS_PORTS(2)) }, /* Moxa UPort 2210 */
{ MCS_DEVICE(0x9710, 0x7810, MCS_PORTS(1) | MCS_LED) }, /* ASIX MCS7810 */
{ MCS_DEVICE(0x9710, 0x7820, MCS_PORTS(2)) }, /* MosChip MCS7820 */
{ MCS_DEVICE(0x9710, 0x7840, MCS_PORTS(4)) }, /* MosChip MCS7840 */
{ MCS_DEVICE(0x9710, 0x7843, MCS_PORTS(3)) }, /* ASIX MCS7840 3 port */
{ USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USO9ML2_2) },
{ USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USO9ML2_2P) },
{ USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USO9ML2_4) },
{ USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USO9ML2_4P) },
{ USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_US9ML2_2) },
{ USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_US9ML2_4) },
{ USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USPTL4_2) },
{ USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USPTL4_4) },
{ USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USOPTL4_2) },
{ USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USOPTL4_2P) },
{ USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USOPTL4_4) },
{ USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USOPTL4_4P) },
{} /* terminating entry */
};
MODULE_DEVICE_TABLE(usb, id_table);
/* This structure holds all of the local port information */
struct moschip_port {
	int port_num;		/* Actual port number in the device (1, 2, etc.) */
struct urb *read_urb; /* read URB for this port */
__u8 shadowLCR; /* last LCR value received */
__u8 shadowMCR; /* last MCR value received */
struct usb_serial_port *port; /* loop back to the owner of this object */
/* Offsets */
__u8 SpRegOffset;
__u8 ControlRegOffset;
__u8 DcrRegOffset;
spinlock_t pool_lock;
struct urb *write_urb_pool[NUM_URBS];
char busy[NUM_URBS];
bool read_urb_busy;
/* For device(s) with LED indicator */
bool has_led;
struct timer_list led_timer1; /* Timer for LED on */
struct timer_list led_timer2; /* Timer for LED off */
struct urb *led_urb;
struct usb_ctrlrequest *led_dr;
unsigned long flags;
};
/*
 * mos7840_set_reg_sync
 *	Write a value to a device control register using a synchronous
 *	control message (usb_control_msg() on the default control pipe).
 */
static int mos7840_set_reg_sync(struct usb_serial_port *port, __u16 reg,
__u16 val)
{
struct usb_device *dev = port->serial->dev;
val = val & 0x00ff;
dev_dbg(&port->dev, "mos7840_set_reg_sync offset is %x, value %x\n", reg, val);
return usb_control_msg(dev, usb_sndctrlpipe(dev, 0), MCS_WRREQ,
MCS_WR_RTYPE, val, reg, NULL, 0,
MOS_WDR_TIMEOUT);
}
/*
 * mos7840_get_reg_sync
 *	Read a device control register using a synchronous control message
 *	(usb_control_msg() on the default control pipe).
 */
static int mos7840_get_reg_sync(struct usb_serial_port *port, __u16 reg,
__u16 *val)
{
struct usb_device *dev = port->serial->dev;
int ret = 0;
u8 *buf;
buf = kmalloc(VENDOR_READ_LENGTH, GFP_KERNEL);
if (!buf)
return -ENOMEM;
ret = usb_control_msg(dev, usb_rcvctrlpipe(dev, 0), MCS_RDREQ,
MCS_RD_RTYPE, 0, reg, buf, VENDOR_READ_LENGTH,
MOS_WDR_TIMEOUT);
if (ret < VENDOR_READ_LENGTH) {
if (ret >= 0)
ret = -EIO;
goto out;
}
*val = buf[0];
dev_dbg(&port->dev, "%s offset is %x, return val %x\n", __func__, reg, *val);
out:
kfree(buf);
return ret;
}
/*
 * mos7840_set_uart_reg
 *	Write a UART register using a synchronous control message; the
 *	target UART is selected via the high byte of wValue.
 */
static int mos7840_set_uart_reg(struct usb_serial_port *port, __u16 reg,
__u16 val)
{
struct usb_device *dev = port->serial->dev;
val = val & 0x00ff;
	/* For the UART control registers, the application number needs
	   to be OR'ed */
if (port->serial->num_ports == 2 && port->port_number != 0)
val |= ((__u16)port->port_number + 2) << 8;
else
val |= ((__u16)port->port_number + 1) << 8;
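	/*
	 * Example, derived from the code above: on a four-port device,
	 * writing 0x03 to the LCR of the first port (port_number 0) yields
	 * wValue = 0x0103 -- register value in the low byte, UART selector
	 * in the high byte.
	 */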
dev_dbg(&port->dev, "%s application number is %x\n", __func__, val);
return usb_control_msg(dev, usb_sndctrlpipe(dev, 0), MCS_WRREQ,
MCS_WR_RTYPE, val, reg, NULL, 0,
MOS_WDR_TIMEOUT);
}
/*
 * mos7840_get_uart_reg
 *	Read a UART register using a synchronous control message; the
 *	target UART is selected via the high byte of wValue.
 */
static int mos7840_get_uart_reg(struct usb_serial_port *port, __u16 reg,
__u16 *val)
{
struct usb_device *dev = port->serial->dev;
int ret = 0;
__u16 Wval;
u8 *buf;
buf = kmalloc(VENDOR_READ_LENGTH, GFP_KERNEL);
if (!buf)
return -ENOMEM;
/* Wval is same as application number */
if (port->serial->num_ports == 2 && port->port_number != 0)
Wval = ((__u16)port->port_number + 2) << 8;
else
Wval = ((__u16)port->port_number + 1) << 8;
dev_dbg(&port->dev, "%s application number is %x\n", __func__, Wval);
ret = usb_control_msg(dev, usb_rcvctrlpipe(dev, 0), MCS_RDREQ,
MCS_RD_RTYPE, Wval, reg, buf, VENDOR_READ_LENGTH,
MOS_WDR_TIMEOUT);
if (ret < VENDOR_READ_LENGTH) {
if (ret >= 0)
ret = -EIO;
goto out;
}
*val = buf[0];
out:
kfree(buf);
return ret;
}
static void mos7840_dump_serial_port(struct usb_serial_port *port,
struct moschip_port *mos7840_port)
{
dev_dbg(&port->dev, "SpRegOffset is %2x\n", mos7840_port->SpRegOffset);
dev_dbg(&port->dev, "ControlRegOffset is %2x\n", mos7840_port->ControlRegOffset);
dev_dbg(&port->dev, "DCRRegOffset is %2x\n", mos7840_port->DcrRegOffset);
}
/************************************************************************/
/*            U S B   C A L L B A C K   F U N C T I O N S               */
/************************************************************************/
static void mos7840_set_led_callback(struct urb *urb)
{
switch (urb->status) {
case 0:
/* Success */
break;
case -ECONNRESET:
case -ENOENT:
case -ESHUTDOWN:
/* This urb is terminated, clean up */
dev_dbg(&urb->dev->dev, "%s - urb shutting down: %d\n",
__func__, urb->status);
break;
default:
dev_dbg(&urb->dev->dev, "%s - nonzero urb status: %d\n",
__func__, urb->status);
}
}
static void mos7840_set_led_async(struct moschip_port *mcs, __u16 wval,
__u16 reg)
{
struct usb_device *dev = mcs->port->serial->dev;
struct usb_ctrlrequest *dr = mcs->led_dr;
dr->bRequestType = MCS_WR_RTYPE;
dr->bRequest = MCS_WRREQ;
dr->wValue = cpu_to_le16(wval);
dr->wIndex = cpu_to_le16(reg);
dr->wLength = cpu_to_le16(0);
usb_fill_control_urb(mcs->led_urb, dev, usb_sndctrlpipe(dev, 0),
(unsigned char *)dr, NULL, 0, mos7840_set_led_callback, NULL);
usb_submit_urb(mcs->led_urb, GFP_ATOMIC);
}
static void mos7840_set_led_sync(struct usb_serial_port *port, __u16 reg,
__u16 val)
{
struct usb_device *dev = port->serial->dev;
usb_control_msg(dev, usb_sndctrlpipe(dev, 0), MCS_WRREQ, MCS_WR_RTYPE,
val, reg, NULL, 0, MOS_WDR_TIMEOUT);
}
static void mos7840_led_off(struct timer_list *t)
{
struct moschip_port *mcs = from_timer(mcs, t, led_timer1);
/* Turn off LED */
mos7840_set_led_async(mcs, 0x0300, MODEM_CONTROL_REGISTER);
mod_timer(&mcs->led_timer2,
jiffies + msecs_to_jiffies(LED_OFF_MS));
}
static void mos7840_led_flag_off(struct timer_list *t)
{
struct moschip_port *mcs = from_timer(mcs, t, led_timer2);
clear_bit_unlock(MOS7840_FLAG_LED_BUSY, &mcs->flags);
}
static void mos7840_led_activity(struct usb_serial_port *port)
{
struct moschip_port *mos7840_port = usb_get_serial_port_data(port);
if (test_and_set_bit_lock(MOS7840_FLAG_LED_BUSY, &mos7840_port->flags))
return;
mos7840_set_led_async(mos7840_port, 0x0301, MODEM_CONTROL_REGISTER);
mod_timer(&mos7840_port->led_timer1,
jiffies + msecs_to_jiffies(LED_ON_MS));
}
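/*
 * Summary of the LED helpers above: bulk traffic calls mos7840_led_activity(),
 * which turns the LED on and arms led_timer1 for LED_ON_MS; led_timer1
 * (mos7840_led_off) turns the LED off and arms led_timer2 for LED_OFF_MS;
 * led_timer2 (mos7840_led_flag_off) clears MOS7840_FLAG_LED_BUSY so the next
 * burst of traffic can blink the LED again.
 */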
/*****************************************************************************
* mos7840_bulk_in_callback
* this is the callback function for when we have received data on the
* bulk in endpoint.
*****************************************************************************/
static void mos7840_bulk_in_callback(struct urb *urb)
{
struct moschip_port *mos7840_port = urb->context;
struct usb_serial_port *port = mos7840_port->port;
int retval;
unsigned char *data;
int status = urb->status;
if (status) {
dev_dbg(&urb->dev->dev, "nonzero read bulk status received: %d\n", status);
mos7840_port->read_urb_busy = false;
return;
}
data = urb->transfer_buffer;
usb_serial_debug_data(&port->dev, __func__, urb->actual_length, data);
if (urb->actual_length) {
struct tty_port *tport = &mos7840_port->port->port;
tty_insert_flip_string(tport, data, urb->actual_length);
tty_flip_buffer_push(tport);
port->icount.rx += urb->actual_length;
dev_dbg(&port->dev, "icount.rx is %d:\n", port->icount.rx);
}
if (mos7840_port->has_led)
mos7840_led_activity(port);
mos7840_port->read_urb_busy = true;
retval = usb_submit_urb(mos7840_port->read_urb, GFP_ATOMIC);
if (retval) {
dev_dbg(&port->dev, "usb_submit_urb(read bulk) failed, retval = %d\n", retval);
mos7840_port->read_urb_busy = false;
}
}
/*****************************************************************************
* mos7840_bulk_out_data_callback
* this is the callback function for when we have finished sending
* serial data on the bulk out endpoint.
*****************************************************************************/
static void mos7840_bulk_out_data_callback(struct urb *urb)
{
struct moschip_port *mos7840_port = urb->context;
struct usb_serial_port *port = mos7840_port->port;
int status = urb->status;
unsigned long flags;
int i;
spin_lock_irqsave(&mos7840_port->pool_lock, flags);
for (i = 0; i < NUM_URBS; i++) {
if (urb == mos7840_port->write_urb_pool[i]) {
mos7840_port->busy[i] = 0;
break;
}
}
spin_unlock_irqrestore(&mos7840_port->pool_lock, flags);
if (status) {
dev_dbg(&port->dev, "nonzero write bulk status received:%d\n", status);
return;
}
tty_port_tty_wakeup(&port->port);
}
/************************************************************************/
/* D R I V E R T T Y I N T E R F A C E F U N C T I O N S */
/************************************************************************/
/*****************************************************************************
* mos7840_open
* this function is called by the tty driver when a port is opened
* If successful, we return 0
* Otherwise we return a negative error number.
*****************************************************************************/
static int mos7840_open(struct tty_struct *tty, struct usb_serial_port *port)
{
struct moschip_port *mos7840_port = usb_get_serial_port_data(port);
struct usb_serial *serial = port->serial;
int response;
int j;
struct urb *urb;
__u16 Data;
int status;
usb_clear_halt(serial->dev, port->write_urb->pipe);
usb_clear_halt(serial->dev, port->read_urb->pipe);
/* Initialising the write urb pool */
for (j = 0; j < NUM_URBS; ++j) {
urb = usb_alloc_urb(0, GFP_KERNEL);
mos7840_port->write_urb_pool[j] = urb;
if (!urb)
continue;
urb->transfer_buffer = kmalloc(URB_TRANSFER_BUFFER_SIZE,
GFP_KERNEL);
if (!urb->transfer_buffer) {
usb_free_urb(urb);
mos7840_port->write_urb_pool[j] = NULL;
continue;
}
}
/*****************************************************************************
* Initialize MCS7840 -- Write Init values to corresponding Registers
*
* Register Index
* 1 : IER
* 2 : FCR
* 3 : LCR
* 4 : MCR
*
* 0x08 : SP1/2 Control Reg
*****************************************************************************/
/* NEED to check the following Block */
Data = 0x0;
status = mos7840_get_reg_sync(port, mos7840_port->SpRegOffset, &Data);
if (status < 0) {
dev_dbg(&port->dev, "Reading Spreg failed\n");
goto err;
}
Data |= 0x80;
status = mos7840_set_reg_sync(port, mos7840_port->SpRegOffset, Data);
if (status < 0) {
dev_dbg(&port->dev, "writing Spreg failed\n");
goto err;
}
Data &= ~0x80;
status = mos7840_set_reg_sync(port, mos7840_port->SpRegOffset, Data);
if (status < 0) {
dev_dbg(&port->dev, "writing Spreg failed\n");
goto err;
}
/* End of block to be checked */
Data = 0x0;
status = mos7840_get_reg_sync(port, mos7840_port->ControlRegOffset,
&Data);
if (status < 0) {
dev_dbg(&port->dev, "Reading Controlreg failed\n");
goto err;
}
Data |= 0x08; /* Driver done bit */
Data |= 0x20; /* rx_disable */
status = mos7840_set_reg_sync(port,
mos7840_port->ControlRegOffset, Data);
if (status < 0) {
dev_dbg(&port->dev, "writing Controlreg failed\n");
goto err;
}
/* do register settings here */
/* Set all regs to the device default values. */
/***********************************
* First Disable all interrupts.
***********************************/
Data = 0x00;
status = mos7840_set_uart_reg(port, INTERRUPT_ENABLE_REGISTER, Data);
if (status < 0) {
dev_dbg(&port->dev, "disabling interrupts failed\n");
goto err;
}
/* Set FIFO_CONTROL_REGISTER to the default value */
Data = 0x00;
status = mos7840_set_uart_reg(port, FIFO_CONTROL_REGISTER, Data);
if (status < 0) {
dev_dbg(&port->dev, "Writing FIFO_CONTROL_REGISTER failed\n");
goto err;
}
Data = 0xcf;
status = mos7840_set_uart_reg(port, FIFO_CONTROL_REGISTER, Data);
if (status < 0) {
dev_dbg(&port->dev, "Writing FIFO_CONTROL_REGISTER failed\n");
goto err;
}
Data = 0x03;
status = mos7840_set_uart_reg(port, LINE_CONTROL_REGISTER, Data);
mos7840_port->shadowLCR = Data;
Data = 0x0b;
status = mos7840_set_uart_reg(port, MODEM_CONTROL_REGISTER, Data);
mos7840_port->shadowMCR = Data;
Data = 0x00;
status = mos7840_get_uart_reg(port, LINE_CONTROL_REGISTER, &Data);
mos7840_port->shadowLCR = Data;
Data |= SERIAL_LCR_DLAB; /* data latch enable in LCR 0x80 */
status = mos7840_set_uart_reg(port, LINE_CONTROL_REGISTER, Data);
Data = 0x0c;
status = mos7840_set_uart_reg(port, DIVISOR_LATCH_LSB, Data);
Data = 0x0;
status = mos7840_set_uart_reg(port, DIVISOR_LATCH_MSB, Data);
Data = 0x00;
status = mos7840_get_uart_reg(port, LINE_CONTROL_REGISTER, &Data);
Data = Data & ~SERIAL_LCR_DLAB;
status = mos7840_set_uart_reg(port, LINE_CONTROL_REGISTER, Data);
mos7840_port->shadowLCR = Data;
/* clearing Bulkin and Bulkout Fifo */
Data = 0x0;
status = mos7840_get_reg_sync(port, mos7840_port->SpRegOffset, &Data);
Data = Data | 0x0c;
status = mos7840_set_reg_sync(port, mos7840_port->SpRegOffset, Data);
Data = Data & ~0x0c;
status = mos7840_set_reg_sync(port, mos7840_port->SpRegOffset, Data);
/* Finally enable all interrupts */
Data = 0x0c;
status = mos7840_set_uart_reg(port, INTERRUPT_ENABLE_REGISTER, Data);
/* clearing rx_disable */
Data = 0x0;
status = mos7840_get_reg_sync(port, mos7840_port->ControlRegOffset,
&Data);
Data = Data & ~0x20;
status = mos7840_set_reg_sync(port, mos7840_port->ControlRegOffset,
Data);
/* rx_negate */
Data = 0x0;
status = mos7840_get_reg_sync(port, mos7840_port->ControlRegOffset,
&Data);
Data = Data | 0x10;
status = mos7840_set_reg_sync(port, mos7840_port->ControlRegOffset,
Data);
dev_dbg(&port->dev, "port number is %d\n", port->port_number);
dev_dbg(&port->dev, "minor number is %d\n", port->minor);
dev_dbg(&port->dev, "Bulkin endpoint is %d\n", port->bulk_in_endpointAddress);
dev_dbg(&port->dev, "BulkOut endpoint is %d\n", port->bulk_out_endpointAddress);
dev_dbg(&port->dev, "Interrupt endpoint is %d\n", port->interrupt_in_endpointAddress);
dev_dbg(&port->dev, "port's number in the device is %d\n", mos7840_port->port_num);
mos7840_port->read_urb = port->read_urb;
/* set up our bulk in urb */
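	/*
	 * On two-port devices the second port uses the endpoint pair two
	 * addresses up, hence the "+ 2" below; this matches the "port 2 uses
	 * registers of port 3" handling in mos7840_port_probe().
	 */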
if ((serial->num_ports == 2) && (((__u16)port->port_number % 2) != 0)) {
usb_fill_bulk_urb(mos7840_port->read_urb,
serial->dev,
usb_rcvbulkpipe(serial->dev,
(port->bulk_in_endpointAddress) + 2),
port->bulk_in_buffer,
mos7840_port->read_urb->transfer_buffer_length,
mos7840_bulk_in_callback, mos7840_port);
} else {
usb_fill_bulk_urb(mos7840_port->read_urb,
serial->dev,
usb_rcvbulkpipe(serial->dev,
port->bulk_in_endpointAddress),
port->bulk_in_buffer,
mos7840_port->read_urb->transfer_buffer_length,
mos7840_bulk_in_callback, mos7840_port);
}
dev_dbg(&port->dev, "%s: bulkin endpoint is %d\n", __func__, port->bulk_in_endpointAddress);
mos7840_port->read_urb_busy = true;
response = usb_submit_urb(mos7840_port->read_urb, GFP_KERNEL);
if (response) {
dev_err(&port->dev, "%s - Error %d submitting control urb\n",
__func__, response);
mos7840_port->read_urb_busy = false;
}
/* initialize our port settings */
/* Must set to enable ints! */
mos7840_port->shadowMCR = MCR_MASTER_IE;
return 0;
err:
for (j = 0; j < NUM_URBS; ++j) {
urb = mos7840_port->write_urb_pool[j];
if (!urb)
continue;
kfree(urb->transfer_buffer);
usb_free_urb(urb);
}
return status;
}
/*****************************************************************************
* mos7840_chars_in_buffer
* this function is called by the tty driver when it wants to know how many
* bytes of data we currently have outstanding in the port (data that has
* been written, but hasn't made it out the port yet)
*****************************************************************************/
static unsigned int mos7840_chars_in_buffer(struct tty_struct *tty)
{
struct usb_serial_port *port = tty->driver_data;
struct moschip_port *mos7840_port = usb_get_serial_port_data(port);
int i;
unsigned int chars = 0;
unsigned long flags;
spin_lock_irqsave(&mos7840_port->pool_lock, flags);
for (i = 0; i < NUM_URBS; ++i) {
if (mos7840_port->busy[i]) {
struct urb *urb = mos7840_port->write_urb_pool[i];
chars += urb->transfer_buffer_length;
}
}
spin_unlock_irqrestore(&mos7840_port->pool_lock, flags);
dev_dbg(&port->dev, "%s - returns %u\n", __func__, chars);
return chars;
}
/*****************************************************************************
* mos7840_close
* this function is called by the tty driver when a port is closed
*****************************************************************************/
static void mos7840_close(struct usb_serial_port *port)
{
struct moschip_port *mos7840_port = usb_get_serial_port_data(port);
int j;
__u16 Data;
for (j = 0; j < NUM_URBS; ++j)
usb_kill_urb(mos7840_port->write_urb_pool[j]);
/* Freeing Write URBs */
for (j = 0; j < NUM_URBS; ++j) {
if (mos7840_port->write_urb_pool[j]) {
kfree(mos7840_port->write_urb_pool[j]->transfer_buffer);
usb_free_urb(mos7840_port->write_urb_pool[j]);
}
}
usb_kill_urb(mos7840_port->read_urb);
mos7840_port->read_urb_busy = false;
Data = 0x0;
mos7840_set_uart_reg(port, MODEM_CONTROL_REGISTER, Data);
Data = 0x00;
mos7840_set_uart_reg(port, INTERRUPT_ENABLE_REGISTER, Data);
}
/*****************************************************************************
* mos7840_break
* this function sends a break to the port
*****************************************************************************/
static int mos7840_break(struct tty_struct *tty, int break_state)
{
struct usb_serial_port *port = tty->driver_data;
struct moschip_port *mos7840_port = usb_get_serial_port_data(port);
unsigned char data;
if (break_state == -1)
data = mos7840_port->shadowLCR | LCR_SET_BREAK;
else
data = mos7840_port->shadowLCR & ~LCR_SET_BREAK;
/* FIXME: no locking on shadowLCR anywhere in driver */
mos7840_port->shadowLCR = data;
dev_dbg(&port->dev, "%s mos7840_port->shadowLCR is %x\n", __func__, mos7840_port->shadowLCR);
return mos7840_set_uart_reg(port, LINE_CONTROL_REGISTER,
mos7840_port->shadowLCR);
}
/*****************************************************************************
* mos7840_write_room
* this function is called by the tty driver when it wants to know how many
* bytes of data we can accept for a specific port.
*****************************************************************************/
static unsigned int mos7840_write_room(struct tty_struct *tty)
{
struct usb_serial_port *port = tty->driver_data;
struct moschip_port *mos7840_port = usb_get_serial_port_data(port);
int i;
unsigned int room = 0;
unsigned long flags;
spin_lock_irqsave(&mos7840_port->pool_lock, flags);
for (i = 0; i < NUM_URBS; ++i) {
if (!mos7840_port->busy[i])
room += URB_TRANSFER_BUFFER_SIZE;
}
spin_unlock_irqrestore(&mos7840_port->pool_lock, flags);
room = (room == 0) ? 0 : room - URB_TRANSFER_BUFFER_SIZE + 1;
dev_dbg(&mos7840_port->port->dev, "%s - returns %u\n", __func__, room);
return room;
}
/*****************************************************************************
* mos7840_write
* this function is called by the tty driver when data should be written to
* the port.
* If successful, we return the number of bytes written, otherwise we
* return a negative error number.
*****************************************************************************/
static int mos7840_write(struct tty_struct *tty, struct usb_serial_port *port,
const unsigned char *data, int count)
{
struct moschip_port *mos7840_port = usb_get_serial_port_data(port);
struct usb_serial *serial = port->serial;
int status;
int i;
int bytes_sent = 0;
int transfer_size;
unsigned long flags;
struct urb *urb;
/* __u16 Data; */
const unsigned char *current_position = data;
/* try to find a free urb in the list */
urb = NULL;
spin_lock_irqsave(&mos7840_port->pool_lock, flags);
for (i = 0; i < NUM_URBS; ++i) {
if (!mos7840_port->busy[i]) {
mos7840_port->busy[i] = 1;
urb = mos7840_port->write_urb_pool[i];
dev_dbg(&port->dev, "URB:%d\n", i);
break;
}
}
spin_unlock_irqrestore(&mos7840_port->pool_lock, flags);
if (urb == NULL) {
dev_dbg(&port->dev, "%s - no more free urbs\n", __func__);
goto exit;
}
if (urb->transfer_buffer == NULL) {
urb->transfer_buffer = kmalloc(URB_TRANSFER_BUFFER_SIZE,
GFP_ATOMIC);
if (!urb->transfer_buffer) {
bytes_sent = -ENOMEM;
goto exit;
}
}
transfer_size = min(count, URB_TRANSFER_BUFFER_SIZE);
memcpy(urb->transfer_buffer, current_position, transfer_size);
/* fill urb with data and submit */
if ((serial->num_ports == 2) && (((__u16)port->port_number % 2) != 0)) {
usb_fill_bulk_urb(urb,
serial->dev,
usb_sndbulkpipe(serial->dev,
(port->bulk_out_endpointAddress) + 2),
urb->transfer_buffer,
transfer_size,
mos7840_bulk_out_data_callback, mos7840_port);
} else {
usb_fill_bulk_urb(urb,
serial->dev,
usb_sndbulkpipe(serial->dev,
port->bulk_out_endpointAddress),
urb->transfer_buffer,
transfer_size,
mos7840_bulk_out_data_callback, mos7840_port);
}
dev_dbg(&port->dev, "bulkout endpoint is %d\n", port->bulk_out_endpointAddress);
if (mos7840_port->has_led)
mos7840_led_activity(port);
/* send it down the pipe */
status = usb_submit_urb(urb, GFP_ATOMIC);
if (status) {
mos7840_port->busy[i] = 0;
dev_err_console(port, "%s - usb_submit_urb(write bulk) failed "
"with status = %d\n", __func__, status);
bytes_sent = status;
goto exit;
}
bytes_sent = transfer_size;
port->icount.tx += transfer_size;
dev_dbg(&port->dev, "icount.tx is %d:\n", port->icount.tx);
exit:
return bytes_sent;
}
/*****************************************************************************
* mos7840_throttle
* this function is called by the tty driver when it wants to stop the data
* being read from the port.
*****************************************************************************/
static void mos7840_throttle(struct tty_struct *tty)
{
struct usb_serial_port *port = tty->driver_data;
struct moschip_port *mos7840_port = usb_get_serial_port_data(port);
int status;
/* if we are implementing XON/XOFF, send the stop character */
if (I_IXOFF(tty)) {
unsigned char stop_char = STOP_CHAR(tty);
status = mos7840_write(tty, port, &stop_char, 1);
if (status <= 0)
return;
}
/* if we are implementing RTS/CTS, toggle that line */
if (C_CRTSCTS(tty)) {
mos7840_port->shadowMCR &= ~MCR_RTS;
status = mos7840_set_uart_reg(port, MODEM_CONTROL_REGISTER,
mos7840_port->shadowMCR);
if (status < 0)
return;
}
}
/*****************************************************************************
* mos7840_unthrottle
* this function is called by the tty driver when it wants to resume
* the data being read from the port (called after mos7840_throttle is
* called)
*****************************************************************************/
static void mos7840_unthrottle(struct tty_struct *tty)
{
struct usb_serial_port *port = tty->driver_data;
struct moschip_port *mos7840_port = usb_get_serial_port_data(port);
int status;
/* if we are implementing XON/XOFF, send the start character */
if (I_IXOFF(tty)) {
unsigned char start_char = START_CHAR(tty);
status = mos7840_write(tty, port, &start_char, 1);
if (status <= 0)
return;
}
/* if we are implementing RTS/CTS, toggle that line */
if (C_CRTSCTS(tty)) {
mos7840_port->shadowMCR |= MCR_RTS;
status = mos7840_set_uart_reg(port, MODEM_CONTROL_REGISTER,
mos7840_port->shadowMCR);
if (status < 0)
return;
}
}
static int mos7840_tiocmget(struct tty_struct *tty)
{
struct usb_serial_port *port = tty->driver_data;
unsigned int result;
__u16 msr;
__u16 mcr;
int status;
status = mos7840_get_uart_reg(port, MODEM_STATUS_REGISTER, &msr);
if (status < 0)
return -EIO;
status = mos7840_get_uart_reg(port, MODEM_CONTROL_REGISTER, &mcr);
if (status < 0)
return -EIO;
result = ((mcr & MCR_DTR) ? TIOCM_DTR : 0)
| ((mcr & MCR_RTS) ? TIOCM_RTS : 0)
| ((mcr & MCR_LOOPBACK) ? TIOCM_LOOP : 0)
| ((msr & MOS7840_MSR_CTS) ? TIOCM_CTS : 0)
| ((msr & MOS7840_MSR_CD) ? TIOCM_CAR : 0)
| ((msr & MOS7840_MSR_RI) ? TIOCM_RI : 0)
| ((msr & MOS7840_MSR_DSR) ? TIOCM_DSR : 0);
dev_dbg(&port->dev, "%s - 0x%04X\n", __func__, result);
return result;
}
static int mos7840_tiocmset(struct tty_struct *tty,
unsigned int set, unsigned int clear)
{
struct usb_serial_port *port = tty->driver_data;
struct moschip_port *mos7840_port = usb_get_serial_port_data(port);
unsigned int mcr;
int status;
/* FIXME: What locks the port registers ? */
mcr = mos7840_port->shadowMCR;
if (clear & TIOCM_RTS)
mcr &= ~MCR_RTS;
if (clear & TIOCM_DTR)
mcr &= ~MCR_DTR;
if (clear & TIOCM_LOOP)
mcr &= ~MCR_LOOPBACK;
if (set & TIOCM_RTS)
mcr |= MCR_RTS;
if (set & TIOCM_DTR)
mcr |= MCR_DTR;
if (set & TIOCM_LOOP)
mcr |= MCR_LOOPBACK;
mos7840_port->shadowMCR = mcr;
status = mos7840_set_uart_reg(port, MODEM_CONTROL_REGISTER, mcr);
if (status < 0) {
dev_dbg(&port->dev, "setting MODEM_CONTROL_REGISTER Failed\n");
return status;
}
return 0;
}
/*****************************************************************************
* mos7840_calc_baud_rate_divisor
* this function calculates the proper baud rate divisor for the specified
* baud rate.
*****************************************************************************/
static int mos7840_calc_baud_rate_divisor(struct usb_serial_port *port,
int baudRate, int *divisor,
__u16 *clk_sel_val)
{
dev_dbg(&port->dev, "%s - %d\n", __func__, baudRate);
if (baudRate <= 115200) {
*divisor = 115200 / baudRate;
*clk_sel_val = 0x0;
	} else if ((baudRate > 115200) && (baudRate <= 230400)) {
*divisor = 230400 / baudRate;
*clk_sel_val = 0x10;
} else if ((baudRate > 230400) && (baudRate <= 403200)) {
*divisor = 403200 / baudRate;
*clk_sel_val = 0x20;
} else if ((baudRate > 403200) && (baudRate <= 460800)) {
*divisor = 460800 / baudRate;
*clk_sel_val = 0x30;
} else if ((baudRate > 460800) && (baudRate <= 806400)) {
*divisor = 806400 / baudRate;
*clk_sel_val = 0x40;
} else if ((baudRate > 806400) && (baudRate <= 921600)) {
*divisor = 921600 / baudRate;
*clk_sel_val = 0x50;
} else if ((baudRate > 921600) && (baudRate <= 1572864)) {
*divisor = 1572864 / baudRate;
*clk_sel_val = 0x60;
} else if ((baudRate > 1572864) && (baudRate <= 3145728)) {
*divisor = 3145728 / baudRate;
*clk_sel_val = 0x70;
}
return 0;
}
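/*
 * Worked example for the ranges above: 57600 baud falls in the first range,
 * giving *divisor = 115200 / 57600 = 2 and *clk_sel_val = 0x0; 921600 baud
 * falls in the 806400..921600 range, giving *divisor = 921600 / 921600 = 1
 * and *clk_sel_val = 0x50.
 */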
/*****************************************************************************
* mos7840_send_cmd_write_baud_rate
* this function sends the proper command to change the baud rate of the
* specified port.
*****************************************************************************/
static int mos7840_send_cmd_write_baud_rate(struct moschip_port *mos7840_port,
int baudRate)
{
struct usb_serial_port *port = mos7840_port->port;
int divisor = 0;
int status;
__u16 Data;
__u16 clk_sel_val;
dev_dbg(&port->dev, "%s - baud = %d\n", __func__, baudRate);
/* reset clk_uart_sel in spregOffset */
if (baudRate > 115200) {
#ifdef HW_flow_control
		/* NOTE: need to see the other register to modify */
/* setting h/w flow control bit to 1 */
Data = 0x2b;
mos7840_port->shadowMCR = Data;
status = mos7840_set_uart_reg(port, MODEM_CONTROL_REGISTER,
Data);
if (status < 0) {
dev_dbg(&port->dev, "Writing spreg failed in set_serial_baud\n");
return -1;
}
#endif
} else {
#ifdef HW_flow_control
/* setting h/w flow control bit to 0 */
Data = 0xb;
mos7840_port->shadowMCR = Data;
status = mos7840_set_uart_reg(port, MODEM_CONTROL_REGISTER,
Data);
if (status < 0) {
dev_dbg(&port->dev, "Writing spreg failed in set_serial_baud\n");
return -1;
}
#endif
}
	if (1) {		/* baudRate <= 115200 */
clk_sel_val = 0x0;
Data = 0x0;
status = mos7840_calc_baud_rate_divisor(port, baudRate, &divisor,
&clk_sel_val);
status = mos7840_get_reg_sync(port, mos7840_port->SpRegOffset,
&Data);
if (status < 0) {
dev_dbg(&port->dev, "reading spreg failed in set_serial_baud\n");
return -1;
}
Data = (Data & 0x8f) | clk_sel_val;
status = mos7840_set_reg_sync(port, mos7840_port->SpRegOffset,
Data);
if (status < 0) {
dev_dbg(&port->dev, "Writing spreg failed in set_serial_baud\n");
return -1;
}
/* Calculate the Divisor */
if (status) {
dev_err(&port->dev, "%s - bad baud rate\n", __func__);
return status;
}
/* Enable access to divisor latch */
Data = mos7840_port->shadowLCR | SERIAL_LCR_DLAB;
mos7840_port->shadowLCR = Data;
mos7840_set_uart_reg(port, LINE_CONTROL_REGISTER, Data);
/* Write the divisor */
Data = (unsigned char)(divisor & 0xff);
dev_dbg(&port->dev, "set_serial_baud Value to write DLL is %x\n", Data);
mos7840_set_uart_reg(port, DIVISOR_LATCH_LSB, Data);
Data = (unsigned char)((divisor & 0xff00) >> 8);
dev_dbg(&port->dev, "set_serial_baud Value to write DLM is %x\n", Data);
mos7840_set_uart_reg(port, DIVISOR_LATCH_MSB, Data);
/* Disable access to divisor latch */
Data = mos7840_port->shadowLCR & ~SERIAL_LCR_DLAB;
mos7840_port->shadowLCR = Data;
mos7840_set_uart_reg(port, LINE_CONTROL_REGISTER, Data);
}
return status;
}
/*****************************************************************************
* mos7840_change_port_settings
* This routine is called to set the UART on the device to match
* the specified new settings.
*****************************************************************************/
static void mos7840_change_port_settings(struct tty_struct *tty,
struct moschip_port *mos7840_port,
const struct ktermios *old_termios)
{
struct usb_serial_port *port = mos7840_port->port;
int baud;
unsigned cflag;
__u8 lData;
__u8 lParity;
__u8 lStop;
int status;
__u16 Data;
lData = LCR_BITS_8;
lStop = LCR_STOP_1;
lParity = LCR_PAR_NONE;
cflag = tty->termios.c_cflag;
/* Change the number of bits */
switch (cflag & CSIZE) {
case CS5:
lData = LCR_BITS_5;
break;
case CS6:
lData = LCR_BITS_6;
break;
case CS7:
lData = LCR_BITS_7;
break;
default:
case CS8:
lData = LCR_BITS_8;
break;
}
/* Change the Parity bit */
if (cflag & PARENB) {
if (cflag & PARODD) {
lParity = LCR_PAR_ODD;
dev_dbg(&port->dev, "%s - parity = odd\n", __func__);
} else {
lParity = LCR_PAR_EVEN;
dev_dbg(&port->dev, "%s - parity = even\n", __func__);
}
} else {
dev_dbg(&port->dev, "%s - parity = none\n", __func__);
}
if (cflag & CMSPAR)
lParity = lParity | 0x20;
/* Change the Stop bit */
if (cflag & CSTOPB) {
lStop = LCR_STOP_2;
dev_dbg(&port->dev, "%s - stop bits = 2\n", __func__);
} else {
lStop = LCR_STOP_1;
dev_dbg(&port->dev, "%s - stop bits = 1\n", __func__);
}
/* Update the LCR with the correct value */
mos7840_port->shadowLCR &=
~(LCR_BITS_MASK | LCR_STOP_MASK | LCR_PAR_MASK);
mos7840_port->shadowLCR |= (lData | lParity | lStop);
dev_dbg(&port->dev, "%s - mos7840_port->shadowLCR is %x\n", __func__,
mos7840_port->shadowLCR);
/* Disable Interrupts */
Data = 0x00;
mos7840_set_uart_reg(port, INTERRUPT_ENABLE_REGISTER, Data);
Data = 0x00;
mos7840_set_uart_reg(port, FIFO_CONTROL_REGISTER, Data);
Data = 0xcf;
mos7840_set_uart_reg(port, FIFO_CONTROL_REGISTER, Data);
/* Send the updated LCR value to the mos7840 */
Data = mos7840_port->shadowLCR;
mos7840_set_uart_reg(port, LINE_CONTROL_REGISTER, Data);
Data = 0x00b;
mos7840_port->shadowMCR = Data;
mos7840_set_uart_reg(port, MODEM_CONTROL_REGISTER, Data);
Data = 0x00b;
mos7840_set_uart_reg(port, MODEM_CONTROL_REGISTER, Data);
/* set up the MCR register and send it to the mos7840 */
mos7840_port->shadowMCR = MCR_MASTER_IE;
if (cflag & CBAUD)
mos7840_port->shadowMCR |= (MCR_DTR | MCR_RTS);
if (cflag & CRTSCTS)
mos7840_port->shadowMCR |= (MCR_XON_ANY);
else
mos7840_port->shadowMCR &= ~(MCR_XON_ANY);
Data = mos7840_port->shadowMCR;
mos7840_set_uart_reg(port, MODEM_CONTROL_REGISTER, Data);
/* Determine divisor based on baud rate */
baud = tty_get_baud_rate(tty);
if (!baud) {
/* pick a default, any default... */
dev_dbg(&port->dev, "%s", "Picked default baud...\n");
baud = 9600;
}
dev_dbg(&port->dev, "%s - baud rate = %d\n", __func__, baud);
status = mos7840_send_cmd_write_baud_rate(mos7840_port, baud);
/* Enable Interrupts */
Data = 0x0c;
mos7840_set_uart_reg(port, INTERRUPT_ENABLE_REGISTER, Data);
if (!mos7840_port->read_urb_busy) {
mos7840_port->read_urb_busy = true;
status = usb_submit_urb(mos7840_port->read_urb, GFP_KERNEL);
if (status) {
dev_dbg(&port->dev, "usb_submit_urb(read bulk) failed, status = %d\n",
status);
mos7840_port->read_urb_busy = false;
}
}
dev_dbg(&port->dev, "%s - mos7840_port->shadowLCR is End %x\n", __func__,
mos7840_port->shadowLCR);
}
/*****************************************************************************
* mos7840_set_termios
* this function is called by the tty driver when it wants to change
* the termios structure
*****************************************************************************/
static void mos7840_set_termios(struct tty_struct *tty,
struct usb_serial_port *port,
const struct ktermios *old_termios)
{
struct moschip_port *mos7840_port = usb_get_serial_port_data(port);
int status;
/* change the port settings to the new ones specified */
mos7840_change_port_settings(tty, mos7840_port, old_termios);
if (!mos7840_port->read_urb_busy) {
mos7840_port->read_urb_busy = true;
status = usb_submit_urb(mos7840_port->read_urb, GFP_KERNEL);
if (status) {
dev_dbg(&port->dev, "usb_submit_urb(read bulk) failed, status = %d\n",
status);
mos7840_port->read_urb_busy = false;
}
}
}
/*****************************************************************************
* mos7840_get_lsr_info - get line status register info
*
* Purpose: Let user call ioctl() to get info when the UART physically
* is emptied. On bus types like RS485, the transmitter must
* release the bus after transmitting. This must be done when
 *		 the transmit shift register is empty, not when the
* transmit holding register is empty. This functionality
* allows an RS485 driver to be written in user space.
*****************************************************************************/
static int mos7840_get_lsr_info(struct tty_struct *tty,
unsigned int __user *value)
{
int count;
unsigned int result = 0;
count = mos7840_chars_in_buffer(tty);
if (count == 0)
result = TIOCSER_TEMT;
if (copy_to_user(value, &result, sizeof(int)))
return -EFAULT;
return 0;
}
/*****************************************************************************
 * mos7840_ioctl
* this function handles any ioctl calls to the driver
*****************************************************************************/
static int mos7840_ioctl(struct tty_struct *tty,
unsigned int cmd, unsigned long arg)
{
struct usb_serial_port *port = tty->driver_data;
void __user *argp = (void __user *)arg;
switch (cmd) {
/* return number of bytes available */
case TIOCSERGETLSR:
dev_dbg(&port->dev, "%s TIOCSERGETLSR\n", __func__);
return mos7840_get_lsr_info(tty, argp);
default:
break;
}
return -ENOIOCTLCMD;
}
/*
* Check if GPO (pin 42) is connected to GPI (pin 33) as recommended by ASIX
* for MCS7810 by bit-banging a 16-bit word.
*
* Note that GPO is really RTS of the third port so this will toggle RTS of
* port two or three on two- and four-port devices.
*/
static int mos7810_check(struct usb_serial *serial)
{
int i, pass_count = 0;
u8 *buf;
__u16 data = 0, mcr_data = 0;
__u16 test_pattern = 0x55AA;
int res;
buf = kmalloc(VENDOR_READ_LENGTH, GFP_KERNEL);
if (!buf)
return 0; /* failed to identify 7810 */
/* Store MCR setting */
res = usb_control_msg(serial->dev, usb_rcvctrlpipe(serial->dev, 0),
MCS_RDREQ, MCS_RD_RTYPE, 0x0300, MODEM_CONTROL_REGISTER,
buf, VENDOR_READ_LENGTH, MOS_WDR_TIMEOUT);
if (res == VENDOR_READ_LENGTH)
mcr_data = *buf;
for (i = 0; i < 16; i++) {
/* Send the 1-bit test pattern out to MCS7810 test pin */
usb_control_msg(serial->dev, usb_sndctrlpipe(serial->dev, 0),
MCS_WRREQ, MCS_WR_RTYPE,
(0x0300 | (((test_pattern >> i) & 0x0001) << 1)),
MODEM_CONTROL_REGISTER, NULL, 0, MOS_WDR_TIMEOUT);
/* Read the test pattern back */
res = usb_control_msg(serial->dev,
usb_rcvctrlpipe(serial->dev, 0), MCS_RDREQ,
MCS_RD_RTYPE, 0, GPIO_REGISTER, buf,
VENDOR_READ_LENGTH, MOS_WDR_TIMEOUT);
if (res == VENDOR_READ_LENGTH)
data = *buf;
/* If this is a MCS7810 device, both test patterns must match */
if (((test_pattern >> i) ^ (~data >> 1)) & 0x0001)
break;
pass_count++;
}
/* Restore MCR setting */
usb_control_msg(serial->dev, usb_sndctrlpipe(serial->dev, 0), MCS_WRREQ,
MCS_WR_RTYPE, 0x0300 | mcr_data, MODEM_CONTROL_REGISTER, NULL,
0, MOS_WDR_TIMEOUT);
kfree(buf);
if (pass_count == 16)
return 1;
return 0;
}
static int mos7840_probe(struct usb_serial *serial,
const struct usb_device_id *id)
{
unsigned long device_flags = id->driver_info;
u8 *buf;
/* Skip device-type detection if we already have device flags. */
if (device_flags)
goto out;
buf = kzalloc(VENDOR_READ_LENGTH, GFP_KERNEL);
if (!buf)
return -ENOMEM;
usb_control_msg(serial->dev, usb_rcvctrlpipe(serial->dev, 0),
MCS_RDREQ, MCS_RD_RTYPE, 0, GPIO_REGISTER, buf,
VENDOR_READ_LENGTH, MOS_WDR_TIMEOUT);
/* For a MCS7840 device GPIO0 must be set to 1 */
if (buf[0] & 0x01)
device_flags = MCS_PORTS(4);
else if (mos7810_check(serial))
device_flags = MCS_PORTS(1) | MCS_LED;
else
device_flags = MCS_PORTS(2);
kfree(buf);
out:
usb_set_serial_data(serial, (void *)device_flags);
return 0;
}
static int mos7840_calc_num_ports(struct usb_serial *serial,
struct usb_serial_endpoints *epds)
{
unsigned long device_flags = (unsigned long)usb_get_serial_data(serial);
int num_ports = MCS_PORTS(device_flags);
if (num_ports == 0 || num_ports > 4)
return -ENODEV;
if (epds->num_bulk_in < num_ports || epds->num_bulk_out < num_ports) {
dev_err(&serial->interface->dev, "missing endpoints\n");
return -ENODEV;
}
return num_ports;
}
static int mos7840_attach(struct usb_serial *serial)
{
struct device *dev = &serial->interface->dev;
int status;
u16 val;
/* Zero Length flag enable */
val = 0x0f;
status = mos7840_set_reg_sync(serial->port[0], ZLP_REG5, val);
if (status < 0)
dev_dbg(dev, "Writing ZLP_REG5 failed status-0x%x\n", status);
else
dev_dbg(dev, "ZLP_REG5 Writing success status%d\n", status);
return status;
}
static int mos7840_port_probe(struct usb_serial_port *port)
{
struct usb_serial *serial = port->serial;
unsigned long device_flags = (unsigned long)usb_get_serial_data(serial);
struct moschip_port *mos7840_port;
int status;
int pnum;
__u16 Data;
/* we set up the pointers to the endpoints in the mos7840_open *
* function, as the structures aren't created yet. */
pnum = port->port_number;
dev_dbg(&port->dev, "mos7840_startup: configuring port %d\n", pnum);
mos7840_port = kzalloc(sizeof(struct moschip_port), GFP_KERNEL);
if (!mos7840_port)
return -ENOMEM;
	/* Initialize all port interrupt endpoints to the port 0 interrupt
	 * endpoint. Our device has only one interrupt endpoint
	 * common to all ports */
mos7840_port->port = port;
spin_lock_init(&mos7840_port->pool_lock);
/* minor is not initialised until later by
* usb-serial.c:get_free_serial() and cannot therefore be used
* to index device instances */
mos7840_port->port_num = pnum + 1;
dev_dbg(&port->dev, "port->minor = %d\n", port->minor);
dev_dbg(&port->dev, "mos7840_port->port_num = %d\n", mos7840_port->port_num);
if (mos7840_port->port_num == 1) {
mos7840_port->SpRegOffset = 0x0;
mos7840_port->ControlRegOffset = 0x1;
mos7840_port->DcrRegOffset = 0x4;
} else {
u8 phy_num = mos7840_port->port_num;
/* Port 2 in the 2-port case uses registers of port 3 */
if (serial->num_ports == 2)
phy_num = 3;
mos7840_port->SpRegOffset = 0x8 + 2 * (phy_num - 2);
mos7840_port->ControlRegOffset = 0x9 + 2 * (phy_num - 2);
mos7840_port->DcrRegOffset = 0x16 + 3 * (phy_num - 2);
}
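	/*
	 * Resulting register offsets, derived from the formulas above
	 * (four-port case):
	 *
	 *	port_num  SpReg  ControlReg  DcrReg
	 *	   1       0x0      0x1       0x04
	 *	   2       0x8      0x9       0x16
	 *	   3       0xa      0xb       0x19
	 *	   4       0xc      0xd       0x1c
	 *
	 * In the two-port case port 2 uses the port 3 row (phy_num = 3).
	 */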
mos7840_dump_serial_port(port, mos7840_port);
usb_set_serial_port_data(port, mos7840_port);
/* enable rx_disable bit in control register */
status = mos7840_get_reg_sync(port,
mos7840_port->ControlRegOffset, &Data);
if (status < 0) {
dev_dbg(&port->dev, "Reading ControlReg failed status-0x%x\n", status);
goto error;
} else
dev_dbg(&port->dev, "ControlReg Reading success val is %x, status%d\n", Data, status);
Data |= 0x08; /* setting driver done bit */
	Data |= 0x04;	/* sp1_bit so that CTS changes are reflected in the
			   modem status reg */
/* Data |= 0x20; //rx_disable bit */
status = mos7840_set_reg_sync(port,
mos7840_port->ControlRegOffset, Data);
if (status < 0) {
dev_dbg(&port->dev, "Writing ControlReg failed(rx_disable) status-0x%x\n", status);
goto error;
} else
dev_dbg(&port->dev, "ControlReg Writing success(rx_disable) status%d\n", status);
	/* Write default values to the DCRs (i.e. 0x01 to DCR0, 0x05 to DCR1
	   and 0x24 to DCR2) */
Data = 0x01;
status = mos7840_set_reg_sync(port,
(__u16) (mos7840_port->DcrRegOffset + 0), Data);
if (status < 0) {
dev_dbg(&port->dev, "Writing DCR0 failed status-0x%x\n", status);
goto error;
} else
dev_dbg(&port->dev, "DCR0 Writing success status%d\n", status);
Data = 0x05;
status = mos7840_set_reg_sync(port,
(__u16) (mos7840_port->DcrRegOffset + 1), Data);
if (status < 0) {
dev_dbg(&port->dev, "Writing DCR1 failed status-0x%x\n", status);
goto error;
} else
dev_dbg(&port->dev, "DCR1 Writing success status%d\n", status);
Data = 0x24;
status = mos7840_set_reg_sync(port,
(__u16) (mos7840_port->DcrRegOffset + 2), Data);
if (status < 0) {
dev_dbg(&port->dev, "Writing DCR2 failed status-0x%x\n", status);
goto error;
} else
dev_dbg(&port->dev, "DCR2 Writing success status%d\n", status);
	/* write 0x0 to the clock start register and 0x20 to the clock
	   multiplier register */
Data = 0x0;
status = mos7840_set_reg_sync(port, CLK_START_VALUE_REGISTER, Data);
if (status < 0) {
dev_dbg(&port->dev, "Writing CLK_START_VALUE_REGISTER failed status-0x%x\n", status);
goto error;
} else
dev_dbg(&port->dev, "CLK_START_VALUE_REGISTER Writing success status%d\n", status);
Data = 0x20;
status = mos7840_set_reg_sync(port, CLK_MULTI_REGISTER, Data);
if (status < 0) {
dev_dbg(&port->dev, "Writing CLK_MULTI_REGISTER failed status-0x%x\n", status);
goto error;
} else
dev_dbg(&port->dev, "CLK_MULTI_REGISTER Writing success status%d\n", status);
/* write value 0x0 to scratchpad register */
Data = 0x00;
status = mos7840_set_uart_reg(port, SCRATCH_PAD_REGISTER, Data);
if (status < 0) {
dev_dbg(&port->dev, "Writing SCRATCH_PAD_REGISTER failed status-0x%x\n", status);
goto error;
} else
dev_dbg(&port->dev, "SCRATCH_PAD_REGISTER Writing success status%d\n", status);
/* Zero Length flag register */
if ((mos7840_port->port_num != 1) && (serial->num_ports == 2)) {
Data = 0xff;
status = mos7840_set_reg_sync(port,
(__u16) (ZLP_REG1 +
((__u16)mos7840_port->port_num)), Data);
dev_dbg(&port->dev, "ZLIP offset %x\n",
(__u16)(ZLP_REG1 + ((__u16) mos7840_port->port_num)));
if (status < 0) {
dev_dbg(&port->dev, "Writing ZLP_REG%d failed status-0x%x\n", pnum + 2, status);
goto error;
} else
dev_dbg(&port->dev, "ZLP_REG%d Writing success status%d\n", pnum + 2, status);
} else {
Data = 0xff;
status = mos7840_set_reg_sync(port,
(__u16) (ZLP_REG1 +
((__u16)mos7840_port->port_num) - 0x1), Data);
dev_dbg(&port->dev, "ZLIP offset %x\n",
(__u16)(ZLP_REG1 + ((__u16) mos7840_port->port_num) - 0x1));
if (status < 0) {
dev_dbg(&port->dev, "Writing ZLP_REG%d failed status-0x%x\n", pnum + 1, status);
goto error;
} else
dev_dbg(&port->dev, "ZLP_REG%d Writing success status%d\n", pnum + 1, status);
}
mos7840_port->has_led = device_flags & MCS_LED;
/* Initialize LED timers */
if (mos7840_port->has_led) {
mos7840_port->led_urb = usb_alloc_urb(0, GFP_KERNEL);
mos7840_port->led_dr = kmalloc(sizeof(*mos7840_port->led_dr),
GFP_KERNEL);
if (!mos7840_port->led_urb || !mos7840_port->led_dr) {
status = -ENOMEM;
goto error;
}
timer_setup(&mos7840_port->led_timer1, mos7840_led_off, 0);
mos7840_port->led_timer1.expires =
jiffies + msecs_to_jiffies(LED_ON_MS);
timer_setup(&mos7840_port->led_timer2, mos7840_led_flag_off,
0);
mos7840_port->led_timer2.expires =
jiffies + msecs_to_jiffies(LED_OFF_MS);
/* Turn off LED */
mos7840_set_led_sync(port, MODEM_CONTROL_REGISTER, 0x0300);
}
return 0;
error:
kfree(mos7840_port->led_dr);
usb_free_urb(mos7840_port->led_urb);
kfree(mos7840_port);
return status;
}
static void mos7840_port_remove(struct usb_serial_port *port)
{
struct moschip_port *mos7840_port = usb_get_serial_port_data(port);
if (mos7840_port->has_led) {
/* Turn off LED */
mos7840_set_led_sync(port, MODEM_CONTROL_REGISTER, 0x0300);
timer_shutdown_sync(&mos7840_port->led_timer1);
timer_shutdown_sync(&mos7840_port->led_timer2);
usb_kill_urb(mos7840_port->led_urb);
usb_free_urb(mos7840_port->led_urb);
kfree(mos7840_port->led_dr);
}
kfree(mos7840_port);
}
static struct usb_serial_driver moschip7840_4port_device = {
.driver = {
.owner = THIS_MODULE,
.name = "mos7840",
},
.description = DRIVER_DESC,
.id_table = id_table,
.num_interrupt_in = 1,
.open = mos7840_open,
.close = mos7840_close,
.write = mos7840_write,
.write_room = mos7840_write_room,
.chars_in_buffer = mos7840_chars_in_buffer,
.throttle = mos7840_throttle,
.unthrottle = mos7840_unthrottle,
.calc_num_ports = mos7840_calc_num_ports,
.probe = mos7840_probe,
.attach = mos7840_attach,
.ioctl = mos7840_ioctl,
.set_termios = mos7840_set_termios,
.break_ctl = mos7840_break,
.tiocmget = mos7840_tiocmget,
.tiocmset = mos7840_tiocmset,
.get_icount = usb_serial_generic_get_icount,
.port_probe = mos7840_port_probe,
.port_remove = mos7840_port_remove,
.read_bulk_callback = mos7840_bulk_in_callback,
};
static struct usb_serial_driver * const serial_drivers[] = {
&moschip7840_4port_device, NULL
};
module_usb_serial_driver(serial_drivers, id_table);
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL");
| linux-master | drivers/usb/serial/mos7840.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Navman Serial USB driver
*
* Copyright (C) 2006 Greg Kroah-Hartman <[email protected]>
*
* TODO:
 * Add a termios method that uses copy_hw but also clears all echo
 * flags, as the Navman is RX-only and so cannot echo.
*/
#include <linux/gfp.h>
#include <linux/kernel.h>
#include <linux/tty.h>
#include <linux/tty_flip.h>
#include <linux/module.h>
#include <linux/usb.h>
#include <linux/usb/serial.h>
static const struct usb_device_id id_table[] = {
{ USB_DEVICE(0x0a99, 0x0001) }, /* Talon Technology device */
{ USB_DEVICE(0x0df7, 0x0900) }, /* Mobile Action i-gotU */
{ },
};
MODULE_DEVICE_TABLE(usb, id_table);
static void navman_read_int_callback(struct urb *urb)
{
struct usb_serial_port *port = urb->context;
unsigned char *data = urb->transfer_buffer;
int status = urb->status;
int result;
switch (status) {
case 0:
/* success */
break;
case -ECONNRESET:
case -ENOENT:
case -ESHUTDOWN:
/* this urb is terminated, clean up */
dev_dbg(&port->dev, "%s - urb shutting down with status: %d\n",
__func__, status);
return;
default:
dev_dbg(&port->dev, "%s - nonzero urb status received: %d\n",
__func__, status);
goto exit;
}
usb_serial_debug_data(&port->dev, __func__, urb->actual_length, data);
if (urb->actual_length) {
tty_insert_flip_string(&port->port, data, urb->actual_length);
tty_flip_buffer_push(&port->port);
}
exit:
result = usb_submit_urb(urb, GFP_ATOMIC);
if (result)
dev_err(&urb->dev->dev,
"%s - Error %d submitting interrupt urb\n",
__func__, result);
}
static int navman_open(struct tty_struct *tty, struct usb_serial_port *port)
{
int result = 0;
if (port->interrupt_in_urb) {
dev_dbg(&port->dev, "%s - adding interrupt input for treo\n",
__func__);
result = usb_submit_urb(port->interrupt_in_urb, GFP_KERNEL);
if (result)
dev_err(&port->dev,
"%s - failed submitting interrupt urb, error %d\n",
__func__, result);
}
return result;
}
static void navman_close(struct usb_serial_port *port)
{
usb_kill_urb(port->interrupt_in_urb);
}
static int navman_write(struct tty_struct *tty, struct usb_serial_port *port,
const unsigned char *buf, int count)
{
/*
* This device can't write any data, only read from the device
*/
return -EOPNOTSUPP;
}
static struct usb_serial_driver navman_device = {
.driver = {
.owner = THIS_MODULE,
.name = "navman",
},
.id_table = id_table,
.num_ports = 1,
.open = navman_open,
.close = navman_close,
.write = navman_write,
.read_int_callback = navman_read_int_callback,
};
static struct usb_serial_driver * const serial_drivers[] = {
&navman_device, NULL
};
module_usb_serial_driver(serial_drivers, id_table);
MODULE_LICENSE("GPL v2");
| linux-master | drivers/usb/serial/navman.c |
// SPDX-License-Identifier: GPL-2.0
/*
* USB Serial "Simple" driver
*
* Copyright (C) 2001-2006,2008,2013 Greg Kroah-Hartman <[email protected]>
* Copyright (C) 2005 Arthur Huillet ([email protected])
* Copyright (C) 2005 Thomas Hergenhahn <[email protected]>
* Copyright (C) 2009 Outpost Embedded, LLC
* Copyright (C) 2010 Zilogic Systems <[email protected]>
* Copyright (C) 2013 Wei Shuai <[email protected]>
* Copyright (C) 2013 Linux Foundation
*/
#include <linux/kernel.h>
#include <linux/tty.h>
#include <linux/module.h>
#include <linux/usb.h>
#include <linux/usb/serial.h>
#define DEVICE_N(vendor, IDS, nport) \
static const struct usb_device_id vendor##_id_table[] = { \
IDS(), \
{ }, \
}; \
static struct usb_serial_driver vendor##_device = { \
.driver = { \
.owner = THIS_MODULE, \
.name = #vendor, \
}, \
.id_table = vendor##_id_table, \
.num_ports = nport, \
};
#define DEVICE(vendor, IDS) DEVICE_N(vendor, IDS, 1)
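/*
 * Illustrative note (not part of the original driver): DEVICE(carelink,
 * CARELINK_IDS) below, for example, expands to roughly
 *
 *	static const struct usb_device_id carelink_id_table[] = {
 *		{ USB_DEVICE(0x0a21, 0x8001) },
 *		{ },
 *	};
 *	static struct usb_serial_driver carelink_device = {
 *		.driver		= { .owner = THIS_MODULE, .name = "carelink" },
 *		.id_table	= carelink_id_table,
 *		.num_ports	= 1,
 *	};
 */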
/* Medtronic CareLink USB driver */
#define CARELINK_IDS() \
{ USB_DEVICE(0x0a21, 0x8001) } /* MMT-7305WW */
DEVICE(carelink, CARELINK_IDS);
/* Infineon Flashloader driver */
#define FLASHLOADER_IDS() \
{ USB_DEVICE_INTERFACE_CLASS(0x058b, 0x0041, USB_CLASS_CDC_DATA) }, \
{ USB_DEVICE(0x8087, 0x0716) }, \
{ USB_DEVICE(0x8087, 0x0801) }
DEVICE(flashloader, FLASHLOADER_IDS);
/* Funsoft Serial USB driver */
#define FUNSOFT_IDS() \
{ USB_DEVICE(0x1404, 0xcddc) }
DEVICE(funsoft, FUNSOFT_IDS);
/* Google Serial USB SubClass */
#define GOOGLE_IDS() \
{ USB_VENDOR_AND_INTERFACE_INFO(0x18d1, \
USB_CLASS_VENDOR_SPEC, \
0x50, \
0x01) }
DEVICE(google, GOOGLE_IDS);
/* HP4x (48/49) Generic Serial driver */
#define HP4X_IDS() \
{ USB_DEVICE(0x03f0, 0x0121) }
DEVICE(hp4x, HP4X_IDS);
/* KAUFMANN RKS+CAN VCP */
#define KAUFMANN_IDS() \
{ USB_DEVICE(0x16d0, 0x0870) }
DEVICE(kaufmann, KAUFMANN_IDS);
/* Libtransistor USB console */
#define LIBTRANSISTOR_IDS() \
{ USB_DEVICE(0x1209, 0x8b00) }
DEVICE(libtransistor, LIBTRANSISTOR_IDS);
/* Motorola USB Phone driver */
#define MOTO_IDS() \
{ USB_DEVICE(0x05c6, 0x3197) }, /* unknown Motorola phone */ \
{ USB_DEVICE(0x0c44, 0x0022) }, /* unknown Motorola phone */ \
{ USB_DEVICE(0x22b8, 0x2a64) }, /* Motorola KRZR K1m */ \
{ USB_DEVICE(0x22b8, 0x2c84) }, /* Motorola VE240 phone */ \
{ USB_DEVICE(0x22b8, 0x2c64) } /* Motorola V950 phone */
DEVICE(moto_modem, MOTO_IDS);
/* Motorola Tetra driver */
#define MOTOROLA_TETRA_IDS() \
{ USB_DEVICE(0x0cad, 0x9011) }, /* Motorola Solutions TETRA PEI */ \
{ USB_DEVICE(0x0cad, 0x9012) }, /* MTP6550 */ \
{ USB_DEVICE(0x0cad, 0x9013) }, /* MTP3xxx */ \
{ USB_DEVICE(0x0cad, 0x9015) }, /* MTP85xx */ \
{ USB_DEVICE(0x0cad, 0x9016) } /* TPG2200 */
DEVICE(motorola_tetra, MOTOROLA_TETRA_IDS);
/* Nokia mobile phone driver */
#define NOKIA_IDS() \
{ USB_DEVICE(0x0421, 0x069a) } /* Nokia 130 (RM-1035) */
DEVICE(nokia, NOKIA_IDS);
/* Novatel Wireless GPS driver */
#define NOVATEL_IDS() \
{ USB_DEVICE(0x09d7, 0x0100) } /* NovAtel FlexPack GPS */
DEVICE_N(novatel_gps, NOVATEL_IDS, 3);
/* Siemens USB/MPI adapter */
#define SIEMENS_IDS() \
{ USB_DEVICE(0x908, 0x0004) }
DEVICE(siemens_mpi, SIEMENS_IDS);
/* Suunto ANT+ USB Driver */
#define SUUNTO_IDS() \
{ USB_DEVICE(0x0fcf, 0x1008) }, \
{ USB_DEVICE(0x0fcf, 0x1009) } /* Dynastream ANT USB-m Stick */
DEVICE(suunto, SUUNTO_IDS);
/* ViVOpay USB Serial Driver */
#define VIVOPAY_IDS() \
{ USB_DEVICE(0x1d5f, 0x1004) } /* ViVOpay 8800 */
DEVICE(vivopay, VIVOPAY_IDS);
/* ZIO Motherboard USB driver */
#define ZIO_IDS() \
{ USB_DEVICE(0x1CBE, 0x0103) }
DEVICE(zio, ZIO_IDS);
/* All of the above structures mushed into two lists */
static struct usb_serial_driver * const serial_drivers[] = {
&carelink_device,
&flashloader_device,
&funsoft_device,
&google_device,
&hp4x_device,
&kaufmann_device,
&libtransistor_device,
&moto_modem_device,
&motorola_tetra_device,
&nokia_device,
&novatel_gps_device,
&siemens_mpi_device,
&suunto_device,
&vivopay_device,
&zio_device,
NULL
};
static const struct usb_device_id id_table[] = {
CARELINK_IDS(),
FLASHLOADER_IDS(),
FUNSOFT_IDS(),
GOOGLE_IDS(),
HP4X_IDS(),
KAUFMANN_IDS(),
LIBTRANSISTOR_IDS(),
MOTO_IDS(),
MOTOROLA_TETRA_IDS(),
NOKIA_IDS(),
NOVATEL_IDS(),
SIEMENS_IDS(),
SUUNTO_IDS(),
VIVOPAY_IDS(),
ZIO_IDS(),
{ },
};
MODULE_DEVICE_TABLE(usb, id_table);
module_usb_serial_driver(serial_drivers, id_table);
MODULE_LICENSE("GPL v2");
| linux-master | drivers/usb/serial/usb-serial-simple.c |
// SPDX-License-Identifier: GPL-2.0
/*
* USB Empeg empeg-car player driver
*
* Copyright (C) 2000, 2001
* Gary Brubaker ([email protected])
*
* Copyright (C) 1999 - 2001
* Greg Kroah-Hartman ([email protected])
*
* See Documentation/usb/usb-serial.rst for more information on using this
* driver
*/
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/tty.h>
#include <linux/tty_driver.h>
#include <linux/tty_flip.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/uaccess.h>
#include <linux/usb.h>
#include <linux/usb/serial.h>
#define DRIVER_AUTHOR "Greg Kroah-Hartman <[email protected]>, Gary Brubaker <[email protected]>"
#define DRIVER_DESC "USB Empeg Mark I/II Driver"
#define EMPEG_VENDOR_ID 0x084f
#define EMPEG_PRODUCT_ID 0x0001
/* function prototypes for an empeg-car player */
static int empeg_startup(struct usb_serial *serial);
static void empeg_init_termios(struct tty_struct *tty);
static const struct usb_device_id id_table[] = {
{ USB_DEVICE(EMPEG_VENDOR_ID, EMPEG_PRODUCT_ID) },
{ } /* Terminating entry */
};
MODULE_DEVICE_TABLE(usb, id_table);
static struct usb_serial_driver empeg_device = {
.driver = {
.owner = THIS_MODULE,
.name = "empeg",
},
.id_table = id_table,
.num_ports = 1,
.bulk_out_size = 256,
.throttle = usb_serial_generic_throttle,
.unthrottle = usb_serial_generic_unthrottle,
.attach = empeg_startup,
.init_termios = empeg_init_termios,
};
static struct usb_serial_driver * const serial_drivers[] = {
&empeg_device, NULL
};
static int empeg_startup(struct usb_serial *serial)
{
int r;
if (serial->dev->actconfig->desc.bConfigurationValue != 1) {
dev_err(&serial->dev->dev, "active config #%d != 1 ??\n",
serial->dev->actconfig->desc.bConfigurationValue);
return -ENODEV;
}
r = usb_reset_configuration(serial->dev);
/* continue on with initialization */
return r;
}
static void empeg_init_termios(struct tty_struct *tty)
{
struct ktermios *termios = &tty->termios;
/*
* The empeg-car player wants these particular tty settings.
	 * You could, for example, change the baud rate, however the
	 * player only supports 115200 (currently), so there is really
	 * no point in supporting changes to the tty settings
	 * (at least for now).
*
* The default requirements for this device are:
*/
termios->c_iflag
&= ~(IGNBRK /* disable ignore break */
| BRKINT /* disable break causes interrupt */
| PARMRK /* disable mark parity errors */
| ISTRIP /* disable clear high bit of input characters */
| INLCR /* disable translate NL to CR */
| IGNCR /* disable ignore CR */
| ICRNL /* disable translate CR to NL */
		| IXON);	/* disable XON/XOFF flow control */
termios->c_oflag
&= ~OPOST; /* disable postprocess output characters */
termios->c_lflag
&= ~(ECHO /* disable echo input characters */
| ECHONL /* disable echo new line */
| ICANON /* disable erase, kill, werase, and rprnt special characters */
| ISIG /* disable interrupt, quit, and suspend special characters */
| IEXTEN); /* disable non-POSIX special characters */
termios->c_cflag
&= ~(CSIZE /* no size */
| PARENB /* disable parity bit */
| CBAUD); /* clear current baud rate */
termios->c_cflag
|= CS8; /* character size 8 bits */
tty_encode_baud_rate(tty, 115200, 115200);
}
module_usb_serial_driver(serial_drivers, id_table);
MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL v2");
| linux-master | drivers/usb/serial/empeg.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* F81532/F81534 USB to Serial Ports Bridge
*
* F81532 => 2 Serial Ports
* F81534 => 4 Serial Ports
*
* Copyright (C) 2016 Feature Integration Technology Inc., (Fintek)
* Copyright (C) 2016 Tom Tsai ([email protected])
* Copyright (C) 2016 Peter Hong ([email protected])
*
 * The F81532/F81534 has one control endpoint for settings, one bulk-out
 * endpoint for all serial port TX and one bulk-in endpoint for all serial
 * port RX (read data/MSR/LSR).
 *
 * The write URB is fixed at 512 bytes; each serial port uses 128 bytes of it.
 * See f81534_prepare_write_buffer() for the layout.
 *
 * The read URB is 512 bytes max; each serial port uses 128 bytes of it.
 * See f81534_process_read_urb(); a transfer may contain 128 x 1, 2, 3 or 4
 * bytes.
*
*/
#include <linux/slab.h>
#include <linux/tty.h>
#include <linux/tty_flip.h>
#include <linux/usb.h>
#include <linux/usb/serial.h>
#include <linux/serial_reg.h>
#include <linux/module.h>
#include <linux/uaccess.h>
/* Serial Port register Address */
#define F81534_UART_BASE_ADDRESS 0x1200
#define F81534_UART_OFFSET 0x10
#define F81534_DIVISOR_LSB_REG (0x00 + F81534_UART_BASE_ADDRESS)
#define F81534_DIVISOR_MSB_REG (0x01 + F81534_UART_BASE_ADDRESS)
#define F81534_INTERRUPT_ENABLE_REG (0x01 + F81534_UART_BASE_ADDRESS)
#define F81534_FIFO_CONTROL_REG (0x02 + F81534_UART_BASE_ADDRESS)
#define F81534_LINE_CONTROL_REG (0x03 + F81534_UART_BASE_ADDRESS)
#define F81534_MODEM_CONTROL_REG (0x04 + F81534_UART_BASE_ADDRESS)
#define F81534_LINE_STATUS_REG (0x05 + F81534_UART_BASE_ADDRESS)
#define F81534_MODEM_STATUS_REG (0x06 + F81534_UART_BASE_ADDRESS)
#define F81534_CLOCK_REG (0x08 + F81534_UART_BASE_ADDRESS)
#define F81534_CONFIG1_REG (0x09 + F81534_UART_BASE_ADDRESS)
#define F81534_DEF_CONF_ADDRESS_START 0x3000
#define F81534_DEF_CONF_SIZE 12
#define F81534_CUSTOM_ADDRESS_START 0x2f00
#define F81534_CUSTOM_DATA_SIZE 0x10
#define F81534_CUSTOM_NO_CUSTOM_DATA 0xff
#define F81534_CUSTOM_VALID_TOKEN 0xf0
#define F81534_CONF_OFFSET 1
#define F81534_CONF_INIT_GPIO_OFFSET 4
#define F81534_CONF_WORK_GPIO_OFFSET 8
#define F81534_CONF_GPIO_SHUTDOWN 7
#define F81534_CONF_GPIO_RS232 1
#define F81534_MAX_DATA_BLOCK 64
#define F81534_MAX_BUS_RETRY 20
/* USB control transfer retry count and default timeout */
#define F81534_USB_MAX_RETRY 10
#define F81534_USB_TIMEOUT 2000
#define F81534_SET_GET_REGISTER 0xA0
#define F81534_NUM_PORT 4
#define F81534_UNUSED_PORT 0xff
#define F81534_WRITE_BUFFER_SIZE 512
#define DRIVER_DESC "Fintek F81532/F81534"
#define FINTEK_VENDOR_ID_1 0x1934
#define FINTEK_VENDOR_ID_2 0x2C42
#define FINTEK_DEVICE_ID 0x1202
#define F81534_MAX_TX_SIZE 124
#define F81534_MAX_RX_SIZE 124
#define F81534_RECEIVE_BLOCK_SIZE 128
#define F81534_MAX_RECEIVE_BLOCK_SIZE 512
#define F81534_TOKEN_RECEIVE 0x01
#define F81534_TOKEN_WRITE 0x02
#define F81534_TOKEN_TX_EMPTY 0x03
#define F81534_TOKEN_MSR_CHANGE 0x04
/*
 * We use the internal SPI bus to access the flash section. We must wait for
 * the SPI bus to become idle after issuing any command.
*
* SPI Bus status register: F81534_BUS_REG_STATUS
* Bit 0/1 : BUSY
* Bit 2 : IDLE
*/
#define F81534_BUS_BUSY (BIT(0) | BIT(1))
#define F81534_BUS_IDLE BIT(2)
#define F81534_BUS_READ_DATA 0x1004
#define F81534_BUS_REG_STATUS 0x1003
#define F81534_BUS_REG_START 0x1002
#define F81534_BUS_REG_END 0x1001
#define F81534_CMD_READ 0x03
#define F81534_DEFAULT_BAUD_RATE 9600
#define F81534_PORT_CONF_RS232 0
#define F81534_PORT_CONF_RS485 BIT(0)
#define F81534_PORT_CONF_RS485_INVERT (BIT(0) | BIT(1))
#define F81534_PORT_CONF_MODE_MASK GENMASK(1, 0)
#define F81534_PORT_CONF_DISABLE_PORT BIT(3)
#define F81534_PORT_CONF_NOT_EXIST_PORT BIT(7)
#define F81534_PORT_UNAVAILABLE \
(F81534_PORT_CONF_DISABLE_PORT | F81534_PORT_CONF_NOT_EXIST_PORT)
#define F81534_1X_RXTRIGGER 0xc3
#define F81534_8X_RXTRIGGER 0xcf
/*
* F81532/534 Clock registers (offset +08h)
*
* Bit0: UART Enable (always on)
* Bit2-1: Clock source selector
* 00: 1.846MHz.
* 01: 18.46MHz.
* 10: 24MHz.
* 11: 14.77MHz.
* Bit4: Auto direction(RTS) control (RTS pin Low when TX)
* Bit5: Invert direction(RTS) when Bit4 enabled (RTS pin high when TX)
*/
#define F81534_UART_EN BIT(0)
#define F81534_CLK_1_846_MHZ 0
#define F81534_CLK_18_46_MHZ BIT(1)
#define F81534_CLK_24_MHZ BIT(2)
#define F81534_CLK_14_77_MHZ (BIT(1) | BIT(2))
#define F81534_CLK_MASK GENMASK(2, 1)
#define F81534_CLK_TX_DELAY_1BIT BIT(3)
#define F81534_CLK_RS485_MODE BIT(4)
#define F81534_CLK_RS485_INVERT BIT(5)
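/*
 * Illustrative example (not from the original driver): an RS485 port running
 * from the 24MHz clock with the 1-bit TX delay would program its clock
 * register (shadow_clk) as
 *
 *	F81534_UART_EN | F81534_CLK_24_MHZ | F81534_CLK_TX_DELAY_1BIT |
 *	F81534_CLK_RS485_MODE
 *
 * and write that value to F81534_CLOCK_REG for the port.
 */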
static const struct usb_device_id f81534_id_table[] = {
{ USB_DEVICE(FINTEK_VENDOR_ID_1, FINTEK_DEVICE_ID) },
{ USB_DEVICE(FINTEK_VENDOR_ID_2, FINTEK_DEVICE_ID) },
{} /* Terminating entry */
};
#define F81534_TX_EMPTY_BIT 0
struct f81534_serial_private {
u8 conf_data[F81534_DEF_CONF_SIZE];
int tty_idx[F81534_NUM_PORT];
u8 setting_idx;
int opened_port;
struct mutex urb_mutex;
};
struct f81534_port_private {
struct mutex mcr_mutex;
struct mutex lcr_mutex;
struct work_struct lsr_work;
struct usb_serial_port *port;
unsigned long tx_empty;
spinlock_t msr_lock;
u32 baud_base;
u8 shadow_mcr;
u8 shadow_lcr;
u8 shadow_msr;
u8 shadow_clk;
u8 phy_num;
};
struct f81534_pin_data {
const u16 reg_addr;
const u8 reg_mask;
};
struct f81534_port_out_pin {
struct f81534_pin_data pin[3];
};
/* Pin output value for M2/M1/M0(SD) */
static const struct f81534_port_out_pin f81534_port_out_pins[] = {
{ { { 0x2ae8, BIT(7) }, { 0x2a90, BIT(5) }, { 0x2a90, BIT(4) } } },
{ { { 0x2ae8, BIT(6) }, { 0x2ae8, BIT(0) }, { 0x2ae8, BIT(3) } } },
{ { { 0x2a90, BIT(0) }, { 0x2ae8, BIT(2) }, { 0x2a80, BIT(6) } } },
{ { { 0x2a90, BIT(3) }, { 0x2a90, BIT(2) }, { 0x2a90, BIT(1) } } },
};
static u32 const baudrate_table[] = { 115200, 921600, 1152000, 1500000 };
static u8 const clock_table[] = { F81534_CLK_1_846_MHZ, F81534_CLK_14_77_MHZ,
F81534_CLK_18_46_MHZ, F81534_CLK_24_MHZ };
static int f81534_logic_to_phy_port(struct usb_serial *serial,
struct usb_serial_port *port)
{
struct f81534_serial_private *serial_priv =
usb_get_serial_data(port->serial);
int count = 0;
int i;
for (i = 0; i < F81534_NUM_PORT; ++i) {
if (serial_priv->conf_data[i] & F81534_PORT_UNAVAILABLE)
continue;
if (port->port_number == count)
return i;
++count;
}
return -ENODEV;
}
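/*
 * Illustrative example (not from the original driver): if conf_data[] marks
 * phy port 1 as F81534_PORT_UNAVAILABLE, the remaining ports map as
 *
 *	logical port 0 -> phy 0
 *	logical port 1 -> phy 2
 *	logical port 2 -> phy 3
 */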
static int f81534_set_register(struct usb_serial *serial, u16 reg, u8 data)
{
struct usb_interface *interface = serial->interface;
struct usb_device *dev = serial->dev;
size_t count = F81534_USB_MAX_RETRY;
int status;
u8 *tmp;
tmp = kmalloc(sizeof(u8), GFP_KERNEL);
if (!tmp)
return -ENOMEM;
*tmp = data;
/*
	 * Our device may not reply when heavily loaded, so retry up to
	 * F81534_USB_MAX_RETRY times.
*/
while (count--) {
status = usb_control_msg(dev, usb_sndctrlpipe(dev, 0),
F81534_SET_GET_REGISTER,
USB_TYPE_VENDOR | USB_DIR_OUT,
reg, 0, tmp, sizeof(u8),
F81534_USB_TIMEOUT);
if (status == sizeof(u8)) {
status = 0;
break;
}
}
if (status < 0) {
dev_err(&interface->dev, "%s: reg: %x data: %x failed: %d\n",
__func__, reg, data, status);
}
kfree(tmp);
return status;
}
static int f81534_get_register(struct usb_serial *serial, u16 reg, u8 *data)
{
struct usb_interface *interface = serial->interface;
struct usb_device *dev = serial->dev;
size_t count = F81534_USB_MAX_RETRY;
int status;
u8 *tmp;
tmp = kmalloc(sizeof(u8), GFP_KERNEL);
if (!tmp)
return -ENOMEM;
/*
	 * Our device may not reply when heavily loaded, so retry up to
	 * F81534_USB_MAX_RETRY times.
*/
while (count--) {
status = usb_control_msg(dev, usb_rcvctrlpipe(dev, 0),
F81534_SET_GET_REGISTER,
USB_TYPE_VENDOR | USB_DIR_IN,
reg, 0, tmp, sizeof(u8),
F81534_USB_TIMEOUT);
if (status > 0) {
status = 0;
break;
} else if (status == 0) {
status = -EIO;
}
}
if (status < 0) {
dev_err(&interface->dev, "%s: reg: %x failed: %d\n", __func__,
reg, status);
goto end;
}
*data = *tmp;
end:
kfree(tmp);
return status;
}
static int f81534_set_mask_register(struct usb_serial *serial, u16 reg,
u8 mask, u8 data)
{
int status;
u8 tmp;
status = f81534_get_register(serial, reg, &tmp);
if (status)
return status;
tmp &= ~mask;
tmp |= (mask & data);
return f81534_set_register(serial, reg, tmp);
}
static int f81534_set_phy_port_register(struct usb_serial *serial, int phy,
u16 reg, u8 data)
{
return f81534_set_register(serial, reg + F81534_UART_OFFSET * phy,
data);
}
static int f81534_get_phy_port_register(struct usb_serial *serial, int phy,
u16 reg, u8 *data)
{
return f81534_get_register(serial, reg + F81534_UART_OFFSET * phy,
data);
}
static int f81534_set_port_register(struct usb_serial_port *port, u16 reg,
u8 data)
{
struct f81534_port_private *port_priv = usb_get_serial_port_data(port);
return f81534_set_register(port->serial,
reg + port_priv->phy_num * F81534_UART_OFFSET, data);
}
static int f81534_get_port_register(struct usb_serial_port *port, u16 reg,
u8 *data)
{
struct f81534_port_private *port_priv = usb_get_serial_port_data(port);
return f81534_get_register(port->serial,
reg + port_priv->phy_num * F81534_UART_OFFSET, data);
}
/*
 * When accessing the internal flash via the SPI bus, we must check the bus
 * status after every command, e.g. writes to F81534_BUS_REG_START and
 * F81534_BUS_REG_END.
*/
static int f81534_wait_for_spi_idle(struct usb_serial *serial)
{
size_t count = F81534_MAX_BUS_RETRY;
u8 tmp;
int status;
do {
status = f81534_get_register(serial, F81534_BUS_REG_STATUS,
&tmp);
if (status)
return status;
if (tmp & F81534_BUS_BUSY)
continue;
if (tmp & F81534_BUS_IDLE)
break;
} while (--count);
if (!count) {
dev_err(&serial->interface->dev,
"%s: timed out waiting for idle SPI bus\n",
__func__);
return -EIO;
}
return f81534_set_register(serial, F81534_BUS_REG_STATUS,
tmp & ~F81534_BUS_IDLE);
}
static int f81534_get_spi_register(struct usb_serial *serial, u16 reg,
u8 *data)
{
int status;
status = f81534_get_register(serial, reg, data);
if (status)
return status;
return f81534_wait_for_spi_idle(serial);
}
static int f81534_set_spi_register(struct usb_serial *serial, u16 reg, u8 data)
{
int status;
status = f81534_set_register(serial, reg, data);
if (status)
return status;
return f81534_wait_for_spi_idle(serial);
}
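/*
 * Illustrative sketch (not from the original driver): with the helpers above,
 * a flash access is a sequence of "write a byte, wait for SPI idle" steps,
 * e.g. issuing the read command is just
 *
 *	f81534_set_spi_register(serial, F81534_BUS_REG_START, F81534_CMD_READ);
 *
 * followed by the three address bytes, as done in f81534_read_flash() below.
 */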
static int f81534_read_flash(struct usb_serial *serial, u32 address,
size_t size, u8 *buf)
{
u8 tmp_buf[F81534_MAX_DATA_BLOCK];
size_t block = 0;
size_t read_size;
size_t count;
int status;
int offset;
u16 reg_tmp;
status = f81534_set_spi_register(serial, F81534_BUS_REG_START,
F81534_CMD_READ);
if (status)
return status;
status = f81534_set_spi_register(serial, F81534_BUS_REG_START,
(address >> 16) & 0xff);
if (status)
return status;
status = f81534_set_spi_register(serial, F81534_BUS_REG_START,
(address >> 8) & 0xff);
if (status)
return status;
status = f81534_set_spi_register(serial, F81534_BUS_REG_START,
(address >> 0) & 0xff);
if (status)
return status;
/* Continuous read mode */
do {
read_size = min_t(size_t, F81534_MAX_DATA_BLOCK, size);
for (count = 0; count < read_size; ++count) {
/* To write F81534_BUS_REG_END when final byte */
if (size <= F81534_MAX_DATA_BLOCK &&
read_size == count + 1)
reg_tmp = F81534_BUS_REG_END;
else
reg_tmp = F81534_BUS_REG_START;
/*
			 * Dummy write to force the IC to generate a read
			 * pulse; the value 0xf1 is a don't-care (any value
			 * is OK).
*/
status = f81534_set_spi_register(serial, reg_tmp,
0xf1);
if (status)
return status;
status = f81534_get_spi_register(serial,
F81534_BUS_READ_DATA,
&tmp_buf[count]);
if (status)
return status;
offset = count + block * F81534_MAX_DATA_BLOCK;
buf[offset] = tmp_buf[count];
}
size -= read_size;
++block;
} while (size);
return 0;
}
static void f81534_prepare_write_buffer(struct usb_serial_port *port, u8 *buf)
{
struct f81534_port_private *port_priv = usb_get_serial_port_data(port);
int phy_num = port_priv->phy_num;
u8 tx_len;
int i;
/*
	 * The block layout is fixed at 4 x 128 bytes, 128 bytes per port:
* index 0: port phy idx (e.g., 0,1,2,3)
* index 1: only F81534_TOKEN_WRITE
* index 2: serial TX out length
* index 3: fix to 0
* index 4~127: serial out data block
*/
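	/*
	 * Illustrative example (not from the original driver): if 5 bytes
	 * ("hello") are queued for phy port 2, the 512-byte URB looks like
	 *
	 *	offset 0x000: 00 02 00 00 <124 unused bytes>	(port 0, empty)
	 *	offset 0x080: 01 02 00 00 <124 unused bytes>	(port 1, empty)
	 *	offset 0x100: 02 02 05 00 'h' 'e' 'l' 'l' 'o' ...	(port 2)
	 *	offset 0x180: 03 02 00 00 <124 unused bytes>	(port 3)
	 *
	 * where byte 1 of each block is F81534_TOKEN_WRITE (0x02).
	 */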
for (i = 0; i < F81534_NUM_PORT; ++i) {
buf[i * F81534_RECEIVE_BLOCK_SIZE] = i;
buf[i * F81534_RECEIVE_BLOCK_SIZE + 1] = F81534_TOKEN_WRITE;
buf[i * F81534_RECEIVE_BLOCK_SIZE + 2] = 0;
buf[i * F81534_RECEIVE_BLOCK_SIZE + 3] = 0;
}
tx_len = kfifo_out_locked(&port->write_fifo,
&buf[phy_num * F81534_RECEIVE_BLOCK_SIZE + 4],
F81534_MAX_TX_SIZE, &port->lock);
buf[phy_num * F81534_RECEIVE_BLOCK_SIZE + 2] = tx_len;
}
static int f81534_submit_writer(struct usb_serial_port *port, gfp_t mem_flags)
{
struct f81534_port_private *port_priv = usb_get_serial_port_data(port);
struct urb *urb;
unsigned long flags;
int result;
	/* Check whether there is any data in the write FIFO */
spin_lock_irqsave(&port->lock, flags);
if (kfifo_is_empty(&port->write_fifo)) {
spin_unlock_irqrestore(&port->lock, flags);
return 0;
}
spin_unlock_irqrestore(&port->lock, flags);
/* Check H/W is TXEMPTY */
if (!test_and_clear_bit(F81534_TX_EMPTY_BIT, &port_priv->tx_empty))
return 0;
urb = port->write_urbs[0];
f81534_prepare_write_buffer(port, port->bulk_out_buffers[0]);
urb->transfer_buffer_length = F81534_WRITE_BUFFER_SIZE;
result = usb_submit_urb(urb, mem_flags);
if (result) {
set_bit(F81534_TX_EMPTY_BIT, &port_priv->tx_empty);
dev_err(&port->dev, "%s: submit failed: %d\n", __func__,
result);
return result;
}
usb_serial_port_softint(port);
return 0;
}
static u32 f81534_calc_baud_divisor(u32 baudrate, u32 clockrate)
{
/* Round to nearest divisor */
return DIV_ROUND_CLOSEST(clockrate, baudrate);
}
static int f81534_find_clk(u32 baudrate)
{
int idx;
for (idx = 0; idx < ARRAY_SIZE(baudrate_table); ++idx) {
if (baudrate <= baudrate_table[idx] &&
baudrate_table[idx] % baudrate == 0)
return idx;
}
return -EINVAL;
}
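/*
 * Illustrative example (not from the original driver): for 9600 baud,
 * f81534_find_clk() returns index 0, since 115200 is the smallest table
 * entry that is an integer multiple of 9600. The port then runs from the
 * 1.846MHz clock with baud_base 115200, and
 *
 *	f81534_calc_baud_divisor(9600, 115200) == 12
 */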
static int f81534_set_port_config(struct usb_serial_port *port,
struct tty_struct *tty, u32 baudrate, u32 old_baudrate, u8 lcr)
{
struct f81534_port_private *port_priv = usb_get_serial_port_data(port);
u32 divisor;
int status;
int i;
int idx;
u8 value;
u32 baud_list[] = {baudrate, old_baudrate, F81534_DEFAULT_BAUD_RATE};
for (i = 0; i < ARRAY_SIZE(baud_list); ++i) {
baudrate = baud_list[i];
if (baudrate == 0) {
tty_encode_baud_rate(tty, 0, 0);
return 0;
}
idx = f81534_find_clk(baudrate);
if (idx >= 0) {
tty_encode_baud_rate(tty, baudrate, baudrate);
break;
}
}
if (idx < 0)
return -EINVAL;
port_priv->baud_base = baudrate_table[idx];
port_priv->shadow_clk &= ~F81534_CLK_MASK;
port_priv->shadow_clk |= clock_table[idx];
status = f81534_set_port_register(port, F81534_CLOCK_REG,
port_priv->shadow_clk);
if (status) {
dev_err(&port->dev, "CLOCK_REG setting failed\n");
return status;
}
if (baudrate <= 1200)
value = F81534_1X_RXTRIGGER; /* 128 FIFO & TL: 1x */
else
value = F81534_8X_RXTRIGGER; /* 128 FIFO & TL: 8x */
status = f81534_set_port_register(port, F81534_CONFIG1_REG, value);
if (status) {
dev_err(&port->dev, "%s: CONFIG1 setting failed\n", __func__);
return status;
}
if (baudrate <= 1200)
value = UART_FCR_TRIGGER_1 | UART_FCR_ENABLE_FIFO; /* TL: 1 */
else
value = UART_FCR_TRIGGER_8 | UART_FCR_ENABLE_FIFO; /* TL: 8 */
status = f81534_set_port_register(port, F81534_FIFO_CONTROL_REG,
value);
if (status) {
dev_err(&port->dev, "%s: FCR setting failed\n", __func__);
return status;
}
divisor = f81534_calc_baud_divisor(baudrate, port_priv->baud_base);
mutex_lock(&port_priv->lcr_mutex);
value = UART_LCR_DLAB;
status = f81534_set_port_register(port, F81534_LINE_CONTROL_REG,
value);
if (status) {
dev_err(&port->dev, "%s: set LCR failed\n", __func__);
goto out_unlock;
}
value = divisor & 0xff;
status = f81534_set_port_register(port, F81534_DIVISOR_LSB_REG, value);
if (status) {
dev_err(&port->dev, "%s: set DLAB LSB failed\n", __func__);
goto out_unlock;
}
value = (divisor >> 8) & 0xff;
status = f81534_set_port_register(port, F81534_DIVISOR_MSB_REG, value);
if (status) {
dev_err(&port->dev, "%s: set DLAB MSB failed\n", __func__);
goto out_unlock;
}
value = lcr | (port_priv->shadow_lcr & UART_LCR_SBC);
status = f81534_set_port_register(port, F81534_LINE_CONTROL_REG,
value);
if (status) {
dev_err(&port->dev, "%s: set LCR failed\n", __func__);
goto out_unlock;
}
port_priv->shadow_lcr = value;
out_unlock:
mutex_unlock(&port_priv->lcr_mutex);
return status;
}
static int f81534_break_ctl(struct tty_struct *tty, int break_state)
{
struct usb_serial_port *port = tty->driver_data;
struct f81534_port_private *port_priv = usb_get_serial_port_data(port);
int status;
mutex_lock(&port_priv->lcr_mutex);
if (break_state)
port_priv->shadow_lcr |= UART_LCR_SBC;
else
port_priv->shadow_lcr &= ~UART_LCR_SBC;
status = f81534_set_port_register(port, F81534_LINE_CONTROL_REG,
port_priv->shadow_lcr);
if (status)
dev_err(&port->dev, "set break failed: %d\n", status);
mutex_unlock(&port_priv->lcr_mutex);
return status;
}
static int f81534_update_mctrl(struct usb_serial_port *port, unsigned int set,
unsigned int clear)
{
struct f81534_port_private *port_priv = usb_get_serial_port_data(port);
int status;
u8 tmp;
if (((set | clear) & (TIOCM_DTR | TIOCM_RTS)) == 0)
return 0; /* no change */
mutex_lock(&port_priv->mcr_mutex);
/* 'Set' takes precedence over 'Clear' */
clear &= ~set;
/* Always enable UART_MCR_OUT2 */
tmp = UART_MCR_OUT2 | port_priv->shadow_mcr;
if (clear & TIOCM_DTR)
tmp &= ~UART_MCR_DTR;
if (clear & TIOCM_RTS)
tmp &= ~UART_MCR_RTS;
if (set & TIOCM_DTR)
tmp |= UART_MCR_DTR;
if (set & TIOCM_RTS)
tmp |= UART_MCR_RTS;
status = f81534_set_port_register(port, F81534_MODEM_CONTROL_REG, tmp);
if (status < 0) {
dev_err(&port->dev, "%s: MCR write failed\n", __func__);
mutex_unlock(&port_priv->mcr_mutex);
return status;
}
port_priv->shadow_mcr = tmp;
mutex_unlock(&port_priv->mcr_mutex);
return 0;
}
/*
 * This function searches the data area for the F81534_CUSTOM_VALID_TOKEN
 * token to find the latest configuration index. If nothing is found
 * (*index = F81534_CUSTOM_NO_CUSTOM_DATA), the default configuration in the
 * F81534_DEF_CONF_ADDRESS_START section is loaded instead.
 *
 * Since only block 0 is used to save data, *index will be either 0 or
 * F81534_CUSTOM_NO_CUSTOM_DATA.
*/
static int f81534_find_config_idx(struct usb_serial *serial, u8 *index)
{
u8 tmp;
int status;
status = f81534_read_flash(serial, F81534_CUSTOM_ADDRESS_START, 1,
&tmp);
if (status) {
dev_err(&serial->interface->dev, "%s: read failed: %d\n",
__func__, status);
return status;
}
/* We'll use the custom data when the data is valid. */
if (tmp == F81534_CUSTOM_VALID_TOKEN)
*index = 0;
else
*index = F81534_CUSTOM_NO_CUSTOM_DATA;
return 0;
}
/*
 * The F81532/534 will not report a serial port to the USB serial subsystem
 * when the H/W DCD/DSR/CTS/RI/RX pins are connected to ground.
 *
 * To detect the RX pin status, we enable MCR internal loopback, disable it
 * and then delay for 60ms. The pin is connected to ground if the LSR
 * register reports UART_LSR_BI.
*/
static bool f81534_check_port_hw_disabled(struct usb_serial *serial, int phy)
{
int status;
u8 old_mcr;
u8 msr;
u8 lsr;
u8 msr_mask;
msr_mask = UART_MSR_DCD | UART_MSR_RI | UART_MSR_DSR | UART_MSR_CTS;
status = f81534_get_phy_port_register(serial, phy,
F81534_MODEM_STATUS_REG, &msr);
if (status)
return false;
if ((msr & msr_mask) != msr_mask)
return false;
status = f81534_set_phy_port_register(serial, phy,
F81534_FIFO_CONTROL_REG, UART_FCR_ENABLE_FIFO |
UART_FCR_CLEAR_RCVR | UART_FCR_CLEAR_XMIT);
if (status)
return false;
status = f81534_get_phy_port_register(serial, phy,
F81534_MODEM_CONTROL_REG, &old_mcr);
if (status)
return false;
status = f81534_set_phy_port_register(serial, phy,
F81534_MODEM_CONTROL_REG, UART_MCR_LOOP);
if (status)
return false;
status = f81534_set_phy_port_register(serial, phy,
F81534_MODEM_CONTROL_REG, 0x0);
if (status)
return false;
msleep(60);
status = f81534_get_phy_port_register(serial, phy,
F81534_LINE_STATUS_REG, &lsr);
if (status)
return false;
status = f81534_set_phy_port_register(serial, phy,
F81534_MODEM_CONTROL_REG, old_mcr);
if (status)
return false;
if ((lsr & UART_LSR_BI) == UART_LSR_BI)
return true;
return false;
}
/*
 * There are two generations of the F81532/534 IC. Both have internal storage.
 *
 * The 1st generation is a pure USB-to-TTL RS232 IC designed for 4 ports only;
 * no internal data is used. All mode and GPIO control must be set manually by
 * the application or the driver, and every byte of the storage space reads as
 * 0xff. For this IC, f81534_calc_num_ports() falls through to the path we
 * mark as the "oldest version".
 *
 * The 2nd generation is designed to be more generic and work with any
 * transceiver; this is our mass-production type. Nine bytes of data are saved
 * at F81534_CUSTOM_ADDRESS_START (0x2f00). The 1st byte is an indicator: if
 * the token is F81534_CUSTOM_VALID_TOKEN (0xf0), the IC is the 2nd generation
 * type. The following 4 bytes save the port mode (0:RS232/1:RS485 Invert/
 * 2:RS485), and the last 4 bytes save the GPIO state (a value from 0~7
 * representing 3 GPIO output pins). f81534_calc_num_ports() then takes the
 * "new style" path and checks the F81534_PORT_UNAVAILABLE bits.
*/
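/*
 * Illustrative sketch (not from the original driver): relative to the start
 * of conf_data[] (read from F81534_CUSTOM_ADDRESS_START + F81534_CONF_OFFSET
 * or from F81534_DEF_CONF_ADDRESS_START), the driver consumes
 *
 *	conf_data[N]					mode/disable flags, phy port N
 *	conf_data[F81534_CONF_INIT_GPIO_OFFSET + N]	initial GPIO value, phy port N
 *	conf_data[F81534_CONF_WORK_GPIO_OFFSET + N]	working GPIO value, phy port N
 *
 * see f81534_port_probe() and f81534_set_port_output_pin().
 */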
static int f81534_calc_num_ports(struct usb_serial *serial,
struct usb_serial_endpoints *epds)
{
struct f81534_serial_private *serial_priv;
struct device *dev = &serial->interface->dev;
int size_bulk_in = usb_endpoint_maxp(epds->bulk_in[0]);
int size_bulk_out = usb_endpoint_maxp(epds->bulk_out[0]);
u8 num_port = 0;
int index = 0;
int status;
int i;
if (size_bulk_out != F81534_WRITE_BUFFER_SIZE ||
size_bulk_in != F81534_MAX_RECEIVE_BLOCK_SIZE) {
dev_err(dev, "unsupported endpoint max packet size\n");
return -ENODEV;
}
serial_priv = devm_kzalloc(&serial->interface->dev,
sizeof(*serial_priv), GFP_KERNEL);
if (!serial_priv)
return -ENOMEM;
usb_set_serial_data(serial, serial_priv);
mutex_init(&serial_priv->urb_mutex);
	/* Check whether a custom setting exists */
status = f81534_find_config_idx(serial, &serial_priv->setting_idx);
if (status) {
dev_err(&serial->interface->dev, "%s: find idx failed: %d\n",
__func__, status);
return status;
}
/*
	 * We read the custom data only when it is available, otherwise we
	 * read the default values instead.
*/
if (serial_priv->setting_idx != F81534_CUSTOM_NO_CUSTOM_DATA) {
status = f81534_read_flash(serial,
F81534_CUSTOM_ADDRESS_START +
F81534_CONF_OFFSET,
sizeof(serial_priv->conf_data),
serial_priv->conf_data);
if (status) {
dev_err(&serial->interface->dev,
"%s: get custom data failed: %d\n",
__func__, status);
return status;
}
dev_dbg(&serial->interface->dev,
"%s: read config from block: %d\n", __func__,
serial_priv->setting_idx);
} else {
/* Read default board setting */
status = f81534_read_flash(serial,
F81534_DEF_CONF_ADDRESS_START,
sizeof(serial_priv->conf_data),
serial_priv->conf_data);
if (status) {
dev_err(&serial->interface->dev,
"%s: read failed: %d\n", __func__,
status);
return status;
}
dev_dbg(&serial->interface->dev, "%s: read default config\n",
__func__);
}
/* New style, find all possible ports */
for (i = 0; i < F81534_NUM_PORT; ++i) {
if (f81534_check_port_hw_disabled(serial, i))
serial_priv->conf_data[i] |= F81534_PORT_UNAVAILABLE;
if (serial_priv->conf_data[i] & F81534_PORT_UNAVAILABLE)
continue;
++num_port;
}
if (!num_port) {
dev_warn(&serial->interface->dev,
"no config found, assuming 4 ports\n");
num_port = 4; /* Nothing found, oldest version IC */
}
/* Assign phy-to-logic mapping */
for (i = 0; i < F81534_NUM_PORT; ++i) {
if (serial_priv->conf_data[i] & F81534_PORT_UNAVAILABLE)
continue;
serial_priv->tty_idx[i] = index++;
dev_dbg(&serial->interface->dev,
"%s: phy_num: %d, tty_idx: %d\n", __func__, i,
serial_priv->tty_idx[i]);
}
/*
	 * Set up bulk-out endpoint multiplexing. All ports share the same
* bulk-out endpoint.
*/
BUILD_BUG_ON(ARRAY_SIZE(epds->bulk_out) < F81534_NUM_PORT);
for (i = 1; i < num_port; ++i)
epds->bulk_out[i] = epds->bulk_out[0];
epds->num_bulk_out = num_port;
return num_port;
}
static void f81534_set_termios(struct tty_struct *tty,
struct usb_serial_port *port,
const struct ktermios *old_termios)
{
u8 new_lcr = 0;
int status;
u32 baud;
u32 old_baud;
if (C_BAUD(tty) == B0)
f81534_update_mctrl(port, 0, TIOCM_DTR | TIOCM_RTS);
else if (old_termios && (old_termios->c_cflag & CBAUD) == B0)
f81534_update_mctrl(port, TIOCM_DTR | TIOCM_RTS, 0);
if (C_PARENB(tty)) {
new_lcr |= UART_LCR_PARITY;
if (!C_PARODD(tty))
new_lcr |= UART_LCR_EPAR;
if (C_CMSPAR(tty))
new_lcr |= UART_LCR_SPAR;
}
if (C_CSTOPB(tty))
new_lcr |= UART_LCR_STOP;
new_lcr |= UART_LCR_WLEN(tty_get_char_size(tty->termios.c_cflag));
baud = tty_get_baud_rate(tty);
if (!baud)
return;
if (old_termios)
old_baud = tty_termios_baud_rate(old_termios);
else
old_baud = F81534_DEFAULT_BAUD_RATE;
dev_dbg(&port->dev, "%s: baud: %d\n", __func__, baud);
status = f81534_set_port_config(port, tty, baud, old_baud, new_lcr);
if (status < 0) {
dev_err(&port->dev, "%s: set port config failed: %d\n",
__func__, status);
}
}
static int f81534_submit_read_urb(struct usb_serial *serial, gfp_t flags)
{
return usb_serial_generic_submit_read_urbs(serial->port[0], flags);
}
static void f81534_msr_changed(struct usb_serial_port *port, u8 msr)
{
struct f81534_port_private *port_priv = usb_get_serial_port_data(port);
struct tty_struct *tty;
unsigned long flags;
u8 old_msr;
if (!(msr & UART_MSR_ANY_DELTA))
return;
spin_lock_irqsave(&port_priv->msr_lock, flags);
old_msr = port_priv->shadow_msr;
port_priv->shadow_msr = msr;
spin_unlock_irqrestore(&port_priv->msr_lock, flags);
dev_dbg(&port->dev, "%s: MSR from %02x to %02x\n", __func__, old_msr,
msr);
/* Update input line counters */
if (msr & UART_MSR_DCTS)
port->icount.cts++;
if (msr & UART_MSR_DDSR)
port->icount.dsr++;
if (msr & UART_MSR_DDCD)
port->icount.dcd++;
if (msr & UART_MSR_TERI)
port->icount.rng++;
wake_up_interruptible(&port->port.delta_msr_wait);
if (!(msr & UART_MSR_DDCD))
return;
dev_dbg(&port->dev, "%s: DCD Changed: phy_num: %d from %x to %x\n",
__func__, port_priv->phy_num, old_msr, msr);
tty = tty_port_tty_get(&port->port);
if (!tty)
return;
usb_serial_handle_dcd_change(port, tty, msr & UART_MSR_DCD);
tty_kref_put(tty);
}
static int f81534_read_msr(struct usb_serial_port *port)
{
struct f81534_port_private *port_priv = usb_get_serial_port_data(port);
unsigned long flags;
int status;
u8 msr;
/* Get MSR initial value */
status = f81534_get_port_register(port, F81534_MODEM_STATUS_REG, &msr);
if (status)
return status;
/* Force update current state */
spin_lock_irqsave(&port_priv->msr_lock, flags);
port_priv->shadow_msr = msr;
spin_unlock_irqrestore(&port_priv->msr_lock, flags);
return 0;
}
static int f81534_open(struct tty_struct *tty, struct usb_serial_port *port)
{
struct f81534_serial_private *serial_priv =
usb_get_serial_data(port->serial);
struct f81534_port_private *port_priv = usb_get_serial_port_data(port);
int status;
status = f81534_set_port_register(port,
F81534_FIFO_CONTROL_REG, UART_FCR_ENABLE_FIFO |
UART_FCR_CLEAR_RCVR | UART_FCR_CLEAR_XMIT);
if (status) {
dev_err(&port->dev, "%s: Clear FIFO failed: %d\n", __func__,
status);
return status;
}
if (tty)
f81534_set_termios(tty, port, NULL);
status = f81534_read_msr(port);
if (status)
return status;
mutex_lock(&serial_priv->urb_mutex);
/* Submit Read URBs for first port opened */
if (!serial_priv->opened_port) {
status = f81534_submit_read_urb(port->serial, GFP_KERNEL);
if (status)
goto exit;
}
serial_priv->opened_port++;
exit:
mutex_unlock(&serial_priv->urb_mutex);
set_bit(F81534_TX_EMPTY_BIT, &port_priv->tx_empty);
return status;
}
static void f81534_close(struct usb_serial_port *port)
{
struct f81534_serial_private *serial_priv =
usb_get_serial_data(port->serial);
struct usb_serial_port *port0 = port->serial->port[0];
unsigned long flags;
size_t i;
usb_kill_urb(port->write_urbs[0]);
spin_lock_irqsave(&port->lock, flags);
kfifo_reset_out(&port->write_fifo);
spin_unlock_irqrestore(&port->lock, flags);
/* Kill Read URBs when final port closed */
mutex_lock(&serial_priv->urb_mutex);
serial_priv->opened_port--;
if (!serial_priv->opened_port) {
for (i = 0; i < ARRAY_SIZE(port0->read_urbs); ++i)
usb_kill_urb(port0->read_urbs[i]);
}
mutex_unlock(&serial_priv->urb_mutex);
}
static void f81534_get_serial_info(struct tty_struct *tty, struct serial_struct *ss)
{
struct usb_serial_port *port = tty->driver_data;
struct f81534_port_private *port_priv;
port_priv = usb_get_serial_port_data(port);
ss->baud_base = port_priv->baud_base;
}
static void f81534_process_per_serial_block(struct usb_serial_port *port,
u8 *data)
{
struct f81534_port_private *port_priv = usb_get_serial_port_data(port);
int phy_num = data[0];
size_t read_size = 0;
size_t i;
char tty_flag;
int status;
u8 lsr;
/*
* The block layout is 128 Bytes
* index 0: port phy idx (e.g., 0,1,2,3),
	 * index 1: can be one of
* F81534_TOKEN_RECEIVE
* F81534_TOKEN_TX_EMPTY
* F81534_TOKEN_MSR_CHANGE
* index 2: serial in size (data+lsr, must be even)
* meaningful for F81534_TOKEN_RECEIVE only
* index 3: current MSR with this device
* index 4~127: serial in data block (data+lsr, must be even)
*/
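	/*
	 * Illustrative example (not from the original driver): a block
	 * carrying the two received characters 'O' and 'K' from phy port 1
	 * would start with
	 *
	 *	01 01 04 <msr> 'O' <lsr> 'K' <lsr> ...
	 *
	 * i.e. F81534_TOKEN_RECEIVE with read_size 4, data and LSR bytes
	 * interleaved.
	 */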
switch (data[1]) {
case F81534_TOKEN_TX_EMPTY:
set_bit(F81534_TX_EMPTY_BIT, &port_priv->tx_empty);
/* Try to submit writer */
status = f81534_submit_writer(port, GFP_ATOMIC);
if (status)
dev_err(&port->dev, "%s: submit failed\n", __func__);
return;
case F81534_TOKEN_MSR_CHANGE:
f81534_msr_changed(port, data[3]);
return;
case F81534_TOKEN_RECEIVE:
read_size = data[2];
if (read_size > F81534_MAX_RX_SIZE) {
dev_err(&port->dev,
"%s: phy: %d read_size: %zu larger than: %d\n",
__func__, phy_num, read_size,
F81534_MAX_RX_SIZE);
return;
}
break;
default:
dev_warn(&port->dev, "%s: unknown token: %02x\n", __func__,
data[1]);
return;
}
for (i = 4; i < 4 + read_size; i += 2) {
tty_flag = TTY_NORMAL;
lsr = data[i + 1];
if (lsr & UART_LSR_BRK_ERROR_BITS) {
if (lsr & UART_LSR_BI) {
tty_flag = TTY_BREAK;
port->icount.brk++;
usb_serial_handle_break(port);
} else if (lsr & UART_LSR_PE) {
tty_flag = TTY_PARITY;
port->icount.parity++;
} else if (lsr & UART_LSR_FE) {
tty_flag = TTY_FRAME;
port->icount.frame++;
}
if (lsr & UART_LSR_OE) {
port->icount.overrun++;
tty_insert_flip_char(&port->port, 0,
TTY_OVERRUN);
}
schedule_work(&port_priv->lsr_work);
}
if (port->sysrq) {
if (usb_serial_handle_sysrq_char(port, data[i]))
continue;
}
tty_insert_flip_char(&port->port, data[i], tty_flag);
}
tty_flip_buffer_push(&port->port);
}
static void f81534_process_read_urb(struct urb *urb)
{
struct f81534_serial_private *serial_priv;
struct usb_serial_port *port;
struct usb_serial *serial;
u8 *buf;
int phy_port_num;
int tty_port_num;
size_t i;
if (!urb->actual_length ||
urb->actual_length % F81534_RECEIVE_BLOCK_SIZE) {
return;
}
port = urb->context;
serial = port->serial;
buf = urb->transfer_buffer;
serial_priv = usb_get_serial_data(serial);
for (i = 0; i < urb->actual_length; i += F81534_RECEIVE_BLOCK_SIZE) {
phy_port_num = buf[i];
if (phy_port_num >= F81534_NUM_PORT) {
dev_err(&port->dev,
"%s: phy_port_num: %d larger than: %d\n",
__func__, phy_port_num, F81534_NUM_PORT);
continue;
}
tty_port_num = serial_priv->tty_idx[phy_port_num];
port = serial->port[tty_port_num];
if (tty_port_initialized(&port->port))
f81534_process_per_serial_block(port, &buf[i]);
}
}
static void f81534_write_usb_callback(struct urb *urb)
{
struct usb_serial_port *port = urb->context;
switch (urb->status) {
case 0:
break;
case -ENOENT:
case -ECONNRESET:
case -ESHUTDOWN:
dev_dbg(&port->dev, "%s - urb stopped: %d\n",
__func__, urb->status);
return;
case -EPIPE:
dev_err(&port->dev, "%s - urb stopped: %d\n",
__func__, urb->status);
return;
default:
dev_dbg(&port->dev, "%s - nonzero urb status: %d\n",
__func__, urb->status);
break;
}
}
static void f81534_lsr_worker(struct work_struct *work)
{
struct f81534_port_private *port_priv;
struct usb_serial_port *port;
int status;
u8 tmp;
port_priv = container_of(work, struct f81534_port_private, lsr_work);
port = port_priv->port;
status = f81534_get_port_register(port, F81534_LINE_STATUS_REG, &tmp);
if (status)
dev_warn(&port->dev, "read LSR failed: %d\n", status);
}
static int f81534_set_port_output_pin(struct usb_serial_port *port)
{
struct f81534_serial_private *serial_priv;
struct f81534_port_private *port_priv;
struct usb_serial *serial;
const struct f81534_port_out_pin *pins;
int status;
int i;
u8 value;
u8 idx;
serial = port->serial;
serial_priv = usb_get_serial_data(serial);
port_priv = usb_get_serial_port_data(port);
idx = F81534_CONF_INIT_GPIO_OFFSET + port_priv->phy_num;
value = serial_priv->conf_data[idx];
if (value >= F81534_CONF_GPIO_SHUTDOWN) {
/*
		 * Newer IC configurations put the transceiver in shutdown
		 * mode at initial power-on. We need to enable it before
		 * using the UARTs.
*/
idx = F81534_CONF_WORK_GPIO_OFFSET + port_priv->phy_num;
value = serial_priv->conf_data[idx];
if (value >= F81534_CONF_GPIO_SHUTDOWN)
value = F81534_CONF_GPIO_RS232;
}
pins = &f81534_port_out_pins[port_priv->phy_num];
for (i = 0; i < ARRAY_SIZE(pins->pin); ++i) {
status = f81534_set_mask_register(serial,
pins->pin[i].reg_addr, pins->pin[i].reg_mask,
value & BIT(i) ? pins->pin[i].reg_mask : 0);
if (status)
return status;
}
dev_dbg(&port->dev, "Output pin (M0/M1/M2): %d\n", value);
return 0;
}
static int f81534_port_probe(struct usb_serial_port *port)
{
struct f81534_serial_private *serial_priv;
struct f81534_port_private *port_priv;
int ret;
u8 value;
serial_priv = usb_get_serial_data(port->serial);
port_priv = devm_kzalloc(&port->dev, sizeof(*port_priv), GFP_KERNEL);
if (!port_priv)
return -ENOMEM;
/*
	 * TX frame errors occur at baud rates from 384 to 500 kbps, so we
	 * delay every TX data frame by 1 bit.
*/
port_priv->shadow_clk = F81534_UART_EN | F81534_CLK_TX_DELAY_1BIT;
spin_lock_init(&port_priv->msr_lock);
mutex_init(&port_priv->mcr_mutex);
mutex_init(&port_priv->lcr_mutex);
INIT_WORK(&port_priv->lsr_work, f81534_lsr_worker);
/* Assign logic-to-phy mapping */
ret = f81534_logic_to_phy_port(port->serial, port);
if (ret < 0)
return ret;
port_priv->phy_num = ret;
port_priv->port = port;
usb_set_serial_port_data(port, port_priv);
dev_dbg(&port->dev, "%s: port_number: %d, phy_num: %d\n", __func__,
port->port_number, port_priv->phy_num);
/*
	 * The F81532/534 will hang when the LSR interrupt is enabled in the
	 * IER and a data overrun occurs. So we disable the LSR interrupt in
	 * probe() and instead schedule the LSR worker to clear the LSR state
	 * when an LSR error bit is reported along with bulk-in data in
	 * f81534_process_per_serial_block().
*/
ret = f81534_set_port_register(port, F81534_INTERRUPT_ENABLE_REG,
UART_IER_RDI | UART_IER_THRI | UART_IER_MSI);
if (ret)
return ret;
value = serial_priv->conf_data[port_priv->phy_num];
switch (value & F81534_PORT_CONF_MODE_MASK) {
case F81534_PORT_CONF_RS485_INVERT:
port_priv->shadow_clk |= F81534_CLK_RS485_MODE |
F81534_CLK_RS485_INVERT;
dev_dbg(&port->dev, "RS485 invert mode\n");
break;
case F81534_PORT_CONF_RS485:
port_priv->shadow_clk |= F81534_CLK_RS485_MODE;
dev_dbg(&port->dev, "RS485 mode\n");
break;
default:
case F81534_PORT_CONF_RS232:
dev_dbg(&port->dev, "RS232 mode\n");
break;
}
return f81534_set_port_output_pin(port);
}
static void f81534_port_remove(struct usb_serial_port *port)
{
struct f81534_port_private *port_priv = usb_get_serial_port_data(port);
flush_work(&port_priv->lsr_work);
}
static int f81534_tiocmget(struct tty_struct *tty)
{
struct usb_serial_port *port = tty->driver_data;
struct f81534_port_private *port_priv = usb_get_serial_port_data(port);
int status;
int r;
u8 msr;
u8 mcr;
/* Read current MSR from device */
status = f81534_get_port_register(port, F81534_MODEM_STATUS_REG, &msr);
if (status)
return status;
mutex_lock(&port_priv->mcr_mutex);
mcr = port_priv->shadow_mcr;
mutex_unlock(&port_priv->mcr_mutex);
r = (mcr & UART_MCR_DTR ? TIOCM_DTR : 0) |
(mcr & UART_MCR_RTS ? TIOCM_RTS : 0) |
(msr & UART_MSR_CTS ? TIOCM_CTS : 0) |
(msr & UART_MSR_DCD ? TIOCM_CAR : 0) |
(msr & UART_MSR_RI ? TIOCM_RI : 0) |
(msr & UART_MSR_DSR ? TIOCM_DSR : 0);
return r;
}
static int f81534_tiocmset(struct tty_struct *tty, unsigned int set,
unsigned int clear)
{
struct usb_serial_port *port = tty->driver_data;
return f81534_update_mctrl(port, set, clear);
}
static void f81534_dtr_rts(struct usb_serial_port *port, int on)
{
if (on)
f81534_update_mctrl(port, TIOCM_DTR | TIOCM_RTS, 0);
else
f81534_update_mctrl(port, 0, TIOCM_DTR | TIOCM_RTS);
}
static int f81534_write(struct tty_struct *tty, struct usb_serial_port *port,
const u8 *buf, int count)
{
int bytes_out, status;
if (!count)
return 0;
bytes_out = kfifo_in_locked(&port->write_fifo, buf, count,
&port->lock);
status = f81534_submit_writer(port, GFP_ATOMIC);
if (status) {
dev_err(&port->dev, "%s: submit failed\n", __func__);
return status;
}
return bytes_out;
}
static bool f81534_tx_empty(struct usb_serial_port *port)
{
struct f81534_port_private *port_priv = usb_get_serial_port_data(port);
return test_bit(F81534_TX_EMPTY_BIT, &port_priv->tx_empty);
}
static int f81534_resume(struct usb_serial *serial)
{
struct f81534_serial_private *serial_priv =
usb_get_serial_data(serial);
struct usb_serial_port *port;
int error = 0;
int status;
size_t i;
/*
	 * We resubmit the port 0 bulk-in URBs if any port has been opened;
	 * they carry the received data, MSR register changes and TX_EMPTY
	 * information for all ports.
*/
mutex_lock(&serial_priv->urb_mutex);
if (serial_priv->opened_port) {
status = f81534_submit_read_urb(serial, GFP_NOIO);
if (status) {
mutex_unlock(&serial_priv->urb_mutex);
return status;
}
}
mutex_unlock(&serial_priv->urb_mutex);
for (i = 0; i < serial->num_ports; i++) {
port = serial->port[i];
if (!tty_port_initialized(&port->port))
continue;
status = f81534_submit_writer(port, GFP_NOIO);
if (status) {
dev_err(&port->dev, "%s: submit failed\n", __func__);
++error;
}
}
if (error)
return -EIO;
return 0;
}
static struct usb_serial_driver f81534_device = {
.driver = {
.owner = THIS_MODULE,
.name = "f81534",
},
.description = DRIVER_DESC,
.id_table = f81534_id_table,
.num_bulk_in = 1,
.num_bulk_out = 1,
.open = f81534_open,
.close = f81534_close,
.write = f81534_write,
.tx_empty = f81534_tx_empty,
.calc_num_ports = f81534_calc_num_ports,
.port_probe = f81534_port_probe,
.port_remove = f81534_port_remove,
.break_ctl = f81534_break_ctl,
.dtr_rts = f81534_dtr_rts,
.process_read_urb = f81534_process_read_urb,
.get_serial = f81534_get_serial_info,
.tiocmget = f81534_tiocmget,
.tiocmset = f81534_tiocmset,
.write_bulk_callback = f81534_write_usb_callback,
.set_termios = f81534_set_termios,
.resume = f81534_resume,
};
static struct usb_serial_driver *const serial_drivers[] = {
&f81534_device, NULL
};
module_usb_serial_driver(serial_drivers, f81534_id_table);
MODULE_DEVICE_TABLE(usb, f81534_id_table);
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_AUTHOR("Peter Hong <[email protected]>");
MODULE_AUTHOR("Tom Tsai <[email protected]>");
MODULE_LICENSE("GPL");
| linux-master | drivers/usb/serial/f81534.c |
// SPDX-License-Identifier: GPL-2.0
/*
USB Driver for Sierra Wireless
Copyright (C) 2006, 2007, 2008 Kevin Lloyd <[email protected]>,
Copyright (C) 2008, 2009 Elina Pasheva, Matthew Safar, Rory Filer
<[email protected]>
IMPORTANT DISCLAIMER: This driver is not commercially supported by
Sierra Wireless. Use at your own risk.
Portions based on the option driver by Matthias Urlichs <[email protected]>
  who based his on the Keyspan driver by Hugh Blemings <[email protected]>
*/
/* Uncomment to log function calls */
/* #define DEBUG */
#define DRIVER_AUTHOR "Kevin Lloyd, Elina Pasheva, Matthew Safar, Rory Filer"
#define DRIVER_DESC "USB Driver for Sierra Wireless USB modems"
#include <linux/kernel.h>
#include <linux/jiffies.h>
#include <linux/errno.h>
#include <linux/tty.h>
#include <linux/slab.h>
#include <linux/tty_flip.h>
#include <linux/module.h>
#include <linux/usb.h>
#include <linux/usb/serial.h>
#define SWIMS_USB_REQUEST_SetPower 0x00
#define SWIMS_USB_REQUEST_SetNmea 0x07
#define N_IN_URB_HM 8
#define N_OUT_URB_HM 64
#define N_IN_URB 4
#define N_OUT_URB 4
#define IN_BUFLEN 4096
#define MAX_TRANSFER (PAGE_SIZE - 512)
/* MAX_TRANSFER is chosen so that the VM is not stressed by
   allocations > PAGE_SIZE and the number of packets in a page
   is an integer. 512 is the largest possible packet on EHCI. */
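/*
 * Illustrative note (not from the original driver): with the common 4096-byte
 * PAGE_SIZE, MAX_TRANSFER is 4096 - 512 = 3584 bytes, i.e. exactly seven
 * 512-byte EHCI packets.
 */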
static bool nmea;
struct sierra_iface_list {
const u8 *nums; /* array of interface numbers */
size_t count; /* number of elements in array */
};
struct sierra_intf_private {
spinlock_t susp_lock;
unsigned int suspended:1;
int in_flight;
unsigned int open_ports;
};
static int sierra_set_power_state(struct usb_device *udev, __u16 swiState)
{
return usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
SWIMS_USB_REQUEST_SetPower, /* __u8 request */
USB_TYPE_VENDOR, /* __u8 request type */
swiState, /* __u16 value */
0, /* __u16 index */
NULL, /* void *data */
0, /* __u16 size */
USB_CTRL_SET_TIMEOUT); /* int timeout */
}
static int sierra_vsc_set_nmea(struct usb_device *udev, __u16 enable)
{
return usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
SWIMS_USB_REQUEST_SetNmea, /* __u8 request */
USB_TYPE_VENDOR, /* __u8 request type */
enable, /* __u16 value */
0x0000, /* __u16 index */
NULL, /* void *data */
0, /* __u16 size */
USB_CTRL_SET_TIMEOUT); /* int timeout */
}
static int sierra_calc_num_ports(struct usb_serial *serial,
struct usb_serial_endpoints *epds)
{
int num_ports = 0;
u8 ifnum, numendpoints;
ifnum = serial->interface->cur_altsetting->desc.bInterfaceNumber;
numendpoints = serial->interface->cur_altsetting->desc.bNumEndpoints;
/* Dummy interface present on some SKUs should be ignored */
if (ifnum == 0x99)
num_ports = 0;
else if (numendpoints <= 3)
num_ports = 1;
else
num_ports = (numendpoints-1)/2;
return num_ports;
}
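/*
 * Illustrative note (not from the original driver): assuming one interrupt
 * endpoint plus three bulk-in/bulk-out pairs (7 endpoints in total), the
 * calculation above yields (7 - 1) / 2 = 3 ports.
 */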
static bool is_listed(const u8 ifnum, const struct sierra_iface_list *list)
{
int i;
if (!list)
return false;
for (i = 0; i < list->count; i++) {
if (list->nums[i] == ifnum)
return true;
}
return false;
}
static u8 sierra_interface_num(struct usb_serial *serial)
{
return serial->interface->cur_altsetting->desc.bInterfaceNumber;
}
static int sierra_probe(struct usb_serial *serial,
const struct usb_device_id *id)
{
const struct sierra_iface_list *ignore_list;
int result = 0;
struct usb_device *udev;
u8 ifnum;
udev = serial->dev;
ifnum = sierra_interface_num(serial);
/*
* If this interface supports more than 1 alternate
* setting, select the 2nd one
*/
if (serial->interface->num_altsetting == 2) {
dev_dbg(&udev->dev, "Selecting alt setting for interface %d\n",
ifnum);
/* We know the alternate setting is 1 for the MC8785 */
usb_set_interface(udev, ifnum, 1);
}
ignore_list = (const struct sierra_iface_list *)id->driver_info;
if (is_listed(ifnum, ignore_list)) {
dev_dbg(&serial->dev->dev, "Ignoring interface #%d\n", ifnum);
return -ENODEV;
}
return result;
}
/* interfaces with higher memory requirements */
static const u8 hi_memory_typeA_ifaces[] = { 0, 2 };
static const struct sierra_iface_list typeA_interface_list = {
.nums = hi_memory_typeA_ifaces,
.count = ARRAY_SIZE(hi_memory_typeA_ifaces),
};
static const u8 hi_memory_typeB_ifaces[] = { 3, 4, 5, 6 };
static const struct sierra_iface_list typeB_interface_list = {
.nums = hi_memory_typeB_ifaces,
.count = ARRAY_SIZE(hi_memory_typeB_ifaces),
};
/* 'ignorelist' of interfaces not served by this driver */
static const u8 direct_ip_non_serial_ifaces[] = { 7, 8, 9, 10, 11, 19, 20 };
static const struct sierra_iface_list direct_ip_interface_ignore = {
.nums = direct_ip_non_serial_ifaces,
.count = ARRAY_SIZE(direct_ip_non_serial_ifaces),
};
static const struct usb_device_id id_table[] = {
{ USB_DEVICE(0x0F3D, 0x0112) }, /* Airprime/Sierra PC 5220 */
{ USB_DEVICE(0x03F0, 0x1B1D) }, /* HP ev2200 a.k.a MC5720 */
{ USB_DEVICE(0x03F0, 0x211D) }, /* HP ev2210 a.k.a MC5725 */
{ USB_DEVICE(0x03F0, 0x1E1D) }, /* HP hs2300 a.k.a MC8775 */
{ USB_DEVICE(0x1199, 0x0017) }, /* Sierra Wireless EM5625 */
{ USB_DEVICE(0x1199, 0x0018) }, /* Sierra Wireless MC5720 */
{ USB_DEVICE(0x1199, 0x0218) }, /* Sierra Wireless MC5720 */
{ USB_DEVICE(0x1199, 0x0020) }, /* Sierra Wireless MC5725 */
{ USB_DEVICE(0x1199, 0x0220) }, /* Sierra Wireless MC5725 */
{ USB_DEVICE(0x1199, 0x0022) }, /* Sierra Wireless EM5725 */
{ USB_DEVICE(0x1199, 0x0024) }, /* Sierra Wireless MC5727 */
{ USB_DEVICE(0x1199, 0x0224) }, /* Sierra Wireless MC5727 */
{ USB_DEVICE(0x1199, 0x0019) }, /* Sierra Wireless AirCard 595 */
{ USB_DEVICE(0x1199, 0x0021) }, /* Sierra Wireless AirCard 597E */
{ USB_DEVICE(0x1199, 0x0112) }, /* Sierra Wireless AirCard 580 */
{ USB_DEVICE(0x1199, 0x0120) }, /* Sierra Wireless USB Dongle 595U */
{ USB_DEVICE(0x1199, 0x0301) }, /* Sierra Wireless USB Dongle 250U */
/* Sierra Wireless C597 */
{ USB_DEVICE_AND_INTERFACE_INFO(0x1199, 0x0023, 0xFF, 0xFF, 0xFF) },
/* Sierra Wireless T598 */
{ USB_DEVICE_AND_INTERFACE_INFO(0x1199, 0x0025, 0xFF, 0xFF, 0xFF) },
{ USB_DEVICE(0x1199, 0x0026) }, /* Sierra Wireless T11 */
{ USB_DEVICE(0x1199, 0x0027) }, /* Sierra Wireless AC402 */
{ USB_DEVICE(0x1199, 0x0028) }, /* Sierra Wireless MC5728 */
{ USB_DEVICE(0x1199, 0x0029) }, /* Sierra Wireless Device */
{ USB_DEVICE(0x1199, 0x6802) }, /* Sierra Wireless MC8755 */
{ USB_DEVICE(0x1199, 0x6803) }, /* Sierra Wireless MC8765 */
{ USB_DEVICE(0x1199, 0x6804) }, /* Sierra Wireless MC8755 */
{ USB_DEVICE(0x1199, 0x6805) }, /* Sierra Wireless MC8765 */
{ USB_DEVICE(0x1199, 0x6808) }, /* Sierra Wireless MC8755 */
{ USB_DEVICE(0x1199, 0x6809) }, /* Sierra Wireless MC8765 */
{ USB_DEVICE(0x1199, 0x6812) }, /* Sierra Wireless MC8775 & AC 875U */
{ USB_DEVICE(0x1199, 0x6813) }, /* Sierra Wireless MC8775 */
{ USB_DEVICE(0x1199, 0x6815) }, /* Sierra Wireless MC8775 */
{ USB_DEVICE(0x1199, 0x6816) }, /* Sierra Wireless MC8775 */
{ USB_DEVICE(0x1199, 0x6820) }, /* Sierra Wireless AirCard 875 */
{ USB_DEVICE(0x1199, 0x6821) }, /* Sierra Wireless AirCard 875U */
{ USB_DEVICE(0x1199, 0x6822) }, /* Sierra Wireless AirCard 875E */
{ USB_DEVICE(0x1199, 0x6832) }, /* Sierra Wireless MC8780 */
{ USB_DEVICE(0x1199, 0x6833) }, /* Sierra Wireless MC8781 */
{ USB_DEVICE(0x1199, 0x6834) }, /* Sierra Wireless MC8780 */
{ USB_DEVICE(0x1199, 0x6835) }, /* Sierra Wireless MC8781 */
{ USB_DEVICE(0x1199, 0x6838) }, /* Sierra Wireless MC8780 */
{ USB_DEVICE(0x1199, 0x6839) }, /* Sierra Wireless MC8781 */
{ USB_DEVICE(0x1199, 0x683A) }, /* Sierra Wireless MC8785 */
{ USB_DEVICE(0x1199, 0x683B) }, /* Sierra Wireless MC8785 Composite */
/* Sierra Wireless MC8790, MC8791, MC8792 Composite */
{ USB_DEVICE(0x1199, 0x683C) },
{ USB_DEVICE(0x1199, 0x683D) }, /* Sierra Wireless MC8791 Composite */
/* Sierra Wireless MC8790, MC8791, MC8792 */
{ USB_DEVICE(0x1199, 0x683E) },
{ USB_DEVICE(0x1199, 0x6850) }, /* Sierra Wireless AirCard 880 */
{ USB_DEVICE(0x1199, 0x6851) }, /* Sierra Wireless AirCard 881 */
{ USB_DEVICE(0x1199, 0x6852) }, /* Sierra Wireless AirCard 880 E */
{ USB_DEVICE(0x1199, 0x6853) }, /* Sierra Wireless AirCard 881 E */
{ USB_DEVICE(0x1199, 0x6855) }, /* Sierra Wireless AirCard 880 U */
{ USB_DEVICE(0x1199, 0x6856) }, /* Sierra Wireless AirCard 881 U */
{ USB_DEVICE(0x1199, 0x6859) }, /* Sierra Wireless AirCard 885 E */
{ USB_DEVICE(0x1199, 0x685A) }, /* Sierra Wireless AirCard 885 E */
/* Sierra Wireless C885 */
{ USB_DEVICE_AND_INTERFACE_INFO(0x1199, 0x6880, 0xFF, 0xFF, 0xFF)},
/* Sierra Wireless C888, Air Card 501, USB 303, USB 304 */
{ USB_DEVICE_AND_INTERFACE_INFO(0x1199, 0x6890, 0xFF, 0xFF, 0xFF)},
/* Sierra Wireless C22/C33 */
{ USB_DEVICE_AND_INTERFACE_INFO(0x1199, 0x6891, 0xFF, 0xFF, 0xFF)},
/* Sierra Wireless HSPA Non-Composite Device */
{ USB_DEVICE_AND_INTERFACE_INFO(0x1199, 0x6892, 0xFF, 0xFF, 0xFF)},
{ USB_DEVICE(0x1199, 0x6893) }, /* Sierra Wireless Device */
/* Sierra Wireless Direct IP modems */
{ USB_DEVICE_AND_INTERFACE_INFO(0x1199, 0x68A3, 0xFF, 0xFF, 0xFF),
.driver_info = (kernel_ulong_t)&direct_ip_interface_ignore
},
{ USB_DEVICE_AND_INTERFACE_INFO(0x1199, 0x68AA, 0xFF, 0xFF, 0xFF),
.driver_info = (kernel_ulong_t)&direct_ip_interface_ignore
},
{ USB_DEVICE(0x1199, 0x68AB) }, /* Sierra Wireless AR8550 */
/* AT&T Direct IP LTE modems */
{ USB_DEVICE_AND_INTERFACE_INFO(0x0F3D, 0x68AA, 0xFF, 0xFF, 0xFF),
.driver_info = (kernel_ulong_t)&direct_ip_interface_ignore
},
/* Airprime/Sierra Wireless Direct IP modems */
{ USB_DEVICE_AND_INTERFACE_INFO(0x0F3D, 0x68A3, 0xFF, 0xFF, 0xFF),
.driver_info = (kernel_ulong_t)&direct_ip_interface_ignore
},
{ }
};
MODULE_DEVICE_TABLE(usb, id_table);
struct sierra_port_private {
spinlock_t lock; /* lock the structure */
int outstanding_urbs; /* number of out urbs in flight */
struct usb_anchor active;
struct usb_anchor delayed;
int num_out_urbs;
int num_in_urbs;
/* Input endpoints and buffers for this port */
struct urb *in_urbs[N_IN_URB_HM];
/* Settings for the port */
int rts_state; /* Handshaking pins (outputs) */
int dtr_state;
int cts_state; /* Handshaking pins (inputs) */
int dsr_state;
int dcd_state;
int ri_state;
};
static int sierra_send_setup(struct usb_serial_port *port)
{
struct usb_serial *serial = port->serial;
struct sierra_port_private *portdata;
__u16 interface = 0;
int val = 0;
int do_send = 0;
int retval;
portdata = usb_get_serial_port_data(port);
if (portdata->dtr_state)
val |= 0x01;
if (portdata->rts_state)
val |= 0x02;
/* If composite device then properly report interface */
if (serial->num_ports == 1) {
interface = sierra_interface_num(serial);
/* Control message is sent only to interfaces with
* interrupt_in endpoints
*/
if (port->interrupt_in_urb) {
/* send control message */
do_send = 1;
}
}
/* Otherwise we need to do non-composite mapping */
else {
if (port->bulk_out_endpointAddress == 2)
interface = 0;
else if (port->bulk_out_endpointAddress == 4)
interface = 1;
else if (port->bulk_out_endpointAddress == 5)
interface = 2;
do_send = 1;
}
if (!do_send)
return 0;
retval = usb_autopm_get_interface(serial->interface);
if (retval < 0)
return retval;
retval = usb_control_msg(serial->dev, usb_sndctrlpipe(serial->dev, 0),
0x22, 0x21, val, interface, NULL, 0, USB_CTRL_SET_TIMEOUT);
usb_autopm_put_interface(serial->interface);
return retval;
}
static int sierra_tiocmget(struct tty_struct *tty)
{
struct usb_serial_port *port = tty->driver_data;
unsigned int value;
struct sierra_port_private *portdata;
portdata = usb_get_serial_port_data(port);
value = ((portdata->rts_state) ? TIOCM_RTS : 0) |
((portdata->dtr_state) ? TIOCM_DTR : 0) |
((portdata->cts_state) ? TIOCM_CTS : 0) |
((portdata->dsr_state) ? TIOCM_DSR : 0) |
((portdata->dcd_state) ? TIOCM_CAR : 0) |
((portdata->ri_state) ? TIOCM_RNG : 0);
return value;
}
static int sierra_tiocmset(struct tty_struct *tty,
unsigned int set, unsigned int clear)
{
struct usb_serial_port *port = tty->driver_data;
struct sierra_port_private *portdata;
portdata = usb_get_serial_port_data(port);
if (set & TIOCM_RTS)
portdata->rts_state = 1;
if (set & TIOCM_DTR)
portdata->dtr_state = 1;
if (clear & TIOCM_RTS)
portdata->rts_state = 0;
if (clear & TIOCM_DTR)
portdata->dtr_state = 0;
return sierra_send_setup(port);
}
static void sierra_release_urb(struct urb *urb)
{
if (urb) {
kfree(urb->transfer_buffer);
usb_free_urb(urb);
}
}
static void sierra_outdat_callback(struct urb *urb)
{
struct usb_serial_port *port = urb->context;
struct sierra_port_private *portdata = usb_get_serial_port_data(port);
struct sierra_intf_private *intfdata;
int status = urb->status;
unsigned long flags;
intfdata = usb_get_serial_data(port->serial);
/* free up the transfer buffer, as usb_free_urb() does not do this */
kfree(urb->transfer_buffer);
usb_autopm_put_interface_async(port->serial->interface);
if (status)
dev_dbg(&port->dev, "%s - nonzero write bulk status "
"received: %d\n", __func__, status);
spin_lock_irqsave(&portdata->lock, flags);
--portdata->outstanding_urbs;
spin_unlock_irqrestore(&portdata->lock, flags);
spin_lock_irqsave(&intfdata->susp_lock, flags);
--intfdata->in_flight;
spin_unlock_irqrestore(&intfdata->susp_lock, flags);
usb_serial_port_softint(port);
}
/* Write */
static int sierra_write(struct tty_struct *tty, struct usb_serial_port *port,
const unsigned char *buf, int count)
{
struct sierra_port_private *portdata;
struct sierra_intf_private *intfdata;
struct usb_serial *serial = port->serial;
unsigned long flags;
unsigned char *buffer;
struct urb *urb;
size_t writesize = min((size_t)count, (size_t)MAX_TRANSFER);
int retval = 0;
/* verify that we actually have some data to write */
if (count == 0)
return 0;
portdata = usb_get_serial_port_data(port);
intfdata = usb_get_serial_data(serial);
dev_dbg(&port->dev, "%s: write (%zd bytes)\n", __func__, writesize);
spin_lock_irqsave(&portdata->lock, flags);
dev_dbg(&port->dev, "%s - outstanding_urbs: %d\n", __func__,
portdata->outstanding_urbs);
if (portdata->outstanding_urbs > portdata->num_out_urbs) {
spin_unlock_irqrestore(&portdata->lock, flags);
dev_dbg(&port->dev, "%s - write limit hit\n", __func__);
return 0;
}
portdata->outstanding_urbs++;
dev_dbg(&port->dev, "%s - 1, outstanding_urbs: %d\n", __func__,
portdata->outstanding_urbs);
spin_unlock_irqrestore(&portdata->lock, flags);
retval = usb_autopm_get_interface_async(serial->interface);
if (retval < 0) {
spin_lock_irqsave(&portdata->lock, flags);
portdata->outstanding_urbs--;
spin_unlock_irqrestore(&portdata->lock, flags);
goto error_simple;
}
buffer = kmemdup(buf, writesize, GFP_ATOMIC);
if (!buffer) {
retval = -ENOMEM;
goto error_no_buffer;
}
urb = usb_alloc_urb(0, GFP_ATOMIC);
if (!urb) {
retval = -ENOMEM;
goto error_no_urb;
}
usb_serial_debug_data(&port->dev, __func__, writesize, buffer);
usb_fill_bulk_urb(urb, serial->dev,
usb_sndbulkpipe(serial->dev,
port->bulk_out_endpointAddress),
buffer, writesize, sierra_outdat_callback, port);
/* Handle the need to send a zero length packet */
urb->transfer_flags |= URB_ZERO_PACKET;
spin_lock_irqsave(&intfdata->susp_lock, flags);
if (intfdata->suspended) {
usb_anchor_urb(urb, &portdata->delayed);
spin_unlock_irqrestore(&intfdata->susp_lock, flags);
goto skip_power;
} else {
usb_anchor_urb(urb, &portdata->active);
}
/* send it down the pipe */
retval = usb_submit_urb(urb, GFP_ATOMIC);
if (retval) {
usb_unanchor_urb(urb);
spin_unlock_irqrestore(&intfdata->susp_lock, flags);
dev_err(&port->dev, "%s - usb_submit_urb(write bulk) failed "
"with status = %d\n", __func__, retval);
goto error;
} else {
intfdata->in_flight++;
spin_unlock_irqrestore(&intfdata->susp_lock, flags);
}
skip_power:
/* we are done with this urb, so let the host driver
* really free it when it is finished with it */
usb_free_urb(urb);
return writesize;
error:
usb_free_urb(urb);
error_no_urb:
kfree(buffer);
error_no_buffer:
spin_lock_irqsave(&portdata->lock, flags);
--portdata->outstanding_urbs;
dev_dbg(&port->dev, "%s - 2. outstanding_urbs: %d\n", __func__,
portdata->outstanding_urbs);
spin_unlock_irqrestore(&portdata->lock, flags);
usb_autopm_put_interface_async(serial->interface);
error_simple:
return retval;
}
static void sierra_indat_callback(struct urb *urb)
{
int err;
int endpoint;
struct usb_serial_port *port;
unsigned char *data = urb->transfer_buffer;
int status = urb->status;
endpoint = usb_pipeendpoint(urb->pipe);
port = urb->context;
if (status) {
dev_dbg(&port->dev, "%s: nonzero status: %d on"
" endpoint %02x\n", __func__, status, endpoint);
} else {
if (urb->actual_length) {
tty_insert_flip_string(&port->port, data,
urb->actual_length);
tty_flip_buffer_push(&port->port);
usb_serial_debug_data(&port->dev, __func__,
urb->actual_length, data);
} else {
dev_dbg(&port->dev, "%s: empty read urb"
" received\n", __func__);
}
}
/* Resubmit urb so we continue receiving */
if (status != -ESHUTDOWN && status != -EPERM) {
usb_mark_last_busy(port->serial->dev);
err = usb_submit_urb(urb, GFP_ATOMIC);
if (err && err != -EPERM)
dev_err(&port->dev, "resubmit read urb failed."
"(%d)\n", err);
}
}
static void sierra_instat_callback(struct urb *urb)
{
int err;
int status = urb->status;
struct usb_serial_port *port = urb->context;
struct sierra_port_private *portdata = usb_get_serial_port_data(port);
struct usb_serial *serial = port->serial;
dev_dbg(&port->dev, "%s: urb %p port %p has data %p\n", __func__,
urb, port, portdata);
if (status == 0) {
struct usb_ctrlrequest *req_pkt = urb->transfer_buffer;
if (!req_pkt) {
dev_dbg(&port->dev, "%s: NULL req_pkt\n",
__func__);
return;
}
if ((req_pkt->bRequestType == 0xA1) &&
(req_pkt->bRequest == 0x20)) {
int old_dcd_state;
unsigned char signals = *((unsigned char *)
urb->transfer_buffer +
sizeof(struct usb_ctrlrequest));
dev_dbg(&port->dev, "%s: signal x%x\n", __func__,
signals);
old_dcd_state = portdata->dcd_state;
portdata->cts_state = 1;
portdata->dcd_state = ((signals & 0x01) ? 1 : 0);
portdata->dsr_state = ((signals & 0x02) ? 1 : 0);
portdata->ri_state = ((signals & 0x08) ? 1 : 0);
if (old_dcd_state && !portdata->dcd_state)
tty_port_tty_hangup(&port->port, true);
} else {
dev_dbg(&port->dev, "%s: type %x req %x\n",
__func__, req_pkt->bRequestType,
req_pkt->bRequest);
}
} else
dev_dbg(&port->dev, "%s: error %d\n", __func__, status);
/* Resubmit urb so we continue receiving IRQ data */
if (status != -ESHUTDOWN && status != -ENOENT) {
usb_mark_last_busy(serial->dev);
err = usb_submit_urb(urb, GFP_ATOMIC);
if (err && err != -EPERM)
dev_err(&port->dev, "%s: resubmit intr urb "
"failed. (%d)\n", __func__, err);
}
}
static unsigned int sierra_write_room(struct tty_struct *tty)
{
struct usb_serial_port *port = tty->driver_data;
struct sierra_port_private *portdata = usb_get_serial_port_data(port);
unsigned long flags;
/* try to give a good number back based on if we have any free urbs at
* this point in time */
spin_lock_irqsave(&portdata->lock, flags);
if (portdata->outstanding_urbs > (portdata->num_out_urbs * 2) / 3) {
spin_unlock_irqrestore(&portdata->lock, flags);
dev_dbg(&port->dev, "%s - write limit hit\n", __func__);
return 0;
}
spin_unlock_irqrestore(&portdata->lock, flags);
return 2048;
}
static unsigned int sierra_chars_in_buffer(struct tty_struct *tty)
{
struct usb_serial_port *port = tty->driver_data;
struct sierra_port_private *portdata = usb_get_serial_port_data(port);
unsigned long flags;
unsigned int chars;
/* NOTE: This overcounts somewhat. */
spin_lock_irqsave(&portdata->lock, flags);
chars = portdata->outstanding_urbs * MAX_TRANSFER;
spin_unlock_irqrestore(&portdata->lock, flags);
dev_dbg(&port->dev, "%s - %u\n", __func__, chars);
return chars;
}
static void sierra_stop_rx_urbs(struct usb_serial_port *port)
{
int i;
struct sierra_port_private *portdata = usb_get_serial_port_data(port);
for (i = 0; i < portdata->num_in_urbs; i++)
usb_kill_urb(portdata->in_urbs[i]);
usb_kill_urb(port->interrupt_in_urb);
}
static int sierra_submit_rx_urbs(struct usb_serial_port *port, gfp_t mem_flags)
{
int ok_cnt;
int err = -EINVAL;
int i;
struct urb *urb;
struct sierra_port_private *portdata = usb_get_serial_port_data(port);
ok_cnt = 0;
for (i = 0; i < portdata->num_in_urbs; i++) {
urb = portdata->in_urbs[i];
if (!urb)
continue;
err = usb_submit_urb(urb, mem_flags);
if (err) {
dev_err(&port->dev, "%s: submit urb failed: %d\n",
__func__, err);
} else {
ok_cnt++;
}
}
if (ok_cnt && port->interrupt_in_urb) {
err = usb_submit_urb(port->interrupt_in_urb, mem_flags);
if (err) {
dev_err(&port->dev, "%s: submit intr urb failed: %d\n",
__func__, err);
}
}
if (ok_cnt > 0) /* at least one rx urb submitted */
return 0;
else
return err;
}
static struct urb *sierra_setup_urb(struct usb_serial *serial, int endpoint,
int dir, void *ctx, int len,
gfp_t mem_flags,
usb_complete_t callback)
{
struct urb *urb;
u8 *buf;
urb = usb_alloc_urb(0, mem_flags);
if (!urb)
return NULL;
buf = kmalloc(len, mem_flags);
if (buf) {
/* Fill URB using supplied data */
usb_fill_bulk_urb(urb, serial->dev,
usb_sndbulkpipe(serial->dev, endpoint) | dir,
buf, len, callback, ctx);
dev_dbg(&serial->dev->dev, "%s %c u : %p d:%p\n", __func__,
dir == USB_DIR_IN ? 'i' : 'o', urb, buf);
} else {
sierra_release_urb(urb);
urb = NULL;
}
return urb;
}
static void sierra_close(struct usb_serial_port *port)
{
int i;
struct usb_serial *serial = port->serial;
struct sierra_port_private *portdata;
struct sierra_intf_private *intfdata = usb_get_serial_data(serial);
struct urb *urb;
portdata = usb_get_serial_port_data(port);
/*
* Need to take susp_lock to make sure port is not already being
* resumed, but no need to hold it due to the tty-port initialized
* flag.
*/
spin_lock_irq(&intfdata->susp_lock);
if (--intfdata->open_ports == 0)
serial->interface->needs_remote_wakeup = 0;
spin_unlock_irq(&intfdata->susp_lock);
for (;;) {
urb = usb_get_from_anchor(&portdata->delayed);
if (!urb)
break;
kfree(urb->transfer_buffer);
usb_free_urb(urb);
usb_autopm_put_interface_async(serial->interface);
spin_lock_irq(&portdata->lock);
portdata->outstanding_urbs--;
spin_unlock_irq(&portdata->lock);
}
sierra_stop_rx_urbs(port);
usb_kill_anchored_urbs(&portdata->active);
for (i = 0; i < portdata->num_in_urbs; i++) {
sierra_release_urb(portdata->in_urbs[i]);
portdata->in_urbs[i] = NULL;
}
usb_autopm_get_interface_no_resume(serial->interface);
}
static int sierra_open(struct tty_struct *tty, struct usb_serial_port *port)
{
struct sierra_port_private *portdata;
struct usb_serial *serial = port->serial;
struct sierra_intf_private *intfdata = usb_get_serial_data(serial);
int i;
int err;
int endpoint;
struct urb *urb;
portdata = usb_get_serial_port_data(port);
endpoint = port->bulk_in_endpointAddress;
for (i = 0; i < portdata->num_in_urbs; i++) {
urb = sierra_setup_urb(serial, endpoint, USB_DIR_IN, port,
IN_BUFLEN, GFP_KERNEL,
sierra_indat_callback);
portdata->in_urbs[i] = urb;
}
/* clear halt condition */
usb_clear_halt(serial->dev,
usb_sndbulkpipe(serial->dev, endpoint) | USB_DIR_IN);
err = sierra_submit_rx_urbs(port, GFP_KERNEL);
if (err)
goto err_submit;
spin_lock_irq(&intfdata->susp_lock);
if (++intfdata->open_ports == 1)
serial->interface->needs_remote_wakeup = 1;
spin_unlock_irq(&intfdata->susp_lock);
usb_autopm_put_interface(serial->interface);
return 0;
err_submit:
sierra_stop_rx_urbs(port);
for (i = 0; i < portdata->num_in_urbs; i++) {
sierra_release_urb(portdata->in_urbs[i]);
portdata->in_urbs[i] = NULL;
}
return err;
}
static void sierra_dtr_rts(struct usb_serial_port *port, int on)
{
struct sierra_port_private *portdata;
portdata = usb_get_serial_port_data(port);
portdata->rts_state = on;
portdata->dtr_state = on;
sierra_send_setup(port);
}
static int sierra_startup(struct usb_serial *serial)
{
struct sierra_intf_private *intfdata;
intfdata = kzalloc(sizeof(*intfdata), GFP_KERNEL);
if (!intfdata)
return -ENOMEM;
spin_lock_init(&intfdata->susp_lock);
usb_set_serial_data(serial, intfdata);
/* Set Device mode to D0 */
sierra_set_power_state(serial->dev, 0x0000);
/* Check NMEA and set */
if (nmea)
sierra_vsc_set_nmea(serial->dev, 1);
return 0;
}
static void sierra_release(struct usb_serial *serial)
{
struct sierra_intf_private *intfdata;
intfdata = usb_get_serial_data(serial);
kfree(intfdata);
}
static int sierra_port_probe(struct usb_serial_port *port)
{
struct usb_serial *serial = port->serial;
struct sierra_port_private *portdata;
const struct sierra_iface_list *himemory_list;
u8 ifnum;
portdata = kzalloc(sizeof(*portdata), GFP_KERNEL);
if (!portdata)
return -ENOMEM;
spin_lock_init(&portdata->lock);
init_usb_anchor(&portdata->active);
init_usb_anchor(&portdata->delayed);
/* Assume low memory requirements */
portdata->num_out_urbs = N_OUT_URB;
portdata->num_in_urbs = N_IN_URB;
/* Determine actual memory requirements */
if (serial->num_ports == 1) {
/* Get interface number for composite device */
ifnum = sierra_interface_num(serial);
himemory_list = &typeB_interface_list;
} else {
/* This is really the usb-serial port number of the interface
* rather than the interface number.
*/
ifnum = port->port_number;
himemory_list = &typeA_interface_list;
}
if (is_listed(ifnum, himemory_list)) {
portdata->num_out_urbs = N_OUT_URB_HM;
portdata->num_in_urbs = N_IN_URB_HM;
}
dev_dbg(&port->dev,
"Memory usage (urbs) interface #%d, in=%d, out=%d\n",
ifnum, portdata->num_in_urbs, portdata->num_out_urbs);
usb_set_serial_port_data(port, portdata);
return 0;
}
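/*
* Worked example from the constants above: each in_urb is given an
* IN_BUFLEN (4096-byte) buffer when the port is opened, so a
* high-memory port reserves N_IN_URB_HM * 4096 = 32 KiB of receive
* buffering versus 4 * 4096 = 16 KiB for a regular port.
*/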
static void sierra_port_remove(struct usb_serial_port *port)
{
struct sierra_port_private *portdata;
portdata = usb_get_serial_port_data(port);
usb_set_serial_port_data(port, NULL);
kfree(portdata);
}
#ifdef CONFIG_PM
static void stop_read_write_urbs(struct usb_serial *serial)
{
int i;
struct usb_serial_port *port;
struct sierra_port_private *portdata;
/* Stop reading/writing urbs */
for (i = 0; i < serial->num_ports; ++i) {
port = serial->port[i];
portdata = usb_get_serial_port_data(port);
if (!portdata)
continue;
sierra_stop_rx_urbs(port);
usb_kill_anchored_urbs(&portdata->active);
}
}
static int sierra_suspend(struct usb_serial *serial, pm_message_t message)
{
struct sierra_intf_private *intfdata = usb_get_serial_data(serial);
spin_lock_irq(&intfdata->susp_lock);
if (PMSG_IS_AUTO(message)) {
if (intfdata->in_flight) {
spin_unlock_irq(&intfdata->susp_lock);
return -EBUSY;
}
}
intfdata->suspended = 1;
spin_unlock_irq(&intfdata->susp_lock);
stop_read_write_urbs(serial);
return 0;
}
/* Caller must hold susp_lock. */
static int sierra_submit_delayed_urbs(struct usb_serial_port *port)
{
struct sierra_port_private *portdata = usb_get_serial_port_data(port);
struct sierra_intf_private *intfdata;
struct urb *urb;
int ec = 0;
int err;
intfdata = usb_get_serial_data(port->serial);
for (;;) {
urb = usb_get_from_anchor(&portdata->delayed);
if (!urb)
break;
usb_anchor_urb(urb, &portdata->active);
intfdata->in_flight++;
err = usb_submit_urb(urb, GFP_ATOMIC);
if (err) {
dev_err(&port->dev, "%s - submit urb failed: %d",
__func__, err);
ec++;
intfdata->in_flight--;
usb_unanchor_urb(urb);
kfree(urb->transfer_buffer);
usb_free_urb(urb);
spin_lock(&portdata->lock);
portdata->outstanding_urbs--;
spin_unlock(&portdata->lock);
}
}
if (ec)
return -EIO;
return 0;
}
static int sierra_resume(struct usb_serial *serial)
{
struct usb_serial_port *port;
struct sierra_intf_private *intfdata = usb_get_serial_data(serial);
int ec = 0;
int i, err;
spin_lock_irq(&intfdata->susp_lock);
for (i = 0; i < serial->num_ports; i++) {
port = serial->port[i];
if (!tty_port_initialized(&port->port))
continue;
err = sierra_submit_delayed_urbs(port);
if (err)
ec++;
err = sierra_submit_rx_urbs(port, GFP_ATOMIC);
if (err)
ec++;
}
intfdata->suspended = 0;
spin_unlock_irq(&intfdata->susp_lock);
return ec ? -EIO : 0;
}
#else
#define sierra_suspend NULL
#define sierra_resume NULL
#endif
static struct usb_serial_driver sierra_device = {
.driver = {
.owner = THIS_MODULE,
.name = "sierra",
},
.description = "Sierra USB modem",
.id_table = id_table,
.calc_num_ports = sierra_calc_num_ports,
.probe = sierra_probe,
.open = sierra_open,
.close = sierra_close,
.dtr_rts = sierra_dtr_rts,
.write = sierra_write,
.write_room = sierra_write_room,
.chars_in_buffer = sierra_chars_in_buffer,
.tiocmget = sierra_tiocmget,
.tiocmset = sierra_tiocmset,
.attach = sierra_startup,
.release = sierra_release,
.port_probe = sierra_port_probe,
.port_remove = sierra_port_remove,
.suspend = sierra_suspend,
.resume = sierra_resume,
.read_int_callback = sierra_instat_callback,
};
static struct usb_serial_driver * const serial_drivers[] = {
&sierra_device, NULL
};
module_usb_serial_driver(serial_drivers, id_table);
MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL v2");
module_param(nmea, bool, 0644);
MODULE_PARM_DESC(nmea, "NMEA streaming");
| linux-master | drivers/usb/serial/sierra.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* isdnhdlc.c -- General purpose ISDN HDLC decoder.
*
* Copyright (C)
* 2009 Karsten Keil <[email protected]>
* 2002 Wolfgang Mües <[email protected]>
* 2001 Frode Isaksen <[email protected]>
* 2001 Kai Germaschewski <[email protected]>
*/
#include <linux/module.h>
#include <linux/init.h>
#include <linux/crc-ccitt.h>
#include <linux/bitrev.h>
#include "isdnhdlc.h"
/*-------------------------------------------------------------------*/
MODULE_AUTHOR("Wolfgang Mües <[email protected]>, "
"Frode Isaksen <[email protected]>, "
"Kai Germaschewski <[email protected]>");
MODULE_DESCRIPTION("General purpose ISDN HDLC decoder");
MODULE_LICENSE("GPL");
/*-------------------------------------------------------------------*/
enum {
HDLC_FAST_IDLE, HDLC_GET_FLAG_B0, HDLC_GETFLAG_B1A6, HDLC_GETFLAG_B7,
HDLC_GET_DATA, HDLC_FAST_FLAG
};
enum {
HDLC_SEND_DATA, HDLC_SEND_CRC1, HDLC_SEND_FAST_FLAG,
HDLC_SEND_FIRST_FLAG, HDLC_SEND_CRC2, HDLC_SEND_CLOSING_FLAG,
HDLC_SEND_IDLE1, HDLC_SEND_FAST_IDLE, HDLC_SENDFLAG_B0,
HDLC_SENDFLAG_B1A6, HDLC_SENDFLAG_B7, STOPPED, HDLC_SENDFLAG_ONE
};
void isdnhdlc_rcv_init(struct isdnhdlc_vars *hdlc, u32 features)
{
memset(hdlc, 0, sizeof(struct isdnhdlc_vars));
hdlc->state = HDLC_GET_DATA;
if (features & HDLC_56KBIT)
hdlc->do_adapt56 = 1;
if (features & HDLC_BITREVERSE)
hdlc->do_bitreverse = 1;
}
EXPORT_SYMBOL(isdnhdlc_rcv_init);
void isdnhdlc_out_init(struct isdnhdlc_vars *hdlc, u32 features)
{
memset(hdlc, 0, sizeof(struct isdnhdlc_vars));
if (features & HDLC_DCHANNEL) {
hdlc->dchannel = 1;
hdlc->state = HDLC_SEND_FIRST_FLAG;
} else {
hdlc->dchannel = 0;
hdlc->state = HDLC_SEND_FAST_FLAG;
hdlc->ffvalue = 0x7e;
}
hdlc->cbin = 0x7e;
if (features & HDLC_56KBIT) {
hdlc->do_adapt56 = 1;
hdlc->state = HDLC_SENDFLAG_B0;
} else
hdlc->data_bits = 8;
if (features & HDLC_BITREVERSE)
hdlc->do_bitreverse = 1;
}
EXPORT_SYMBOL(isdnhdlc_out_init);
static int
check_frame(struct isdnhdlc_vars *hdlc)
{
int status;
if (hdlc->dstpos < 2) /* too small - framing error */
status = -HDLC_FRAMING_ERROR;
else if (hdlc->crc != 0xf0b8) /* crc error */
status = -HDLC_CRC_ERROR;
else {
/* remove CRC */
hdlc->dstpos -= 2;
/* good frame */
status = hdlc->dstpos;
}
return status;
}
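/*
* Background note (CRC-CCITT convention, not stated in this file):
* 0xf0b8 is the well-known CRC-CCITT residue that the running CRC
* settles on once a frame's two trailing FCS bytes have themselves
* been shifted through it, so check_frame() can validate the FCS
* without recomputing or comparing it explicitly.
*/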
/*
isdnhdlc_decode - decodes HDLC frames from a transparent bit stream.
The source buffer is scanned for valid HDLC frames looking for
flags (01111110) to indicate the start of a frame. If the start of
the frame is found, the bit stuffing is removed (0 after 5 1's).
When a new flag is found, the complete frame has been received
and the CRC is checked.
If a valid frame is found, the function returns the frame length
excluding the CRC with the bit HDLC_END_OF_FRAME set.
If the beginning of a valid frame is found, the function returns
the length.
If a framing error is found (too many 1s and not a flag) the function
returns the length with the bit HDLC_FRAMING_ERROR set.
If a CRC error is found the function returns the length with the
bit HDLC_CRC_ERROR set.
If the frame length exceeds the destination buffer size, the function
returns the length with the bit HDLC_LENGTH_ERROR set.
src - source buffer
slen - source buffer length
count - number of bytes removed (decoded) from the source buffer
dst - destination buffer
dsize - destination buffer size
returns - number of decoded bytes in the destination buffer and status
flag.
*/
int isdnhdlc_decode(struct isdnhdlc_vars *hdlc, const u8 *src, int slen,
int *count, u8 *dst, int dsize)
{
int status = 0;
static const unsigned char fast_flag[] = {
0x00, 0x00, 0x00, 0x20, 0x30, 0x38, 0x3c, 0x3e, 0x3f
};
static const unsigned char fast_flag_value[] = {
0x00, 0x7e, 0xfc, 0xf9, 0xf3, 0xe7, 0xcf, 0x9f, 0x3f
};
static const unsigned char fast_abort[] = {
0x00, 0x00, 0x80, 0xc0, 0xe0, 0xf0, 0xf8, 0xfc, 0xfe, 0xff
};
#define handle_fast_flag(h) \
do { \
if (h->cbin == fast_flag[h->bit_shift]) { \
h->ffvalue = fast_flag_value[h->bit_shift]; \
h->state = HDLC_FAST_FLAG; \
h->ffbit_shift = h->bit_shift; \
h->bit_shift = 1; \
} else { \
h->state = HDLC_GET_DATA; \
h->data_received = 0; \
} \
} while (0)
#define handle_abort(h) \
do { \
h->shift_reg = fast_abort[h->ffbit_shift - 1]; \
h->hdlc_bits1 = h->ffbit_shift - 2; \
if (h->hdlc_bits1 < 0) \
h->hdlc_bits1 = 0; \
h->data_bits = h->ffbit_shift - 1; \
h->state = HDLC_GET_DATA; \
h->data_received = 0; \
} while (0)
*count = slen;
while (slen > 0) {
if (hdlc->bit_shift == 0) {
/* the code is for bitreverse streams */
if (hdlc->do_bitreverse == 0)
hdlc->cbin = bitrev8(*src++);
else
hdlc->cbin = *src++;
slen--;
hdlc->bit_shift = 8;
if (hdlc->do_adapt56)
hdlc->bit_shift--;
}
switch (hdlc->state) {
case STOPPED:
return 0;
case HDLC_FAST_IDLE:
if (hdlc->cbin == 0xff) {
hdlc->bit_shift = 0;
break;
}
hdlc->state = HDLC_GET_FLAG_B0;
hdlc->hdlc_bits1 = 0;
hdlc->bit_shift = 8;
break;
case HDLC_GET_FLAG_B0:
if (!(hdlc->cbin & 0x80)) {
hdlc->state = HDLC_GETFLAG_B1A6;
hdlc->hdlc_bits1 = 0;
} else {
if ((!hdlc->do_adapt56) &&
(++hdlc->hdlc_bits1 >= 8) &&
(hdlc->bit_shift == 1))
hdlc->state = HDLC_FAST_IDLE;
}
hdlc->cbin <<= 1;
hdlc->bit_shift--;
break;
case HDLC_GETFLAG_B1A6:
if (hdlc->cbin & 0x80) {
hdlc->hdlc_bits1++;
if (hdlc->hdlc_bits1 == 6)
hdlc->state = HDLC_GETFLAG_B7;
} else
hdlc->hdlc_bits1 = 0;
hdlc->cbin <<= 1;
hdlc->bit_shift--;
break;
case HDLC_GETFLAG_B7:
if (hdlc->cbin & 0x80) {
hdlc->state = HDLC_GET_FLAG_B0;
} else {
hdlc->state = HDLC_GET_DATA;
hdlc->crc = 0xffff;
hdlc->shift_reg = 0;
hdlc->hdlc_bits1 = 0;
hdlc->data_bits = 0;
hdlc->data_received = 0;
}
hdlc->cbin <<= 1;
hdlc->bit_shift--;
break;
case HDLC_GET_DATA:
if (hdlc->cbin & 0x80) {
hdlc->hdlc_bits1++;
switch (hdlc->hdlc_bits1) {
case 6:
break;
case 7:
if (hdlc->data_received)
/* bad frame */
status = -HDLC_FRAMING_ERROR;
if (!hdlc->do_adapt56) {
if (hdlc->cbin == fast_abort
[hdlc->bit_shift + 1]) {
hdlc->state =
HDLC_FAST_IDLE;
hdlc->bit_shift = 1;
break;
}
} else
hdlc->state = HDLC_GET_FLAG_B0;
break;
default:
hdlc->shift_reg >>= 1;
hdlc->shift_reg |= 0x80;
hdlc->data_bits++;
break;
}
} else {
switch (hdlc->hdlc_bits1) {
case 5:
break;
case 6:
if (hdlc->data_received)
status = check_frame(hdlc);
hdlc->crc = 0xffff;
hdlc->shift_reg = 0;
hdlc->data_bits = 0;
if (!hdlc->do_adapt56)
handle_fast_flag(hdlc);
else {
hdlc->state = HDLC_GET_DATA;
hdlc->data_received = 0;
}
break;
default:
hdlc->shift_reg >>= 1;
hdlc->data_bits++;
break;
}
hdlc->hdlc_bits1 = 0;
}
if (status) {
hdlc->dstpos = 0;
*count -= slen;
hdlc->cbin <<= 1;
hdlc->bit_shift--;
return status;
}
if (hdlc->data_bits == 8) {
hdlc->data_bits = 0;
hdlc->data_received = 1;
hdlc->crc = crc_ccitt_byte(hdlc->crc,
hdlc->shift_reg);
/* good byte received */
if (hdlc->dstpos < dsize)
dst[hdlc->dstpos++] = hdlc->shift_reg;
else {
/* frame too long */
status = -HDLC_LENGTH_ERROR;
hdlc->dstpos = 0;
}
}
hdlc->cbin <<= 1;
hdlc->bit_shift--;
break;
case HDLC_FAST_FLAG:
if (hdlc->cbin == hdlc->ffvalue) {
hdlc->bit_shift = 0;
break;
} else {
if (hdlc->cbin == 0xff) {
hdlc->state = HDLC_FAST_IDLE;
hdlc->bit_shift = 0;
} else if (hdlc->ffbit_shift == 8) {
hdlc->state = HDLC_GETFLAG_B7;
break;
} else
handle_abort(hdlc);
}
break;
default:
break;
}
}
*count -= slen;
return 0;
}
EXPORT_SYMBOL(isdnhdlc_decode);
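/*
* Minimal receive-side usage sketch (illustrative only; rx_buf,
* rx_len, deliver_frame() and log_error() are caller-supplied
* placeholders, and a real caller keeps the isdnhdlc_vars state
* across buffers instead of re-initialising it each time). A
* positive return is the length of a complete, CRC-checked frame in
* frame[]; a negative return is one of the -HDLC_*_ERROR codes.
*
*	struct isdnhdlc_vars rcv;
*	u8 frame[2048];
*	int used, len;
*
*	isdnhdlc_rcv_init(&rcv, 0);
*	len = isdnhdlc_decode(&rcv, rx_buf, rx_len, &used,
*			      frame, sizeof(frame));
*	if (len > 0)
*		deliver_frame(frame, len);
*	else if (len < 0)
*		log_error(-len);
*/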
/*
isdnhdlc_encode - encodes HDLC frames to a transparent bit stream.
The bit stream starts with a beginning flag (01111110). After
that each byte is added to the bit stream with bit stuffing added
(0 after 5 1's).
When the last byte has been removed from the source buffer, the
CRC (2 bytes) is added and the frame terminates with the ending flag.
For the dchannel, the idle character (all 1's) is also added at the end.
If this function is called with empty source buffer (slen=0), flags or
idle character will be generated.
src - source buffer
slen - source buffer length
count - number of bytes removed (encoded) from source buffer
dst - destination buffer
dsize - destination buffer size
returns - number of encoded bytes in the destination buffer
*/
int isdnhdlc_encode(struct isdnhdlc_vars *hdlc, const u8 *src, u16 slen,
int *count, u8 *dst, int dsize)
{
static const unsigned char xfast_flag_value[] = {
0x7e, 0x3f, 0x9f, 0xcf, 0xe7, 0xf3, 0xf9, 0xfc, 0x7e
};
int len = 0;
*count = slen;
/* special handling for one byte frames */
if ((slen == 1) && (hdlc->state == HDLC_SEND_FAST_FLAG))
hdlc->state = HDLC_SENDFLAG_ONE;
while (dsize > 0) {
if (hdlc->bit_shift == 0) {
if (slen && !hdlc->do_closing) {
hdlc->shift_reg = *src++;
slen--;
if (slen == 0)
/* closing sequence, CRC + flag(s) */
hdlc->do_closing = 1;
hdlc->bit_shift = 8;
} else {
if (hdlc->state == HDLC_SEND_DATA) {
if (hdlc->data_received) {
hdlc->state = HDLC_SEND_CRC1;
hdlc->crc ^= 0xffff;
hdlc->bit_shift = 8;
hdlc->shift_reg =
hdlc->crc & 0xff;
} else if (!hdlc->do_adapt56)
hdlc->state =
HDLC_SEND_FAST_FLAG;
else
hdlc->state =
HDLC_SENDFLAG_B0;
}
}
}
switch (hdlc->state) {
case STOPPED:
while (dsize--)
*dst++ = 0xff;
return dsize;
case HDLC_SEND_FAST_FLAG:
hdlc->do_closing = 0;
if (slen == 0) {
/* the code is for bitreverse streams */
if (hdlc->do_bitreverse == 0)
*dst++ = bitrev8(hdlc->ffvalue);
else
*dst++ = hdlc->ffvalue;
len++;
dsize--;
break;
}
fallthrough;
case HDLC_SENDFLAG_ONE:
if (hdlc->bit_shift == 8) {
hdlc->cbin = hdlc->ffvalue >>
(8 - hdlc->data_bits);
hdlc->state = HDLC_SEND_DATA;
hdlc->crc = 0xffff;
hdlc->hdlc_bits1 = 0;
hdlc->data_received = 1;
}
break;
case HDLC_SENDFLAG_B0:
hdlc->do_closing = 0;
hdlc->cbin <<= 1;
hdlc->data_bits++;
hdlc->hdlc_bits1 = 0;
hdlc->state = HDLC_SENDFLAG_B1A6;
break;
case HDLC_SENDFLAG_B1A6:
hdlc->cbin <<= 1;
hdlc->data_bits++;
hdlc->cbin++;
if (++hdlc->hdlc_bits1 == 6)
hdlc->state = HDLC_SENDFLAG_B7;
break;
case HDLC_SENDFLAG_B7:
hdlc->cbin <<= 1;
hdlc->data_bits++;
if (slen == 0) {
hdlc->state = HDLC_SENDFLAG_B0;
break;
}
if (hdlc->bit_shift == 8) {
hdlc->state = HDLC_SEND_DATA;
hdlc->crc = 0xffff;
hdlc->hdlc_bits1 = 0;
hdlc->data_received = 1;
}
break;
case HDLC_SEND_FIRST_FLAG:
hdlc->data_received = 1;
if (hdlc->data_bits == 8) {
hdlc->state = HDLC_SEND_DATA;
hdlc->crc = 0xffff;
hdlc->hdlc_bits1 = 0;
break;
}
hdlc->cbin <<= 1;
hdlc->data_bits++;
if (hdlc->shift_reg & 0x01)
hdlc->cbin++;
hdlc->shift_reg >>= 1;
hdlc->bit_shift--;
if (hdlc->bit_shift == 0) {
hdlc->state = HDLC_SEND_DATA;
hdlc->crc = 0xffff;
hdlc->hdlc_bits1 = 0;
}
break;
case HDLC_SEND_DATA:
hdlc->cbin <<= 1;
hdlc->data_bits++;
if (hdlc->hdlc_bits1 == 5) {
hdlc->hdlc_bits1 = 0;
break;
}
if (hdlc->bit_shift == 8)
hdlc->crc = crc_ccitt_byte(hdlc->crc,
hdlc->shift_reg);
if (hdlc->shift_reg & 0x01) {
hdlc->hdlc_bits1++;
hdlc->cbin++;
hdlc->shift_reg >>= 1;
hdlc->bit_shift--;
} else {
hdlc->hdlc_bits1 = 0;
hdlc->shift_reg >>= 1;
hdlc->bit_shift--;
}
break;
case HDLC_SEND_CRC1:
hdlc->cbin <<= 1;
hdlc->data_bits++;
if (hdlc->hdlc_bits1 == 5) {
hdlc->hdlc_bits1 = 0;
break;
}
if (hdlc->shift_reg & 0x01) {
hdlc->hdlc_bits1++;
hdlc->cbin++;
hdlc->shift_reg >>= 1;
hdlc->bit_shift--;
} else {
hdlc->hdlc_bits1 = 0;
hdlc->shift_reg >>= 1;
hdlc->bit_shift--;
}
if (hdlc->bit_shift == 0) {
hdlc->shift_reg = (hdlc->crc >> 8);
hdlc->state = HDLC_SEND_CRC2;
hdlc->bit_shift = 8;
}
break;
case HDLC_SEND_CRC2:
hdlc->cbin <<= 1;
hdlc->data_bits++;
if (hdlc->hdlc_bits1 == 5) {
hdlc->hdlc_bits1 = 0;
break;
}
if (hdlc->shift_reg & 0x01) {
hdlc->hdlc_bits1++;
hdlc->cbin++;
hdlc->shift_reg >>= 1;
hdlc->bit_shift--;
} else {
hdlc->hdlc_bits1 = 0;
hdlc->shift_reg >>= 1;
hdlc->bit_shift--;
}
if (hdlc->bit_shift == 0) {
hdlc->shift_reg = 0x7e;
hdlc->state = HDLC_SEND_CLOSING_FLAG;
hdlc->bit_shift = 8;
}
break;
case HDLC_SEND_CLOSING_FLAG:
hdlc->cbin <<= 1;
hdlc->data_bits++;
if (hdlc->hdlc_bits1 == 5) {
hdlc->hdlc_bits1 = 0;
break;
}
if (hdlc->shift_reg & 0x01)
hdlc->cbin++;
hdlc->shift_reg >>= 1;
hdlc->bit_shift--;
if (hdlc->bit_shift == 0) {
hdlc->ffvalue =
xfast_flag_value[hdlc->data_bits];
if (hdlc->dchannel) {
hdlc->ffvalue = 0x7e;
hdlc->state = HDLC_SEND_IDLE1;
hdlc->bit_shift = 8-hdlc->data_bits;
if (hdlc->bit_shift == 0)
hdlc->state =
HDLC_SEND_FAST_IDLE;
} else {
if (!hdlc->do_adapt56) {
hdlc->state =
HDLC_SEND_FAST_FLAG;
hdlc->data_received = 0;
} else {
hdlc->state = HDLC_SENDFLAG_B0;
hdlc->data_received = 0;
}
/* Finished this frame, send flags */
if (dsize > 1)
dsize = 1;
}
}
break;
case HDLC_SEND_IDLE1:
hdlc->do_closing = 0;
hdlc->cbin <<= 1;
hdlc->cbin++;
hdlc->data_bits++;
hdlc->bit_shift--;
if (hdlc->bit_shift == 0) {
hdlc->state = HDLC_SEND_FAST_IDLE;
hdlc->bit_shift = 0;
}
break;
case HDLC_SEND_FAST_IDLE:
hdlc->do_closing = 0;
hdlc->cbin = 0xff;
hdlc->data_bits = 8;
if (hdlc->bit_shift == 8) {
hdlc->cbin = 0x7e;
hdlc->state = HDLC_SEND_FIRST_FLAG;
} else {
/* the code is for bitreverse streams */
if (hdlc->do_bitreverse == 0)
*dst++ = bitrev8(hdlc->cbin);
else
*dst++ = hdlc->cbin;
hdlc->bit_shift = 0;
hdlc->data_bits = 0;
len++;
dsize = 0;
}
break;
default:
break;
}
if (hdlc->do_adapt56) {
if (hdlc->data_bits == 7) {
hdlc->cbin <<= 1;
hdlc->cbin++;
hdlc->data_bits++;
}
}
if (hdlc->data_bits == 8) {
/* the code is for bitreverse streams */
if (hdlc->do_bitreverse == 0)
*dst++ = bitrev8(hdlc->cbin);
else
*dst++ = hdlc->cbin;
hdlc->data_bits = 0;
len++;
dsize--;
}
}
*count -= slen;
return len;
}
EXPORT_SYMBOL(isdnhdlc_encode);
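/*
* Minimal transmit-side usage sketch (illustrative only; frame,
* frame_len and send_to_hw() are caller-supplied placeholders). Each
* call reports via 'used' how many source bytes it consumed and
* returns how many encoded bytes (data, stuffed bits, CRC, flags) it
* wrote to out[]; callers loop, advancing the source by 'used', until
* the whole frame has been flushed.
*
*	struct isdnhdlc_vars xmt;
*	u8 out[64];
*	int used, len;
*
*	isdnhdlc_out_init(&xmt, 0);
*	len = isdnhdlc_encode(&xmt, frame, frame_len, &used,
*			      out, sizeof(out));
*	send_to_hw(out, len);
*/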
| linux-master | drivers/isdn/hardware/mISDN/isdnhdlc.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* isac.c ISAC specific routines
*
* Author Karsten Keil <[email protected]>
*
* Copyright 2009 by Karsten Keil <[email protected]>
*/
#include <linux/irqreturn.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/mISDNhw.h>
#include "ipac.h"
#define DBUSY_TIMER_VALUE 80
#define ARCOFI_USE 1
#define ISAC_REV "2.0"
MODULE_AUTHOR("Karsten Keil");
MODULE_VERSION(ISAC_REV);
MODULE_LICENSE("GPL v2");
#define ReadISAC(is, o) (is->read_reg(is->dch.hw, o + is->off))
#define WriteISAC(is, o, v) (is->write_reg(is->dch.hw, o + is->off, v))
#define ReadHSCX(h, o) (h->ip->read_reg(h->ip->hw, h->off + o))
#define WriteHSCX(h, o, v) (h->ip->write_reg(h->ip->hw, h->off + o, v))
#define ReadIPAC(ip, o) (ip->read_reg(ip->hw, o))
#define WriteIPAC(ip, o, v) (ip->write_reg(ip->hw, o, v))
static inline void
ph_command(struct isac_hw *isac, u8 command)
{
pr_debug("%s: ph_command %x\n", isac->name, command);
if (isac->type & IPAC_TYPE_ISACX)
WriteISAC(isac, ISACX_CIX0, (command << 4) | 0xE);
else
WriteISAC(isac, ISAC_CIX0, (command << 2) | 3);
}
static void
isac_ph_state_change(struct isac_hw *isac)
{
switch (isac->state) {
case (ISAC_IND_RS):
case (ISAC_IND_EI):
ph_command(isac, ISAC_CMD_DUI);
}
schedule_event(&isac->dch, FLG_PHCHANGE);
}
static void
isac_ph_state_bh(struct dchannel *dch)
{
struct isac_hw *isac = container_of(dch, struct isac_hw, dch);
switch (isac->state) {
case ISAC_IND_RS:
case ISAC_IND_EI:
dch->state = 0;
l1_event(dch->l1, HW_RESET_IND);
break;
case ISAC_IND_DID:
dch->state = 3;
l1_event(dch->l1, HW_DEACT_CNF);
break;
case ISAC_IND_DR:
case ISAC_IND_DR6:
dch->state = 3;
l1_event(dch->l1, HW_DEACT_IND);
break;
case ISAC_IND_PU:
dch->state = 4;
l1_event(dch->l1, HW_POWERUP_IND);
break;
case ISAC_IND_RSY:
if (dch->state <= 5) {
dch->state = 5;
l1_event(dch->l1, ANYSIGNAL);
} else {
dch->state = 8;
l1_event(dch->l1, LOSTFRAMING);
}
break;
case ISAC_IND_ARD:
dch->state = 6;
l1_event(dch->l1, INFO2);
break;
case ISAC_IND_AI8:
dch->state = 7;
l1_event(dch->l1, INFO4_P8);
break;
case ISAC_IND_AI10:
dch->state = 7;
l1_event(dch->l1, INFO4_P10);
break;
}
pr_debug("%s: TE newstate %x\n", isac->name, dch->state);
}
static void
isac_empty_fifo(struct isac_hw *isac, int count)
{
u8 *ptr;
pr_debug("%s: %s %d\n", isac->name, __func__, count);
if (!isac->dch.rx_skb) {
isac->dch.rx_skb = mI_alloc_skb(isac->dch.maxlen, GFP_ATOMIC);
if (!isac->dch.rx_skb) {
pr_info("%s: D receive out of memory\n", isac->name);
WriteISAC(isac, ISAC_CMDR, 0x80);
return;
}
}
if ((isac->dch.rx_skb->len + count) >= isac->dch.maxlen) {
pr_debug("%s: %s overrun %d\n", isac->name, __func__,
isac->dch.rx_skb->len + count);
WriteISAC(isac, ISAC_CMDR, 0x80);
return;
}
ptr = skb_put(isac->dch.rx_skb, count);
isac->read_fifo(isac->dch.hw, isac->off, ptr, count);
WriteISAC(isac, ISAC_CMDR, 0x80);
if (isac->dch.debug & DEBUG_HW_DFIFO) {
char pfx[MISDN_MAX_IDLEN + 16];
snprintf(pfx, MISDN_MAX_IDLEN + 15, "D-recv %s %d ",
isac->name, count);
print_hex_dump_bytes(pfx, DUMP_PREFIX_OFFSET, ptr, count);
}
}
static void
isac_fill_fifo(struct isac_hw *isac)
{
int count, more;
u8 *ptr;
if (!isac->dch.tx_skb)
return;
count = isac->dch.tx_skb->len - isac->dch.tx_idx;
if (count <= 0)
return;
more = 0;
if (count > 32) {
more = !0;
count = 32;
}
pr_debug("%s: %s %d\n", isac->name, __func__, count);
ptr = isac->dch.tx_skb->data + isac->dch.tx_idx;
isac->dch.tx_idx += count;
isac->write_fifo(isac->dch.hw, isac->off, ptr, count);
WriteISAC(isac, ISAC_CMDR, more ? 0x8 : 0xa);
if (test_and_set_bit(FLG_BUSY_TIMER, &isac->dch.Flags)) {
pr_debug("%s: %s dbusytimer running\n", isac->name, __func__);
del_timer(&isac->dch.timer);
}
isac->dch.timer.expires = jiffies + ((DBUSY_TIMER_VALUE * HZ)/1000);
add_timer(&isac->dch.timer);
if (isac->dch.debug & DEBUG_HW_DFIFO) {
char pfx[MISDN_MAX_IDLEN + 16];
snprintf(pfx, MISDN_MAX_IDLEN + 15, "D-send %s %d ",
isac->name, count);
print_hex_dump_bytes(pfx, DUMP_PREFIX_OFFSET, ptr, count);
}
}
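/*
* Note: the ISAC D-channel transmit FIFO is 32 bytes deep, which is
* why frames are pushed in 32-byte chunks above; per the ISAC data
* sheet, CMDR 0x0a sets XTF | XME (transmit FIFO and message end,
* closing the frame) while 0x08 sets XTF alone to signal that more
* data follows.
*/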
static void
isac_rme_irq(struct isac_hw *isac)
{
u8 val, count;
val = ReadISAC(isac, ISAC_RSTA);
if ((val & 0x70) != 0x20) {
if (val & 0x40) {
pr_debug("%s: ISAC RDO\n", isac->name);
#ifdef ERROR_STATISTIC
isac->dch.err_rx++;
#endif
}
if (!(val & 0x20)) {
pr_debug("%s: ISAC CRC error\n", isac->name);
#ifdef ERROR_STATISTIC
isac->dch.err_crc++;
#endif
}
WriteISAC(isac, ISAC_CMDR, 0x80);
dev_kfree_skb(isac->dch.rx_skb);
isac->dch.rx_skb = NULL;
} else {
count = ReadISAC(isac, ISAC_RBCL) & 0x1f;
if (count == 0)
count = 32;
isac_empty_fifo(isac, count);
recv_Dchannel(&isac->dch);
}
}
static void
isac_xpr_irq(struct isac_hw *isac)
{
if (test_and_clear_bit(FLG_BUSY_TIMER, &isac->dch.Flags))
del_timer(&isac->dch.timer);
if (isac->dch.tx_skb && isac->dch.tx_idx < isac->dch.tx_skb->len) {
isac_fill_fifo(isac);
} else {
dev_kfree_skb(isac->dch.tx_skb);
if (get_next_dframe(&isac->dch))
isac_fill_fifo(isac);
}
}
static void
isac_retransmit(struct isac_hw *isac)
{
if (test_and_clear_bit(FLG_BUSY_TIMER, &isac->dch.Flags))
del_timer(&isac->dch.timer);
if (test_bit(FLG_TX_BUSY, &isac->dch.Flags)) {
/* Restart frame */
isac->dch.tx_idx = 0;
isac_fill_fifo(isac);
} else if (isac->dch.tx_skb) { /* should not happen */
pr_info("%s: tx_skb exist but not busy\n", isac->name);
test_and_set_bit(FLG_TX_BUSY, &isac->dch.Flags);
isac->dch.tx_idx = 0;
isac_fill_fifo(isac);
} else {
pr_info("%s: ISAC XDU no TX_BUSY\n", isac->name);
if (get_next_dframe(&isac->dch))
isac_fill_fifo(isac);
}
}
static void
isac_mos_irq(struct isac_hw *isac)
{
u8 val;
int ret;
val = ReadISAC(isac, ISAC_MOSR);
pr_debug("%s: ISAC MOSR %02x\n", isac->name, val);
#if ARCOFI_USE
if (val & 0x08) {
if (!isac->mon_rx) {
isac->mon_rx = kmalloc(MAX_MON_FRAME, GFP_ATOMIC);
if (!isac->mon_rx) {
pr_info("%s: ISAC MON RX out of memory!\n",
isac->name);
isac->mocr &= 0xf0;
isac->mocr |= 0x0a;
WriteISAC(isac, ISAC_MOCR, isac->mocr);
goto afterMONR0;
} else
isac->mon_rxp = 0;
}
if (isac->mon_rxp >= MAX_MON_FRAME) {
isac->mocr &= 0xf0;
isac->mocr |= 0x0a;
WriteISAC(isac, ISAC_MOCR, isac->mocr);
isac->mon_rxp = 0;
pr_debug("%s: ISAC MON RX overflow!\n", isac->name);
goto afterMONR0;
}
isac->mon_rx[isac->mon_rxp++] = ReadISAC(isac, ISAC_MOR0);
pr_debug("%s: ISAC MOR0 %02x\n", isac->name,
isac->mon_rx[isac->mon_rxp - 1]);
if (isac->mon_rxp == 1) {
isac->mocr |= 0x04;
WriteISAC(isac, ISAC_MOCR, isac->mocr);
}
}
afterMONR0:
if (val & 0x80) {
if (!isac->mon_rx) {
isac->mon_rx = kmalloc(MAX_MON_FRAME, GFP_ATOMIC);
if (!isac->mon_rx) {
pr_info("%s: ISAC MON RX out of memory!\n",
isac->name);
isac->mocr &= 0x0f;
isac->mocr |= 0xa0;
WriteISAC(isac, ISAC_MOCR, isac->mocr);
goto afterMONR1;
} else
isac->mon_rxp = 0;
}
if (isac->mon_rxp >= MAX_MON_FRAME) {
isac->mocr &= 0x0f;
isac->mocr |= 0xa0;
WriteISAC(isac, ISAC_MOCR, isac->mocr);
isac->mon_rxp = 0;
pr_debug("%s: ISAC MON RX overflow!\n", isac->name);
goto afterMONR1;
}
isac->mon_rx[isac->mon_rxp++] = ReadISAC(isac, ISAC_MOR1);
pr_debug("%s: ISAC MOR1 %02x\n", isac->name,
isac->mon_rx[isac->mon_rxp - 1]);
isac->mocr |= 0x40;
WriteISAC(isac, ISAC_MOCR, isac->mocr);
}
afterMONR1:
if (val & 0x04) {
isac->mocr &= 0xf0;
WriteISAC(isac, ISAC_MOCR, isac->mocr);
isac->mocr |= 0x0a;
WriteISAC(isac, ISAC_MOCR, isac->mocr);
if (isac->monitor) {
ret = isac->monitor(isac->dch.hw, MONITOR_RX_0,
isac->mon_rx, isac->mon_rxp);
if (ret)
kfree(isac->mon_rx);
} else {
pr_info("%s: MONITOR 0 received %d but no user\n",
isac->name, isac->mon_rxp);
kfree(isac->mon_rx);
}
isac->mon_rx = NULL;
isac->mon_rxp = 0;
}
if (val & 0x40) {
isac->mocr &= 0x0f;
WriteISAC(isac, ISAC_MOCR, isac->mocr);
isac->mocr |= 0xa0;
WriteISAC(isac, ISAC_MOCR, isac->mocr);
if (isac->monitor) {
ret = isac->monitor(isac->dch.hw, MONITOR_RX_1,
isac->mon_rx, isac->mon_rxp);
if (ret)
kfree(isac->mon_rx);
} else {
pr_info("%s: MONITOR 1 received %d but no user\n",
isac->name, isac->mon_rxp);
kfree(isac->mon_rx);
}
isac->mon_rx = NULL;
isac->mon_rxp = 0;
}
if (val & 0x02) {
if ((!isac->mon_tx) || (isac->mon_txc &&
(isac->mon_txp >= isac->mon_txc) && !(val & 0x08))) {
isac->mocr &= 0xf0;
WriteISAC(isac, ISAC_MOCR, isac->mocr);
isac->mocr |= 0x0a;
WriteISAC(isac, ISAC_MOCR, isac->mocr);
if (isac->mon_txc && (isac->mon_txp >= isac->mon_txc)) {
if (isac->monitor)
isac->monitor(isac->dch.hw,
MONITOR_TX_0, NULL, 0);
}
kfree(isac->mon_tx);
isac->mon_tx = NULL;
isac->mon_txc = 0;
isac->mon_txp = 0;
goto AfterMOX0;
}
if (isac->mon_txc && (isac->mon_txp >= isac->mon_txc)) {
if (isac->monitor)
isac->monitor(isac->dch.hw,
MONITOR_TX_0, NULL, 0);
kfree(isac->mon_tx);
isac->mon_tx = NULL;
isac->mon_txc = 0;
isac->mon_txp = 0;
goto AfterMOX0;
}
WriteISAC(isac, ISAC_MOX0, isac->mon_tx[isac->mon_txp++]);
pr_debug("%s: ISAC %02x -> MOX0\n", isac->name,
isac->mon_tx[isac->mon_txp - 1]);
}
AfterMOX0:
if (val & 0x20) {
if ((!isac->mon_tx) || (isac->mon_txc &&
(isac->mon_txp >= isac->mon_txc) && !(val & 0x80))) {
isac->mocr &= 0x0f;
WriteISAC(isac, ISAC_MOCR, isac->mocr);
isac->mocr |= 0xa0;
WriteISAC(isac, ISAC_MOCR, isac->mocr);
if (isac->mon_txc && (isac->mon_txp >= isac->mon_txc)) {
if (isac->monitor)
isac->monitor(isac->dch.hw,
MONITOR_TX_1, NULL, 0);
}
kfree(isac->mon_tx);
isac->mon_tx = NULL;
isac->mon_txc = 0;
isac->mon_txp = 0;
goto AfterMOX1;
}
if (isac->mon_txc && (isac->mon_txp >= isac->mon_txc)) {
if (isac->monitor)
isac->monitor(isac->dch.hw,
MONITOR_TX_1, NULL, 0);
kfree(isac->mon_tx);
isac->mon_tx = NULL;
isac->mon_txc = 0;
isac->mon_txp = 0;
goto AfterMOX1;
}
WriteISAC(isac, ISAC_MOX1, isac->mon_tx[isac->mon_txp++]);
pr_debug("%s: ISAC %02x -> MOX1\n", isac->name,
isac->mon_tx[isac->mon_txp - 1]);
}
AfterMOX1:
val = 0; /* dummy to avoid warning */
#endif
}
static void
isac_cisq_irq(struct isac_hw *isac)
{
u8 val;
val = ReadISAC(isac, ISAC_CIR0);
pr_debug("%s: ISAC CIR0 %02X\n", isac->name, val);
if (val & 2) {
pr_debug("%s: ph_state change %x->%x\n", isac->name,
isac->state, (val >> 2) & 0xf);
isac->state = (val >> 2) & 0xf;
isac_ph_state_change(isac);
}
if (val & 1) {
val = ReadISAC(isac, ISAC_CIR1);
pr_debug("%s: ISAC CIR1 %02X\n", isac->name, val);
}
}
static void
isacsx_cic_irq(struct isac_hw *isac)
{
u8 val;
val = ReadISAC(isac, ISACX_CIR0);
pr_debug("%s: ISACX CIR0 %02X\n", isac->name, val);
if (val & ISACX_CIR0_CIC0) {
pr_debug("%s: ph_state change %x->%x\n", isac->name,
isac->state, val >> 4);
isac->state = val >> 4;
isac_ph_state_change(isac);
}
}
static void
isacsx_rme_irq(struct isac_hw *isac)
{
int count;
u8 val;
val = ReadISAC(isac, ISACX_RSTAD);
if ((val & (ISACX_RSTAD_VFR |
ISACX_RSTAD_RDO |
ISACX_RSTAD_CRC |
ISACX_RSTAD_RAB))
!= (ISACX_RSTAD_VFR | ISACX_RSTAD_CRC)) {
pr_debug("%s: RSTAD %#x, dropped\n", isac->name, val);
#ifdef ERROR_STATISTIC
if (val & ISACX_RSTAD_CRC)
isac->dch.err_rx++;
else
isac->dch.err_crc++;
#endif
WriteISAC(isac, ISACX_CMDRD, ISACX_CMDRD_RMC);
dev_kfree_skb(isac->dch.rx_skb);
isac->dch.rx_skb = NULL;
} else {
count = ReadISAC(isac, ISACX_RBCLD) & 0x1f;
if (count == 0)
count = 32;
isac_empty_fifo(isac, count);
if (isac->dch.rx_skb) {
skb_trim(isac->dch.rx_skb, isac->dch.rx_skb->len - 1);
pr_debug("%s: dchannel received %d\n", isac->name,
isac->dch.rx_skb->len);
recv_Dchannel(&isac->dch);
}
}
}
irqreturn_t
mISDNisac_irq(struct isac_hw *isac, u8 val)
{
if (unlikely(!val))
return IRQ_NONE;
pr_debug("%s: ISAC interrupt %02x\n", isac->name, val);
if (isac->type & IPAC_TYPE_ISACX) {
if (val & ISACX__CIC)
isacsx_cic_irq(isac);
if (val & ISACX__ICD) {
val = ReadISAC(isac, ISACX_ISTAD);
pr_debug("%s: ISTAD %02x\n", isac->name, val);
if (val & ISACX_D_XDU) {
pr_debug("%s: ISAC XDU\n", isac->name);
#ifdef ERROR_STATISTIC
isac->dch.err_tx++;
#endif
isac_retransmit(isac);
}
if (val & ISACX_D_XMR) {
pr_debug("%s: ISAC XMR\n", isac->name);
#ifdef ERROR_STATISTIC
isac->dch.err_tx++;
#endif
isac_retransmit(isac);
}
if (val & ISACX_D_XPR)
isac_xpr_irq(isac);
if (val & ISACX_D_RFO) {
pr_debug("%s: ISAC RFO\n", isac->name);
WriteISAC(isac, ISACX_CMDRD, ISACX_CMDRD_RMC);
}
if (val & ISACX_D_RME)
isacsx_rme_irq(isac);
if (val & ISACX_D_RPF)
isac_empty_fifo(isac, 0x20);
}
} else {
if (val & 0x80) /* RME */
isac_rme_irq(isac);
if (val & 0x40) /* RPF */
isac_empty_fifo(isac, 32);
if (val & 0x10) /* XPR */
isac_xpr_irq(isac);
if (val & 0x04) /* CISQ */
isac_cisq_irq(isac);
if (val & 0x20) /* RSC - never */
pr_debug("%s: ISAC RSC interrupt\n", isac->name);
if (val & 0x02) /* SIN - never */
pr_debug("%s: ISAC SIN interrupt\n", isac->name);
if (val & 0x01) { /* EXI */
val = ReadISAC(isac, ISAC_EXIR);
pr_debug("%s: ISAC EXIR %02x\n", isac->name, val);
if (val & 0x80) /* XMR */
pr_debug("%s: ISAC XMR\n", isac->name);
if (val & 0x40) { /* XDU */
pr_debug("%s: ISAC XDU\n", isac->name);
#ifdef ERROR_STATISTIC
isac->dch.err_tx++;
#endif
isac_retransmit(isac);
}
if (val & 0x04) /* MOS */
isac_mos_irq(isac);
}
}
return IRQ_HANDLED;
}
EXPORT_SYMBOL(mISDNisac_irq);
static int
isac_l1hw(struct mISDNchannel *ch, struct sk_buff *skb)
{
struct mISDNdevice *dev = container_of(ch, struct mISDNdevice, D);
struct dchannel *dch = container_of(dev, struct dchannel, dev);
struct isac_hw *isac = container_of(dch, struct isac_hw, dch);
int ret = -EINVAL;
struct mISDNhead *hh = mISDN_HEAD_P(skb);
u32 id;
u_long flags;
switch (hh->prim) {
case PH_DATA_REQ:
spin_lock_irqsave(isac->hwlock, flags);
ret = dchannel_senddata(dch, skb);
if (ret > 0) { /* direct TX */
id = hh->id; /* skb can be freed */
isac_fill_fifo(isac);
ret = 0;
spin_unlock_irqrestore(isac->hwlock, flags);
queue_ch_frame(ch, PH_DATA_CNF, id, NULL);
} else
spin_unlock_irqrestore(isac->hwlock, flags);
return ret;
case PH_ACTIVATE_REQ:
ret = l1_event(dch->l1, hh->prim);
break;
case PH_DEACTIVATE_REQ:
test_and_clear_bit(FLG_L2_ACTIVATED, &dch->Flags);
ret = l1_event(dch->l1, hh->prim);
break;
}
if (!ret)
dev_kfree_skb(skb);
return ret;
}
static int
isac_ctrl(struct isac_hw *isac, u32 cmd, unsigned long para)
{
u8 tl = 0;
unsigned long flags;
int ret = 0;
switch (cmd) {
case HW_TESTLOOP:
spin_lock_irqsave(isac->hwlock, flags);
if (!(isac->type & IPAC_TYPE_ISACX)) {
/* TODO: implement for IPAC_TYPE_ISACX */
if (para & 1) /* B1 */
tl |= 0x0c;
else if (para & 2) /* B2 */
tl |= 0x3;
/* we only support IOM2 mode */
WriteISAC(isac, ISAC_SPCR, tl);
if (tl)
WriteISAC(isac, ISAC_ADF1, 0x8);
else
WriteISAC(isac, ISAC_ADF1, 0x0);
}
spin_unlock_irqrestore(isac->hwlock, flags);
break;
case HW_TIMER3_VALUE:
ret = l1_event(isac->dch.l1, HW_TIMER3_VALUE | (para & 0xff));
break;
default:
pr_debug("%s: %s unknown command %x %lx\n", isac->name,
__func__, cmd, para);
ret = -1;
}
return ret;
}
static int
isac_l1cmd(struct dchannel *dch, u32 cmd)
{
struct isac_hw *isac = container_of(dch, struct isac_hw, dch);
u_long flags;
pr_debug("%s: cmd(%x) state(%02x)\n", isac->name, cmd, isac->state);
switch (cmd) {
case INFO3_P8:
spin_lock_irqsave(isac->hwlock, flags);
ph_command(isac, ISAC_CMD_AR8);
spin_unlock_irqrestore(isac->hwlock, flags);
break;
case INFO3_P10:
spin_lock_irqsave(isac->hwlock, flags);
ph_command(isac, ISAC_CMD_AR10);
spin_unlock_irqrestore(isac->hwlock, flags);
break;
case HW_RESET_REQ:
spin_lock_irqsave(isac->hwlock, flags);
if ((isac->state == ISAC_IND_EI) ||
(isac->state == ISAC_IND_DR) ||
(isac->state == ISAC_IND_DR6) ||
(isac->state == ISAC_IND_RS))
ph_command(isac, ISAC_CMD_TIM);
else
ph_command(isac, ISAC_CMD_RS);
spin_unlock_irqrestore(isac->hwlock, flags);
break;
case HW_DEACT_REQ:
skb_queue_purge(&dch->squeue);
if (dch->tx_skb) {
dev_kfree_skb(dch->tx_skb);
dch->tx_skb = NULL;
}
dch->tx_idx = 0;
if (dch->rx_skb) {
dev_kfree_skb(dch->rx_skb);
dch->rx_skb = NULL;
}
test_and_clear_bit(FLG_TX_BUSY, &dch->Flags);
if (test_and_clear_bit(FLG_BUSY_TIMER, &dch->Flags))
del_timer(&dch->timer);
break;
case HW_POWERUP_REQ:
spin_lock_irqsave(isac->hwlock, flags);
ph_command(isac, ISAC_CMD_TIM);
spin_unlock_irqrestore(isac->hwlock, flags);
break;
case PH_ACTIVATE_IND:
test_and_set_bit(FLG_ACTIVE, &dch->Flags);
_queue_data(&dch->dev.D, cmd, MISDN_ID_ANY, 0, NULL,
GFP_ATOMIC);
break;
case PH_DEACTIVATE_IND:
test_and_clear_bit(FLG_ACTIVE, &dch->Flags);
_queue_data(&dch->dev.D, cmd, MISDN_ID_ANY, 0, NULL,
GFP_ATOMIC);
break;
default:
pr_debug("%s: %s unknown command %x\n", isac->name,
__func__, cmd);
return -1;
}
return 0;
}
static void
isac_release(struct isac_hw *isac)
{
if (isac->type & IPAC_TYPE_ISACX)
WriteISAC(isac, ISACX_MASK, 0xff);
else if (isac->type != 0)
WriteISAC(isac, ISAC_MASK, 0xff);
if (isac->dch.timer.function != NULL) {
del_timer(&isac->dch.timer);
isac->dch.timer.function = NULL;
}
kfree(isac->mon_rx);
isac->mon_rx = NULL;
kfree(isac->mon_tx);
isac->mon_tx = NULL;
if (isac->dch.l1)
l1_event(isac->dch.l1, CLOSE_CHANNEL);
mISDN_freedchannel(&isac->dch);
}
static void
dbusy_timer_handler(struct timer_list *t)
{
struct isac_hw *isac = from_timer(isac, t, dch.timer);
int rbch, star;
u_long flags;
if (test_bit(FLG_BUSY_TIMER, &isac->dch.Flags)) {
spin_lock_irqsave(isac->hwlock, flags);
rbch = ReadISAC(isac, ISAC_RBCH);
star = ReadISAC(isac, ISAC_STAR);
pr_debug("%s: D-Channel Busy RBCH %02x STAR %02x\n",
isac->name, rbch, star);
if (rbch & ISAC_RBCH_XAC) /* D-Channel Busy */
test_and_set_bit(FLG_L1_BUSY, &isac->dch.Flags);
else {
/* discard frame; reset transceiver */
test_and_clear_bit(FLG_BUSY_TIMER, &isac->dch.Flags);
if (isac->dch.tx_idx)
isac->dch.tx_idx = 0;
else
pr_info("%s: ISAC D-Channel Busy no tx_idx\n",
isac->name);
/* Transmitter reset */
WriteISAC(isac, ISAC_CMDR, 0x01);
}
spin_unlock_irqrestore(isac->hwlock, flags);
}
}
static int
open_dchannel_caller(struct isac_hw *isac, struct channel_req *rq, void *caller)
{
pr_debug("%s: %s dev(%d) open from %p\n", isac->name, __func__,
isac->dch.dev.id, caller);
if (rq->protocol != ISDN_P_TE_S0)
return -EINVAL;
if (rq->adr.channel == 1)
/* E-Channel not supported */
return -EINVAL;
rq->ch = &isac->dch.dev.D;
rq->ch->protocol = rq->protocol;
if (isac->dch.state == 7)
_queue_data(rq->ch, PH_ACTIVATE_IND, MISDN_ID_ANY,
0, NULL, GFP_KERNEL);
return 0;
}
static int
open_dchannel(struct isac_hw *isac, struct channel_req *rq)
{
return open_dchannel_caller(isac, rq, __builtin_return_address(0));
}
static const char *ISACVer[] =
{"2086/2186 V1.1", "2085 B1", "2085 B2",
"2085 V2.3"};
static int
isac_init(struct isac_hw *isac)
{
u8 val;
int err = 0;
if (!isac->dch.l1) {
err = create_l1(&isac->dch, isac_l1cmd);
if (err)
return err;
}
isac->mon_tx = NULL;
isac->mon_rx = NULL;
timer_setup(&isac->dch.timer, dbusy_timer_handler, 0);
isac->mocr = 0xaa;
if (isac->type & IPAC_TYPE_ISACX) {
/* Disable all IRQ */
WriteISAC(isac, ISACX_MASK, 0xff);
val = ReadISAC(isac, ISACX_STARD);
pr_debug("%s: ISACX STARD %x\n", isac->name, val);
val = ReadISAC(isac, ISACX_ISTAD);
pr_debug("%s: ISACX ISTAD %x\n", isac->name, val);
val = ReadISAC(isac, ISACX_ISTA);
pr_debug("%s: ISACX ISTA %x\n", isac->name, val);
/* clear LDD */
WriteISAC(isac, ISACX_TR_CONF0, 0x00);
/* enable transmitter */
WriteISAC(isac, ISACX_TR_CONF2, 0x00);
/* transparent mode 0, RAC, stop/go */
WriteISAC(isac, ISACX_MODED, 0xc9);
/* all HDLC IRQ unmasked */
val = ReadISAC(isac, ISACX_ID);
if (isac->dch.debug & DEBUG_HW)
pr_notice("%s: ISACX Design ID %x\n",
isac->name, val & 0x3f);
val = ReadISAC(isac, ISACX_CIR0);
pr_debug("%s: ISACX CIR0 %02X\n", isac->name, val);
isac->state = val >> 4;
isac_ph_state_change(isac);
ph_command(isac, ISAC_CMD_RS);
WriteISAC(isac, ISACX_MASK, IPACX__ON);
WriteISAC(isac, ISACX_MASKD, 0x00);
} else { /* old isac */
WriteISAC(isac, ISAC_MASK, 0xff);
val = ReadISAC(isac, ISAC_STAR);
pr_debug("%s: ISAC STAR %x\n", isac->name, val);
val = ReadISAC(isac, ISAC_MODE);
pr_debug("%s: ISAC MODE %x\n", isac->name, val);
val = ReadISAC(isac, ISAC_ADF2);
pr_debug("%s: ISAC ADF2 %x\n", isac->name, val);
val = ReadISAC(isac, ISAC_ISTA);
pr_debug("%s: ISAC ISTA %x\n", isac->name, val);
if (val & 0x01) {
val = ReadISAC(isac, ISAC_EXIR);
pr_debug("%s: ISAC EXIR %x\n", isac->name, val);
}
val = ReadISAC(isac, ISAC_RBCH);
if (isac->dch.debug & DEBUG_HW)
pr_notice("%s: ISAC version (%x): %s\n", isac->name,
val, ISACVer[(val >> 5) & 3]);
isac->type |= ((val >> 5) & 3);
if (!isac->adf2)
isac->adf2 = 0x80;
if (!(isac->adf2 & 0x80)) { /* only IOM 2 Mode */
pr_info("%s: only support IOM2 mode but adf2=%02x\n",
isac->name, isac->adf2);
isac_release(isac);
return -EINVAL;
}
WriteISAC(isac, ISAC_ADF2, isac->adf2);
WriteISAC(isac, ISAC_SQXR, 0x2f);
WriteISAC(isac, ISAC_SPCR, 0x00);
WriteISAC(isac, ISAC_STCR, 0x70);
WriteISAC(isac, ISAC_MODE, 0xc9);
WriteISAC(isac, ISAC_TIMR, 0x00);
WriteISAC(isac, ISAC_ADF1, 0x00);
val = ReadISAC(isac, ISAC_CIR0);
pr_debug("%s: ISAC CIR0 %x\n", isac->name, val);
isac->state = (val >> 2) & 0xf;
isac_ph_state_change(isac);
ph_command(isac, ISAC_CMD_RS);
WriteISAC(isac, ISAC_MASK, 0);
}
return err;
}
int
mISDNisac_init(struct isac_hw *isac, void *hw)
{
mISDN_initdchannel(&isac->dch, MAX_DFRAME_LEN_L1, isac_ph_state_bh);
isac->dch.hw = hw;
isac->dch.dev.D.send = isac_l1hw;
isac->init = isac_init;
isac->release = isac_release;
isac->ctrl = isac_ctrl;
isac->open = open_dchannel;
isac->dch.dev.Dprotocols = (1 << ISDN_P_TE_S0);
isac->dch.dev.nrbchan = 2;
return 0;
}
EXPORT_SYMBOL(mISDNisac_init);
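/*
 * Wait (up to 50 us) for the HSCX command executor to become idle
 * (STARB bit 0x04 cleared) before a new CMDR command is issued.
 */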
static void
waitforCEC(struct hscx_hw *hx)
{
u8 starb, to = 50;
while (to) {
starb = ReadHSCX(hx, IPAC_STARB);
if (!(starb & 0x04))
break;
udelay(1);
to--;
}
if (to < 50)
pr_debug("%s: B%1d CEC %d us\n", hx->ip->name, hx->bch.nr,
50 - to);
if (!to)
pr_info("%s: B%1d CEC timeout\n", hx->ip->name, hx->bch.nr);
}
static void
waitforXFW(struct hscx_hw *hx)
{
u8 starb, to = 50;
while (to) {
starb = ReadHSCX(hx, IPAC_STARB);
if ((starb & 0x44) == 0x40)
break;
udelay(1);
to--;
}
if (to < 50)
pr_debug("%s: B%1d XFW %d us\n", hx->ip->name, hx->bch.nr,
50 - to);
if (!to)
pr_info("%s: B%1d XFW timeout\n", hx->ip->name, hx->bch.nr);
}
static void
hscx_cmdr(struct hscx_hw *hx, u8 cmd)
{
if (hx->ip->type & IPAC_TYPE_IPACX)
WriteHSCX(hx, IPACX_CMDRB, cmd);
else {
waitforCEC(hx);
WriteHSCX(hx, IPAC_CMDRB, cmd);
}
}
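/*
 * Read 'count' bytes from the B-channel receive FIFO into rx_skb and
 * acknowledge them with RMC. The data is dropped (but still
 * acknowledged) if FLG_RX_OFF is set or no receive buffer is available.
 */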
static void
hscx_empty_fifo(struct hscx_hw *hscx, u8 count)
{
u8 *p;
int maxlen;
pr_debug("%s: B%1d %d\n", hscx->ip->name, hscx->bch.nr, count);
if (test_bit(FLG_RX_OFF, &hscx->bch.Flags)) {
hscx->bch.dropcnt += count;
hscx_cmdr(hscx, 0x80); /* RMC */
return;
}
maxlen = bchannel_get_rxbuf(&hscx->bch, count);
if (maxlen < 0) {
hscx_cmdr(hscx, 0x80); /* RMC */
if (hscx->bch.rx_skb)
skb_trim(hscx->bch.rx_skb, 0);
pr_warn("%s.B%d: No bufferspace for %d bytes\n",
hscx->ip->name, hscx->bch.nr, count);
return;
}
p = skb_put(hscx->bch.rx_skb, count);
if (hscx->ip->type & IPAC_TYPE_IPACX)
hscx->ip->read_fifo(hscx->ip->hw,
hscx->off + IPACX_RFIFOB, p, count);
else
hscx->ip->read_fifo(hscx->ip->hw,
hscx->off, p, count);
hscx_cmdr(hscx, 0x80); /* RMC */
if (hscx->bch.debug & DEBUG_HW_BFIFO) {
snprintf(hscx->log, 64, "B%1d-recv %s %d ",
hscx->bch.nr, hscx->ip->name, count);
print_hex_dump_bytes(hscx->log, DUMP_PREFIX_OFFSET, p, count);
}
}
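/*
 * Write the next chunk of the pending tx_skb (or fill bytes when
 * FLG_TX_EMPTY is set) into the transmit FIFO and issue the transmit
 * command, adding the message-end bit (0x02) only for the final chunk
 * of an HDLC frame.
 */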
static void
hscx_fill_fifo(struct hscx_hw *hscx)
{
int count, more;
u8 *p;
if (!hscx->bch.tx_skb) {
if (!test_bit(FLG_TX_EMPTY, &hscx->bch.Flags))
return;
count = hscx->fifo_size;
more = 1;
p = hscx->log;
memset(p, hscx->bch.fill[0], count);
} else {
count = hscx->bch.tx_skb->len - hscx->bch.tx_idx;
if (count <= 0)
return;
p = hscx->bch.tx_skb->data + hscx->bch.tx_idx;
more = test_bit(FLG_TRANSPARENT, &hscx->bch.Flags) ? 1 : 0;
if (count > hscx->fifo_size) {
count = hscx->fifo_size;
more = 1;
}
pr_debug("%s: B%1d %d/%d/%d\n", hscx->ip->name, hscx->bch.nr,
count, hscx->bch.tx_idx, hscx->bch.tx_skb->len);
hscx->bch.tx_idx += count;
}
if (hscx->ip->type & IPAC_TYPE_IPACX)
hscx->ip->write_fifo(hscx->ip->hw,
hscx->off + IPACX_XFIFOB, p, count);
else {
waitforXFW(hscx);
hscx->ip->write_fifo(hscx->ip->hw,
hscx->off, p, count);
}
hscx_cmdr(hscx, more ? 0x08 : 0x0a);
if (hscx->bch.tx_skb && (hscx->bch.debug & DEBUG_HW_BFIFO)) {
snprintf(hscx->log, 64, "B%1d-send %s %d ",
hscx->bch.nr, hscx->ip->name, count);
print_hex_dump_bytes(hscx->log, DUMP_PREFIX_OFFSET, p, count);
}
}
static void
hscx_xpr(struct hscx_hw *hx)
{
if (hx->bch.tx_skb && hx->bch.tx_idx < hx->bch.tx_skb->len) {
hscx_fill_fifo(hx);
} else {
dev_kfree_skb(hx->bch.tx_skb);
if (get_next_bframe(&hx->bch)) {
hscx_fill_fifo(hx);
test_and_clear_bit(FLG_TX_EMPTY, &hx->bch.Flags);
} else if (test_bit(FLG_TX_EMPTY, &hx->bch.Flags)) {
hscx_fill_fifo(hx);
}
}
}
static void
ipac_rme(struct hscx_hw *hx)
{
int count;
u8 rstab;
if (hx->ip->type & IPAC_TYPE_IPACX)
rstab = ReadHSCX(hx, IPACX_RSTAB);
else
rstab = ReadHSCX(hx, IPAC_RSTAB);
pr_debug("%s: B%1d RSTAB %02x\n", hx->ip->name, hx->bch.nr, rstab);
if ((rstab & 0xf0) != 0xa0) {
/* !(VFR && !RDO && CRC && !RAB) */
if (!(rstab & 0x80)) {
if (hx->bch.debug & DEBUG_HW_BCHANNEL)
pr_notice("%s: B%1d invalid frame\n",
hx->ip->name, hx->bch.nr);
}
if (rstab & 0x40) {
if (hx->bch.debug & DEBUG_HW_BCHANNEL)
pr_notice("%s: B%1d RDO proto=%x\n",
hx->ip->name, hx->bch.nr,
hx->bch.state);
}
if (!(rstab & 0x20)) {
if (hx->bch.debug & DEBUG_HW_BCHANNEL)
pr_notice("%s: B%1d CRC error\n",
hx->ip->name, hx->bch.nr);
}
hscx_cmdr(hx, 0x80); /* Do RMC */
return;
}
if (hx->ip->type & IPAC_TYPE_IPACX)
count = ReadHSCX(hx, IPACX_RBCLB);
else
count = ReadHSCX(hx, IPAC_RBCLB);
count &= (hx->fifo_size - 1);
if (count == 0)
count = hx->fifo_size;
hscx_empty_fifo(hx, count);
if (!hx->bch.rx_skb)
return;
if (hx->bch.rx_skb->len < 2) {
pr_debug("%s: B%1d frame too short %d\n",
hx->ip->name, hx->bch.nr, hx->bch.rx_skb->len);
skb_trim(hx->bch.rx_skb, 0);
} else {
skb_trim(hx->bch.rx_skb, hx->bch.rx_skb->len - 1);
recv_Bchannel(&hx->bch, 0, false);
}
}
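/*
 * Per B-channel interrupt handling: read ISTAB (and EXIRB for extended
 * interrupts), then dispatch receive (RME/RPF/RFO) and transmit
 * (XPR/XDU) events for this HSCX/IPAC/IPACX channel.
 */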
static void
ipac_irq(struct hscx_hw *hx, u8 ista)
{
u8 istab, m, exirb = 0;
if (hx->ip->type & IPAC_TYPE_IPACX)
istab = ReadHSCX(hx, IPACX_ISTAB);
else if (hx->ip->type & IPAC_TYPE_IPAC) {
istab = ReadHSCX(hx, IPAC_ISTAB);
m = (hx->bch.nr & 1) ? IPAC__EXA : IPAC__EXB;
if (m & ista) {
exirb = ReadHSCX(hx, IPAC_EXIRB);
pr_debug("%s: B%1d EXIRB %02x\n", hx->ip->name,
hx->bch.nr, exirb);
}
} else if (hx->bch.nr & 2) { /* HSCX B */
if (ista & (HSCX__EXA | HSCX__ICA))
ipac_irq(&hx->ip->hscx[0], ista);
if (ista & HSCX__EXB) {
exirb = ReadHSCX(hx, IPAC_EXIRB);
pr_debug("%s: B%1d EXIRB %02x\n", hx->ip->name,
hx->bch.nr, exirb);
}
istab = ista & 0xF8;
} else { /* HSCX A */
istab = ReadHSCX(hx, IPAC_ISTAB);
if (ista & HSCX__EXA) {
exirb = ReadHSCX(hx, IPAC_EXIRB);
pr_debug("%s: B%1d EXIRB %02x\n", hx->ip->name,
hx->bch.nr, exirb);
}
		istab &= 0xF8;
}
if (exirb & IPAC_B_XDU)
istab |= IPACX_B_XDU;
if (exirb & IPAC_B_RFO)
istab |= IPACX_B_RFO;
pr_debug("%s: B%1d ISTAB %02x\n", hx->ip->name, hx->bch.nr, istab);
if (!test_bit(FLG_ACTIVE, &hx->bch.Flags))
return;
if (istab & IPACX_B_RME)
ipac_rme(hx);
if (istab & IPACX_B_RPF) {
hscx_empty_fifo(hx, hx->fifo_size);
if (test_bit(FLG_TRANSPARENT, &hx->bch.Flags))
recv_Bchannel(&hx->bch, 0, false);
}
if (istab & IPACX_B_RFO) {
pr_debug("%s: B%1d RFO error\n", hx->ip->name, hx->bch.nr);
hscx_cmdr(hx, 0x40); /* RRES */
}
if (istab & IPACX_B_XPR)
hscx_xpr(hx);
if (istab & IPACX_B_XDU) {
if (test_bit(FLG_TRANSPARENT, &hx->bch.Flags)) {
if (test_bit(FLG_FILLEMPTY, &hx->bch.Flags))
test_and_set_bit(FLG_TX_EMPTY, &hx->bch.Flags);
hscx_xpr(hx);
return;
}
pr_debug("%s: B%1d XDU error at len %d\n", hx->ip->name,
hx->bch.nr, hx->bch.tx_idx);
hx->bch.tx_idx = 0;
hscx_cmdr(hx, 0x01); /* XRES */
}
}
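/*
 * Main interrupt dispatcher for all supported chip variants. The
 * interrupt status is polled in a loop bounded by 'maxloop' and
 * forwarded to the D-channel (mISDNisac_irq) and B-channel (ipac_irq)
 * handlers. Returns IRQ_NONE if no interrupt source was active at all.
 */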
irqreturn_t
mISDNipac_irq(struct ipac_hw *ipac, int maxloop)
{
int cnt = maxloop + 1;
u8 ista, istad;
struct isac_hw *isac = &ipac->isac;
if (ipac->type & IPAC_TYPE_IPACX) {
ista = ReadIPAC(ipac, ISACX_ISTA);
while (ista && --cnt) {
pr_debug("%s: ISTA %02x\n", ipac->name, ista);
if (ista & IPACX__ICA)
ipac_irq(&ipac->hscx[0], ista);
if (ista & IPACX__ICB)
ipac_irq(&ipac->hscx[1], ista);
if (ista & (ISACX__ICD | ISACX__CIC))
mISDNisac_irq(&ipac->isac, ista);
ista = ReadIPAC(ipac, ISACX_ISTA);
}
} else if (ipac->type & IPAC_TYPE_IPAC) {
ista = ReadIPAC(ipac, IPAC_ISTA);
while (ista && --cnt) {
pr_debug("%s: ISTA %02x\n", ipac->name, ista);
if (ista & (IPAC__ICD | IPAC__EXD)) {
istad = ReadISAC(isac, ISAC_ISTA);
pr_debug("%s: ISTAD %02x\n", ipac->name, istad);
if (istad & IPAC_D_TIN2)
pr_debug("%s TIN2 irq\n", ipac->name);
if (ista & IPAC__EXD)
istad |= 1; /* ISAC EXI */
mISDNisac_irq(isac, istad);
}
if (ista & (IPAC__ICA | IPAC__EXA))
ipac_irq(&ipac->hscx[0], ista);
if (ista & (IPAC__ICB | IPAC__EXB))
ipac_irq(&ipac->hscx[1], ista);
ista = ReadIPAC(ipac, IPAC_ISTA);
}
} else if (ipac->type & IPAC_TYPE_HSCX) {
while (--cnt) {
ista = ReadIPAC(ipac, IPAC_ISTAB + ipac->hscx[1].off);
pr_debug("%s: B2 ISTA %02x\n", ipac->name, ista);
if (ista)
ipac_irq(&ipac->hscx[1], ista);
istad = ReadISAC(isac, ISAC_ISTA);
pr_debug("%s: ISTAD %02x\n", ipac->name, istad);
if (istad)
mISDNisac_irq(isac, istad);
if (0 == (ista | istad))
break;
}
}
if (cnt > maxloop) /* only for ISAC/HSCX without PCI IRQ test */
return IRQ_NONE;
if (cnt < maxloop)
pr_debug("%s: %d irqloops cpu%d\n", ipac->name,
maxloop - cnt, smp_processor_id());
if (maxloop && !cnt)
pr_notice("%s: %d IRQ LOOP cpu%d\n", ipac->name,
maxloop, smp_processor_id());
return IRQ_HANDLED;
}
EXPORT_SYMBOL(mISDNipac_irq);
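/*
 * Program a B-channel for the requested protocol (ISDN_P_NONE,
 * ISDN_P_B_RAW or ISDN_P_B_HDLC); the register setup differs between
 * IPACX, IPAC and plain HSCX. Returns -ENOPROTOOPT for an unknown
 * protocol and -EINVAL for an unknown chip type.
 */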
static int
hscx_mode(struct hscx_hw *hscx, u32 bprotocol)
{
pr_debug("%s: HSCX %c protocol %x-->%x ch %d\n", hscx->ip->name,
'@' + hscx->bch.nr, hscx->bch.state, bprotocol, hscx->bch.nr);
if (hscx->ip->type & IPAC_TYPE_IPACX) {
if (hscx->bch.nr & 1) { /* B1 and ICA */
WriteIPAC(hscx->ip, ISACX_BCHA_TSDP_BC1, 0x80);
WriteIPAC(hscx->ip, ISACX_BCHA_CR, 0x88);
} else { /* B2 and ICB */
WriteIPAC(hscx->ip, ISACX_BCHB_TSDP_BC1, 0x81);
WriteIPAC(hscx->ip, ISACX_BCHB_CR, 0x88);
}
switch (bprotocol) {
case ISDN_P_NONE: /* init */
WriteHSCX(hscx, IPACX_MODEB, 0xC0); /* rec off */
WriteHSCX(hscx, IPACX_EXMB, 0x30); /* std adj. */
WriteHSCX(hscx, IPACX_MASKB, 0xFF); /* ints off */
hscx_cmdr(hscx, 0x41);
test_and_clear_bit(FLG_HDLC, &hscx->bch.Flags);
test_and_clear_bit(FLG_TRANSPARENT, &hscx->bch.Flags);
break;
case ISDN_P_B_RAW:
WriteHSCX(hscx, IPACX_MODEB, 0x88); /* ex trans */
WriteHSCX(hscx, IPACX_EXMB, 0x00); /* trans */
hscx_cmdr(hscx, 0x41);
WriteHSCX(hscx, IPACX_MASKB, IPACX_B_ON);
test_and_set_bit(FLG_TRANSPARENT, &hscx->bch.Flags);
break;
case ISDN_P_B_HDLC:
WriteHSCX(hscx, IPACX_MODEB, 0xC0); /* trans */
WriteHSCX(hscx, IPACX_EXMB, 0x00); /* hdlc,crc */
hscx_cmdr(hscx, 0x41);
WriteHSCX(hscx, IPACX_MASKB, IPACX_B_ON);
test_and_set_bit(FLG_HDLC, &hscx->bch.Flags);
break;
default:
pr_info("%s: protocol not known %x\n", hscx->ip->name,
bprotocol);
return -ENOPROTOOPT;
}
} else if (hscx->ip->type & IPAC_TYPE_IPAC) { /* IPAC */
WriteHSCX(hscx, IPAC_CCR1, 0x82);
WriteHSCX(hscx, IPAC_CCR2, 0x30);
WriteHSCX(hscx, IPAC_XCCR, 0x07);
WriteHSCX(hscx, IPAC_RCCR, 0x07);
WriteHSCX(hscx, IPAC_TSAX, hscx->slot);
WriteHSCX(hscx, IPAC_TSAR, hscx->slot);
switch (bprotocol) {
case ISDN_P_NONE:
WriteHSCX(hscx, IPAC_TSAX, 0x1F);
WriteHSCX(hscx, IPAC_TSAR, 0x1F);
WriteHSCX(hscx, IPAC_MODEB, 0x84);
WriteHSCX(hscx, IPAC_CCR1, 0x82);
WriteHSCX(hscx, IPAC_MASKB, 0xFF); /* ints off */
test_and_clear_bit(FLG_HDLC, &hscx->bch.Flags);
test_and_clear_bit(FLG_TRANSPARENT, &hscx->bch.Flags);
break;
case ISDN_P_B_RAW:
WriteHSCX(hscx, IPAC_MODEB, 0xe4); /* ex trans */
WriteHSCX(hscx, IPAC_CCR1, 0x82);
hscx_cmdr(hscx, 0x41);
WriteHSCX(hscx, IPAC_MASKB, 0);
test_and_set_bit(FLG_TRANSPARENT, &hscx->bch.Flags);
break;
case ISDN_P_B_HDLC:
WriteHSCX(hscx, IPAC_MODEB, 0x8c);
WriteHSCX(hscx, IPAC_CCR1, 0x8a);
hscx_cmdr(hscx, 0x41);
WriteHSCX(hscx, IPAC_MASKB, 0);
test_and_set_bit(FLG_HDLC, &hscx->bch.Flags);
break;
default:
pr_info("%s: protocol not known %x\n", hscx->ip->name,
bprotocol);
return -ENOPROTOOPT;
}
} else if (hscx->ip->type & IPAC_TYPE_HSCX) { /* HSCX */
WriteHSCX(hscx, IPAC_CCR1, 0x85);
WriteHSCX(hscx, IPAC_CCR2, 0x30);
WriteHSCX(hscx, IPAC_XCCR, 0x07);
WriteHSCX(hscx, IPAC_RCCR, 0x07);
WriteHSCX(hscx, IPAC_TSAX, hscx->slot);
WriteHSCX(hscx, IPAC_TSAR, hscx->slot);
switch (bprotocol) {
case ISDN_P_NONE:
WriteHSCX(hscx, IPAC_TSAX, 0x1F);
WriteHSCX(hscx, IPAC_TSAR, 0x1F);
WriteHSCX(hscx, IPAC_MODEB, 0x84);
WriteHSCX(hscx, IPAC_CCR1, 0x85);
WriteHSCX(hscx, IPAC_MASKB, 0xFF); /* ints off */
test_and_clear_bit(FLG_HDLC, &hscx->bch.Flags);
test_and_clear_bit(FLG_TRANSPARENT, &hscx->bch.Flags);
break;
case ISDN_P_B_RAW:
WriteHSCX(hscx, IPAC_MODEB, 0xe4); /* ex trans */
WriteHSCX(hscx, IPAC_CCR1, 0x85);
hscx_cmdr(hscx, 0x41);
WriteHSCX(hscx, IPAC_MASKB, 0);
test_and_set_bit(FLG_TRANSPARENT, &hscx->bch.Flags);
break;
case ISDN_P_B_HDLC:
WriteHSCX(hscx, IPAC_MODEB, 0x8c);
WriteHSCX(hscx, IPAC_CCR1, 0x8d);
hscx_cmdr(hscx, 0x41);
WriteHSCX(hscx, IPAC_MASKB, 0);
test_and_set_bit(FLG_HDLC, &hscx->bch.Flags);
break;
default:
pr_info("%s: protocol not known %x\n", hscx->ip->name,
bprotocol);
return -ENOPROTOOPT;
}
} else
return -EINVAL;
hscx->bch.state = bprotocol;
return 0;
}
static int
hscx_l2l1(struct mISDNchannel *ch, struct sk_buff *skb)
{
struct bchannel *bch = container_of(ch, struct bchannel, ch);
struct hscx_hw *hx = container_of(bch, struct hscx_hw, bch);
int ret = -EINVAL;
struct mISDNhead *hh = mISDN_HEAD_P(skb);
unsigned long flags;
switch (hh->prim) {
case PH_DATA_REQ:
spin_lock_irqsave(hx->ip->hwlock, flags);
ret = bchannel_senddata(bch, skb);
if (ret > 0) { /* direct TX */
ret = 0;
hscx_fill_fifo(hx);
}
spin_unlock_irqrestore(hx->ip->hwlock, flags);
return ret;
case PH_ACTIVATE_REQ:
spin_lock_irqsave(hx->ip->hwlock, flags);
if (!test_and_set_bit(FLG_ACTIVE, &bch->Flags))
ret = hscx_mode(hx, ch->protocol);
else
ret = 0;
spin_unlock_irqrestore(hx->ip->hwlock, flags);
if (!ret)
_queue_data(ch, PH_ACTIVATE_IND, MISDN_ID_ANY, 0,
NULL, GFP_KERNEL);
break;
case PH_DEACTIVATE_REQ:
spin_lock_irqsave(hx->ip->hwlock, flags);
mISDN_clear_bchannel(bch);
hscx_mode(hx, ISDN_P_NONE);
spin_unlock_irqrestore(hx->ip->hwlock, flags);
_queue_data(ch, PH_DEACTIVATE_IND, MISDN_ID_ANY, 0,
NULL, GFP_KERNEL);
ret = 0;
break;
default:
pr_info("%s: %s unknown prim(%x,%x)\n",
hx->ip->name, __func__, hh->prim, hh->id);
ret = -EINVAL;
}
if (!ret)
dev_kfree_skb(skb);
return ret;
}
static int
channel_bctrl(struct bchannel *bch, struct mISDN_ctrl_req *cq)
{
return mISDN_ctrl_bchannel(bch, cq);
}
static int
hscx_bctrl(struct mISDNchannel *ch, u32 cmd, void *arg)
{
struct bchannel *bch = container_of(ch, struct bchannel, ch);
struct hscx_hw *hx = container_of(bch, struct hscx_hw, bch);
int ret = -EINVAL;
u_long flags;
pr_debug("%s: %s cmd:%x %p\n", hx->ip->name, __func__, cmd, arg);
switch (cmd) {
case CLOSE_CHANNEL:
test_and_clear_bit(FLG_OPEN, &bch->Flags);
cancel_work_sync(&bch->workq);
spin_lock_irqsave(hx->ip->hwlock, flags);
mISDN_clear_bchannel(bch);
hscx_mode(hx, ISDN_P_NONE);
spin_unlock_irqrestore(hx->ip->hwlock, flags);
ch->protocol = ISDN_P_NONE;
ch->peer = NULL;
module_put(hx->ip->owner);
ret = 0;
break;
case CONTROL_CHANNEL:
ret = channel_bctrl(bch, arg);
break;
default:
pr_info("%s: %s unknown prim(%x)\n",
hx->ip->name, __func__, cmd);
}
return ret;
}
static void
free_ipac(struct ipac_hw *ipac)
{
isac_release(&ipac->isac);
}
static const char *HSCXVer[] =
{"A1", "?1", "A2", "?3", "A3", "V2.1", "?6", "?7",
"?8", "?9", "?10", "?11", "?12", "?13", "?14", "???"};
static void
hscx_init(struct hscx_hw *hx)
{
u8 val;
WriteHSCX(hx, IPAC_RAH2, 0xFF);
WriteHSCX(hx, IPAC_XBCH, 0x00);
WriteHSCX(hx, IPAC_RLCR, 0x00);
if (hx->ip->type & IPAC_TYPE_HSCX) {
WriteHSCX(hx, IPAC_CCR1, 0x85);
val = ReadHSCX(hx, HSCX_VSTR);
pr_debug("%s: HSCX VSTR %02x\n", hx->ip->name, val);
if (hx->bch.debug & DEBUG_HW)
pr_notice("%s: HSCX version %s\n", hx->ip->name,
HSCXVer[val & 0x0f]);
} else
WriteHSCX(hx, IPAC_CCR1, 0x82);
WriteHSCX(hx, IPAC_CCR2, 0x30);
WriteHSCX(hx, IPAC_XCCR, 0x07);
WriteHSCX(hx, IPAC_RCCR, 0x07);
}
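/*
 * Chip level initialization: set up both B-channel controllers (the
 * IPACX needs no extra setup here), program the IPAC configuration
 * register and finally initialize the embedded ISAC D-channel part.
 */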
static int
ipac_init(struct ipac_hw *ipac)
{
u8 val;
if (ipac->type & IPAC_TYPE_HSCX) {
hscx_init(&ipac->hscx[0]);
hscx_init(&ipac->hscx[1]);
val = ReadIPAC(ipac, IPAC_ID);
} else if (ipac->type & IPAC_TYPE_IPAC) {
hscx_init(&ipac->hscx[0]);
hscx_init(&ipac->hscx[1]);
WriteIPAC(ipac, IPAC_MASK, IPAC__ON);
val = ReadIPAC(ipac, IPAC_CONF);
/* conf is default 0, but can be overwritten by card setup */
pr_debug("%s: IPAC CONF %02x/%02x\n", ipac->name,
val, ipac->conf);
WriteIPAC(ipac, IPAC_CONF, ipac->conf);
val = ReadIPAC(ipac, IPAC_ID);
if (ipac->hscx[0].bch.debug & DEBUG_HW)
pr_notice("%s: IPAC Design ID %02x\n", ipac->name, val);
}
/* nothing special for IPACX to do here */
return isac_init(&ipac->isac);
}
static int
open_bchannel(struct ipac_hw *ipac, struct channel_req *rq)
{
struct bchannel *bch;
if (rq->adr.channel == 0 || rq->adr.channel > 2)
return -EINVAL;
if (rq->protocol == ISDN_P_NONE)
return -EINVAL;
bch = &ipac->hscx[rq->adr.channel - 1].bch;
if (test_and_set_bit(FLG_OPEN, &bch->Flags))
		return -EBUSY; /* b-channel can only be opened once */
test_and_clear_bit(FLG_FILLEMPTY, &bch->Flags);
bch->ch.protocol = rq->protocol;
rq->ch = &bch->ch;
return 0;
}
static int
channel_ctrl(struct ipac_hw *ipac, struct mISDN_ctrl_req *cq)
{
int ret = 0;
switch (cq->op) {
case MISDN_CTRL_GETOP:
cq->op = MISDN_CTRL_LOOP | MISDN_CTRL_L1_TIMER3;
break;
case MISDN_CTRL_LOOP:
		/* cq->channel: 0 disable, 1 B1 loop, 2 B2 loop, 3 both */
if (cq->channel < 0 || cq->channel > 3) {
ret = -EINVAL;
break;
}
ret = ipac->ctrl(ipac, HW_TESTLOOP, cq->channel);
break;
case MISDN_CTRL_L1_TIMER3:
ret = ipac->isac.ctrl(&ipac->isac, HW_TIMER3_VALUE, cq->p1);
break;
default:
pr_info("%s: unknown CTRL OP %x\n", ipac->name, cq->op);
ret = -EINVAL;
break;
}
return ret;
}
static int
ipac_dctrl(struct mISDNchannel *ch, u32 cmd, void *arg)
{
struct mISDNdevice *dev = container_of(ch, struct mISDNdevice, D);
struct dchannel *dch = container_of(dev, struct dchannel, dev);
struct isac_hw *isac = container_of(dch, struct isac_hw, dch);
struct ipac_hw *ipac = container_of(isac, struct ipac_hw, isac);
struct channel_req *rq;
int err = 0;
pr_debug("%s: DCTRL: %x %p\n", ipac->name, cmd, arg);
switch (cmd) {
case OPEN_CHANNEL:
rq = arg;
if (rq->protocol == ISDN_P_TE_S0)
err = open_dchannel_caller(isac, rq, __builtin_return_address(0));
else
err = open_bchannel(ipac, rq);
if (err)
break;
if (!try_module_get(ipac->owner))
pr_info("%s: cannot get module\n", ipac->name);
break;
case CLOSE_CHANNEL:
pr_debug("%s: dev(%d) close from %p\n", ipac->name,
dch->dev.id, __builtin_return_address(0));
module_put(ipac->owner);
break;
case CONTROL_CHANNEL:
err = channel_ctrl(ipac, arg);
break;
default:
pr_debug("%s: unknown DCTRL command %x\n", ipac->name, cmd);
return -EINVAL;
}
return err;
}
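/*
 * Set up an ipac_hw structure: derive register offsets and FIFO sizes
 * from the chip type, initialize the ISAC D-channel and both
 * B-channels and return the bitmap of supported B-channel protocols
 * (0 for an unknown chip type).
 */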
u32
mISDNipac_init(struct ipac_hw *ipac, void *hw)
{
u32 ret;
u8 i;
ipac->hw = hw;
if (ipac->isac.dch.debug & DEBUG_HW)
pr_notice("%s: ipac type %x\n", ipac->name, ipac->type);
if (ipac->type & IPAC_TYPE_HSCX) {
ipac->isac.type = IPAC_TYPE_ISAC;
ipac->hscx[0].off = 0;
ipac->hscx[1].off = 0x40;
ipac->hscx[0].fifo_size = 32;
ipac->hscx[1].fifo_size = 32;
} else if (ipac->type & IPAC_TYPE_IPAC) {
ipac->isac.type = IPAC_TYPE_IPAC | IPAC_TYPE_ISAC;
ipac->hscx[0].off = 0;
ipac->hscx[1].off = 0x40;
ipac->hscx[0].fifo_size = 64;
ipac->hscx[1].fifo_size = 64;
} else if (ipac->type & IPAC_TYPE_IPACX) {
ipac->isac.type = IPAC_TYPE_IPACX | IPAC_TYPE_ISACX;
ipac->hscx[0].off = IPACX_OFF_ICA;
ipac->hscx[1].off = IPACX_OFF_ICB;
ipac->hscx[0].fifo_size = 64;
ipac->hscx[1].fifo_size = 64;
} else
return 0;
mISDNisac_init(&ipac->isac, hw);
ipac->isac.dch.dev.D.ctrl = ipac_dctrl;
for (i = 0; i < 2; i++) {
ipac->hscx[i].bch.nr = i + 1;
set_channelmap(i + 1, ipac->isac.dch.dev.channelmap);
list_add(&ipac->hscx[i].bch.ch.list,
&ipac->isac.dch.dev.bchannels);
mISDN_initbchannel(&ipac->hscx[i].bch, MAX_DATA_MEM,
ipac->hscx[i].fifo_size);
ipac->hscx[i].bch.ch.nr = i + 1;
ipac->hscx[i].bch.ch.send = &hscx_l2l1;
ipac->hscx[i].bch.ch.ctrl = hscx_bctrl;
ipac->hscx[i].bch.hw = hw;
ipac->hscx[i].ip = ipac;
/* default values for IOM time slots
* can be overwritten by card */
ipac->hscx[i].slot = (i == 0) ? 0x2f : 0x03;
}
ipac->init = ipac_init;
ipac->release = free_ipac;
ret = (1 << (ISDN_P_B_RAW & ISDN_P_B_MASK)) |
(1 << (ISDN_P_B_HDLC & ISDN_P_B_MASK));
return ret;
}
EXPORT_SYMBOL(mISDNipac_init);
static int __init
isac_mod_init(void)
{
pr_notice("mISDNipac module version %s\n", ISAC_REV);
return 0;
}
static void __exit
isac_mod_cleanup(void)
{
pr_notice("mISDNipac module unloaded\n");
}
module_init(isac_mod_init);
module_exit(isac_mod_cleanup);
| linux-master | drivers/isdn/hardware/mISDN/mISDNipac.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* mISDNisar.c ISAR (Siemens PSB 7110) specific functions
*
* Author Karsten Keil ([email protected])
*
* Copyright 2009 by Karsten Keil <[email protected]>
*/
/* define this to enable static debug messages; if your kernel supports
 * dynamic debugging, you should use debugfs for this instead
 */
/* #define DEBUG */
#include <linux/gfp.h>
#include <linux/delay.h>
#include <linux/vmalloc.h>
#include <linux/mISDNhw.h>
#include <linux/module.h>
#include "isar.h"
#define ISAR_REV "2.1"
MODULE_AUTHOR("Karsten Keil");
MODULE_LICENSE("GPL v2");
MODULE_VERSION(ISAR_REV);
#define DEBUG_HW_FIRMWARE_FIFO 0x10000
static const u8 faxmodulation[] = {3, 24, 48, 72, 73, 74, 96, 97, 98, 121,
122, 145, 146};
#define FAXMODCNT 13
static void isar_setup(struct isar_hw *);
static inline int
waitforHIA(struct isar_hw *isar, int timeout)
{
int t = timeout;
u8 val = isar->read_reg(isar->hw, ISAR_HIA);
while ((val & 1) && t) {
udelay(1);
t--;
val = isar->read_reg(isar->hw, ISAR_HIA);
}
pr_debug("%s: HIA after %dus\n", isar->name, timeout - t);
	return t;	/* remaining time; 0 means HIA never went idle */
}
/*
* send msg to ISAR mailbox
* if msg is NULL use isar->buf
*/
static int
send_mbox(struct isar_hw *isar, u8 his, u8 creg, u8 len, u8 *msg)
{
if (!waitforHIA(isar, 1000))
return 0;
pr_debug("send_mbox(%02x,%02x,%d)\n", his, creg, len);
isar->write_reg(isar->hw, ISAR_CTRL_H, creg);
isar->write_reg(isar->hw, ISAR_CTRL_L, len);
isar->write_reg(isar->hw, ISAR_WADR, 0);
if (!msg)
msg = isar->buf;
if (msg && len) {
isar->write_fifo(isar->hw, ISAR_MBOX, msg, len);
if (isar->ch[0].bch.debug & DEBUG_HW_BFIFO) {
int l = 0;
while (l < (int)len) {
hex_dump_to_buffer(msg + l, len - l, 32, 1,
isar->log, 256, 1);
pr_debug("%s: %s %02x: %s\n", isar->name,
__func__, l, isar->log);
l += 32;
}
}
}
isar->write_reg(isar->hw, ISAR_HIS, his);
waitforHIA(isar, 1000);
return 1;
}
/*
* receive message from ISAR mailbox
* if msg is NULL use isar->buf
*/
static void
rcv_mbox(struct isar_hw *isar, u8 *msg)
{
if (!msg)
msg = isar->buf;
isar->write_reg(isar->hw, ISAR_RADR, 0);
if (msg && isar->clsb) {
isar->read_fifo(isar->hw, ISAR_MBOX, msg, isar->clsb);
if (isar->ch[0].bch.debug & DEBUG_HW_BFIFO) {
int l = 0;
while (l < (int)isar->clsb) {
hex_dump_to_buffer(msg + l, isar->clsb - l, 32,
1, isar->log, 256, 1);
pr_debug("%s: %s %02x: %s\n", isar->name,
__func__, l, isar->log);
l += 32;
}
}
}
isar->write_reg(isar->hw, ISAR_IIA, 0);
}
static inline void
get_irq_infos(struct isar_hw *isar)
{
isar->iis = isar->read_reg(isar->hw, ISAR_IIS);
isar->cmsb = isar->read_reg(isar->hw, ISAR_CTRL_H);
isar->clsb = isar->read_reg(isar->hw, ISAR_CTRL_L);
pr_debug("%s: rcv_mbox(%02x,%02x,%d)\n", isar->name,
isar->iis, isar->cmsb, isar->clsb);
}
/*
* poll answer message from ISAR mailbox
* should be used only with ISAR IRQs disabled before DSP was started
*
*/
static int
poll_mbox(struct isar_hw *isar, int maxdelay)
{
int t = maxdelay;
u8 irq;
irq = isar->read_reg(isar->hw, ISAR_IRQBIT);
while (t && !(irq & ISAR_IRQSTA)) {
udelay(1);
t--;
}
if (t) {
get_irq_infos(isar);
rcv_mbox(isar, NULL);
}
pr_debug("%s: pulled %d bytes after %d us\n",
isar->name, isar->clsb, maxdelay - t);
return t;
}
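/*
 * Query the ISAR hardware version: disable the ISAR interrupts, send a
 * VNR mailbox message and poll for the answer. Returns the version
 * number or a negative value on a mailbox/poll failure.
 */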
static int
ISARVersion(struct isar_hw *isar)
{
int ver;
/* disable ISAR IRQ */
isar->write_reg(isar->hw, ISAR_IRQBIT, 0);
isar->buf[0] = ISAR_MSG_HWVER;
isar->buf[1] = 0;
isar->buf[2] = 1;
if (!send_mbox(isar, ISAR_HIS_VNR, 0, 3, NULL))
return -1;
if (!poll_mbox(isar, 1000))
return -2;
if (isar->iis == ISAR_IIS_VNR) {
if (isar->clsb == 1) {
ver = isar->buf[0] & 0xf;
return ver;
}
return -3;
}
return -4;
}
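/*
 * Download the DSP firmware. The image is a sequence of blocks, each
 * with a three word header (start address, length, download key); every
 * block is transferred in chunks of at most 126 words via FIRM mailbox
 * messages. Afterwards the DSP is started and verified with a selftest
 * and a software version request, then isar_setup() is called.
 */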
static int
load_firmware(struct isar_hw *isar, const u8 *buf, int size)
{
u32 saved_debug = isar->ch[0].bch.debug;
int ret, cnt;
u8 nom, noc;
u16 left, val, *sp = (u16 *)buf;
u8 *mp;
u_long flags;
struct {
u16 sadr;
u16 len;
u16 d_key;
} blk_head;
	if (isar->version != 1) {
		pr_err("%s: ISAR wrong version %d, firmware download aborted\n",
isar->name, isar->version);
return -EINVAL;
}
if (!(saved_debug & DEBUG_HW_FIRMWARE_FIFO))
isar->ch[0].bch.debug &= ~DEBUG_HW_BFIFO;
pr_debug("%s: load firmware %d words (%d bytes)\n",
isar->name, size / 2, size);
cnt = 0;
size /= 2;
/* disable ISAR IRQ */
spin_lock_irqsave(isar->hwlock, flags);
isar->write_reg(isar->hw, ISAR_IRQBIT, 0);
spin_unlock_irqrestore(isar->hwlock, flags);
while (cnt < size) {
blk_head.sadr = le16_to_cpu(*sp++);
blk_head.len = le16_to_cpu(*sp++);
blk_head.d_key = le16_to_cpu(*sp++);
cnt += 3;
pr_debug("ISAR firmware block (%#x,%d,%#x)\n",
blk_head.sadr, blk_head.len, blk_head.d_key & 0xff);
left = blk_head.len;
if (cnt + left > size) {
pr_info("%s: firmware error have %d need %d words\n",
isar->name, size, cnt + left);
ret = -EINVAL;
goto reterrflg;
}
spin_lock_irqsave(isar->hwlock, flags);
if (!send_mbox(isar, ISAR_HIS_DKEY, blk_head.d_key & 0xff,
0, NULL)) {
pr_info("ISAR send_mbox dkey failed\n");
ret = -ETIME;
goto reterror;
}
if (!poll_mbox(isar, 1000)) {
pr_warn("ISAR poll_mbox dkey failed\n");
ret = -ETIME;
goto reterror;
}
spin_unlock_irqrestore(isar->hwlock, flags);
if ((isar->iis != ISAR_IIS_DKEY) || isar->cmsb || isar->clsb) {
pr_info("ISAR wrong dkey response (%x,%x,%x)\n",
isar->iis, isar->cmsb, isar->clsb);
ret = 1;
goto reterrflg;
}
while (left > 0) {
if (left > 126)
noc = 126;
else
noc = left;
nom = (2 * noc) + 3;
mp = isar->buf;
/* the ISAR is big endian */
*mp++ = blk_head.sadr >> 8;
*mp++ = blk_head.sadr & 0xFF;
left -= noc;
cnt += noc;
*mp++ = noc;
pr_debug("%s: load %3d words at %04x\n", isar->name,
noc, blk_head.sadr);
blk_head.sadr += noc;
while (noc) {
val = le16_to_cpu(*sp++);
*mp++ = val >> 8;
*mp++ = val & 0xFF;
noc--;
}
spin_lock_irqsave(isar->hwlock, flags);
if (!send_mbox(isar, ISAR_HIS_FIRM, 0, nom, NULL)) {
pr_info("ISAR send_mbox prog failed\n");
ret = -ETIME;
goto reterror;
}
if (!poll_mbox(isar, 1000)) {
pr_info("ISAR poll_mbox prog failed\n");
ret = -ETIME;
goto reterror;
}
spin_unlock_irqrestore(isar->hwlock, flags);
if ((isar->iis != ISAR_IIS_FIRM) ||
isar->cmsb || isar->clsb) {
pr_info("ISAR wrong prog response (%x,%x,%x)\n",
isar->iis, isar->cmsb, isar->clsb);
ret = -EIO;
goto reterrflg;
}
}
pr_debug("%s: ISAR firmware block %d words loaded\n",
isar->name, blk_head.len);
}
isar->ch[0].bch.debug = saved_debug;
/* 10ms delay */
cnt = 10;
while (cnt--)
mdelay(1);
isar->buf[0] = 0xff;
isar->buf[1] = 0xfe;
isar->bstat = 0;
spin_lock_irqsave(isar->hwlock, flags);
if (!send_mbox(isar, ISAR_HIS_STDSP, 0, 2, NULL)) {
pr_info("ISAR send_mbox start dsp failed\n");
ret = -ETIME;
goto reterror;
}
if (!poll_mbox(isar, 1000)) {
pr_info("ISAR poll_mbox start dsp failed\n");
ret = -ETIME;
goto reterror;
}
if ((isar->iis != ISAR_IIS_STDSP) || isar->cmsb || isar->clsb) {
pr_info("ISAR wrong start dsp response (%x,%x,%x)\n",
isar->iis, isar->cmsb, isar->clsb);
ret = -EIO;
goto reterror;
} else
pr_debug("%s: ISAR start dsp success\n", isar->name);
/* NORMAL mode entered */
/* Enable IRQs of ISAR */
isar->write_reg(isar->hw, ISAR_IRQBIT, ISAR_IRQSTA);
spin_unlock_irqrestore(isar->hwlock, flags);
cnt = 1000; /* max 1s */
while ((!isar->bstat) && cnt) {
mdelay(1);
cnt--;
}
if (!cnt) {
pr_info("ISAR no general status event received\n");
ret = -ETIME;
goto reterrflg;
} else
pr_debug("%s: ISAR general status event %x\n",
isar->name, isar->bstat);
/* 10ms delay */
cnt = 10;
while (cnt--)
mdelay(1);
isar->iis = 0;
spin_lock_irqsave(isar->hwlock, flags);
if (!send_mbox(isar, ISAR_HIS_DIAG, ISAR_CTRL_STST, 0, NULL)) {
pr_info("ISAR send_mbox self tst failed\n");
ret = -ETIME;
goto reterror;
}
spin_unlock_irqrestore(isar->hwlock, flags);
cnt = 10000; /* max 100 ms */
while ((isar->iis != ISAR_IIS_DIAG) && cnt) {
udelay(10);
cnt--;
}
mdelay(1);
if (!cnt) {
pr_info("ISAR no self tst response\n");
ret = -ETIME;
goto reterrflg;
}
if ((isar->cmsb == ISAR_CTRL_STST) && (isar->clsb == 1)
&& (isar->buf[0] == 0))
pr_debug("%s: ISAR selftest OK\n", isar->name);
else {
pr_info("ISAR selftest not OK %x/%x/%x\n",
isar->cmsb, isar->clsb, isar->buf[0]);
ret = -EIO;
goto reterrflg;
}
spin_lock_irqsave(isar->hwlock, flags);
isar->iis = 0;
if (!send_mbox(isar, ISAR_HIS_DIAG, ISAR_CTRL_SWVER, 0, NULL)) {
pr_info("ISAR RQST SVN failed\n");
ret = -ETIME;
goto reterror;
}
spin_unlock_irqrestore(isar->hwlock, flags);
cnt = 30000; /* max 300 ms */
while ((isar->iis != ISAR_IIS_DIAG) && cnt) {
udelay(10);
cnt--;
}
mdelay(1);
if (!cnt) {
pr_info("ISAR no SVN response\n");
ret = -ETIME;
goto reterrflg;
} else {
if ((isar->cmsb == ISAR_CTRL_SWVER) && (isar->clsb == 1)) {
pr_notice("%s: ISAR software version %#x\n",
isar->name, isar->buf[0]);
} else {
pr_info("%s: ISAR wrong swver response (%x,%x)"
" cnt(%d)\n", isar->name, isar->cmsb,
isar->clsb, cnt);
ret = -EIO;
goto reterrflg;
}
}
spin_lock_irqsave(isar->hwlock, flags);
isar_setup(isar);
spin_unlock_irqrestore(isar->hwlock, flags);
ret = 0;
reterrflg:
spin_lock_irqsave(isar->hwlock, flags);
reterror:
isar->ch[0].bch.debug = saved_debug;
if (ret)
/* disable ISAR IRQ */
isar->write_reg(isar->hw, ISAR_IRQBIT, 0);
spin_unlock_irqrestore(isar->hwlock, flags);
return ret;
}
static inline void
deliver_status(struct isar_ch *ch, int status)
{
pr_debug("%s: HL->LL FAXIND %x\n", ch->is->name, status);
_queue_data(&ch->bch.ch, PH_CONTROL_IND, status, 0, NULL, GFP_ATOMIC);
}
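/*
 * Handle an IIS_RDATA mailbox event for one channel: read the received
 * bytes into rx_skb according to the current protocol (transparent,
 * HDLC or fax), strip the two FCS bytes of completed HDLC frames and
 * pass finished frames up with recv_Bchannel().
 */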
static inline void
isar_rcv_frame(struct isar_ch *ch)
{
u8 *ptr;
int maxlen;
if (!ch->is->clsb) {
pr_debug("%s; ISAR zero len frame\n", ch->is->name);
ch->is->write_reg(ch->is->hw, ISAR_IIA, 0);
return;
}
if (test_bit(FLG_RX_OFF, &ch->bch.Flags)) {
ch->bch.dropcnt += ch->is->clsb;
ch->is->write_reg(ch->is->hw, ISAR_IIA, 0);
return;
}
switch (ch->bch.state) {
case ISDN_P_NONE:
pr_debug("%s: ISAR protocol 0 spurious IIS_RDATA %x/%x/%x\n",
ch->is->name, ch->is->iis, ch->is->cmsb, ch->is->clsb);
ch->is->write_reg(ch->is->hw, ISAR_IIA, 0);
break;
case ISDN_P_B_RAW:
case ISDN_P_B_L2DTMF:
case ISDN_P_B_MODEM_ASYNC:
maxlen = bchannel_get_rxbuf(&ch->bch, ch->is->clsb);
if (maxlen < 0) {
pr_warn("%s.B%d: No bufferspace for %d bytes\n",
ch->is->name, ch->bch.nr, ch->is->clsb);
ch->is->write_reg(ch->is->hw, ISAR_IIA, 0);
break;
}
rcv_mbox(ch->is, skb_put(ch->bch.rx_skb, ch->is->clsb));
recv_Bchannel(&ch->bch, 0, false);
break;
case ISDN_P_B_HDLC:
maxlen = bchannel_get_rxbuf(&ch->bch, ch->is->clsb);
if (maxlen < 0) {
pr_warn("%s.B%d: No bufferspace for %d bytes\n",
ch->is->name, ch->bch.nr, ch->is->clsb);
ch->is->write_reg(ch->is->hw, ISAR_IIA, 0);
break;
}
if (ch->is->cmsb & HDLC_ERROR) {
pr_debug("%s: ISAR frame error %x len %d\n",
ch->is->name, ch->is->cmsb, ch->is->clsb);
#ifdef ERROR_STATISTIC
if (ch->is->cmsb & HDLC_ERR_RER)
ch->bch.err_inv++;
if (ch->is->cmsb & HDLC_ERR_CER)
ch->bch.err_crc++;
#endif
skb_trim(ch->bch.rx_skb, 0);
ch->is->write_reg(ch->is->hw, ISAR_IIA, 0);
break;
}
if (ch->is->cmsb & HDLC_FSD)
skb_trim(ch->bch.rx_skb, 0);
ptr = skb_put(ch->bch.rx_skb, ch->is->clsb);
rcv_mbox(ch->is, ptr);
if (ch->is->cmsb & HDLC_FED) {
if (ch->bch.rx_skb->len < 3) { /* last 2 are the FCS */
pr_debug("%s: ISAR frame too short %d\n",
ch->is->name, ch->bch.rx_skb->len);
skb_trim(ch->bch.rx_skb, 0);
break;
}
skb_trim(ch->bch.rx_skb, ch->bch.rx_skb->len - 2);
recv_Bchannel(&ch->bch, 0, false);
}
break;
case ISDN_P_B_T30_FAX:
if (ch->state != STFAX_ACTIV) {
pr_debug("%s: isar_rcv_frame: not ACTIV\n",
ch->is->name);
ch->is->write_reg(ch->is->hw, ISAR_IIA, 0);
if (ch->bch.rx_skb)
skb_trim(ch->bch.rx_skb, 0);
break;
}
if (!ch->bch.rx_skb) {
ch->bch.rx_skb = mI_alloc_skb(ch->bch.maxlen,
GFP_ATOMIC);
if (unlikely(!ch->bch.rx_skb)) {
pr_info("%s: B receive out of memory\n",
__func__);
ch->is->write_reg(ch->is->hw, ISAR_IIA, 0);
break;
}
}
if (ch->cmd == PCTRL_CMD_FRM) {
rcv_mbox(ch->is, skb_put(ch->bch.rx_skb, ch->is->clsb));
pr_debug("%s: isar_rcv_frame: %d\n",
ch->is->name, ch->bch.rx_skb->len);
if (ch->is->cmsb & SART_NMD) { /* ABORT */
pr_debug("%s: isar_rcv_frame: no more data\n",
ch->is->name);
ch->is->write_reg(ch->is->hw, ISAR_IIA, 0);
send_mbox(ch->is, SET_DPS(ch->dpath) |
ISAR_HIS_PUMPCTRL, PCTRL_CMD_ESC,
0, NULL);
ch->state = STFAX_ESCAPE;
/* set_skb_flag(skb, DF_NOMOREDATA); */
}
recv_Bchannel(&ch->bch, 0, false);
if (ch->is->cmsb & SART_NMD)
deliver_status(ch, HW_MOD_NOCARR);
break;
}
if (ch->cmd != PCTRL_CMD_FRH) {
pr_debug("%s: isar_rcv_frame: unknown fax mode %x\n",
ch->is->name, ch->cmd);
ch->is->write_reg(ch->is->hw, ISAR_IIA, 0);
if (ch->bch.rx_skb)
skb_trim(ch->bch.rx_skb, 0);
break;
}
/* PCTRL_CMD_FRH */
if ((ch->bch.rx_skb->len + ch->is->clsb) >
(ch->bch.maxlen + 2)) {
pr_info("%s: %s incoming packet too large\n",
ch->is->name, __func__);
ch->is->write_reg(ch->is->hw, ISAR_IIA, 0);
skb_trim(ch->bch.rx_skb, 0);
break;
} else if (ch->is->cmsb & HDLC_ERROR) {
pr_info("%s: ISAR frame error %x len %d\n",
ch->is->name, ch->is->cmsb, ch->is->clsb);
skb_trim(ch->bch.rx_skb, 0);
ch->is->write_reg(ch->is->hw, ISAR_IIA, 0);
break;
}
if (ch->is->cmsb & HDLC_FSD)
skb_trim(ch->bch.rx_skb, 0);
ptr = skb_put(ch->bch.rx_skb, ch->is->clsb);
rcv_mbox(ch->is, ptr);
if (ch->is->cmsb & HDLC_FED) {
if (ch->bch.rx_skb->len < 3) { /* last 2 are the FCS */
pr_info("%s: ISAR frame too short %d\n",
ch->is->name, ch->bch.rx_skb->len);
skb_trim(ch->bch.rx_skb, 0);
break;
}
skb_trim(ch->bch.rx_skb, ch->bch.rx_skb->len - 2);
recv_Bchannel(&ch->bch, 0, false);
}
if (ch->is->cmsb & SART_NMD) { /* ABORT */
pr_debug("%s: isar_rcv_frame: no more data\n",
ch->is->name);
ch->is->write_reg(ch->is->hw, ISAR_IIA, 0);
if (ch->bch.rx_skb)
skb_trim(ch->bch.rx_skb, 0);
send_mbox(ch->is, SET_DPS(ch->dpath) |
ISAR_HIS_PUMPCTRL, PCTRL_CMD_ESC, 0, NULL);
ch->state = STFAX_ESCAPE;
deliver_status(ch, HW_MOD_NOCARR);
}
break;
default:
pr_info("isar_rcv_frame protocol (%x)error\n", ch->bch.state);
ch->is->write_reg(ch->is->hw, ISAR_IIA, 0);
break;
}
}
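/*
 * Move the next chunk (at most mml bytes) of the pending tx_skb into
 * the ISAR via an SDATA mailbox message, marking HDLC frame start/end
 * with HDLC_FST/HDLC_FED. Nothing is sent unless the data path reports
 * a free transmit buffer (BSTAT_RDM1/RDM2).
 */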
static void
isar_fill_fifo(struct isar_ch *ch)
{
int count;
u8 msb;
u8 *ptr;
pr_debug("%s: ch%d tx_skb %d tx_idx %d\n", ch->is->name, ch->bch.nr,
ch->bch.tx_skb ? ch->bch.tx_skb->len : -1, ch->bch.tx_idx);
if (!(ch->is->bstat &
(ch->dpath == 1 ? BSTAT_RDM1 : BSTAT_RDM2)))
return;
if (!ch->bch.tx_skb) {
if (!test_bit(FLG_TX_EMPTY, &ch->bch.Flags) ||
(ch->bch.state != ISDN_P_B_RAW))
return;
count = ch->mml;
/* use the card buffer */
memset(ch->is->buf, ch->bch.fill[0], count);
send_mbox(ch->is, SET_DPS(ch->dpath) | ISAR_HIS_SDATA,
0, count, ch->is->buf);
return;
}
count = ch->bch.tx_skb->len - ch->bch.tx_idx;
if (count <= 0)
return;
if (count > ch->mml) {
msb = 0;
count = ch->mml;
} else {
msb = HDLC_FED;
}
ptr = ch->bch.tx_skb->data + ch->bch.tx_idx;
if (!ch->bch.tx_idx) {
pr_debug("%s: frame start\n", ch->is->name);
if ((ch->bch.state == ISDN_P_B_T30_FAX) &&
(ch->cmd == PCTRL_CMD_FTH)) {
if (count > 1) {
if ((ptr[0] == 0xff) && (ptr[1] == 0x13)) {
/* last frame */
test_and_set_bit(FLG_LASTDATA,
&ch->bch.Flags);
pr_debug("%s: set LASTDATA\n",
ch->is->name);
if (msb == HDLC_FED)
test_and_set_bit(FLG_DLEETX,
&ch->bch.Flags);
}
}
}
msb |= HDLC_FST;
}
ch->bch.tx_idx += count;
switch (ch->bch.state) {
case ISDN_P_NONE:
pr_info("%s: wrong protocol 0\n", __func__);
break;
case ISDN_P_B_RAW:
case ISDN_P_B_L2DTMF:
case ISDN_P_B_MODEM_ASYNC:
send_mbox(ch->is, SET_DPS(ch->dpath) | ISAR_HIS_SDATA,
0, count, ptr);
break;
case ISDN_P_B_HDLC:
send_mbox(ch->is, SET_DPS(ch->dpath) | ISAR_HIS_SDATA,
msb, count, ptr);
break;
case ISDN_P_B_T30_FAX:
if (ch->state != STFAX_ACTIV)
pr_debug("%s: not ACTIV\n", ch->is->name);
else if (ch->cmd == PCTRL_CMD_FTH)
send_mbox(ch->is, SET_DPS(ch->dpath) | ISAR_HIS_SDATA,
msb, count, ptr);
else if (ch->cmd == PCTRL_CMD_FTM)
send_mbox(ch->is, SET_DPS(ch->dpath) | ISAR_HIS_SDATA,
0, count, ptr);
else
pr_debug("%s: not FTH/FTM\n", ch->is->name);
break;
default:
pr_info("%s: protocol(%x) error\n",
__func__, ch->bch.state);
break;
}
}
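/* Map a data path number (1 or 2) to the isar_ch currently using it. */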
static inline struct isar_ch *
sel_bch_isar(struct isar_hw *isar, u8 dpath)
{
struct isar_ch *base = &isar->ch[0];
if ((!dpath) || (dpath > 2))
return NULL;
if (base->dpath == dpath)
return base;
base++;
if (base->dpath == dpath)
return base;
return NULL;
}
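/*
 * The current tx_skb is fully sent: free it and start the next queued
 * frame, continue with fill bytes in fill-empty mode, or finish the fax
 * transmission (DLEETX/LASTDATA/NMD_DATA bookkeeping).
 */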
static void
send_next(struct isar_ch *ch)
{
pr_debug("%s: %s ch%d tx_skb %d tx_idx %d\n", ch->is->name, __func__,
ch->bch.nr, ch->bch.tx_skb ? ch->bch.tx_skb->len : -1,
ch->bch.tx_idx);
if (ch->bch.state == ISDN_P_B_T30_FAX) {
if (ch->cmd == PCTRL_CMD_FTH) {
if (test_bit(FLG_LASTDATA, &ch->bch.Flags)) {
pr_debug("set NMD_DATA\n");
test_and_set_bit(FLG_NMD_DATA, &ch->bch.Flags);
}
} else if (ch->cmd == PCTRL_CMD_FTM) {
if (test_bit(FLG_DLEETX, &ch->bch.Flags)) {
test_and_set_bit(FLG_LASTDATA, &ch->bch.Flags);
test_and_set_bit(FLG_NMD_DATA, &ch->bch.Flags);
}
}
}
dev_kfree_skb(ch->bch.tx_skb);
if (get_next_bframe(&ch->bch)) {
isar_fill_fifo(ch);
test_and_clear_bit(FLG_TX_EMPTY, &ch->bch.Flags);
} else if (test_bit(FLG_TX_EMPTY, &ch->bch.Flags)) {
isar_fill_fifo(ch);
} else {
if (test_and_clear_bit(FLG_DLEETX, &ch->bch.Flags)) {
if (test_and_clear_bit(FLG_LASTDATA,
&ch->bch.Flags)) {
if (test_and_clear_bit(FLG_NMD_DATA,
&ch->bch.Flags)) {
u8 zd = 0;
send_mbox(ch->is, SET_DPS(ch->dpath) |
ISAR_HIS_SDATA, 0x01, 1, &zd);
}
test_and_set_bit(FLG_LL_OK, &ch->bch.Flags);
} else {
deliver_status(ch, HW_MOD_CONNECT);
}
} else if (test_bit(FLG_FILLEMPTY, &ch->bch.Flags)) {
test_and_set_bit(FLG_TX_EMPTY, &ch->bch.Flags);
}
}
}
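/*
 * A "buffer ready" general status event arrived: refill or advance the
 * transmit FIFO of every active channel whose data path reports a free
 * buffer (BSTAT_RDM1/RDM2).
 */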
static void
check_send(struct isar_hw *isar, u8 rdm)
{
struct isar_ch *ch;
pr_debug("%s: rdm %x\n", isar->name, rdm);
if (rdm & BSTAT_RDM1) {
ch = sel_bch_isar(isar, 1);
if (ch && test_bit(FLG_ACTIVE, &ch->bch.Flags)) {
if (ch->bch.tx_skb && (ch->bch.tx_skb->len >
ch->bch.tx_idx))
isar_fill_fifo(ch);
else
send_next(ch);
}
}
if (rdm & BSTAT_RDM2) {
ch = sel_bch_isar(isar, 2);
if (ch && test_bit(FLG_ACTIVE, &ch->bch.Flags)) {
if (ch->bch.tx_skb && (ch->bch.tx_skb->len >
ch->bch.tx_idx))
isar_fill_fifo(ch);
else
send_next(ch);
}
}
}
static const char *dmril[] = {"NO SPEED", "1200/75", "NODEF2", "75/1200", "NODEF4",
"300", "600", "1200", "2400", "4800", "7200",
"9600nt", "9600t", "12000", "14400", "WRONG"};
static const char *dmrim[] = {"NO MOD", "NO DEF", "V32/V32b", "V22", "V21",
"Bell103", "V23", "Bell202", "V17", "V29", "V27ter"};
static void
isar_pump_status_rsp(struct isar_ch *ch) {
u8 ril = ch->is->buf[0];
u8 rim;
if (!test_and_clear_bit(ISAR_RATE_REQ, &ch->is->Flags))
return;
if (ril > 14) {
pr_info("%s: wrong pstrsp ril=%d\n", ch->is->name, ril);
ril = 15;
}
switch (ch->is->buf[1]) {
case 0:
rim = 0;
break;
case 0x20:
rim = 2;
break;
case 0x40:
rim = 3;
break;
case 0x41:
rim = 4;
break;
case 0x51:
rim = 5;
break;
case 0x61:
rim = 6;
break;
case 0x71:
rim = 7;
break;
case 0x82:
rim = 8;
break;
case 0x92:
rim = 9;
break;
case 0xa2:
rim = 10;
break;
default:
rim = 1;
break;
}
sprintf(ch->conmsg, "%s %s", dmril[ril], dmrim[rim]);
pr_debug("%s: pump strsp %s\n", ch->is->name, ch->conmsg);
}
static void
isar_pump_statev_modem(struct isar_ch *ch, u8 devt) {
u8 dps = SET_DPS(ch->dpath);
switch (devt) {
case PSEV_10MS_TIMER:
pr_debug("%s: pump stev TIMER\n", ch->is->name);
break;
case PSEV_CON_ON:
pr_debug("%s: pump stev CONNECT\n", ch->is->name);
deliver_status(ch, HW_MOD_CONNECT);
break;
case PSEV_CON_OFF:
pr_debug("%s: pump stev NO CONNECT\n", ch->is->name);
send_mbox(ch->is, dps | ISAR_HIS_PSTREQ, 0, 0, NULL);
deliver_status(ch, HW_MOD_NOCARR);
break;
case PSEV_V24_OFF:
pr_debug("%s: pump stev V24 OFF\n", ch->is->name);
break;
case PSEV_CTS_ON:
pr_debug("%s: pump stev CTS ON\n", ch->is->name);
break;
case PSEV_CTS_OFF:
pr_debug("%s pump stev CTS OFF\n", ch->is->name);
break;
case PSEV_DCD_ON:
pr_debug("%s: pump stev CARRIER ON\n", ch->is->name);
test_and_set_bit(ISAR_RATE_REQ, &ch->is->Flags);
send_mbox(ch->is, dps | ISAR_HIS_PSTREQ, 0, 0, NULL);
break;
case PSEV_DCD_OFF:
pr_debug("%s: pump stev CARRIER OFF\n", ch->is->name);
break;
case PSEV_DSR_ON:
pr_debug("%s: pump stev DSR ON\n", ch->is->name);
break;
case PSEV_DSR_OFF:
pr_debug("%s: pump stev DSR_OFF\n", ch->is->name);
break;
case PSEV_REM_RET:
pr_debug("%s: pump stev REMOTE RETRAIN\n", ch->is->name);
break;
case PSEV_REM_REN:
pr_debug("%s: pump stev REMOTE RENEGOTIATE\n", ch->is->name);
break;
case PSEV_GSTN_CLR:
pr_debug("%s: pump stev GSTN CLEAR\n", ch->is->name);
break;
default:
pr_info("u%s: unknown pump stev %x\n", ch->is->name, devt);
break;
}
}
static void
isar_pump_statev_fax(struct isar_ch *ch, u8 devt) {
u8 dps = SET_DPS(ch->dpath);
u8 p1;
switch (devt) {
case PSEV_10MS_TIMER:
pr_debug("%s: pump stev TIMER\n", ch->is->name);
break;
case PSEV_RSP_READY:
pr_debug("%s: pump stev RSP_READY\n", ch->is->name);
ch->state = STFAX_READY;
deliver_status(ch, HW_MOD_READY);
#ifdef AUTOCON
if (test_bit(BC_FLG_ORIG, &ch->bch.Flags))
isar_pump_cmd(bch, HW_MOD_FRH, 3);
else
isar_pump_cmd(bch, HW_MOD_FTH, 3);
#endif
break;
case PSEV_LINE_TX_H:
if (ch->state == STFAX_LINE) {
pr_debug("%s: pump stev LINE_TX_H\n", ch->is->name);
ch->state = STFAX_CONT;
send_mbox(ch->is, dps | ISAR_HIS_PUMPCTRL,
PCTRL_CMD_CONT, 0, NULL);
} else {
pr_debug("%s: pump stev LINE_TX_H wrong st %x\n",
ch->is->name, ch->state);
}
break;
case PSEV_LINE_RX_H:
if (ch->state == STFAX_LINE) {
pr_debug("%s: pump stev LINE_RX_H\n", ch->is->name);
ch->state = STFAX_CONT;
send_mbox(ch->is, dps | ISAR_HIS_PUMPCTRL,
PCTRL_CMD_CONT, 0, NULL);
} else {
pr_debug("%s: pump stev LINE_RX_H wrong st %x\n",
ch->is->name, ch->state);
}
break;
case PSEV_LINE_TX_B:
if (ch->state == STFAX_LINE) {
pr_debug("%s: pump stev LINE_TX_B\n", ch->is->name);
ch->state = STFAX_CONT;
send_mbox(ch->is, dps | ISAR_HIS_PUMPCTRL,
PCTRL_CMD_CONT, 0, NULL);
} else {
pr_debug("%s: pump stev LINE_TX_B wrong st %x\n",
ch->is->name, ch->state);
}
break;
case PSEV_LINE_RX_B:
if (ch->state == STFAX_LINE) {
pr_debug("%s: pump stev LINE_RX_B\n", ch->is->name);
ch->state = STFAX_CONT;
send_mbox(ch->is, dps | ISAR_HIS_PUMPCTRL,
PCTRL_CMD_CONT, 0, NULL);
} else {
pr_debug("%s: pump stev LINE_RX_B wrong st %x\n",
ch->is->name, ch->state);
}
break;
case PSEV_RSP_CONN:
if (ch->state == STFAX_CONT) {
pr_debug("%s: pump stev RSP_CONN\n", ch->is->name);
ch->state = STFAX_ACTIV;
test_and_set_bit(ISAR_RATE_REQ, &ch->is->Flags);
send_mbox(ch->is, dps | ISAR_HIS_PSTREQ, 0, 0, NULL);
if (ch->cmd == PCTRL_CMD_FTH) {
int delay = (ch->mod == 3) ? 1000 : 200;
				/* 1000 ms (or 200 ms, depending on modulation) of flags before data */
if (test_and_set_bit(FLG_FTI_RUN,
&ch->bch.Flags))
del_timer(&ch->ftimer);
ch->ftimer.expires =
jiffies + ((delay * HZ) / 1000);
test_and_set_bit(FLG_LL_CONN,
&ch->bch.Flags);
add_timer(&ch->ftimer);
} else {
deliver_status(ch, HW_MOD_CONNECT);
}
} else {
pr_debug("%s: pump stev RSP_CONN wrong st %x\n",
ch->is->name, ch->state);
}
break;
case PSEV_FLAGS_DET:
pr_debug("%s: pump stev FLAGS_DET\n", ch->is->name);
break;
case PSEV_RSP_DISC:
pr_debug("%s: pump stev RSP_DISC state(%d)\n",
ch->is->name, ch->state);
if (ch->state == STFAX_ESCAPE) {
p1 = 5;
switch (ch->newcmd) {
case 0:
ch->state = STFAX_READY;
break;
case PCTRL_CMD_FTM:
p1 = 2;
fallthrough;
case PCTRL_CMD_FTH:
send_mbox(ch->is, dps | ISAR_HIS_PUMPCTRL,
PCTRL_CMD_SILON, 1, &p1);
ch->state = STFAX_SILDET;
break;
case PCTRL_CMD_FRH:
case PCTRL_CMD_FRM:
ch->mod = ch->newmod;
p1 = ch->newmod;
ch->newmod = 0;
ch->cmd = ch->newcmd;
ch->newcmd = 0;
send_mbox(ch->is, dps | ISAR_HIS_PUMPCTRL,
ch->cmd, 1, &p1);
ch->state = STFAX_LINE;
ch->try_mod = 3;
break;
default:
pr_debug("%s: RSP_DISC unknown newcmd %x\n",
ch->is->name, ch->newcmd);
break;
}
} else if (ch->state == STFAX_ACTIV) {
if (test_and_clear_bit(FLG_LL_OK, &ch->bch.Flags))
deliver_status(ch, HW_MOD_OK);
else if (ch->cmd == PCTRL_CMD_FRM)
deliver_status(ch, HW_MOD_NOCARR);
else
deliver_status(ch, HW_MOD_FCERROR);
ch->state = STFAX_READY;
} else if (ch->state != STFAX_SILDET) {
/* ignore in STFAX_SILDET */
ch->state = STFAX_READY;
deliver_status(ch, HW_MOD_FCERROR);
}
break;
case PSEV_RSP_SILDET:
pr_debug("%s: pump stev RSP_SILDET\n", ch->is->name);
if (ch->state == STFAX_SILDET) {
ch->mod = ch->newmod;
p1 = ch->newmod;
ch->newmod = 0;
ch->cmd = ch->newcmd;
ch->newcmd = 0;
send_mbox(ch->is, dps | ISAR_HIS_PUMPCTRL,
ch->cmd, 1, &p1);
ch->state = STFAX_LINE;
ch->try_mod = 3;
}
break;
case PSEV_RSP_SILOFF:
pr_debug("%s: pump stev RSP_SILOFF\n", ch->is->name);
break;
case PSEV_RSP_FCERR:
if (ch->state == STFAX_LINE) {
pr_debug("%s: pump stev RSP_FCERR try %d\n",
ch->is->name, ch->try_mod);
if (ch->try_mod--) {
send_mbox(ch->is, dps | ISAR_HIS_PUMPCTRL,
ch->cmd, 1, &ch->mod);
break;
}
}
pr_debug("%s: pump stev RSP_FCERR\n", ch->is->name);
ch->state = STFAX_ESCAPE;
send_mbox(ch->is, dps | ISAR_HIS_PUMPCTRL, PCTRL_CMD_ESC,
0, NULL);
deliver_status(ch, HW_MOD_FCERROR);
break;
default:
break;
}
}
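/*
 * ISAR mailbox interrupt dispatcher: read the interrupt status
 * (IIS/CTRL) and forward received data, general/buffer status and pump
 * status events to the owning channel.
 */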
void
mISDNisar_irq(struct isar_hw *isar)
{
struct isar_ch *ch;
get_irq_infos(isar);
switch (isar->iis & ISAR_IIS_MSCMSD) {
case ISAR_IIS_RDATA:
ch = sel_bch_isar(isar, isar->iis >> 6);
if (ch)
isar_rcv_frame(ch);
else {
pr_debug("%s: ISAR spurious IIS_RDATA %x/%x/%x\n",
isar->name, isar->iis, isar->cmsb,
isar->clsb);
isar->write_reg(isar->hw, ISAR_IIA, 0);
}
break;
case ISAR_IIS_GSTEV:
isar->write_reg(isar->hw, ISAR_IIA, 0);
isar->bstat |= isar->cmsb;
check_send(isar, isar->cmsb);
break;
case ISAR_IIS_BSTEV:
#ifdef ERROR_STATISTIC
ch = sel_bch_isar(isar, isar->iis >> 6);
if (ch) {
if (isar->cmsb == BSTEV_TBO)
ch->bch.err_tx++;
if (isar->cmsb == BSTEV_RBO)
ch->bch.err_rdo++;
}
#endif
pr_debug("%s: Buffer STEV dpath%d msb(%x)\n",
isar->name, isar->iis >> 6, isar->cmsb);
isar->write_reg(isar->hw, ISAR_IIA, 0);
break;
case ISAR_IIS_PSTEV:
ch = sel_bch_isar(isar, isar->iis >> 6);
if (ch) {
rcv_mbox(isar, NULL);
if (ch->bch.state == ISDN_P_B_MODEM_ASYNC)
isar_pump_statev_modem(ch, isar->cmsb);
else if (ch->bch.state == ISDN_P_B_T30_FAX)
isar_pump_statev_fax(ch, isar->cmsb);
else if (ch->bch.state == ISDN_P_B_RAW) {
int tt;
tt = isar->cmsb | 0x30;
if (tt == 0x3e)
tt = '*';
else if (tt == 0x3f)
tt = '#';
else if (tt > '9')
tt += 7;
tt |= DTMF_TONE_VAL;
_queue_data(&ch->bch.ch, PH_CONTROL_IND,
MISDN_ID_ANY, sizeof(tt), &tt,
GFP_ATOMIC);
} else
pr_debug("%s: ISAR IIS_PSTEV pm %d sta %x\n",
isar->name, ch->bch.state,
isar->cmsb);
} else {
pr_debug("%s: ISAR spurious IIS_PSTEV %x/%x/%x\n",
isar->name, isar->iis, isar->cmsb,
isar->clsb);
isar->write_reg(isar->hw, ISAR_IIA, 0);
}
break;
case ISAR_IIS_PSTRSP:
ch = sel_bch_isar(isar, isar->iis >> 6);
if (ch) {
rcv_mbox(isar, NULL);
isar_pump_status_rsp(ch);
} else {
pr_debug("%s: ISAR spurious IIS_PSTRSP %x/%x/%x\n",
isar->name, isar->iis, isar->cmsb,
isar->clsb);
isar->write_reg(isar->hw, ISAR_IIA, 0);
}
break;
case ISAR_IIS_DIAG:
case ISAR_IIS_BSTRSP:
case ISAR_IIS_IOM2RSP:
rcv_mbox(isar, NULL);
break;
case ISAR_IIS_INVMSG:
rcv_mbox(isar, NULL);
pr_debug("%s: invalid msg his:%x\n", isar->name, isar->cmsb);
break;
default:
rcv_mbox(isar, NULL);
pr_debug("%s: unhandled msg iis(%x) ctrl(%x/%x)\n",
isar->name, isar->iis, isar->cmsb, isar->clsb);
break;
}
}
EXPORT_SYMBOL(mISDNisar_irq);
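/*
 * Fax flag timer: once the preamble time has elapsed, deliver the
 * deferred HW_MOD_CONNECT status to the upper layer.
 */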
static void
ftimer_handler(struct timer_list *t)
{
struct isar_ch *ch = from_timer(ch, t, ftimer);
pr_debug("%s: ftimer flags %lx\n", ch->is->name, ch->bch.Flags);
test_and_clear_bit(FLG_FTI_RUN, &ch->bch.Flags);
if (test_and_clear_bit(FLG_LL_CONN, &ch->bch.Flags))
deliver_status(ch, HW_MOD_CONNECT);
}
static void
setup_pump(struct isar_ch *ch) {
u8 dps = SET_DPS(ch->dpath);
u8 ctrl, param[6];
switch (ch->bch.state) {
case ISDN_P_NONE:
case ISDN_P_B_RAW:
case ISDN_P_B_HDLC:
send_mbox(ch->is, dps | ISAR_HIS_PUMPCFG, PMOD_BYPASS, 0, NULL);
break;
case ISDN_P_B_L2DTMF:
if (test_bit(FLG_DTMFSEND, &ch->bch.Flags)) {
param[0] = 5; /* TOA 5 db */
send_mbox(ch->is, dps | ISAR_HIS_PUMPCFG,
PMOD_DTMF_TRANS, 1, param);
} else {
param[0] = 40; /* REL -46 dbm */
send_mbox(ch->is, dps | ISAR_HIS_PUMPCFG,
PMOD_DTMF, 1, param);
}
fallthrough;
case ISDN_P_B_MODEM_ASYNC:
ctrl = PMOD_DATAMODEM;
if (test_bit(FLG_ORIGIN, &ch->bch.Flags)) {
ctrl |= PCTRL_ORIG;
param[5] = PV32P6_CTN;
} else {
param[5] = PV32P6_ATN;
}
param[0] = 6; /* 6 db */
param[1] = PV32P2_V23R | PV32P2_V22A | PV32P2_V22B |
PV32P2_V22C | PV32P2_V21 | PV32P2_BEL;
param[2] = PV32P3_AMOD | PV32P3_V32B | PV32P3_V23B;
param[3] = PV32P4_UT144;
param[4] = PV32P5_UT144;
send_mbox(ch->is, dps | ISAR_HIS_PUMPCFG, ctrl, 6, param);
break;
case ISDN_P_B_T30_FAX:
ctrl = PMOD_FAX;
if (test_bit(FLG_ORIGIN, &ch->bch.Flags)) {
ctrl |= PCTRL_ORIG;
param[1] = PFAXP2_CTN;
} else {
param[1] = PFAXP2_ATN;
}
param[0] = 6; /* 6 db */
send_mbox(ch->is, dps | ISAR_HIS_PUMPCFG, ctrl, 2, param);
ch->state = STFAX_NULL;
ch->newcmd = 0;
ch->newmod = 0;
test_and_set_bit(FLG_FTI_RUN, &ch->bch.Flags);
break;
}
udelay(1000);
send_mbox(ch->is, dps | ISAR_HIS_PSTREQ, 0, 0, NULL);
udelay(1000);
}
static void
setup_sart(struct isar_ch *ch) {
u8 dps = SET_DPS(ch->dpath);
u8 ctrl, param[2] = {0, 0};
switch (ch->bch.state) {
case ISDN_P_NONE:
send_mbox(ch->is, dps | ISAR_HIS_SARTCFG, SMODE_DISABLE,
0, NULL);
break;
case ISDN_P_B_RAW:
case ISDN_P_B_L2DTMF:
send_mbox(ch->is, dps | ISAR_HIS_SARTCFG, SMODE_BINARY,
2, param);
break;
case ISDN_P_B_HDLC:
case ISDN_P_B_T30_FAX:
send_mbox(ch->is, dps | ISAR_HIS_SARTCFG, SMODE_HDLC,
1, param);
break;
case ISDN_P_B_MODEM_ASYNC:
ctrl = SMODE_V14 | SCTRL_HDMC_BOTH;
param[0] = S_P1_CHS_8;
param[1] = S_P2_BFT_DEF;
send_mbox(ch->is, dps | ISAR_HIS_SARTCFG, ctrl, 2, param);
break;
}
udelay(1000);
send_mbox(ch->is, dps | ISAR_HIS_BSTREQ, 0, 0, NULL);
udelay(1000);
}
static void
setup_iom2(struct isar_ch *ch) {
u8 dps = SET_DPS(ch->dpath);
u8 cmsb = IOM_CTRL_ENA, msg[5] = {IOM_P1_TXD, 0, 0, 0, 0};
if (ch->bch.nr == 2) {
msg[1] = 1;
msg[3] = 1;
}
switch (ch->bch.state) {
case ISDN_P_NONE:
cmsb = 0;
/* dummy slot */
msg[1] = ch->dpath + 2;
msg[3] = ch->dpath + 2;
break;
case ISDN_P_B_RAW:
case ISDN_P_B_HDLC:
break;
case ISDN_P_B_MODEM_ASYNC:
case ISDN_P_B_T30_FAX:
cmsb |= IOM_CTRL_RCV;
fallthrough;
case ISDN_P_B_L2DTMF:
if (test_bit(FLG_DTMFSEND, &ch->bch.Flags))
cmsb |= IOM_CTRL_RCV;
cmsb |= IOM_CTRL_ALAW;
break;
}
send_mbox(ch->is, dps | ISAR_HIS_IOM2CFG, cmsb, 5, msg);
udelay(1000);
send_mbox(ch->is, dps | ISAR_HIS_IOM2REQ, 0, 0, NULL);
udelay(1000);
}
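/*
 * Select and program a data path for the requested B-channel protocol:
 * HDLC and transparent mode may use data path 1 or 2, the analog modes
 * (modem, fax, DTMF) only data path 1. Switching back to ISDN_P_NONE
 * releases the data path again.
 */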
static int
modeisar(struct isar_ch *ch, u32 bprotocol)
{
	/* Here we select the best data path for the requested protocol */
if (ch->bch.state == ISDN_P_NONE) { /* New Setup */
switch (bprotocol) {
case ISDN_P_NONE: /* init */
if (!ch->dpath)
/* no init for dpath 0 */
return 0;
test_and_clear_bit(FLG_HDLC, &ch->bch.Flags);
test_and_clear_bit(FLG_TRANSPARENT, &ch->bch.Flags);
break;
case ISDN_P_B_RAW:
case ISDN_P_B_HDLC:
/* best is datapath 2 */
if (!test_and_set_bit(ISAR_DP2_USE, &ch->is->Flags))
ch->dpath = 2;
else if (!test_and_set_bit(ISAR_DP1_USE,
&ch->is->Flags))
ch->dpath = 1;
else {
pr_info("modeisar both paths in use\n");
return -EBUSY;
}
if (bprotocol == ISDN_P_B_HDLC)
test_and_set_bit(FLG_HDLC, &ch->bch.Flags);
else
test_and_set_bit(FLG_TRANSPARENT,
&ch->bch.Flags);
break;
case ISDN_P_B_MODEM_ASYNC:
case ISDN_P_B_T30_FAX:
case ISDN_P_B_L2DTMF:
/* only datapath 1 */
if (!test_and_set_bit(ISAR_DP1_USE, &ch->is->Flags))
ch->dpath = 1;
else {
pr_info("%s: ISAR modeisar analog functions"
"only with DP1\n", ch->is->name);
return -EBUSY;
}
break;
default:
pr_info("%s: protocol not known %x\n", ch->is->name,
bprotocol);
return -ENOPROTOOPT;
}
}
pr_debug("%s: ISAR ch%d dp%d protocol %x->%x\n", ch->is->name,
ch->bch.nr, ch->dpath, ch->bch.state, bprotocol);
ch->bch.state = bprotocol;
setup_pump(ch);
setup_iom2(ch);
setup_sart(ch);
if (ch->bch.state == ISDN_P_NONE) {
/* Clear resources */
if (ch->dpath == 1)
test_and_clear_bit(ISAR_DP1_USE, &ch->is->Flags);
else if (ch->dpath == 2)
test_and_clear_bit(ISAR_DP2_USE, &ch->is->Flags);
ch->dpath = 0;
ch->is->ctrl(ch->is->hw, HW_DEACT_IND, ch->bch.nr);
} else
ch->is->ctrl(ch->is->hw, HW_ACTIVATE_IND, ch->bch.nr);
return 0;
}
static void
isar_pump_cmd(struct isar_ch *ch, u32 cmd, u8 para)
{
u8 dps = SET_DPS(ch->dpath);
u8 ctrl = 0, nom = 0, p1 = 0;
pr_debug("%s: isar_pump_cmd %x/%x state(%x)\n",
ch->is->name, cmd, para, ch->bch.state);
switch (cmd) {
case HW_MOD_FTM:
if (ch->state == STFAX_READY) {
p1 = para;
ctrl = PCTRL_CMD_FTM;
nom = 1;
ch->state = STFAX_LINE;
ch->cmd = ctrl;
ch->mod = para;
ch->newmod = 0;
ch->newcmd = 0;
ch->try_mod = 3;
} else if ((ch->state == STFAX_ACTIV) &&
(ch->cmd == PCTRL_CMD_FTM) && (ch->mod == para))
deliver_status(ch, HW_MOD_CONNECT);
else {
ch->newmod = para;
ch->newcmd = PCTRL_CMD_FTM;
nom = 0;
ctrl = PCTRL_CMD_ESC;
ch->state = STFAX_ESCAPE;
}
break;
case HW_MOD_FTH:
if (ch->state == STFAX_READY) {
p1 = para;
ctrl = PCTRL_CMD_FTH;
nom = 1;
ch->state = STFAX_LINE;
ch->cmd = ctrl;
ch->mod = para;
ch->newmod = 0;
ch->newcmd = 0;
ch->try_mod = 3;
} else if ((ch->state == STFAX_ACTIV) &&
(ch->cmd == PCTRL_CMD_FTH) && (ch->mod == para))
deliver_status(ch, HW_MOD_CONNECT);
else {
ch->newmod = para;
ch->newcmd = PCTRL_CMD_FTH;
nom = 0;
ctrl = PCTRL_CMD_ESC;
ch->state = STFAX_ESCAPE;
}
break;
case HW_MOD_FRM:
if (ch->state == STFAX_READY) {
p1 = para;
ctrl = PCTRL_CMD_FRM;
nom = 1;
ch->state = STFAX_LINE;
ch->cmd = ctrl;
ch->mod = para;
ch->newmod = 0;
ch->newcmd = 0;
ch->try_mod = 3;
} else if ((ch->state == STFAX_ACTIV) &&
(ch->cmd == PCTRL_CMD_FRM) && (ch->mod == para))
deliver_status(ch, HW_MOD_CONNECT);
else {
ch->newmod = para;
ch->newcmd = PCTRL_CMD_FRM;
nom = 0;
ctrl = PCTRL_CMD_ESC;
ch->state = STFAX_ESCAPE;
}
break;
case HW_MOD_FRH:
if (ch->state == STFAX_READY) {
p1 = para;
ctrl = PCTRL_CMD_FRH;
nom = 1;
ch->state = STFAX_LINE;
ch->cmd = ctrl;
ch->mod = para;
ch->newmod = 0;
ch->newcmd = 0;
ch->try_mod = 3;
} else if ((ch->state == STFAX_ACTIV) &&
(ch->cmd == PCTRL_CMD_FRH) && (ch->mod == para))
deliver_status(ch, HW_MOD_CONNECT);
else {
ch->newmod = para;
ch->newcmd = PCTRL_CMD_FRH;
nom = 0;
ctrl = PCTRL_CMD_ESC;
ch->state = STFAX_ESCAPE;
}
break;
case PCTRL_CMD_TDTMF:
p1 = para;
nom = 1;
ctrl = PCTRL_CMD_TDTMF;
break;
}
if (ctrl)
send_mbox(ch->is, dps | ISAR_HIS_PUMPCTRL, ctrl, nom, &p1);
}
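/*
 * Post-firmware setup: configure the maximum data chunk size per
 * mailbox message (mml = 61 bytes) for both data paths and put both
 * channels into ISDN_P_NONE.
 */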
static void
isar_setup(struct isar_hw *isar)
{
u8 msg;
int i;
/* Dpath 1, 2 */
msg = 61;
for (i = 0; i < 2; i++) {
/* Buffer Config */
send_mbox(isar, (i ? ISAR_HIS_DPS2 : ISAR_HIS_DPS1) |
ISAR_HIS_P12CFG, 4, 1, &msg);
isar->ch[i].mml = msg;
isar->ch[i].bch.state = 0;
isar->ch[i].dpath = i + 1;
modeisar(&isar->ch[i], ISDN_P_NONE);
}
}
static int
isar_l2l1(struct mISDNchannel *ch, struct sk_buff *skb)
{
struct bchannel *bch = container_of(ch, struct bchannel, ch);
struct isar_ch *ich = container_of(bch, struct isar_ch, bch);
int ret = -EINVAL;
struct mISDNhead *hh = mISDN_HEAD_P(skb);
u32 id, *val;
u_long flags;
switch (hh->prim) {
case PH_DATA_REQ:
spin_lock_irqsave(ich->is->hwlock, flags);
ret = bchannel_senddata(bch, skb);
if (ret > 0) { /* direct TX */
ret = 0;
isar_fill_fifo(ich);
}
spin_unlock_irqrestore(ich->is->hwlock, flags);
return ret;
case PH_ACTIVATE_REQ:
spin_lock_irqsave(ich->is->hwlock, flags);
if (!test_and_set_bit(FLG_ACTIVE, &bch->Flags))
ret = modeisar(ich, ch->protocol);
else
ret = 0;
spin_unlock_irqrestore(ich->is->hwlock, flags);
if (!ret)
_queue_data(ch, PH_ACTIVATE_IND, MISDN_ID_ANY, 0,
NULL, GFP_KERNEL);
break;
case PH_DEACTIVATE_REQ:
spin_lock_irqsave(ich->is->hwlock, flags);
mISDN_clear_bchannel(bch);
modeisar(ich, ISDN_P_NONE);
spin_unlock_irqrestore(ich->is->hwlock, flags);
_queue_data(ch, PH_DEACTIVATE_IND, MISDN_ID_ANY, 0,
NULL, GFP_KERNEL);
ret = 0;
break;
case PH_CONTROL_REQ:
val = (u32 *)skb->data;
pr_debug("%s: PH_CONTROL | REQUEST %x/%x\n", ich->is->name,
hh->id, *val);
if ((hh->id == 0) && ((*val & ~DTMF_TONE_MASK) ==
DTMF_TONE_VAL)) {
if (bch->state == ISDN_P_B_L2DTMF) {
char tt = *val & DTMF_TONE_MASK;
if (tt == '*')
tt = 0x1e;
else if (tt == '#')
tt = 0x1f;
else if (tt > '9')
tt -= 7;
tt &= 0x1f;
spin_lock_irqsave(ich->is->hwlock, flags);
isar_pump_cmd(ich, PCTRL_CMD_TDTMF, tt);
spin_unlock_irqrestore(ich->is->hwlock, flags);
} else {
pr_info("%s: DTMF send wrong protocol %x\n",
__func__, bch->state);
return -EINVAL;
}
} else if ((hh->id == HW_MOD_FRM) || (hh->id == HW_MOD_FRH) ||
(hh->id == HW_MOD_FTM) || (hh->id == HW_MOD_FTH)) {
for (id = 0; id < FAXMODCNT; id++)
if (faxmodulation[id] == *val)
break;
if ((FAXMODCNT > id) &&
test_bit(FLG_INITIALIZED, &bch->Flags)) {
pr_debug("%s: isar: new mod\n", ich->is->name);
isar_pump_cmd(ich, hh->id, *val);
ret = 0;
} else {
pr_info("%s: wrong modulation\n",
ich->is->name);
ret = -EINVAL;
}
} else if (hh->id == HW_MOD_LASTDATA)
test_and_set_bit(FLG_DLEETX, &bch->Flags);
else {
pr_info("%s: unknown PH_CONTROL_REQ %x\n",
ich->is->name, hh->id);
ret = -EINVAL;
}
fallthrough;
default:
pr_info("%s: %s unknown prim(%x,%x)\n",
ich->is->name, __func__, hh->prim, hh->id);
ret = -EINVAL;
}
if (!ret)
dev_kfree_skb(skb);
return ret;
}
static int
channel_bctrl(struct bchannel *bch, struct mISDN_ctrl_req *cq)
{
return mISDN_ctrl_bchannel(bch, cq);
}
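/*
 * B-channel control: close the channel or handle CONTROL_CHANNEL requests
 */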
static int
isar_bctrl(struct mISDNchannel *ch, u32 cmd, void *arg)
{
struct bchannel *bch = container_of(ch, struct bchannel, ch);
struct isar_ch *ich = container_of(bch, struct isar_ch, bch);
int ret = -EINVAL;
u_long flags;
pr_debug("%s: %s cmd:%x %p\n", ich->is->name, __func__, cmd, arg);
switch (cmd) {
case CLOSE_CHANNEL:
test_and_clear_bit(FLG_OPEN, &bch->Flags);
cancel_work_sync(&bch->workq);
spin_lock_irqsave(ich->is->hwlock, flags);
mISDN_clear_bchannel(bch);
modeisar(ich, ISDN_P_NONE);
spin_unlock_irqrestore(ich->is->hwlock, flags);
ch->protocol = ISDN_P_NONE;
ch->peer = NULL;
module_put(ich->is->owner);
ret = 0;
break;
case CONTROL_CHANNEL:
ret = channel_bctrl(bch, arg);
break;
default:
pr_info("%s: %s unknown prim(%x)\n",
ich->is->name, __func__, cmd);
}
return ret;
}
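/*
 * release both ISAR channels: deactivate them and stop the fax timers
 */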
static void
free_isar(struct isar_hw *isar)
{
modeisar(&isar->ch[0], ISDN_P_NONE);
modeisar(&isar->ch[1], ISDN_P_NONE);
del_timer(&isar->ch[0].ftimer);
del_timer(&isar->ch[1].ftimer);
test_and_clear_bit(FLG_INITIALIZED, &isar->ch[0].bch.Flags);
test_and_clear_bit(FLG_INITIALIZED, &isar->ch[1].bch.Flags);
}
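/*
 * check that the ISAR firmware reports version 1 (up to 3 attempts, with a
 * hardware reset in between) and set up the per-channel fax timers
 */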
static int
init_isar(struct isar_hw *isar)
{
int cnt = 3;
while (cnt--) {
isar->version = ISARVersion(isar);
if (isar->ch[0].bch.debug & DEBUG_HW)
pr_notice("%s: Testing version %d (%d time)\n",
isar->name, isar->version, 3 - cnt);
if (isar->version == 1)
break;
isar->ctrl(isar->hw, HW_RESET_REQ, 0);
}
if (isar->version != 1)
return -EINVAL;
timer_setup(&isar->ch[0].ftimer, ftimer_handler, 0);
test_and_set_bit(FLG_INITIALIZED, &isar->ch[0].bch.Flags);
timer_setup(&isar->ch[1].ftimer, ftimer_handler, 0);
test_and_set_bit(FLG_INITIALIZED, &isar->ch[1].bch.Flags);
return 0;
}
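/*
 * open one of the two B-channels with the requested protocol
 */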
static int
isar_open(struct isar_hw *isar, struct channel_req *rq)
{
struct bchannel *bch;
if (rq->adr.channel == 0 || rq->adr.channel > 2)
return -EINVAL;
if (rq->protocol == ISDN_P_NONE)
return -EINVAL;
bch = &isar->ch[rq->adr.channel - 1].bch;
if (test_and_set_bit(FLG_OPEN, &bch->Flags))
return -EBUSY; /* b-channel can only be opened once */
bch->ch.protocol = rq->protocol;
rq->ch = &bch->ch;
return 0;
}
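/*
 * initialize both B-channels of the ISAR, hook up the channel callbacks
 * and return the bitmap of supported B-channel protocols
 */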
u32
mISDNisar_init(struct isar_hw *isar, void *hw)
{
u32 ret, i;
isar->hw = hw;
for (i = 0; i < 2; i++) {
isar->ch[i].bch.nr = i + 1;
mISDN_initbchannel(&isar->ch[i].bch, MAX_DATA_MEM, 32);
isar->ch[i].bch.ch.nr = i + 1;
isar->ch[i].bch.ch.send = &isar_l2l1;
isar->ch[i].bch.ch.ctrl = isar_bctrl;
isar->ch[i].bch.hw = hw;
isar->ch[i].is = isar;
}
isar->init = &init_isar;
isar->release = &free_isar;
isar->firmware = &load_firmware;
isar->open = &isar_open;
ret = (1 << (ISDN_P_B_RAW & ISDN_P_B_MASK)) |
(1 << (ISDN_P_B_HDLC & ISDN_P_B_MASK)) |
(1 << (ISDN_P_B_L2DTMF & ISDN_P_B_MASK)) |
(1 << (ISDN_P_B_MODEM_ASYNC & ISDN_P_B_MASK)) |
(1 << (ISDN_P_B_T30_FAX & ISDN_P_B_MASK));
return ret;
}
EXPORT_SYMBOL(mISDNisar_init);
static int __init isar_mod_init(void)
{
pr_notice("mISDN: ISAR driver Rev. %s\n", ISAR_REV);
return 0;
}
static void __exit isar_mod_cleanup(void)
{
pr_notice("mISDN: ISAR module unloaded\n");
}
module_init(isar_mod_init);
module_exit(isar_mod_cleanup);
| linux-master | drivers/isdn/hardware/mISDN/mISDNisar.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* speedfax.c low level stuff for Sedlbauer Speedfax+ cards
* based on the ISAR DSP
 * Thanks to Sedlbauer AG for information and HW
*
* Author Karsten Keil <[email protected]>
*
* Copyright 2009 by Karsten Keil <[email protected]>
*/
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/mISDNhw.h>
#include <linux/firmware.h>
#include "ipac.h"
#include "isar.h"
#define SPEEDFAX_REV "2.0"
#define PCI_SUBVENDOR_SPEEDFAX_PYRAMID 0x51
#define PCI_SUBVENDOR_SPEEDFAX_PCI 0x54
#define PCI_SUB_ID_SEDLBAUER 0x01
#define SFAX_PCI_ADDR 0xc8
#define SFAX_PCI_ISAC 0xd0
#define SFAX_PCI_ISAR 0xe0
/* TIGER 100 Registers */
#define TIGER_RESET_ADDR 0x00
#define TIGER_EXTERN_RESET_ON 0x01
#define TIGER_EXTERN_RESET_OFF 0x00
#define TIGER_AUX_CTRL 0x02
#define TIGER_AUX_DATA 0x03
#define TIGER_AUX_IRQMASK 0x05
#define TIGER_AUX_STATUS 0x07
/* Tiger AUX BITs */
#define SFAX_AUX_IOMASK 0xdd /* 1 and 5 are inputs */
#define SFAX_ISAR_RESET_BIT_OFF 0x00
#define SFAX_ISAR_RESET_BIT_ON 0x01
#define SFAX_TIGER_IRQ_BIT 0x02
#define SFAX_LED1_BIT 0x08
#define SFAX_LED2_BIT 0x10
#define SFAX_PCI_RESET_ON (SFAX_ISAR_RESET_BIT_ON)
#define SFAX_PCI_RESET_OFF (SFAX_LED1_BIT | SFAX_LED2_BIT)
static int sfax_cnt;
static u32 debug;
static u32 irqloops = 4;
struct sfax_hw {
struct list_head list;
struct pci_dev *pdev;
char name[MISDN_MAX_IDLEN];
u32 irq;
u32 irqcnt;
u32 cfg;
struct _ioport p_isac;
struct _ioport p_isar;
u8 aux_data;
spinlock_t lock; /* HW access lock */
struct isac_hw isac;
struct isar_hw isar;
};
static LIST_HEAD(Cards);
static DEFINE_RWLOCK(card_lock); /* protect Cards */
static void
_set_debug(struct sfax_hw *card)
{
card->isac.dch.debug = debug;
card->isar.ch[0].bch.debug = debug;
card->isar.ch[1].bch.debug = debug;
}
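/*
 * module parameter handler: propagate a new debug mask to all cards
 */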
static int
set_debug(const char *val, const struct kernel_param *kp)
{
int ret;
struct sfax_hw *card;
ret = param_set_uint(val, kp);
if (!ret) {
read_lock(&card_lock);
list_for_each_entry(card, &Cards, list)
_set_debug(card);
read_unlock(&card_lock);
}
return ret;
}
MODULE_AUTHOR("Karsten Keil");
MODULE_LICENSE("GPL v2");
MODULE_VERSION(SPEEDFAX_REV);
MODULE_FIRMWARE("isdn/ISAR.BIN");
module_param_call(debug, set_debug, param_get_uint, &debug, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(debug, "Speedfax debug mask");
module_param(irqloops, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(irqloops, "Speedfax maximal irqloops (default 4)");
IOFUNC_IND(ISAC, sfax_hw, p_isac)
IOFUNC_IND(ISAR, sfax_hw, p_isar)
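/*
 * shared interrupt handler: check the Tiger AUX status first, then service
 * ISAR and ISAC interrupts, looping up to 'irqloops' times for the ISAR
 */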
static irqreturn_t
speedfax_irq(int intno, void *dev_id)
{
struct sfax_hw *sf = dev_id;
u8 val;
int cnt = irqloops;
spin_lock(&sf->lock);
val = inb(sf->cfg + TIGER_AUX_STATUS);
if (val & SFAX_TIGER_IRQ_BIT) { /* for us or shared ? */
spin_unlock(&sf->lock);
return IRQ_NONE; /* shared */
}
sf->irqcnt++;
val = ReadISAR_IND(sf, ISAR_IRQBIT);
Start_ISAR:
if (val & ISAR_IRQSTA)
mISDNisar_irq(&sf->isar);
val = ReadISAC_IND(sf, ISAC_ISTA);
if (val)
mISDNisac_irq(&sf->isac, val);
val = ReadISAR_IND(sf, ISAR_IRQBIT);
if ((val & ISAR_IRQSTA) && cnt--)
goto Start_ISAR;
if (cnt < irqloops)
pr_debug("%s: %d irqloops cpu%d\n", sf->name,
irqloops - cnt, smp_processor_id());
if (irqloops && !cnt)
pr_notice("%s: %d IRQ LOOP cpu%d\n", sf->name,
irqloops, smp_processor_id());
spin_unlock(&sf->lock);
return IRQ_HANDLED;
}
static void
enable_hwirq(struct sfax_hw *sf)
{
WriteISAC_IND(sf, ISAC_MASK, 0);
WriteISAR_IND(sf, ISAR_IRQBIT, ISAR_IRQMSK);
outb(SFAX_TIGER_IRQ_BIT, sf->cfg + TIGER_AUX_IRQMASK);
}
static void
disable_hwirq(struct sfax_hw *sf)
{
WriteISAC_IND(sf, ISAC_MASK, 0xFF);
WriteISAR_IND(sf, ISAR_IRQBIT, 0);
outb(0, sf->cfg + TIGER_AUX_IRQMASK);
}
static void
reset_speedfax(struct sfax_hw *sf)
{
pr_debug("%s: resetting card\n", sf->name);
outb(TIGER_EXTERN_RESET_ON, sf->cfg + TIGER_RESET_ADDR);
outb(SFAX_PCI_RESET_ON, sf->cfg + TIGER_AUX_DATA);
mdelay(1);
outb(TIGER_EXTERN_RESET_OFF, sf->cfg + TIGER_RESET_ADDR);
sf->aux_data = SFAX_PCI_RESET_OFF;
outb(sf->aux_data, sf->cfg + TIGER_AUX_DATA);
mdelay(1);
}
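/*
 * control callback used by the ISAC/ISAR code: hardware reset requests and
 * LED handling on B-channel (de)activation
 */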
static int
sfax_ctrl(struct sfax_hw *sf, u32 cmd, u_long arg)
{
int ret = 0;
switch (cmd) {
case HW_RESET_REQ:
reset_speedfax(sf);
break;
case HW_ACTIVATE_IND:
if (arg & 1)
sf->aux_data &= ~SFAX_LED1_BIT;
if (arg & 2)
sf->aux_data &= ~SFAX_LED2_BIT;
outb(sf->aux_data, sf->cfg + TIGER_AUX_DATA);
break;
case HW_DEACT_IND:
if (arg & 1)
sf->aux_data |= SFAX_LED1_BIT;
if (arg & 2)
sf->aux_data |= SFAX_LED2_BIT;
outb(sf->aux_data, sf->cfg + TIGER_AUX_DATA);
break;
default:
pr_info("%s: %s unknown command %x %lx\n",
sf->name, __func__, cmd, arg);
ret = -EINVAL;
break;
}
return ret;
}
static int
channel_ctrl(struct sfax_hw *sf, struct mISDN_ctrl_req *cq)
{
int ret = 0;
switch (cq->op) {
case MISDN_CTRL_GETOP:
cq->op = MISDN_CTRL_LOOP | MISDN_CTRL_L1_TIMER3;
break;
case MISDN_CTRL_LOOP:
/* cq->channel: 0 disable, 1 B1 loop 2 B2 loop, 3 both */
if (cq->channel < 0 || cq->channel > 3) {
ret = -EINVAL;
break;
}
ret = sf->isac.ctrl(&sf->isac, HW_TESTLOOP, cq->channel);
break;
case MISDN_CTRL_L1_TIMER3:
ret = sf->isac.ctrl(&sf->isac, HW_TIMER3_VALUE, cq->p1);
break;
default:
pr_info("%s: unknown Op %x\n", sf->name, cq->op);
ret = -EINVAL;
break;
}
return ret;
}
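/*
 * D-channel device control: open/close channels and CONTROL_CHANNEL requests
 */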
static int
sfax_dctrl(struct mISDNchannel *ch, u32 cmd, void *arg)
{
struct mISDNdevice *dev = container_of(ch, struct mISDNdevice, D);
struct dchannel *dch = container_of(dev, struct dchannel, dev);
struct sfax_hw *sf = dch->hw;
struct channel_req *rq;
int err = 0;
pr_debug("%s: cmd:%x %p\n", sf->name, cmd, arg);
switch (cmd) {
case OPEN_CHANNEL:
rq = arg;
if (rq->protocol == ISDN_P_TE_S0)
err = sf->isac.open(&sf->isac, rq);
else
err = sf->isar.open(&sf->isar, rq);
if (err)
break;
if (!try_module_get(THIS_MODULE))
pr_info("%s: cannot get module\n", sf->name);
break;
case CLOSE_CHANNEL:
pr_debug("%s: dev(%d) close from %p\n", sf->name,
dch->dev.id, __builtin_return_address(0));
module_put(THIS_MODULE);
break;
case CONTROL_CHANNEL:
err = channel_ctrl(sf, arg);
break;
default:
pr_debug("%s: unknown command %x\n", sf->name, cmd);
return -EINVAL;
}
return err;
}
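/*
 * request the interrupt and initialize the ISAC; verify that interrupts
 * actually arrive (up to 3 attempts)
 */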
static int
init_card(struct sfax_hw *sf)
{
int ret, cnt = 3;
u_long flags;
ret = request_irq(sf->irq, speedfax_irq, IRQF_SHARED, sf->name, sf);
if (ret) {
pr_info("%s: couldn't get interrupt %d\n", sf->name, sf->irq);
return ret;
}
while (cnt--) {
spin_lock_irqsave(&sf->lock, flags);
ret = sf->isac.init(&sf->isac);
if (ret) {
spin_unlock_irqrestore(&sf->lock, flags);
pr_info("%s: ISAC init failed with %d\n",
sf->name, ret);
break;
}
enable_hwirq(sf);
/* RESET Receiver and Transmitter */
WriteISAC_IND(sf, ISAC_CMDR, 0x41);
spin_unlock_irqrestore(&sf->lock, flags);
msleep_interruptible(10);
if (debug & DEBUG_HW)
pr_notice("%s: IRQ %d count %d\n", sf->name,
sf->irq, sf->irqcnt);
if (!sf->irqcnt) {
pr_info("%s: IRQ(%d) got no requests during init %d\n",
sf->name, sf->irq, 3 - cnt);
} else
return 0;
}
free_irq(sf->irq, sf);
return -EIO;
}
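/*
 * claim the 256 byte I/O region, set up the Tiger AUX port and the indirect
 * ISAC/ISAR register access, then reset the card with interrupts disabled
 */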
static int
setup_speedfax(struct sfax_hw *sf)
{
u_long flags;
if (!request_region(sf->cfg, 256, sf->name)) {
pr_info("mISDN: %s config port %x-%x already in use\n",
sf->name, sf->cfg, sf->cfg + 255);
return -EIO;
}
outb(0xff, sf->cfg);
outb(0, sf->cfg);
outb(0xdd, sf->cfg + TIGER_AUX_CTRL);
outb(0, sf->cfg + TIGER_AUX_IRQMASK);
sf->isac.type = IPAC_TYPE_ISAC;
sf->p_isac.ale = sf->cfg + SFAX_PCI_ADDR;
sf->p_isac.port = sf->cfg + SFAX_PCI_ISAC;
sf->p_isar.ale = sf->cfg + SFAX_PCI_ADDR;
sf->p_isar.port = sf->cfg + SFAX_PCI_ISAR;
ASSIGN_FUNC(IND, ISAC, sf->isac);
ASSIGN_FUNC(IND, ISAR, sf->isar);
spin_lock_irqsave(&sf->lock, flags);
reset_speedfax(sf);
disable_hwirq(sf);
spin_unlock_irqrestore(&sf->lock, flags);
return 0;
}
static void
release_card(struct sfax_hw *card) {
u_long flags;
spin_lock_irqsave(&card->lock, flags);
disable_hwirq(card);
spin_unlock_irqrestore(&card->lock, flags);
card->isac.release(&card->isac);
free_irq(card->irq, card);
card->isar.release(&card->isar);
mISDN_unregister_device(&card->isac.dch.dev);
release_region(card->cfg, 256);
pci_disable_device(card->pdev);
pci_set_drvdata(card->pdev, NULL);
write_lock_irqsave(&card_lock, flags);
list_del(&card->list);
write_unlock_irqrestore(&card_lock, flags);
kfree(card);
sfax_cnt--;
}
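/*
 * set up one card instance: request the ISAR firmware, register the mISDN
 * device, initialize the hardware and download the firmware
 */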
static int
setup_instance(struct sfax_hw *card)
{
const struct firmware *firmware;
int i, err;
u_long flags;
snprintf(card->name, MISDN_MAX_IDLEN - 1, "Speedfax.%d", sfax_cnt + 1);
write_lock_irqsave(&card_lock, flags);
list_add_tail(&card->list, &Cards);
write_unlock_irqrestore(&card_lock, flags);
_set_debug(card);
spin_lock_init(&card->lock);
card->isac.hwlock = &card->lock;
card->isar.hwlock = &card->lock;
card->isar.ctrl = (void *)&sfax_ctrl;
card->isac.name = card->name;
card->isar.name = card->name;
card->isar.owner = THIS_MODULE;
err = request_firmware(&firmware, "isdn/ISAR.BIN", &card->pdev->dev);
if (err < 0) {
pr_info("%s: firmware request failed %d\n",
card->name, err);
goto error_fw;
}
if (debug & DEBUG_HW)
pr_notice("%s: got firmware %zu bytes\n",
card->name, firmware->size);
mISDNisac_init(&card->isac, card);
card->isac.dch.dev.D.ctrl = sfax_dctrl;
card->isac.dch.dev.Bprotocols =
mISDNisar_init(&card->isar, card);
for (i = 0; i < 2; i++) {
set_channelmap(i + 1, card->isac.dch.dev.channelmap);
list_add(&card->isar.ch[i].bch.ch.list,
&card->isac.dch.dev.bchannels);
}
err = setup_speedfax(card);
if (err)
goto error_setup;
err = card->isar.init(&card->isar);
if (err)
goto error;
err = mISDN_register_device(&card->isac.dch.dev,
&card->pdev->dev, card->name);
if (err)
goto error;
err = init_card(card);
if (err)
goto error_init;
err = card->isar.firmware(&card->isar, firmware->data, firmware->size);
if (!err) {
release_firmware(firmware);
sfax_cnt++;
pr_notice("SpeedFax %d cards installed\n", sfax_cnt);
return 0;
}
disable_hwirq(card);
free_irq(card->irq, card);
error_init:
mISDN_unregister_device(&card->isac.dch.dev);
error:
release_region(card->cfg, 256);
error_setup:
card->isac.release(&card->isac);
card->isar.release(&card->isar);
release_firmware(firmware);
error_fw:
pci_disable_device(card->pdev);
write_lock_irqsave(&card_lock, flags);
list_del(&card->list);
write_unlock_irqrestore(&card_lock, flags);
kfree(card);
return err;
}
static int
sfaxpci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
int err = -ENOMEM;
struct sfax_hw *card = kzalloc(sizeof(struct sfax_hw), GFP_KERNEL);
if (!card) {
pr_info("No memory for Speedfax+ PCI\n");
return err;
}
card->pdev = pdev;
err = pci_enable_device(pdev);
if (err) {
kfree(card);
return err;
}
pr_notice("mISDN: Speedfax found adapter %s at %s\n",
(char *)ent->driver_data, pci_name(pdev));
card->cfg = pci_resource_start(pdev, 0);
card->irq = pdev->irq;
pci_set_drvdata(pdev, card);
err = setup_instance(card);
if (err)
pci_set_drvdata(pdev, NULL);
return err;
}
static void
sfax_remove_pci(struct pci_dev *pdev)
{
struct sfax_hw *card = pci_get_drvdata(pdev);
if (card)
release_card(card);
else
pr_debug("%s: drvdata already removed\n", __func__);
}
static struct pci_device_id sfaxpci_ids[] = {
{ PCI_VENDOR_ID_TIGERJET, PCI_DEVICE_ID_TIGERJET_100,
PCI_SUBVENDOR_SPEEDFAX_PYRAMID, PCI_SUB_ID_SEDLBAUER,
0, 0, (unsigned long) "Pyramid Speedfax + PCI"
},
{ PCI_VENDOR_ID_TIGERJET, PCI_DEVICE_ID_TIGERJET_100,
PCI_SUBVENDOR_SPEEDFAX_PCI, PCI_SUB_ID_SEDLBAUER,
0, 0, (unsigned long) "Sedlbauer Speedfax + PCI"
},
{ }
};
MODULE_DEVICE_TABLE(pci, sfaxpci_ids);
static struct pci_driver sfaxpci_driver = {
.name = "speedfax+ pci",
.probe = sfaxpci_probe,
.remove = sfax_remove_pci,
.id_table = sfaxpci_ids,
};
static int __init
Speedfax_init(void)
{
int err;
pr_notice("Sedlbauer Speedfax+ Driver Rev. %s\n",
SPEEDFAX_REV);
err = pci_register_driver(&sfaxpci_driver);
return err;
}
static void __exit
Speedfax_cleanup(void)
{
pci_unregister_driver(&sfaxpci_driver);
}
module_init(Speedfax_init);
module_exit(Speedfax_cleanup);
| linux-master | drivers/isdn/hardware/mISDN/speedfax.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
*
* hfcpci.c low level driver for CCD's hfc-pci based cards
*
* Author Werner Cornelius ([email protected])
* based on existing driver for CCD hfc ISA cards
* type approval valid for HFC-S PCI A based card
*
* Copyright 1999 by Werner Cornelius ([email protected])
* Copyright 2008 by Karsten Keil <[email protected]>
*
* Module options:
*
* debug:
 * NOTE: only one debug value must be given for all cards
* See hfc_pci.h for debug flags.
*
* poll:
* NOTE: only one poll value must be given for all cards
* Give the number of samples for each fifo process.
* By default 128 is used. Decrease to reduce delay, increase to
* reduce cpu load. If unsure, don't mess with it!
 * A value of 128 will use the controller's interrupt. Other values will
 * use the kernel timer, because the controller will not allow values
 * lower than 128.
 * Also note that the value depends on the kernel timer frequency.
 * If the kernel uses a frequency of 1000 Hz, steps of 8 samples are possible.
* If the kernel uses 100 Hz, steps of 80 samples are possible.
* If the kernel uses 300 Hz, steps of about 26 samples are possible.
*/
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/mISDNhw.h>
#include <linux/slab.h>
#include "hfc_pci.h"
static const char *hfcpci_revision = "2.0";
static int HFC_cnt;
static uint debug;
static uint poll, tics;
static struct timer_list hfc_tl;
static unsigned long hfc_jiffies;
MODULE_AUTHOR("Karsten Keil");
MODULE_LICENSE("GPL");
module_param(debug, uint, S_IRUGO | S_IWUSR);
module_param(poll, uint, S_IRUGO | S_IWUSR);
enum {
HFC_CCD_2BD0,
HFC_CCD_B000,
HFC_CCD_B006,
HFC_CCD_B007,
HFC_CCD_B008,
HFC_CCD_B009,
HFC_CCD_B00A,
HFC_CCD_B00B,
HFC_CCD_B00C,
HFC_CCD_B100,
HFC_CCD_B700,
HFC_CCD_B701,
HFC_ASUS_0675,
HFC_BERKOM_A1T,
HFC_BERKOM_TCONCEPT,
HFC_ANIGMA_MC145575,
HFC_ZOLTRIX_2BD0,
HFC_DIGI_DF_M_IOM2_E,
HFC_DIGI_DF_M_E,
HFC_DIGI_DF_M_IOM2_A,
HFC_DIGI_DF_M_A,
HFC_ABOCOM_2BD1,
HFC_SITECOM_DC105V2,
};
struct hfcPCI_hw {
unsigned char cirm;
unsigned char ctmt;
unsigned char clkdel;
unsigned char states;
unsigned char conn;
unsigned char mst_m;
unsigned char int_m1;
unsigned char int_m2;
unsigned char sctrl;
unsigned char sctrl_r;
unsigned char sctrl_e;
unsigned char trm;
unsigned char fifo_en;
unsigned char bswapped;
unsigned char protocol;
int nt_timer;
unsigned char __iomem *pci_io; /* start of PCI IO memory */
dma_addr_t dmahandle;
void *fifos; /* FIFO memory */
int last_bfifo_cnt[2];
/* marker saving last b-fifo frame count */
struct timer_list timer;
};
#define HFC_CFG_MASTER 1
#define HFC_CFG_SLAVE 2
#define HFC_CFG_PCM 3
#define HFC_CFG_2HFC 4
#define HFC_CFG_SLAVEHFC 5
#define HFC_CFG_NEG_F0 6
#define HFC_CFG_SW_DD_DU 7
#define FLG_HFC_TIMER_T1 16
#define FLG_HFC_TIMER_T3 17
#define NT_T1_COUNT 1120 /* number of 3.125ms interrupts (3.5s) */
#define NT_T3_COUNT 31 /* number of 3.125ms interrupts (97 ms) */
#define CLKDEL_TE 0x0e /* CLKDEL in TE mode */
#define CLKDEL_NT 0x6c /* CLKDEL in NT mode */
struct hfc_pci {
u_char subtype;
u_char chanlimit;
u_char initdone;
u_long cfg;
u_int irq;
u_int irqcnt;
struct pci_dev *pdev;
struct hfcPCI_hw hw;
spinlock_t lock; /* card lock */
struct dchannel dch;
struct bchannel bch[2];
};
/* Interface functions */
static void
enable_hwirq(struct hfc_pci *hc)
{
hc->hw.int_m2 |= HFCPCI_IRQ_ENABLE;
Write_hfc(hc, HFCPCI_INT_M2, hc->hw.int_m2);
}
static void
disable_hwirq(struct hfc_pci *hc)
{
hc->hw.int_m2 &= ~((u_char)HFCPCI_IRQ_ENABLE);
Write_hfc(hc, HFCPCI_INT_M2, hc->hw.int_m2);
}
/*
* free hardware resources used by driver
*/
static void
release_io_hfcpci(struct hfc_pci *hc)
{
/* disable memory mapped ports + busmaster */
pci_write_config_word(hc->pdev, PCI_COMMAND, 0);
del_timer(&hc->hw.timer);
dma_free_coherent(&hc->pdev->dev, 0x8000, hc->hw.fifos,
hc->hw.dmahandle);
iounmap(hc->hw.pci_io);
}
/*
* set mode (NT or TE)
*/
static void
hfcpci_setmode(struct hfc_pci *hc)
{
if (hc->hw.protocol == ISDN_P_NT_S0) {
hc->hw.clkdel = CLKDEL_NT; /* ST-Bit delay for NT-Mode */
hc->hw.sctrl |= SCTRL_MODE_NT; /* NT-MODE */
hc->hw.states = 1; /* G1 */
} else {
hc->hw.clkdel = CLKDEL_TE; /* ST-Bit delay for TE-Mode */
hc->hw.sctrl &= ~SCTRL_MODE_NT; /* TE-MODE */
hc->hw.states = 2; /* F2 */
}
Write_hfc(hc, HFCPCI_CLKDEL, hc->hw.clkdel);
Write_hfc(hc, HFCPCI_STATES, HFCPCI_LOAD_STATE | hc->hw.states);
udelay(10);
Write_hfc(hc, HFCPCI_STATES, hc->hw.states | 0x40); /* Deactivate */
Write_hfc(hc, HFCPCI_SCTRL, hc->hw.sctrl);
}
/*
* function called to reset the HFC PCI chip. A complete software reset of chip
* and fifos is done.
*/
static void
reset_hfcpci(struct hfc_pci *hc)
{
u_char val;
int cnt = 0;
printk(KERN_DEBUG "reset_hfcpci: entered\n");
val = Read_hfc(hc, HFCPCI_CHIP_ID);
printk(KERN_INFO "HFC_PCI: resetting HFC ChipId(%x)\n", val);
/* enable memory mapped ports, disable busmaster */
pci_write_config_word(hc->pdev, PCI_COMMAND, PCI_ENA_MEMIO);
disable_hwirq(hc);
/* enable memory ports + busmaster */
pci_write_config_word(hc->pdev, PCI_COMMAND,
PCI_ENA_MEMIO + PCI_ENA_MASTER);
val = Read_hfc(hc, HFCPCI_STATUS);
printk(KERN_DEBUG "HFC-PCI status(%x) before reset\n", val);
hc->hw.cirm = HFCPCI_RESET; /* Reset On */
Write_hfc(hc, HFCPCI_CIRM, hc->hw.cirm);
set_current_state(TASK_UNINTERRUPTIBLE);
mdelay(10); /* Timeout 10ms */
hc->hw.cirm = 0; /* Reset Off */
Write_hfc(hc, HFCPCI_CIRM, hc->hw.cirm);
val = Read_hfc(hc, HFCPCI_STATUS);
printk(KERN_DEBUG "HFC-PCI status(%x) after reset\n", val);
while (cnt < 50000) { /* max 50000 us */
udelay(5);
cnt += 5;
val = Read_hfc(hc, HFCPCI_STATUS);
if (!(val & 2))
break;
}
printk(KERN_DEBUG "HFC-PCI status(%x) after %dus\n", val, cnt);
hc->hw.fifo_en = 0x30; /* only D fifos enabled */
hc->hw.bswapped = 0; /* no exchange */
hc->hw.ctmt = HFCPCI_TIM3_125 | HFCPCI_AUTO_TIMER;
hc->hw.trm = HFCPCI_BTRANS_THRESMASK; /* no echo connect , threshold */
hc->hw.sctrl = 0x40; /* set tx_lo mode, error in datasheet ! */
hc->hw.sctrl_r = 0;
hc->hw.sctrl_e = HFCPCI_AUTO_AWAKE; /* S/T Auto awake */
hc->hw.mst_m = 0;
if (test_bit(HFC_CFG_MASTER, &hc->cfg))
hc->hw.mst_m |= HFCPCI_MASTER; /* HFC Master Mode */
if (test_bit(HFC_CFG_NEG_F0, &hc->cfg))
hc->hw.mst_m |= HFCPCI_F0_NEGATIV;
Write_hfc(hc, HFCPCI_FIFO_EN, hc->hw.fifo_en);
Write_hfc(hc, HFCPCI_TRM, hc->hw.trm);
Write_hfc(hc, HFCPCI_SCTRL_E, hc->hw.sctrl_e);
Write_hfc(hc, HFCPCI_CTMT, hc->hw.ctmt);
hc->hw.int_m1 = HFCPCI_INTS_DTRANS | HFCPCI_INTS_DREC |
HFCPCI_INTS_L1STATE | HFCPCI_INTS_TIMER;
Write_hfc(hc, HFCPCI_INT_M1, hc->hw.int_m1);
/* Clear already pending ints */
val = Read_hfc(hc, HFCPCI_INT_S1);
/* set NT/TE mode */
hfcpci_setmode(hc);
Write_hfc(hc, HFCPCI_MST_MODE, hc->hw.mst_m);
Write_hfc(hc, HFCPCI_SCTRL_R, hc->hw.sctrl_r);
/*
* Init GCI/IOM2 in master mode
* Slots 0 and 1 are set for B-chan 1 and 2
* D- and monitor/CI channel are not enabled
* STIO1 is used as output for data, B1+B2 from ST->IOM+HFC
* STIO2 is used as data input, B1+B2 from IOM->ST
* ST B-channel send disabled -> continuous 1s
* The IOM slots are always enabled
*/
if (test_bit(HFC_CFG_PCM, &hc->cfg)) {
/* set data flow directions: connect B1,B2: HFC to/from PCM */
hc->hw.conn = 0x09;
} else {
hc->hw.conn = 0x36; /* set data flow directions */
if (test_bit(HFC_CFG_SW_DD_DU, &hc->cfg)) {
Write_hfc(hc, HFCPCI_B1_SSL, 0xC0);
Write_hfc(hc, HFCPCI_B2_SSL, 0xC1);
Write_hfc(hc, HFCPCI_B1_RSL, 0xC0);
Write_hfc(hc, HFCPCI_B2_RSL, 0xC1);
} else {
Write_hfc(hc, HFCPCI_B1_SSL, 0x80);
Write_hfc(hc, HFCPCI_B2_SSL, 0x81);
Write_hfc(hc, HFCPCI_B1_RSL, 0x80);
Write_hfc(hc, HFCPCI_B2_RSL, 0x81);
}
}
Write_hfc(hc, HFCPCI_CONNECT, hc->hw.conn);
val = Read_hfc(hc, HFCPCI_INT_S2);
}
/*
* Timer function called when kernel timer expires
*/
static void
hfcpci_Timer(struct timer_list *t)
{
struct hfc_pci *hc = from_timer(hc, t, hw.timer);
hc->hw.timer.expires = jiffies + 75;
/* WD RESET */
/*
* WriteReg(hc, HFCD_DATA, HFCD_CTMT, hc->hw.ctmt | 0x80);
* add_timer(&hc->hw.timer);
*/
}
/*
 * select the b-channel entry that matches the given channel and is active
*/
static struct bchannel *
Sel_BCS(struct hfc_pci *hc, int channel)
{
if (test_bit(FLG_ACTIVE, &hc->bch[0].Flags) &&
(hc->bch[0].nr & channel))
return &hc->bch[0];
else if (test_bit(FLG_ACTIVE, &hc->bch[1].Flags) &&
(hc->bch[1].nr & channel))
return &hc->bch[1];
else
return NULL;
}
/*
* clear the desired B-channel rx fifo
*/
static void
hfcpci_clear_fifo_rx(struct hfc_pci *hc, int fifo)
{
u_char fifo_state;
struct bzfifo *bzr;
if (fifo) {
bzr = &((union fifo_area *)(hc->hw.fifos))->b_chans.rxbz_b2;
fifo_state = hc->hw.fifo_en & HFCPCI_FIFOEN_B2RX;
} else {
bzr = &((union fifo_area *)(hc->hw.fifos))->b_chans.rxbz_b1;
fifo_state = hc->hw.fifo_en & HFCPCI_FIFOEN_B1RX;
}
if (fifo_state)
hc->hw.fifo_en ^= fifo_state;
Write_hfc(hc, HFCPCI_FIFO_EN, hc->hw.fifo_en);
hc->hw.last_bfifo_cnt[fifo] = 0;
bzr->f1 = MAX_B_FRAMES;
bzr->f2 = bzr->f1; /* init F pointers to remain constant */
bzr->za[MAX_B_FRAMES].z1 = cpu_to_le16(B_FIFO_SIZE + B_SUB_VAL - 1);
bzr->za[MAX_B_FRAMES].z2 = cpu_to_le16(
le16_to_cpu(bzr->za[MAX_B_FRAMES].z1));
if (fifo_state)
hc->hw.fifo_en |= fifo_state;
Write_hfc(hc, HFCPCI_FIFO_EN, hc->hw.fifo_en);
}
/*
* clear the desired B-channel tx fifo
*/
static void hfcpci_clear_fifo_tx(struct hfc_pci *hc, int fifo)
{
u_char fifo_state;
struct bzfifo *bzt;
if (fifo) {
bzt = &((union fifo_area *)(hc->hw.fifos))->b_chans.txbz_b2;
fifo_state = hc->hw.fifo_en & HFCPCI_FIFOEN_B2TX;
} else {
bzt = &((union fifo_area *)(hc->hw.fifos))->b_chans.txbz_b1;
fifo_state = hc->hw.fifo_en & HFCPCI_FIFOEN_B1TX;
}
if (fifo_state)
hc->hw.fifo_en ^= fifo_state;
Write_hfc(hc, HFCPCI_FIFO_EN, hc->hw.fifo_en);
if (hc->bch[fifo].debug & DEBUG_HW_BCHANNEL)
printk(KERN_DEBUG "hfcpci_clear_fifo_tx%d f1(%x) f2(%x) "
"z1(%x) z2(%x) state(%x)\n",
fifo, bzt->f1, bzt->f2,
le16_to_cpu(bzt->za[MAX_B_FRAMES].z1),
le16_to_cpu(bzt->za[MAX_B_FRAMES].z2),
fifo_state);
bzt->f2 = MAX_B_FRAMES;
bzt->f1 = bzt->f2; /* init F pointers to remain constant */
bzt->za[MAX_B_FRAMES].z1 = cpu_to_le16(B_FIFO_SIZE + B_SUB_VAL - 1);
bzt->za[MAX_B_FRAMES].z2 = cpu_to_le16(B_FIFO_SIZE + B_SUB_VAL - 2);
if (fifo_state)
hc->hw.fifo_en |= fifo_state;
Write_hfc(hc, HFCPCI_FIFO_EN, hc->hw.fifo_en);
if (hc->bch[fifo].debug & DEBUG_HW_BCHANNEL)
printk(KERN_DEBUG
"hfcpci_clear_fifo_tx%d f1(%x) f2(%x) z1(%x) z2(%x)\n",
fifo, bzt->f1, bzt->f2,
le16_to_cpu(bzt->za[MAX_B_FRAMES].z1),
le16_to_cpu(bzt->za[MAX_B_FRAMES].z2));
}
/*
* read a complete B-frame out of the buffer
*/
static void
hfcpci_empty_bfifo(struct bchannel *bch, struct bzfifo *bz,
u_char *bdata, int count)
{
u_char *ptr, *ptr1, new_f2;
int maxlen, new_z2;
struct zt *zp;
if ((bch->debug & DEBUG_HW_BCHANNEL) && !(bch->debug & DEBUG_HW_BFIFO))
printk(KERN_DEBUG "hfcpci_empty_fifo\n");
zp = &bz->za[bz->f2]; /* point to Z-Regs */
new_z2 = le16_to_cpu(zp->z2) + count; /* new position in fifo */
if (new_z2 >= (B_FIFO_SIZE + B_SUB_VAL))
new_z2 -= B_FIFO_SIZE; /* buffer wrap */
new_f2 = (bz->f2 + 1) & MAX_B_FRAMES;
if ((count > MAX_DATA_SIZE + 3) || (count < 4) ||
(*(bdata + (le16_to_cpu(zp->z1) - B_SUB_VAL)))) {
if (bch->debug & DEBUG_HW)
printk(KERN_DEBUG "hfcpci_empty_fifo: incoming packet "
"invalid length %d or crc\n", count);
#ifdef ERROR_STATISTIC
bch->err_inv++;
#endif
bz->za[new_f2].z2 = cpu_to_le16(new_z2);
bz->f2 = new_f2; /* next buffer */
} else {
bch->rx_skb = mI_alloc_skb(count - 3, GFP_ATOMIC);
if (!bch->rx_skb) {
printk(KERN_WARNING "HFCPCI: receive out of memory\n");
return;
}
count -= 3;
ptr = skb_put(bch->rx_skb, count);
if (le16_to_cpu(zp->z2) + count <= B_FIFO_SIZE + B_SUB_VAL)
maxlen = count; /* complete transfer */
else
maxlen = B_FIFO_SIZE + B_SUB_VAL -
le16_to_cpu(zp->z2); /* maximum */
ptr1 = bdata + (le16_to_cpu(zp->z2) - B_SUB_VAL);
/* start of data */
memcpy(ptr, ptr1, maxlen); /* copy data */
count -= maxlen;
if (count) { /* rest remaining */
ptr += maxlen;
ptr1 = bdata; /* start of buffer */
memcpy(ptr, ptr1, count); /* rest */
}
bz->za[new_f2].z2 = cpu_to_le16(new_z2);
bz->f2 = new_f2; /* next buffer */
recv_Bchannel(bch, MISDN_ID_ANY, false);
}
}
/*
* D-channel receive procedure
*/
static int
receive_dmsg(struct hfc_pci *hc)
{
struct dchannel *dch = &hc->dch;
int maxlen;
int rcnt, total;
int count = 5;
u_char *ptr, *ptr1;
struct dfifo *df;
struct zt *zp;
df = &((union fifo_area *)(hc->hw.fifos))->d_chan.d_rx;
while (((df->f1 & D_FREG_MASK) != (df->f2 & D_FREG_MASK)) && count--) {
zp = &df->za[df->f2 & D_FREG_MASK];
rcnt = le16_to_cpu(zp->z1) - le16_to_cpu(zp->z2);
if (rcnt < 0)
rcnt += D_FIFO_SIZE;
rcnt++;
if (dch->debug & DEBUG_HW_DCHANNEL)
printk(KERN_DEBUG
"hfcpci recd f1(%d) f2(%d) z1(%x) z2(%x) cnt(%d)\n",
df->f1, df->f2,
le16_to_cpu(zp->z1),
le16_to_cpu(zp->z2),
rcnt);
if ((rcnt > MAX_DFRAME_LEN + 3) || (rcnt < 4) ||
(df->data[le16_to_cpu(zp->z1)])) {
if (dch->debug & DEBUG_HW)
printk(KERN_DEBUG
"empty_fifo hfcpci packet inv. len "
"%d or crc %d\n",
rcnt,
df->data[le16_to_cpu(zp->z1)]);
#ifdef ERROR_STATISTIC
cs->err_rx++;
#endif
df->f2 = ((df->f2 + 1) & MAX_D_FRAMES) |
(MAX_D_FRAMES + 1); /* next buffer */
df->za[df->f2 & D_FREG_MASK].z2 =
cpu_to_le16((le16_to_cpu(zp->z2) + rcnt) &
(D_FIFO_SIZE - 1));
} else {
dch->rx_skb = mI_alloc_skb(rcnt - 3, GFP_ATOMIC);
if (!dch->rx_skb) {
printk(KERN_WARNING
"HFC-PCI: D receive out of memory\n");
break;
}
total = rcnt;
rcnt -= 3;
ptr = skb_put(dch->rx_skb, rcnt);
if (le16_to_cpu(zp->z2) + rcnt <= D_FIFO_SIZE)
maxlen = rcnt; /* complete transfer */
else
maxlen = D_FIFO_SIZE - le16_to_cpu(zp->z2);
/* maximum */
ptr1 = df->data + le16_to_cpu(zp->z2);
/* start of data */
memcpy(ptr, ptr1, maxlen); /* copy data */
rcnt -= maxlen;
if (rcnt) { /* rest remaining */
ptr += maxlen;
ptr1 = df->data; /* start of buffer */
memcpy(ptr, ptr1, rcnt); /* rest */
}
df->f2 = ((df->f2 + 1) & MAX_D_FRAMES) |
(MAX_D_FRAMES + 1); /* next buffer */
df->za[df->f2 & D_FREG_MASK].z2 = cpu_to_le16((
le16_to_cpu(zp->z2) + total) & (D_FIFO_SIZE - 1));
recv_Dchannel(dch);
}
}
return 1;
}
/*
 * check for transparent receive data and read at most one 'poll' size if available
*/
static void
hfcpci_empty_fifo_trans(struct bchannel *bch, struct bzfifo *rxbz,
struct bzfifo *txbz, u_char *bdata)
{
__le16 *z1r, *z2r, *z1t, *z2t;
int new_z2, fcnt_rx, fcnt_tx, maxlen;
u_char *ptr, *ptr1;
z1r = &rxbz->za[MAX_B_FRAMES].z1; /* pointer to z reg */
z2r = z1r + 1;
z1t = &txbz->za[MAX_B_FRAMES].z1;
z2t = z1t + 1;
fcnt_rx = le16_to_cpu(*z1r) - le16_to_cpu(*z2r);
if (!fcnt_rx)
return; /* no data avail */
if (fcnt_rx <= 0)
fcnt_rx += B_FIFO_SIZE; /* bytes actually buffered */
new_z2 = le16_to_cpu(*z2r) + fcnt_rx; /* new position in fifo */
if (new_z2 >= (B_FIFO_SIZE + B_SUB_VAL))
new_z2 -= B_FIFO_SIZE; /* buffer wrap */
fcnt_tx = le16_to_cpu(*z2t) - le16_to_cpu(*z1t);
if (fcnt_tx <= 0)
fcnt_tx += B_FIFO_SIZE;
/* fcnt_tx contains available bytes in tx-fifo */
fcnt_tx = B_FIFO_SIZE - fcnt_tx;
/* remaining bytes to send (bytes in tx-fifo) */
if (test_bit(FLG_RX_OFF, &bch->Flags)) {
bch->dropcnt += fcnt_rx;
*z2r = cpu_to_le16(new_z2);
return;
}
maxlen = bchannel_get_rxbuf(bch, fcnt_rx);
if (maxlen < 0) {
pr_warn("B%d: No bufferspace for %d bytes\n", bch->nr, fcnt_rx);
} else {
ptr = skb_put(bch->rx_skb, fcnt_rx);
if (le16_to_cpu(*z2r) + fcnt_rx <= B_FIFO_SIZE + B_SUB_VAL)
maxlen = fcnt_rx; /* complete transfer */
else
maxlen = B_FIFO_SIZE + B_SUB_VAL - le16_to_cpu(*z2r);
/* maximum */
ptr1 = bdata + (le16_to_cpu(*z2r) - B_SUB_VAL);
/* start of data */
memcpy(ptr, ptr1, maxlen); /* copy data */
fcnt_rx -= maxlen;
if (fcnt_rx) { /* rest remaining */
ptr += maxlen;
ptr1 = bdata; /* start of buffer */
memcpy(ptr, ptr1, fcnt_rx); /* rest */
}
recv_Bchannel(bch, fcnt_tx, false); /* bch, id, !force */
}
*z2r = cpu_to_le16(new_z2); /* new position */
}
/*
* B-channel main receive routine
*/
static void
main_rec_hfcpci(struct bchannel *bch)
{
struct hfc_pci *hc = bch->hw;
int rcnt, real_fifo;
int receive = 0, count = 5;
struct bzfifo *txbz, *rxbz;
u_char *bdata;
struct zt *zp;
if ((bch->nr & 2) && (!hc->hw.bswapped)) {
rxbz = &((union fifo_area *)(hc->hw.fifos))->b_chans.rxbz_b2;
txbz = &((union fifo_area *)(hc->hw.fifos))->b_chans.txbz_b2;
bdata = ((union fifo_area *)(hc->hw.fifos))->b_chans.rxdat_b2;
real_fifo = 1;
} else {
rxbz = &((union fifo_area *)(hc->hw.fifos))->b_chans.rxbz_b1;
txbz = &((union fifo_area *)(hc->hw.fifos))->b_chans.txbz_b1;
bdata = ((union fifo_area *)(hc->hw.fifos))->b_chans.rxdat_b1;
real_fifo = 0;
}
Begin:
count--;
if (rxbz->f1 != rxbz->f2) {
if (bch->debug & DEBUG_HW_BCHANNEL)
printk(KERN_DEBUG "hfcpci rec ch(%x) f1(%d) f2(%d)\n",
bch->nr, rxbz->f1, rxbz->f2);
zp = &rxbz->za[rxbz->f2];
rcnt = le16_to_cpu(zp->z1) - le16_to_cpu(zp->z2);
if (rcnt < 0)
rcnt += B_FIFO_SIZE;
rcnt++;
if (bch->debug & DEBUG_HW_BCHANNEL)
printk(KERN_DEBUG
"hfcpci rec ch(%x) z1(%x) z2(%x) cnt(%d)\n",
bch->nr, le16_to_cpu(zp->z1),
le16_to_cpu(zp->z2), rcnt);
hfcpci_empty_bfifo(bch, rxbz, bdata, rcnt);
rcnt = rxbz->f1 - rxbz->f2;
if (rcnt < 0)
rcnt += MAX_B_FRAMES + 1;
if (hc->hw.last_bfifo_cnt[real_fifo] > rcnt + 1) {
rcnt = 0;
hfcpci_clear_fifo_rx(hc, real_fifo);
}
hc->hw.last_bfifo_cnt[real_fifo] = rcnt;
if (rcnt > 1)
receive = 1;
else
receive = 0;
} else if (test_bit(FLG_TRANSPARENT, &bch->Flags)) {
hfcpci_empty_fifo_trans(bch, rxbz, txbz, bdata);
return;
} else
receive = 0;
if (count && receive)
goto Begin;
}
/*
* D-channel send routine
*/
static void
hfcpci_fill_dfifo(struct hfc_pci *hc)
{
struct dchannel *dch = &hc->dch;
int fcnt;
int count, new_z1, maxlen;
struct dfifo *df;
u_char *src, *dst, new_f1;
if ((dch->debug & DEBUG_HW_DCHANNEL) && !(dch->debug & DEBUG_HW_DFIFO))
printk(KERN_DEBUG "%s\n", __func__);
if (!dch->tx_skb)
return;
count = dch->tx_skb->len - dch->tx_idx;
if (count <= 0)
return;
df = &((union fifo_area *) (hc->hw.fifos))->d_chan.d_tx;
if (dch->debug & DEBUG_HW_DFIFO)
printk(KERN_DEBUG "%s:f1(%d) f2(%d) z1(f1)(%x)\n", __func__,
df->f1, df->f2,
le16_to_cpu(df->za[df->f1 & D_FREG_MASK].z1));
fcnt = df->f1 - df->f2; /* frame count actually buffered */
if (fcnt < 0)
fcnt += (MAX_D_FRAMES + 1); /* if wrap around */
if (fcnt > (MAX_D_FRAMES - 1)) {
if (dch->debug & DEBUG_HW_DCHANNEL)
printk(KERN_DEBUG
"hfcpci_fill_Dfifo more as 14 frames\n");
#ifdef ERROR_STATISTIC
cs->err_tx++;
#endif
return;
}
/* now determine free bytes in FIFO buffer */
maxlen = le16_to_cpu(df->za[df->f2 & D_FREG_MASK].z2) -
le16_to_cpu(df->za[df->f1 & D_FREG_MASK].z1) - 1;
if (maxlen <= 0)
maxlen += D_FIFO_SIZE; /* count now contains available bytes */
if (dch->debug & DEBUG_HW_DCHANNEL)
printk(KERN_DEBUG "hfcpci_fill_Dfifo count(%d/%d)\n",
count, maxlen);
if (count > maxlen) {
if (dch->debug & DEBUG_HW_DCHANNEL)
printk(KERN_DEBUG "hfcpci_fill_Dfifo no fifo mem\n");
return;
}
new_z1 = (le16_to_cpu(df->za[df->f1 & D_FREG_MASK].z1) + count) &
(D_FIFO_SIZE - 1);
new_f1 = ((df->f1 + 1) & D_FREG_MASK) | (D_FREG_MASK + 1);
src = dch->tx_skb->data + dch->tx_idx; /* source pointer */
dst = df->data + le16_to_cpu(df->za[df->f1 & D_FREG_MASK].z1);
maxlen = D_FIFO_SIZE - le16_to_cpu(df->za[df->f1 & D_FREG_MASK].z1);
/* end fifo */
if (maxlen > count)
maxlen = count; /* limit size */
memcpy(dst, src, maxlen); /* first copy */
count -= maxlen; /* remaining bytes */
if (count) {
dst = df->data; /* start of buffer */
src += maxlen; /* new position */
memcpy(dst, src, count);
}
df->za[new_f1 & D_FREG_MASK].z1 = cpu_to_le16(new_z1);
/* for next buffer */
df->za[df->f1 & D_FREG_MASK].z1 = cpu_to_le16(new_z1);
/* new pos actual buffer */
df->f1 = new_f1; /* next frame */
dch->tx_idx = dch->tx_skb->len;
}
/*
* B-channel send routine
*/
static void
hfcpci_fill_fifo(struct bchannel *bch)
{
struct hfc_pci *hc = bch->hw;
int maxlen, fcnt;
int count, new_z1;
struct bzfifo *bz;
u_char *bdata;
u_char new_f1, *src, *dst;
__le16 *z1t, *z2t;
if ((bch->debug & DEBUG_HW_BCHANNEL) && !(bch->debug & DEBUG_HW_BFIFO))
printk(KERN_DEBUG "%s\n", __func__);
if ((!bch->tx_skb) || bch->tx_skb->len == 0) {
if (!test_bit(FLG_FILLEMPTY, &bch->Flags) &&
!test_bit(FLG_TRANSPARENT, &bch->Flags))
return;
count = HFCPCI_FILLEMPTY;
} else {
count = bch->tx_skb->len - bch->tx_idx;
}
if ((bch->nr & 2) && (!hc->hw.bswapped)) {
bz = &((union fifo_area *)(hc->hw.fifos))->b_chans.txbz_b2;
bdata = ((union fifo_area *)(hc->hw.fifos))->b_chans.txdat_b2;
} else {
bz = &((union fifo_area *)(hc->hw.fifos))->b_chans.txbz_b1;
bdata = ((union fifo_area *)(hc->hw.fifos))->b_chans.txdat_b1;
}
if (test_bit(FLG_TRANSPARENT, &bch->Flags)) {
z1t = &bz->za[MAX_B_FRAMES].z1;
z2t = z1t + 1;
if (bch->debug & DEBUG_HW_BCHANNEL)
printk(KERN_DEBUG "hfcpci_fill_fifo_trans ch(%x) "
"cnt(%d) z1(%x) z2(%x)\n", bch->nr, count,
le16_to_cpu(*z1t), le16_to_cpu(*z2t));
fcnt = le16_to_cpu(*z2t) - le16_to_cpu(*z1t);
if (fcnt <= 0)
fcnt += B_FIFO_SIZE;
if (test_bit(FLG_FILLEMPTY, &bch->Flags)) {
/* fcnt contains available bytes in fifo */
if (count > fcnt)
count = fcnt;
new_z1 = le16_to_cpu(*z1t) + count;
/* new buffer Position */
if (new_z1 >= (B_FIFO_SIZE + B_SUB_VAL))
new_z1 -= B_FIFO_SIZE; /* buffer wrap */
dst = bdata + (le16_to_cpu(*z1t) - B_SUB_VAL);
maxlen = (B_FIFO_SIZE + B_SUB_VAL) - le16_to_cpu(*z1t);
/* end of fifo */
if (bch->debug & DEBUG_HW_BFIFO)
printk(KERN_DEBUG "hfcpci_FFt fillempty "
"fcnt(%d) maxl(%d) nz1(%x) dst(%p)\n",
fcnt, maxlen, new_z1, dst);
if (maxlen > count)
maxlen = count; /* limit size */
memset(dst, bch->fill[0], maxlen); /* first copy */
count -= maxlen; /* remaining bytes */
if (count) {
dst = bdata; /* start of buffer */
memset(dst, bch->fill[0], count);
}
*z1t = cpu_to_le16(new_z1); /* now send data */
return;
}
/* fcnt contains available bytes in fifo */
fcnt = B_FIFO_SIZE - fcnt;
/* remaining bytes to send (bytes in fifo) */
next_t_frame:
count = bch->tx_skb->len - bch->tx_idx;
/* maximum fill shall be poll*2 */
if (count > (poll << 1) - fcnt)
count = (poll << 1) - fcnt;
if (count <= 0)
return;
/* data is suitable for fifo */
new_z1 = le16_to_cpu(*z1t) + count;
/* new buffer Position */
if (new_z1 >= (B_FIFO_SIZE + B_SUB_VAL))
new_z1 -= B_FIFO_SIZE; /* buffer wrap */
src = bch->tx_skb->data + bch->tx_idx;
/* source pointer */
dst = bdata + (le16_to_cpu(*z1t) - B_SUB_VAL);
maxlen = (B_FIFO_SIZE + B_SUB_VAL) - le16_to_cpu(*z1t);
/* end of fifo */
if (bch->debug & DEBUG_HW_BFIFO)
printk(KERN_DEBUG "hfcpci_FFt fcnt(%d) "
"maxl(%d) nz1(%x) dst(%p)\n",
fcnt, maxlen, new_z1, dst);
fcnt += count;
bch->tx_idx += count;
if (maxlen > count)
maxlen = count; /* limit size */
memcpy(dst, src, maxlen); /* first copy */
count -= maxlen; /* remaining bytes */
if (count) {
dst = bdata; /* start of buffer */
src += maxlen; /* new position */
memcpy(dst, src, count);
}
*z1t = cpu_to_le16(new_z1); /* now send data */
if (bch->tx_idx < bch->tx_skb->len)
return;
dev_kfree_skb_any(bch->tx_skb);
if (get_next_bframe(bch))
goto next_t_frame;
return;
}
if (bch->debug & DEBUG_HW_BCHANNEL)
printk(KERN_DEBUG
"%s: ch(%x) f1(%d) f2(%d) z1(f1)(%x)\n",
__func__, bch->nr, bz->f1, bz->f2,
bz->za[bz->f1].z1);
fcnt = bz->f1 - bz->f2; /* frame count actually buffered */
if (fcnt < 0)
fcnt += (MAX_B_FRAMES + 1); /* if wrap around */
if (fcnt > (MAX_B_FRAMES - 1)) {
if (bch->debug & DEBUG_HW_BCHANNEL)
printk(KERN_DEBUG
"hfcpci_fill_Bfifo more as 14 frames\n");
return;
}
/* now determine free bytes in FIFO buffer */
maxlen = le16_to_cpu(bz->za[bz->f2].z2) -
le16_to_cpu(bz->za[bz->f1].z1) - 1;
if (maxlen <= 0)
maxlen += B_FIFO_SIZE; /* count now contains available bytes */
if (bch->debug & DEBUG_HW_BCHANNEL)
printk(KERN_DEBUG "hfcpci_fill_fifo ch(%x) count(%d/%d)\n",
bch->nr, count, maxlen);
if (maxlen < count) {
if (bch->debug & DEBUG_HW_BCHANNEL)
printk(KERN_DEBUG "hfcpci_fill_fifo no fifo mem\n");
return;
}
new_z1 = le16_to_cpu(bz->za[bz->f1].z1) + count;
/* new buffer Position */
if (new_z1 >= (B_FIFO_SIZE + B_SUB_VAL))
new_z1 -= B_FIFO_SIZE; /* buffer wrap */
new_f1 = ((bz->f1 + 1) & MAX_B_FRAMES);
src = bch->tx_skb->data + bch->tx_idx; /* source pointer */
dst = bdata + (le16_to_cpu(bz->za[bz->f1].z1) - B_SUB_VAL);
maxlen = (B_FIFO_SIZE + B_SUB_VAL) - le16_to_cpu(bz->za[bz->f1].z1);
/* end fifo */
if (maxlen > count)
maxlen = count; /* limit size */
memcpy(dst, src, maxlen); /* first copy */
count -= maxlen; /* remaining bytes */
if (count) {
dst = bdata; /* start of buffer */
src += maxlen; /* new position */
memcpy(dst, src, count);
}
bz->za[new_f1].z1 = cpu_to_le16(new_z1); /* for next buffer */
bz->f1 = new_f1; /* next frame */
dev_kfree_skb_any(bch->tx_skb);
get_next_bframe(bch);
}
/*
* handle L1 state changes TE
*/
static void
ph_state_te(struct dchannel *dch)
{
if (dch->debug)
printk(KERN_DEBUG "%s: TE newstate %x\n",
__func__, dch->state);
switch (dch->state) {
case 0:
l1_event(dch->l1, HW_RESET_IND);
break;
case 3:
l1_event(dch->l1, HW_DEACT_IND);
break;
case 5:
case 8:
l1_event(dch->l1, ANYSIGNAL);
break;
case 6:
l1_event(dch->l1, INFO2);
break;
case 7:
l1_event(dch->l1, INFO4_P8);
break;
}
}
/*
* handle L1 state changes NT
*/
static void
handle_nt_timer3(struct dchannel *dch) {
struct hfc_pci *hc = dch->hw;
test_and_clear_bit(FLG_HFC_TIMER_T3, &dch->Flags);
hc->hw.int_m1 &= ~HFCPCI_INTS_TIMER;
Write_hfc(hc, HFCPCI_INT_M1, hc->hw.int_m1);
hc->hw.nt_timer = 0;
test_and_set_bit(FLG_ACTIVE, &dch->Flags);
if (test_bit(HFC_CFG_MASTER, &hc->cfg))
hc->hw.mst_m |= HFCPCI_MASTER;
Write_hfc(hc, HFCPCI_MST_MODE, hc->hw.mst_m);
_queue_data(&dch->dev.D, PH_ACTIVATE_IND,
MISDN_ID_ANY, 0, NULL, GFP_ATOMIC);
}
static void
ph_state_nt(struct dchannel *dch)
{
struct hfc_pci *hc = dch->hw;
if (dch->debug)
printk(KERN_DEBUG "%s: NT newstate %x\n",
__func__, dch->state);
switch (dch->state) {
case 2:
if (hc->hw.nt_timer < 0) {
hc->hw.nt_timer = 0;
test_and_clear_bit(FLG_HFC_TIMER_T3, &dch->Flags);
test_and_clear_bit(FLG_HFC_TIMER_T1, &dch->Flags);
hc->hw.int_m1 &= ~HFCPCI_INTS_TIMER;
Write_hfc(hc, HFCPCI_INT_M1, hc->hw.int_m1);
/* Clear already pending ints */
(void) Read_hfc(hc, HFCPCI_INT_S1);
Write_hfc(hc, HFCPCI_STATES, 4 | HFCPCI_LOAD_STATE);
udelay(10);
Write_hfc(hc, HFCPCI_STATES, 4);
dch->state = 4;
} else if (hc->hw.nt_timer == 0) {
hc->hw.int_m1 |= HFCPCI_INTS_TIMER;
Write_hfc(hc, HFCPCI_INT_M1, hc->hw.int_m1);
hc->hw.nt_timer = NT_T1_COUNT;
hc->hw.ctmt &= ~HFCPCI_AUTO_TIMER;
hc->hw.ctmt |= HFCPCI_TIM3_125;
Write_hfc(hc, HFCPCI_CTMT, hc->hw.ctmt |
HFCPCI_CLTIMER);
test_and_clear_bit(FLG_HFC_TIMER_T3, &dch->Flags);
test_and_set_bit(FLG_HFC_TIMER_T1, &dch->Flags);
/* allow G2 -> G3 transition */
Write_hfc(hc, HFCPCI_STATES, 2 | HFCPCI_NT_G2_G3);
} else {
Write_hfc(hc, HFCPCI_STATES, 2 | HFCPCI_NT_G2_G3);
}
break;
case 1:
hc->hw.nt_timer = 0;
test_and_clear_bit(FLG_HFC_TIMER_T3, &dch->Flags);
test_and_clear_bit(FLG_HFC_TIMER_T1, &dch->Flags);
hc->hw.int_m1 &= ~HFCPCI_INTS_TIMER;
Write_hfc(hc, HFCPCI_INT_M1, hc->hw.int_m1);
test_and_clear_bit(FLG_ACTIVE, &dch->Flags);
hc->hw.mst_m &= ~HFCPCI_MASTER;
Write_hfc(hc, HFCPCI_MST_MODE, hc->hw.mst_m);
test_and_clear_bit(FLG_L2_ACTIVATED, &dch->Flags);
_queue_data(&dch->dev.D, PH_DEACTIVATE_IND,
MISDN_ID_ANY, 0, NULL, GFP_ATOMIC);
break;
case 4:
hc->hw.nt_timer = 0;
test_and_clear_bit(FLG_HFC_TIMER_T3, &dch->Flags);
test_and_clear_bit(FLG_HFC_TIMER_T1, &dch->Flags);
hc->hw.int_m1 &= ~HFCPCI_INTS_TIMER;
Write_hfc(hc, HFCPCI_INT_M1, hc->hw.int_m1);
break;
case 3:
if (!test_and_set_bit(FLG_HFC_TIMER_T3, &dch->Flags)) {
if (!test_and_clear_bit(FLG_L2_ACTIVATED,
&dch->Flags)) {
handle_nt_timer3(dch);
break;
}
test_and_clear_bit(FLG_HFC_TIMER_T1, &dch->Flags);
hc->hw.int_m1 |= HFCPCI_INTS_TIMER;
Write_hfc(hc, HFCPCI_INT_M1, hc->hw.int_m1);
hc->hw.nt_timer = NT_T3_COUNT;
hc->hw.ctmt &= ~HFCPCI_AUTO_TIMER;
hc->hw.ctmt |= HFCPCI_TIM3_125;
Write_hfc(hc, HFCPCI_CTMT, hc->hw.ctmt |
HFCPCI_CLTIMER);
}
break;
}
}
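/*
 * dispatch D-channel state changes to the NT or TE handler
 * (checking for an expired T3 timer in NT mode)
 */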
static void
ph_state(struct dchannel *dch)
{
struct hfc_pci *hc = dch->hw;
if (hc->hw.protocol == ISDN_P_NT_S0) {
if (test_bit(FLG_HFC_TIMER_T3, &dch->Flags) &&
hc->hw.nt_timer < 0)
handle_nt_timer3(dch);
else
ph_state_nt(dch);
} else
ph_state_te(dch);
}
/*
* Layer 1 callback function
*/
static int
hfc_l1callback(struct dchannel *dch, u_int cmd)
{
struct hfc_pci *hc = dch->hw;
switch (cmd) {
case INFO3_P8:
case INFO3_P10:
if (test_bit(HFC_CFG_MASTER, &hc->cfg))
hc->hw.mst_m |= HFCPCI_MASTER;
Write_hfc(hc, HFCPCI_MST_MODE, hc->hw.mst_m);
break;
case HW_RESET_REQ:
Write_hfc(hc, HFCPCI_STATES, HFCPCI_LOAD_STATE | 3);
/* HFC ST 3 */
udelay(6);
Write_hfc(hc, HFCPCI_STATES, 3); /* HFC ST 2 */
if (test_bit(HFC_CFG_MASTER, &hc->cfg))
hc->hw.mst_m |= HFCPCI_MASTER;
Write_hfc(hc, HFCPCI_MST_MODE, hc->hw.mst_m);
Write_hfc(hc, HFCPCI_STATES, HFCPCI_ACTIVATE |
HFCPCI_DO_ACTION);
l1_event(dch->l1, HW_POWERUP_IND);
break;
case HW_DEACT_REQ:
hc->hw.mst_m &= ~HFCPCI_MASTER;
Write_hfc(hc, HFCPCI_MST_MODE, hc->hw.mst_m);
skb_queue_purge(&dch->squeue);
if (dch->tx_skb) {
dev_kfree_skb(dch->tx_skb);
dch->tx_skb = NULL;
}
dch->tx_idx = 0;
if (dch->rx_skb) {
dev_kfree_skb(dch->rx_skb);
dch->rx_skb = NULL;
}
test_and_clear_bit(FLG_TX_BUSY, &dch->Flags);
if (test_and_clear_bit(FLG_BUSY_TIMER, &dch->Flags))
del_timer(&dch->timer);
break;
case HW_POWERUP_REQ:
Write_hfc(hc, HFCPCI_STATES, HFCPCI_DO_ACTION);
break;
case PH_ACTIVATE_IND:
test_and_set_bit(FLG_ACTIVE, &dch->Flags);
_queue_data(&dch->dev.D, cmd, MISDN_ID_ANY, 0, NULL,
GFP_ATOMIC);
break;
case PH_DEACTIVATE_IND:
test_and_clear_bit(FLG_ACTIVE, &dch->Flags);
_queue_data(&dch->dev.D, cmd, MISDN_ID_ANY, 0, NULL,
GFP_ATOMIC);
break;
default:
if (dch->debug & DEBUG_HW)
printk(KERN_DEBUG "%s: unknown command %x\n",
__func__, cmd);
return -1;
}
return 0;
}
/*
* Interrupt handler
*/
static inline void
tx_birq(struct bchannel *bch)
{
if (bch->tx_skb && bch->tx_idx < bch->tx_skb->len)
hfcpci_fill_fifo(bch);
else {
dev_kfree_skb_any(bch->tx_skb);
if (get_next_bframe(bch))
hfcpci_fill_fifo(bch);
}
}
static inline void
tx_dirq(struct dchannel *dch)
{
if (dch->tx_skb && dch->tx_idx < dch->tx_skb->len)
hfcpci_fill_dfifo(dch->hw);
else {
dev_kfree_skb(dch->tx_skb);
if (get_next_dframe(dch))
hfcpci_fill_dfifo(dch->hw);
}
}
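/*
 * main interrupt handler: handle state change and timer interrupts and
 * dispatch B-channel/D-channel receive and transmit work
 */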
static irqreturn_t
hfcpci_int(int intno, void *dev_id)
{
struct hfc_pci *hc = dev_id;
u_char exval;
struct bchannel *bch;
u_char val, stat;
spin_lock(&hc->lock);
if (!(hc->hw.int_m2 & 0x08)) {
spin_unlock(&hc->lock);
return IRQ_NONE; /* not initialised */
}
stat = Read_hfc(hc, HFCPCI_STATUS);
if (HFCPCI_ANYINT & stat) {
val = Read_hfc(hc, HFCPCI_INT_S1);
if (hc->dch.debug & DEBUG_HW_DCHANNEL)
printk(KERN_DEBUG
"HFC-PCI: stat(%02x) s1(%02x)\n", stat, val);
} else {
/* shared */
spin_unlock(&hc->lock);
return IRQ_NONE;
}
hc->irqcnt++;
if (hc->dch.debug & DEBUG_HW_DCHANNEL)
printk(KERN_DEBUG "HFC-PCI irq %x\n", val);
val &= hc->hw.int_m1;
if (val & 0x40) { /* state machine irq */
exval = Read_hfc(hc, HFCPCI_STATES) & 0xf;
if (hc->dch.debug & DEBUG_HW_DCHANNEL)
printk(KERN_DEBUG "ph_state chg %d->%d\n",
hc->dch.state, exval);
hc->dch.state = exval;
schedule_event(&hc->dch, FLG_PHCHANGE);
val &= ~0x40;
}
if (val & 0x80) { /* timer irq */
if (hc->hw.protocol == ISDN_P_NT_S0) {
if ((--hc->hw.nt_timer) < 0)
schedule_event(&hc->dch, FLG_PHCHANGE);
}
val &= ~0x80;
Write_hfc(hc, HFCPCI_CTMT, hc->hw.ctmt | HFCPCI_CLTIMER);
}
if (val & 0x08) { /* B1 rx */
bch = Sel_BCS(hc, hc->hw.bswapped ? 2 : 1);
if (bch)
main_rec_hfcpci(bch);
else if (hc->dch.debug)
printk(KERN_DEBUG "hfcpci spurious 0x08 IRQ\n");
}
if (val & 0x10) { /* B2 rx */
bch = Sel_BCS(hc, 2);
if (bch)
main_rec_hfcpci(bch);
else if (hc->dch.debug)
printk(KERN_DEBUG "hfcpci spurious 0x10 IRQ\n");
}
if (val & 0x01) { /* B1 tx */
bch = Sel_BCS(hc, hc->hw.bswapped ? 2 : 1);
if (bch)
tx_birq(bch);
else if (hc->dch.debug)
printk(KERN_DEBUG "hfcpci spurious 0x01 IRQ\n");
}
if (val & 0x02) { /* B2 tx */
bch = Sel_BCS(hc, 2);
if (bch)
tx_birq(bch);
else if (hc->dch.debug)
printk(KERN_DEBUG "hfcpci spurious 0x02 IRQ\n");
}
if (val & 0x20) /* D rx */
receive_dmsg(hc);
if (val & 0x04) { /* D tx */
if (test_and_clear_bit(FLG_BUSY_TIMER, &hc->dch.Flags))
del_timer(&hc->dch.timer);
tx_dirq(&hc->dch);
}
spin_unlock(&hc->lock);
return IRQ_HANDLED;
}
/*
 * timer callback for D-channel busy resolution. Currently unused.
*/
static void
hfcpci_dbusy_timer(struct timer_list *t)
{
}
/*
* activate/deactivate hardware for selected channels and mode
*/
static int
mode_hfcpci(struct bchannel *bch, int bc, int protocol)
{
struct hfc_pci *hc = bch->hw;
int fifo2;
u_char rx_slot = 0, tx_slot = 0, pcm_mode;
if (bch->debug & DEBUG_HW_BCHANNEL)
printk(KERN_DEBUG
"HFCPCI bchannel protocol %x-->%x ch %x-->%x\n",
bch->state, protocol, bch->nr, bc);
fifo2 = bc;
pcm_mode = (bc >> 24) & 0xff;
if (pcm_mode) { /* PCM SLOT USE */
if (!test_bit(HFC_CFG_PCM, &hc->cfg))
printk(KERN_WARNING
"%s: pcm channel id without HFC_CFG_PCM\n",
__func__);
rx_slot = (bc >> 8) & 0xff;
tx_slot = (bc >> 16) & 0xff;
bc = bc & 0xff;
} else if (test_bit(HFC_CFG_PCM, &hc->cfg) && (protocol > ISDN_P_NONE))
printk(KERN_WARNING "%s: no pcm channel id but HFC_CFG_PCM\n",
__func__);
if (hc->chanlimit > 1) {
hc->hw.bswapped = 0; /* B1 and B2 normal mode */
hc->hw.sctrl_e &= ~0x80;
} else {
if (bc & 2) {
if (protocol != ISDN_P_NONE) {
hc->hw.bswapped = 1; /* B1 and B2 exchanged */
hc->hw.sctrl_e |= 0x80;
} else {
hc->hw.bswapped = 0; /* B1 and B2 normal mode */
hc->hw.sctrl_e &= ~0x80;
}
fifo2 = 1;
} else {
hc->hw.bswapped = 0; /* B1 and B2 normal mode */
hc->hw.sctrl_e &= ~0x80;
}
}
switch (protocol) {
case (-1): /* used for init */
bch->state = -1;
bch->nr = bc;
fallthrough;
case (ISDN_P_NONE):
if (bch->state == ISDN_P_NONE)
return 0;
if (bc & 2) {
hc->hw.sctrl &= ~SCTRL_B2_ENA;
hc->hw.sctrl_r &= ~SCTRL_B2_ENA;
} else {
hc->hw.sctrl &= ~SCTRL_B1_ENA;
hc->hw.sctrl_r &= ~SCTRL_B1_ENA;
}
if (fifo2 & 2) {
hc->hw.fifo_en &= ~HFCPCI_FIFOEN_B2;
hc->hw.int_m1 &= ~(HFCPCI_INTS_B2TRANS |
HFCPCI_INTS_B2REC);
} else {
hc->hw.fifo_en &= ~HFCPCI_FIFOEN_B1;
hc->hw.int_m1 &= ~(HFCPCI_INTS_B1TRANS |
HFCPCI_INTS_B1REC);
}
#ifdef REVERSE_BITORDER
if (bch->nr & 2)
hc->hw.cirm &= 0x7f;
else
hc->hw.cirm &= 0xbf;
#endif
bch->state = ISDN_P_NONE;
bch->nr = bc;
test_and_clear_bit(FLG_HDLC, &bch->Flags);
test_and_clear_bit(FLG_TRANSPARENT, &bch->Flags);
break;
case (ISDN_P_B_RAW):
bch->state = protocol;
bch->nr = bc;
hfcpci_clear_fifo_rx(hc, (fifo2 & 2) ? 1 : 0);
hfcpci_clear_fifo_tx(hc, (fifo2 & 2) ? 1 : 0);
if (bc & 2) {
hc->hw.sctrl |= SCTRL_B2_ENA;
hc->hw.sctrl_r |= SCTRL_B2_ENA;
#ifdef REVERSE_BITORDER
hc->hw.cirm |= 0x80;
#endif
} else {
hc->hw.sctrl |= SCTRL_B1_ENA;
hc->hw.sctrl_r |= SCTRL_B1_ENA;
#ifdef REVERSE_BITORDER
hc->hw.cirm |= 0x40;
#endif
}
if (fifo2 & 2) {
hc->hw.fifo_en |= HFCPCI_FIFOEN_B2;
if (!tics)
hc->hw.int_m1 |= (HFCPCI_INTS_B2TRANS |
HFCPCI_INTS_B2REC);
hc->hw.ctmt |= 2;
hc->hw.conn &= ~0x18;
} else {
hc->hw.fifo_en |= HFCPCI_FIFOEN_B1;
if (!tics)
hc->hw.int_m1 |= (HFCPCI_INTS_B1TRANS |
HFCPCI_INTS_B1REC);
hc->hw.ctmt |= 1;
hc->hw.conn &= ~0x03;
}
test_and_set_bit(FLG_TRANSPARENT, &bch->Flags);
break;
case (ISDN_P_B_HDLC):
bch->state = protocol;
bch->nr = bc;
hfcpci_clear_fifo_rx(hc, (fifo2 & 2) ? 1 : 0);
hfcpci_clear_fifo_tx(hc, (fifo2 & 2) ? 1 : 0);
if (bc & 2) {
hc->hw.sctrl |= SCTRL_B2_ENA;
hc->hw.sctrl_r |= SCTRL_B2_ENA;
} else {
hc->hw.sctrl |= SCTRL_B1_ENA;
hc->hw.sctrl_r |= SCTRL_B1_ENA;
}
if (fifo2 & 2) {
hc->hw.last_bfifo_cnt[1] = 0;
hc->hw.fifo_en |= HFCPCI_FIFOEN_B2;
hc->hw.int_m1 |= (HFCPCI_INTS_B2TRANS |
HFCPCI_INTS_B2REC);
hc->hw.ctmt &= ~2;
hc->hw.conn &= ~0x18;
} else {
hc->hw.last_bfifo_cnt[0] = 0;
hc->hw.fifo_en |= HFCPCI_FIFOEN_B1;
hc->hw.int_m1 |= (HFCPCI_INTS_B1TRANS |
HFCPCI_INTS_B1REC);
hc->hw.ctmt &= ~1;
hc->hw.conn &= ~0x03;
}
test_and_set_bit(FLG_HDLC, &bch->Flags);
break;
default:
printk(KERN_DEBUG "prot not known %x\n", protocol);
return -ENOPROTOOPT;
}
if (test_bit(HFC_CFG_PCM, &hc->cfg)) {
if ((protocol == ISDN_P_NONE) ||
(protocol == -1)) { /* init case */
rx_slot = 0;
tx_slot = 0;
} else {
if (test_bit(HFC_CFG_SW_DD_DU, &hc->cfg)) {
rx_slot |= 0xC0;
tx_slot |= 0xC0;
} else {
rx_slot |= 0x80;
tx_slot |= 0x80;
}
}
if (bc & 2) {
hc->hw.conn &= 0xc7;
hc->hw.conn |= 0x08;
printk(KERN_DEBUG "%s: Write_hfc: B2_SSL 0x%x\n",
__func__, tx_slot);
printk(KERN_DEBUG "%s: Write_hfc: B2_RSL 0x%x\n",
__func__, rx_slot);
Write_hfc(hc, HFCPCI_B2_SSL, tx_slot);
Write_hfc(hc, HFCPCI_B2_RSL, rx_slot);
} else {
hc->hw.conn &= 0xf8;
hc->hw.conn |= 0x01;
printk(KERN_DEBUG "%s: Write_hfc: B1_SSL 0x%x\n",
__func__, tx_slot);
printk(KERN_DEBUG "%s: Write_hfc: B1_RSL 0x%x\n",
__func__, rx_slot);
Write_hfc(hc, HFCPCI_B1_SSL, tx_slot);
Write_hfc(hc, HFCPCI_B1_RSL, rx_slot);
}
}
Write_hfc(hc, HFCPCI_SCTRL_E, hc->hw.sctrl_e);
Write_hfc(hc, HFCPCI_INT_M1, hc->hw.int_m1);
Write_hfc(hc, HFCPCI_FIFO_EN, hc->hw.fifo_en);
Write_hfc(hc, HFCPCI_SCTRL, hc->hw.sctrl);
Write_hfc(hc, HFCPCI_SCTRL_R, hc->hw.sctrl_r);
Write_hfc(hc, HFCPCI_CTMT, hc->hw.ctmt);
Write_hfc(hc, HFCPCI_CONNECT, hc->hw.conn);
#ifdef REVERSE_BITORDER
Write_hfc(hc, HFCPCI_CIRM, hc->hw.cirm);
#endif
return 0;
}
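/*
 * put a B-channel into receive-only test mode (transparent or HDLC)
 */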
static int
set_hfcpci_rxtest(struct bchannel *bch, int protocol, int chan)
{
struct hfc_pci *hc = bch->hw;
if (bch->debug & DEBUG_HW_BCHANNEL)
printk(KERN_DEBUG
"HFCPCI bchannel test rx protocol %x-->%x ch %x-->%x\n",
bch->state, protocol, bch->nr, chan);
if (bch->nr != chan) {
printk(KERN_DEBUG
"HFCPCI rxtest wrong channel parameter %x/%x\n",
bch->nr, chan);
return -EINVAL;
}
switch (protocol) {
case (ISDN_P_B_RAW):
bch->state = protocol;
hfcpci_clear_fifo_rx(hc, (chan & 2) ? 1 : 0);
if (chan & 2) {
hc->hw.sctrl_r |= SCTRL_B2_ENA;
hc->hw.fifo_en |= HFCPCI_FIFOEN_B2RX;
if (!tics)
hc->hw.int_m1 |= HFCPCI_INTS_B2REC;
hc->hw.ctmt |= 2;
hc->hw.conn &= ~0x18;
#ifdef REVERSE_BITORDER
hc->hw.cirm |= 0x80;
#endif
} else {
hc->hw.sctrl_r |= SCTRL_B1_ENA;
hc->hw.fifo_en |= HFCPCI_FIFOEN_B1RX;
if (!tics)
hc->hw.int_m1 |= HFCPCI_INTS_B1REC;
hc->hw.ctmt |= 1;
hc->hw.conn &= ~0x03;
#ifdef REVERSE_BITORDER
hc->hw.cirm |= 0x40;
#endif
}
break;
case (ISDN_P_B_HDLC):
bch->state = protocol;
hfcpci_clear_fifo_rx(hc, (chan & 2) ? 1 : 0);
if (chan & 2) {
hc->hw.sctrl_r |= SCTRL_B2_ENA;
hc->hw.last_bfifo_cnt[1] = 0;
hc->hw.fifo_en |= HFCPCI_FIFOEN_B2RX;
hc->hw.int_m1 |= HFCPCI_INTS_B2REC;
hc->hw.ctmt &= ~2;
hc->hw.conn &= ~0x18;
} else {
hc->hw.sctrl_r |= SCTRL_B1_ENA;
hc->hw.last_bfifo_cnt[0] = 0;
hc->hw.fifo_en |= HFCPCI_FIFOEN_B1RX;
hc->hw.int_m1 |= HFCPCI_INTS_B1REC;
hc->hw.ctmt &= ~1;
hc->hw.conn &= ~0x03;
}
break;
default:
printk(KERN_DEBUG "prot not known %x\n", protocol);
return -ENOPROTOOPT;
}
Write_hfc(hc, HFCPCI_INT_M1, hc->hw.int_m1);
Write_hfc(hc, HFCPCI_FIFO_EN, hc->hw.fifo_en);
Write_hfc(hc, HFCPCI_SCTRL_R, hc->hw.sctrl_r);
Write_hfc(hc, HFCPCI_CTMT, hc->hw.ctmt);
Write_hfc(hc, HFCPCI_CONNECT, hc->hw.conn);
#ifdef REVERSE_BITORDER
Write_hfc(hc, HFCPCI_CIRM, hc->hw.cirm);
#endif
return 0;
}
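/*
 * disable a B-channel in the hardware and clear its mISDN state
 */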
static void
deactivate_bchannel(struct bchannel *bch)
{
struct hfc_pci *hc = bch->hw;
u_long flags;
spin_lock_irqsave(&hc->lock, flags);
mISDN_clear_bchannel(bch);
mode_hfcpci(bch, bch->nr, ISDN_P_NONE);
spin_unlock_irqrestore(&hc->lock, flags);
}
/*
* Layer 1 B-channel hardware access
*/
static int
channel_bctrl(struct bchannel *bch, struct mISDN_ctrl_req *cq)
{
return mISDN_ctrl_bchannel(bch, cq);
}
static int
hfc_bctrl(struct mISDNchannel *ch, u_int cmd, void *arg)
{
struct bchannel *bch = container_of(ch, struct bchannel, ch);
struct hfc_pci *hc = bch->hw;
int ret = -EINVAL;
u_long flags;
if (bch->debug & DEBUG_HW)
printk(KERN_DEBUG "%s: cmd:%x %p\n", __func__, cmd, arg);
switch (cmd) {
case HW_TESTRX_RAW:
spin_lock_irqsave(&hc->lock, flags);
ret = set_hfcpci_rxtest(bch, ISDN_P_B_RAW, (int)(long)arg);
spin_unlock_irqrestore(&hc->lock, flags);
break;
case HW_TESTRX_HDLC:
spin_lock_irqsave(&hc->lock, flags);
ret = set_hfcpci_rxtest(bch, ISDN_P_B_HDLC, (int)(long)arg);
spin_unlock_irqrestore(&hc->lock, flags);
break;
case HW_TESTRX_OFF:
spin_lock_irqsave(&hc->lock, flags);
mode_hfcpci(bch, bch->nr, ISDN_P_NONE);
spin_unlock_irqrestore(&hc->lock, flags);
ret = 0;
break;
case CLOSE_CHANNEL:
test_and_clear_bit(FLG_OPEN, &bch->Flags);
deactivate_bchannel(bch);
ch->protocol = ISDN_P_NONE;
ch->peer = NULL;
module_put(THIS_MODULE);
ret = 0;
break;
case CONTROL_CHANNEL:
ret = channel_bctrl(bch, arg);
break;
default:
printk(KERN_WARNING "%s: unknown prim(%x)\n",
__func__, cmd);
}
return ret;
}
/*
* Layer2 -> Layer 1 Dchannel data
*/
static int
hfcpci_l2l1D(struct mISDNchannel *ch, struct sk_buff *skb)
{
struct mISDNdevice *dev = container_of(ch, struct mISDNdevice, D);
struct dchannel *dch = container_of(dev, struct dchannel, dev);
struct hfc_pci *hc = dch->hw;
int ret = -EINVAL;
struct mISDNhead *hh = mISDN_HEAD_P(skb);
unsigned int id;
u_long flags;
switch (hh->prim) {
case PH_DATA_REQ:
spin_lock_irqsave(&hc->lock, flags);
ret = dchannel_senddata(dch, skb);
if (ret > 0) { /* direct TX */
id = hh->id; /* skb can be freed */
hfcpci_fill_dfifo(dch->hw);
ret = 0;
spin_unlock_irqrestore(&hc->lock, flags);
queue_ch_frame(ch, PH_DATA_CNF, id, NULL);
} else
spin_unlock_irqrestore(&hc->lock, flags);
return ret;
case PH_ACTIVATE_REQ:
spin_lock_irqsave(&hc->lock, flags);
if (hc->hw.protocol == ISDN_P_NT_S0) {
ret = 0;
if (test_bit(HFC_CFG_MASTER, &hc->cfg))
hc->hw.mst_m |= HFCPCI_MASTER;
Write_hfc(hc, HFCPCI_MST_MODE, hc->hw.mst_m);
if (test_bit(FLG_ACTIVE, &dch->Flags)) {
spin_unlock_irqrestore(&hc->lock, flags);
_queue_data(&dch->dev.D, PH_ACTIVATE_IND,
MISDN_ID_ANY, 0, NULL, GFP_ATOMIC);
break;
}
test_and_set_bit(FLG_L2_ACTIVATED, &dch->Flags);
Write_hfc(hc, HFCPCI_STATES, HFCPCI_ACTIVATE |
HFCPCI_DO_ACTION | 1);
} else
ret = l1_event(dch->l1, hh->prim);
spin_unlock_irqrestore(&hc->lock, flags);
break;
case PH_DEACTIVATE_REQ:
test_and_clear_bit(FLG_L2_ACTIVATED, &dch->Flags);
spin_lock_irqsave(&hc->lock, flags);
if (hc->hw.protocol == ISDN_P_NT_S0) {
struct sk_buff_head free_queue;
__skb_queue_head_init(&free_queue);
/* prepare deactivation */
Write_hfc(hc, HFCPCI_STATES, 0x40);
skb_queue_splice_init(&dch->squeue, &free_queue);
if (dch->tx_skb) {
__skb_queue_tail(&free_queue, dch->tx_skb);
dch->tx_skb = NULL;
}
dch->tx_idx = 0;
if (dch->rx_skb) {
__skb_queue_tail(&free_queue, dch->rx_skb);
dch->rx_skb = NULL;
}
test_and_clear_bit(FLG_TX_BUSY, &dch->Flags);
if (test_and_clear_bit(FLG_BUSY_TIMER, &dch->Flags))
del_timer(&dch->timer);
#ifdef FIXME
if (test_and_clear_bit(FLG_L1_BUSY, &dch->Flags))
dchannel_sched_event(&hc->dch, D_CLEARBUSY);
#endif
hc->hw.mst_m &= ~HFCPCI_MASTER;
Write_hfc(hc, HFCPCI_MST_MODE, hc->hw.mst_m);
ret = 0;
spin_unlock_irqrestore(&hc->lock, flags);
__skb_queue_purge(&free_queue);
} else {
ret = l1_event(dch->l1, hh->prim);
spin_unlock_irqrestore(&hc->lock, flags);
}
break;
}
if (!ret)
dev_kfree_skb(skb);
return ret;
}
/*
* Layer2 -> Layer 1 Bchannel data
*/
static int
hfcpci_l2l1B(struct mISDNchannel *ch, struct sk_buff *skb)
{
struct bchannel *bch = container_of(ch, struct bchannel, ch);
struct hfc_pci *hc = bch->hw;
int ret = -EINVAL;
struct mISDNhead *hh = mISDN_HEAD_P(skb);
unsigned long flags;
switch (hh->prim) {
case PH_DATA_REQ:
spin_lock_irqsave(&hc->lock, flags);
ret = bchannel_senddata(bch, skb);
if (ret > 0) { /* direct TX */
hfcpci_fill_fifo(bch);
ret = 0;
}
spin_unlock_irqrestore(&hc->lock, flags);
return ret;
case PH_ACTIVATE_REQ:
spin_lock_irqsave(&hc->lock, flags);
if (!test_and_set_bit(FLG_ACTIVE, &bch->Flags))
ret = mode_hfcpci(bch, bch->nr, ch->protocol);
else
ret = 0;
spin_unlock_irqrestore(&hc->lock, flags);
if (!ret)
_queue_data(ch, PH_ACTIVATE_IND, MISDN_ID_ANY, 0,
NULL, GFP_KERNEL);
break;
case PH_DEACTIVATE_REQ:
deactivate_bchannel(bch);
_queue_data(ch, PH_DEACTIVATE_IND, MISDN_ID_ANY, 0,
NULL, GFP_KERNEL);
ret = 0;
break;
}
if (!ret)
dev_kfree_skb(skb);
return ret;
}
/*
* called for card init message
*/
static void
inithfcpci(struct hfc_pci *hc)
{
printk(KERN_DEBUG "inithfcpci: entered\n");
timer_setup(&hc->dch.timer, hfcpci_dbusy_timer, 0);
hc->chanlimit = 2;
mode_hfcpci(&hc->bch[0], 1, -1);
mode_hfcpci(&hc->bch[1], 2, -1);
}
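/*
 * Bring the controller up and verify that interrupts arrive: after
 * reset the timer IRQ is left enabled, the code sleeps roughly 80 ms
 * and then checks hc->irqcnt. On failure the chip is reset and the
 * sequence retried, up to three attempts, before returning -EIO.
 */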
static int
init_card(struct hfc_pci *hc)
{
int cnt = 3;
u_long flags;
printk(KERN_DEBUG "init_card: entered\n");
spin_lock_irqsave(&hc->lock, flags);
disable_hwirq(hc);
spin_unlock_irqrestore(&hc->lock, flags);
if (request_irq(hc->irq, hfcpci_int, IRQF_SHARED, "HFC PCI", hc)) {
printk(KERN_WARNING
"mISDN: couldn't get interrupt %d\n", hc->irq);
return -EIO;
}
spin_lock_irqsave(&hc->lock, flags);
reset_hfcpci(hc);
while (cnt) {
inithfcpci(hc);
/*
* Finally enable IRQ output
* this is only allowed, if an IRQ routine is already
* established for this HFC, so don't do that earlier
*/
enable_hwirq(hc);
spin_unlock_irqrestore(&hc->lock, flags);
/* Timeout 80ms */
set_current_state(TASK_UNINTERRUPTIBLE);
schedule_timeout((80 * HZ) / 1000);
printk(KERN_INFO "HFC PCI: IRQ %d count %d\n",
hc->irq, hc->irqcnt);
/* now switch timer interrupt off */
spin_lock_irqsave(&hc->lock, flags);
hc->hw.int_m1 &= ~HFCPCI_INTS_TIMER;
Write_hfc(hc, HFCPCI_INT_M1, hc->hw.int_m1);
/* reinit mode reg */
Write_hfc(hc, HFCPCI_MST_MODE, hc->hw.mst_m);
if (!hc->irqcnt) {
printk(KERN_WARNING
"HFC PCI: IRQ(%d) getting no interrupts "
"during init %d\n", hc->irq, 4 - cnt);
if (cnt == 1)
break;
else {
reset_hfcpci(hc);
cnt--;
}
} else {
spin_unlock_irqrestore(&hc->lock, flags);
hc->initdone = 1;
return 0;
}
}
disable_hwirq(hc);
spin_unlock_irqrestore(&hc->lock, flags);
free_irq(hc->irq, hc);
return -EIO;
}
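/*
 * MISDN_CTRL_LOOP routes the selected B-channel(s) onto PCM slots
 * (SSL/RSL registers) and enables the IOM loop via TRM bit 7;
 * channel 0 restores the default CONNECT value and disables the
 * loop. MISDN_CTRL_CONNECT cross-connects B1 and B2 through the
 * PCM slots, MISDN_CTRL_DISCONNECT undoes that again.
 */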
static int
channel_ctrl(struct hfc_pci *hc, struct mISDN_ctrl_req *cq)
{
int ret = 0;
u_char slot;
switch (cq->op) {
case MISDN_CTRL_GETOP:
cq->op = MISDN_CTRL_LOOP | MISDN_CTRL_CONNECT |
MISDN_CTRL_DISCONNECT | MISDN_CTRL_L1_TIMER3;
break;
case MISDN_CTRL_LOOP:
/* channel 0 disables the loop */
if (cq->channel < 0 || cq->channel > 2) {
ret = -EINVAL;
break;
}
if (cq->channel & 1) {
if (test_bit(HFC_CFG_SW_DD_DU, &hc->cfg))
slot = 0xC0;
else
slot = 0x80;
printk(KERN_DEBUG "%s: Write_hfc: B1_SSL/RSL 0x%x\n",
__func__, slot);
Write_hfc(hc, HFCPCI_B1_SSL, slot);
Write_hfc(hc, HFCPCI_B1_RSL, slot);
hc->hw.conn = (hc->hw.conn & ~7) | 6;
Write_hfc(hc, HFCPCI_CONNECT, hc->hw.conn);
}
if (cq->channel & 2) {
if (test_bit(HFC_CFG_SW_DD_DU, &hc->cfg))
slot = 0xC1;
else
slot = 0x81;
printk(KERN_DEBUG "%s: Write_hfc: B2_SSL/RSL 0x%x\n",
__func__, slot);
Write_hfc(hc, HFCPCI_B2_SSL, slot);
Write_hfc(hc, HFCPCI_B2_RSL, slot);
hc->hw.conn = (hc->hw.conn & ~0x38) | 0x30;
Write_hfc(hc, HFCPCI_CONNECT, hc->hw.conn);
}
if (cq->channel & 3)
hc->hw.trm |= 0x80; /* enable IOM-loop */
else {
hc->hw.conn = (hc->hw.conn & ~0x3f) | 0x09;
Write_hfc(hc, HFCPCI_CONNECT, hc->hw.conn);
hc->hw.trm &= 0x7f; /* disable IOM-loop */
}
Write_hfc(hc, HFCPCI_TRM, hc->hw.trm);
break;
case MISDN_CTRL_CONNECT:
if (cq->channel == cq->p1) {
ret = -EINVAL;
break;
}
if (cq->channel < 1 || cq->channel > 2 ||
cq->p1 < 1 || cq->p1 > 2) {
ret = -EINVAL;
break;
}
if (test_bit(HFC_CFG_SW_DD_DU, &hc->cfg))
slot = 0xC0;
else
slot = 0x80;
printk(KERN_DEBUG "%s: Write_hfc: B1_SSL/RSL 0x%x\n",
__func__, slot);
Write_hfc(hc, HFCPCI_B1_SSL, slot);
Write_hfc(hc, HFCPCI_B2_RSL, slot);
if (test_bit(HFC_CFG_SW_DD_DU, &hc->cfg))
slot = 0xC1;
else
slot = 0x81;
printk(KERN_DEBUG "%s: Write_hfc: B2_SSL/RSL 0x%x\n",
__func__, slot);
Write_hfc(hc, HFCPCI_B2_SSL, slot);
Write_hfc(hc, HFCPCI_B1_RSL, slot);
hc->hw.conn = (hc->hw.conn & ~0x3f) | 0x36;
Write_hfc(hc, HFCPCI_CONNECT, hc->hw.conn);
hc->hw.trm |= 0x80;
Write_hfc(hc, HFCPCI_TRM, hc->hw.trm);
break;
case MISDN_CTRL_DISCONNECT:
hc->hw.conn = (hc->hw.conn & ~0x3f) | 0x09;
Write_hfc(hc, HFCPCI_CONNECT, hc->hw.conn);
hc->hw.trm &= 0x7f; /* disable IOM-loop */
break;
case MISDN_CTRL_L1_TIMER3:
ret = l1_event(hc->dch.l1, HW_TIMER3_VALUE | (cq->p1 & 0xff));
break;
default:
printk(KERN_WARNING "%s: unknown Op %x\n",
__func__, cq->op);
ret = -EINVAL;
break;
}
return ret;
}
static int
open_dchannel(struct hfc_pci *hc, struct mISDNchannel *ch,
struct channel_req *rq)
{
int err = 0;
if (debug & DEBUG_HW_OPEN)
printk(KERN_DEBUG "%s: dev(%d) open from %p\n", __func__,
hc->dch.dev.id, __builtin_return_address(0));
if (rq->protocol == ISDN_P_NONE)
return -EINVAL;
if (rq->adr.channel == 1) {
/* TODO: E-Channel */
return -EINVAL;
}
if (!hc->initdone) {
if (rq->protocol == ISDN_P_TE_S0) {
err = create_l1(&hc->dch, hfc_l1callback);
if (err)
return err;
}
hc->hw.protocol = rq->protocol;
ch->protocol = rq->protocol;
err = init_card(hc);
if (err)
return err;
} else {
if (rq->protocol != ch->protocol) {
if (hc->hw.protocol == ISDN_P_TE_S0)
l1_event(hc->dch.l1, CLOSE_CHANNEL);
if (rq->protocol == ISDN_P_TE_S0) {
err = create_l1(&hc->dch, hfc_l1callback);
if (err)
return err;
}
hc->hw.protocol = rq->protocol;
ch->protocol = rq->protocol;
hfcpci_setmode(hc);
}
}
if (((ch->protocol == ISDN_P_NT_S0) && (hc->dch.state == 3)) ||
((ch->protocol == ISDN_P_TE_S0) && (hc->dch.state == 7))) {
_queue_data(ch, PH_ACTIVATE_IND, MISDN_ID_ANY,
0, NULL, GFP_KERNEL);
}
rq->ch = ch;
if (!try_module_get(THIS_MODULE))
printk(KERN_WARNING "%s:cannot get module\n", __func__);
return 0;
}
static int
open_bchannel(struct hfc_pci *hc, struct channel_req *rq)
{
struct bchannel *bch;
if (rq->adr.channel == 0 || rq->adr.channel > 2)
return -EINVAL;
if (rq->protocol == ISDN_P_NONE)
return -EINVAL;
bch = &hc->bch[rq->adr.channel - 1];
if (test_and_set_bit(FLG_OPEN, &bch->Flags))
return -EBUSY; /* b-channel can be only open once */
bch->ch.protocol = rq->protocol;
rq->ch = &bch->ch; /* TODO: E-channel */
if (!try_module_get(THIS_MODULE))
printk(KERN_WARNING "%s:cannot get module\n", __func__);
return 0;
}
/*
* device control function
*/
static int
hfc_dctrl(struct mISDNchannel *ch, u_int cmd, void *arg)
{
struct mISDNdevice *dev = container_of(ch, struct mISDNdevice, D);
struct dchannel *dch = container_of(dev, struct dchannel, dev);
struct hfc_pci *hc = dch->hw;
struct channel_req *rq;
int err = 0;
if (dch->debug & DEBUG_HW)
printk(KERN_DEBUG "%s: cmd:%x %p\n",
__func__, cmd, arg);
switch (cmd) {
case OPEN_CHANNEL:
rq = arg;
if ((rq->protocol == ISDN_P_TE_S0) ||
(rq->protocol == ISDN_P_NT_S0))
err = open_dchannel(hc, ch, rq);
else
err = open_bchannel(hc, rq);
break;
case CLOSE_CHANNEL:
if (debug & DEBUG_HW_OPEN)
printk(KERN_DEBUG "%s: dev(%d) close from %p\n",
__func__, hc->dch.dev.id,
__builtin_return_address(0));
module_put(THIS_MODULE);
break;
case CONTROL_CHANNEL:
err = channel_ctrl(hc, arg);
break;
default:
if (dch->debug & DEBUG_HW)
printk(KERN_DEBUG "%s: unknown command %x\n",
__func__, cmd);
return -EINVAL;
}
return err;
}
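/*
 * Hardware setup: the 32 kB FIFO block must lie below 4 GB on a
 * 32 kB boundary (hence the 0xFFFF8000 DMA mask), its bus address
 * is written to PCI config register 0x80 for the chip's busmaster
 * engine, and 256 bytes of register space from BAR 1 are ioremapped.
 */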
static int
setup_hw(struct hfc_pci *hc)
{
void *buffer;
printk(KERN_INFO "mISDN: HFC-PCI driver %s\n", hfcpci_revision);
hc->hw.cirm = 0;
hc->dch.state = 0;
pci_set_master(hc->pdev);
if (!hc->irq) {
printk(KERN_WARNING "HFC-PCI: No IRQ for PCI card found\n");
return -EINVAL;
}
hc->hw.pci_io =
(char __iomem *)(unsigned long)hc->pdev->resource[1].start;
if (!hc->hw.pci_io) {
printk(KERN_WARNING "HFC-PCI: No IO-Mem for PCI card found\n");
return -ENOMEM;
}
/* Allocate memory for FIFOS */
/* the memory needs to be on a 32k boundary within the first 4G */
if (dma_set_mask(&hc->pdev->dev, 0xFFFF8000)) {
printk(KERN_WARNING
"HFC-PCI: No usable DMA configuration!\n");
return -EIO;
}
buffer = dma_alloc_coherent(&hc->pdev->dev, 0x8000, &hc->hw.dmahandle,
GFP_KERNEL);
/* We silently assume the address is okay if nonzero */
if (!buffer) {
printk(KERN_WARNING
"HFC-PCI: Error allocating memory for FIFO!\n");
return -ENOMEM;
}
hc->hw.fifos = buffer;
pci_write_config_dword(hc->pdev, 0x80, hc->hw.dmahandle);
hc->hw.pci_io = ioremap((ulong) hc->hw.pci_io, 256);
if (unlikely(!hc->hw.pci_io)) {
printk(KERN_WARNING
"HFC-PCI: Error in ioremap for PCI!\n");
dma_free_coherent(&hc->pdev->dev, 0x8000, hc->hw.fifos,
hc->hw.dmahandle);
return -ENOMEM;
}
printk(KERN_INFO
"HFC-PCI: defined at mem %#lx fifo %p(%pad) IRQ %d HZ %d\n",
(u_long) hc->hw.pci_io, hc->hw.fifos,
&hc->hw.dmahandle, hc->irq, HZ);
/* enable memory mapped ports, disable busmaster */
pci_write_config_word(hc->pdev, PCI_COMMAND, PCI_ENA_MEMIO);
hc->hw.int_m2 = 0;
disable_hwirq(hc);
hc->hw.int_m1 = 0;
Write_hfc(hc, HFCPCI_INT_M1, hc->hw.int_m1);
/* At this point the needed PCI config is done */
/* fifos are still not enabled */
timer_setup(&hc->hw.timer, hfcpci_Timer, 0);
/* default PCM master */
test_and_set_bit(HFC_CFG_MASTER, &hc->cfg);
return 0;
}
static void
release_card(struct hfc_pci *hc) {
u_long flags;
spin_lock_irqsave(&hc->lock, flags);
hc->hw.int_m2 = 0; /* interrupt output off ! */
disable_hwirq(hc);
mode_hfcpci(&hc->bch[0], 1, ISDN_P_NONE);
mode_hfcpci(&hc->bch[1], 2, ISDN_P_NONE);
if (hc->dch.timer.function != NULL) {
del_timer(&hc->dch.timer);
hc->dch.timer.function = NULL;
}
spin_unlock_irqrestore(&hc->lock, flags);
if (hc->hw.protocol == ISDN_P_TE_S0)
l1_event(hc->dch.l1, CLOSE_CHANNEL);
if (hc->initdone)
free_irq(hc->irq, hc);
release_io_hfcpci(hc); /* must release after free_irq! */
mISDN_unregister_device(&hc->dch.dev);
mISDN_freebchannel(&hc->bch[1]);
mISDN_freebchannel(&hc->bch[0]);
mISDN_freedchannel(&hc->dch);
pci_set_drvdata(hc->pdev, NULL);
kfree(hc);
}
static int
setup_card(struct hfc_pci *card)
{
int err = -EINVAL;
u_int i;
char name[MISDN_MAX_IDLEN];
card->dch.debug = debug;
spin_lock_init(&card->lock);
mISDN_initdchannel(&card->dch, MAX_DFRAME_LEN_L1, ph_state);
card->dch.hw = card;
card->dch.dev.Dprotocols = (1 << ISDN_P_TE_S0) | (1 << ISDN_P_NT_S0);
card->dch.dev.Bprotocols = (1 << (ISDN_P_B_RAW & ISDN_P_B_MASK)) |
(1 << (ISDN_P_B_HDLC & ISDN_P_B_MASK));
card->dch.dev.D.send = hfcpci_l2l1D;
card->dch.dev.D.ctrl = hfc_dctrl;
card->dch.dev.nrbchan = 2;
for (i = 0; i < 2; i++) {
card->bch[i].nr = i + 1;
set_channelmap(i + 1, card->dch.dev.channelmap);
card->bch[i].debug = debug;
mISDN_initbchannel(&card->bch[i], MAX_DATA_MEM, poll >> 1);
card->bch[i].hw = card;
card->bch[i].ch.send = hfcpci_l2l1B;
card->bch[i].ch.ctrl = hfc_bctrl;
card->bch[i].ch.nr = i + 1;
list_add(&card->bch[i].ch.list, &card->dch.dev.bchannels);
}
err = setup_hw(card);
if (err)
goto error;
snprintf(name, MISDN_MAX_IDLEN - 1, "hfc-pci.%d", HFC_cnt + 1);
err = mISDN_register_device(&card->dch.dev, &card->pdev->dev, name);
if (err)
goto error;
HFC_cnt++;
printk(KERN_INFO "HFC %d cards installed\n", HFC_cnt);
return 0;
error:
mISDN_freebchannel(&card->bch[1]);
mISDN_freebchannel(&card->bch[0]);
mISDN_freedchannel(&card->dch);
kfree(card);
return err;
}
/* private data in the PCI devices list */
struct _hfc_map {
u_int subtype;
u_int flag;
char *name;
};
static const struct _hfc_map hfc_map[] =
{
{HFC_CCD_2BD0, 0, "CCD/Billion/Asuscom 2BD0"},
{HFC_CCD_B000, 0, "Billion B000"},
{HFC_CCD_B006, 0, "Billion B006"},
{HFC_CCD_B007, 0, "Billion B007"},
{HFC_CCD_B008, 0, "Billion B008"},
{HFC_CCD_B009, 0, "Billion B009"},
{HFC_CCD_B00A, 0, "Billion B00A"},
{HFC_CCD_B00B, 0, "Billion B00B"},
{HFC_CCD_B00C, 0, "Billion B00C"},
{HFC_CCD_B100, 0, "Seyeon B100"},
{HFC_CCD_B700, 0, "Primux II S0 B700"},
{HFC_CCD_B701, 0, "Primux II S0 NT B701"},
{HFC_ABOCOM_2BD1, 0, "Abocom/Magitek 2BD1"},
{HFC_ASUS_0675, 0, "Asuscom/Askey 675"},
{HFC_BERKOM_TCONCEPT, 0, "German telekom T-Concept"},
{HFC_BERKOM_A1T, 0, "German telekom A1T"},
{HFC_ANIGMA_MC145575, 0, "Motorola MC145575"},
{HFC_ZOLTRIX_2BD0, 0, "Zoltrix 2BD0"},
{HFC_DIGI_DF_M_IOM2_E, 0,
"Digi International DataFire Micro V IOM2 (Europe)"},
{HFC_DIGI_DF_M_E, 0,
"Digi International DataFire Micro V (Europe)"},
{HFC_DIGI_DF_M_IOM2_A, 0,
"Digi International DataFire Micro V IOM2 (North America)"},
{HFC_DIGI_DF_M_A, 0,
"Digi International DataFire Micro V (North America)"},
{HFC_SITECOM_DC105V2, 0, "Sitecom Connectivity DC-105 ISDN TA"},
{},
};
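/*
 * The driver_data of each PCI id entry points at the matching
 * hfc_map[] element, so hfc_probe() can recover the subtype and a
 * human-readable card name from the id table match.
 */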
static const struct pci_device_id hfc_ids[] =
{
{ PCI_VDEVICE(CCD, PCI_DEVICE_ID_CCD_2BD0),
(unsigned long) &hfc_map[0] },
{ PCI_VDEVICE(CCD, PCI_DEVICE_ID_CCD_B000),
(unsigned long) &hfc_map[1] },
{ PCI_VDEVICE(CCD, PCI_DEVICE_ID_CCD_B006),
(unsigned long) &hfc_map[2] },
{ PCI_VDEVICE(CCD, PCI_DEVICE_ID_CCD_B007),
(unsigned long) &hfc_map[3] },
{ PCI_VDEVICE(CCD, PCI_DEVICE_ID_CCD_B008),
(unsigned long) &hfc_map[4] },
{ PCI_VDEVICE(CCD, PCI_DEVICE_ID_CCD_B009),
(unsigned long) &hfc_map[5] },
{ PCI_VDEVICE(CCD, PCI_DEVICE_ID_CCD_B00A),
(unsigned long) &hfc_map[6] },
{ PCI_VDEVICE(CCD, PCI_DEVICE_ID_CCD_B00B),
(unsigned long) &hfc_map[7] },
{ PCI_VDEVICE(CCD, PCI_DEVICE_ID_CCD_B00C),
(unsigned long) &hfc_map[8] },
{ PCI_VDEVICE(CCD, PCI_DEVICE_ID_CCD_B100),
(unsigned long) &hfc_map[9] },
{ PCI_VDEVICE(CCD, PCI_DEVICE_ID_CCD_B700),
(unsigned long) &hfc_map[10] },
{ PCI_VDEVICE(CCD, PCI_DEVICE_ID_CCD_B701),
(unsigned long) &hfc_map[11] },
{ PCI_VDEVICE(ABOCOM, PCI_DEVICE_ID_ABOCOM_2BD1),
(unsigned long) &hfc_map[12] },
{ PCI_VDEVICE(ASUSTEK, PCI_DEVICE_ID_ASUSTEK_0675),
(unsigned long) &hfc_map[13] },
{ PCI_VDEVICE(BERKOM, PCI_DEVICE_ID_BERKOM_T_CONCEPT),
(unsigned long) &hfc_map[14] },
{ PCI_VDEVICE(BERKOM, PCI_DEVICE_ID_BERKOM_A1T),
(unsigned long) &hfc_map[15] },
{ PCI_VDEVICE(ANIGMA, PCI_DEVICE_ID_ANIGMA_MC145575),
(unsigned long) &hfc_map[16] },
{ PCI_VDEVICE(ZOLTRIX, PCI_DEVICE_ID_ZOLTRIX_2BD0),
(unsigned long) &hfc_map[17] },
{ PCI_VDEVICE(DIGI, PCI_DEVICE_ID_DIGI_DF_M_IOM2_E),
(unsigned long) &hfc_map[18] },
{ PCI_VDEVICE(DIGI, PCI_DEVICE_ID_DIGI_DF_M_E),
(unsigned long) &hfc_map[19] },
{ PCI_VDEVICE(DIGI, PCI_DEVICE_ID_DIGI_DF_M_IOM2_A),
(unsigned long) &hfc_map[20] },
{ PCI_VDEVICE(DIGI, PCI_DEVICE_ID_DIGI_DF_M_A),
(unsigned long) &hfc_map[21] },
{ PCI_VDEVICE(SITECOM, PCI_DEVICE_ID_SITECOM_DC105V2),
(unsigned long) &hfc_map[22] },
{},
};
static int
hfc_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
int err = -ENOMEM;
struct hfc_pci *card;
struct _hfc_map *m = (struct _hfc_map *)ent->driver_data;
card = kzalloc(sizeof(struct hfc_pci), GFP_KERNEL);
if (!card) {
printk(KERN_ERR "No kmem for HFC card\n");
return err;
}
card->pdev = pdev;
card->subtype = m->subtype;
err = pci_enable_device(pdev);
if (err) {
kfree(card);
return err;
}
printk(KERN_INFO "mISDN_hfcpci: found adapter %s at %s\n",
m->name, pci_name(pdev));
card->irq = pdev->irq;
pci_set_drvdata(pdev, card);
err = setup_card(card);
if (err)
pci_set_drvdata(pdev, NULL);
return err;
}
static void
hfc_remove_pci(struct pci_dev *pdev)
{
struct hfc_pci *card = pci_get_drvdata(pdev);
if (card)
release_card(card);
else
if (debug)
printk(KERN_DEBUG "%s: drvdata already removed\n",
__func__);
}
static struct pci_driver hfc_driver = {
.name = "hfcpci",
.probe = hfc_probe,
.remove = hfc_remove_pci,
.id_table = hfc_ids,
};
static int
_hfcpci_softirq(struct device *dev, void *unused)
{
struct hfc_pci *hc = dev_get_drvdata(dev);
struct bchannel *bch;
if (hc == NULL)
return 0;
if (hc->hw.int_m2 & HFCPCI_IRQ_ENABLE) {
spin_lock_irq(&hc->lock);
bch = Sel_BCS(hc, hc->hw.bswapped ? 2 : 1);
if (bch && bch->state == ISDN_P_B_RAW) { /* B1 rx&tx */
main_rec_hfcpci(bch);
tx_birq(bch);
}
bch = Sel_BCS(hc, hc->hw.bswapped ? 1 : 2);
if (bch && bch->state == ISDN_P_B_RAW) { /* B2 rx&tx */
main_rec_hfcpci(bch);
tx_birq(bch);
}
spin_unlock_irq(&hc->lock);
}
return 0;
}
static void
hfcpci_softirq(struct timer_list *unused)
{
WARN_ON_ONCE(driver_for_each_device(&hfc_driver.driver, NULL, NULL,
_hfcpci_softirq) != 0);
/* if next event would be in the past ... */
if ((s32)(hfc_jiffies + tics - jiffies) <= 0)
hfc_jiffies = jiffies + 1;
else
hfc_jiffies += tics;
hfc_tl.expires = hfc_jiffies;
add_timer(&hfc_tl);
}
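/*
 * With the default poll value the controller's own timer paces the
 * transparent B-channel data (tics stays 0). A non-default poll is
 * rounded to whole jiffies, e.g. poll=128 and HZ=250 give
 * tics = 128 * 250 / 8000 = 4 and poll = 4 * 8000 / 250 = 128;
 * the kernel timer hfc_tl then drives _hfcpci_softirq() instead.
 */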
static int __init
HFC_init(void)
{
int err;
if (!poll)
poll = HFCPCI_BTRANS_THRESHOLD;
if (poll != HFCPCI_BTRANS_THRESHOLD) {
tics = (poll * HZ) / 8000;
if (tics < 1)
tics = 1;
poll = (tics * 8000) / HZ;
if (poll > 256 || poll < 8) {
printk(KERN_ERR "%s: Wrong poll value %d not in range "
"of 8..256.\n", __func__, poll);
err = -EINVAL;
return err;
}
}
if (poll != HFCPCI_BTRANS_THRESHOLD) {
printk(KERN_INFO "%s: Using alternative poll value of %d\n",
__func__, poll);
timer_setup(&hfc_tl, hfcpci_softirq, 0);
hfc_tl.expires = jiffies + tics;
hfc_jiffies = hfc_tl.expires;
add_timer(&hfc_tl);
} else
tics = 0; /* indicate the use of controller's timer */
err = pci_register_driver(&hfc_driver);
if (err) {
if (timer_pending(&hfc_tl))
del_timer(&hfc_tl);
}
return err;
}
static void __exit
HFC_cleanup(void)
{
del_timer_sync(&hfc_tl);
pci_unregister_driver(&hfc_driver);
}
module_init(HFC_init);
module_exit(HFC_cleanup);
MODULE_DEVICE_TABLE(pci, hfc_ids);
| linux-master | drivers/isdn/hardware/mISDN/hfcpci.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* avm_fritz.c low level stuff for AVM FRITZ!CARD PCI ISDN cards
* Thanks to AVM, Berlin for the information
*
* Author Karsten Keil <[email protected]>
*
* Copyright 2009 by Karsten Keil <[email protected]>
*/
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/mISDNhw.h>
#include <linux/slab.h>
#include <asm/unaligned.h>
#include "ipac.h"
#define AVMFRITZ_REV "2.3"
static int AVM_cnt;
static int debug;
enum {
AVM_FRITZ_PCI,
AVM_FRITZ_PCIV2,
};
#define HDLC_FIFO 0x0
#define HDLC_STATUS 0x4
#define CHIP_WINDOW 0x10
#define CHIP_INDEX 0x4
#define AVM_HDLC_1 0x00
#define AVM_HDLC_2 0x01
#define AVM_ISAC_FIFO 0x02
#define AVM_ISAC_REG_LOW 0x04
#define AVM_ISAC_REG_HIGH 0x06
#define AVM_STATUS0_IRQ_ISAC 0x01
#define AVM_STATUS0_IRQ_HDLC 0x02
#define AVM_STATUS0_IRQ_TIMER 0x04
#define AVM_STATUS0_IRQ_MASK 0x07
#define AVM_STATUS0_RESET 0x01
#define AVM_STATUS0_DIS_TIMER 0x02
#define AVM_STATUS0_RES_TIMER 0x04
#define AVM_STATUS0_ENA_IRQ 0x08
#define AVM_STATUS0_TESTBIT 0x10
#define AVM_STATUS1_INT_SEL 0x0f
#define AVM_STATUS1_ENA_IOM 0x80
#define HDLC_MODE_ITF_FLG 0x01
#define HDLC_MODE_TRANS 0x02
#define HDLC_MODE_CCR_7 0x04
#define HDLC_MODE_CCR_16 0x08
#define HDLC_FIFO_SIZE_128 0x20
#define HDLC_MODE_TESTLOOP 0x80
#define HDLC_INT_XPR 0x80
#define HDLC_INT_XDU 0x40
#define HDLC_INT_RPR 0x20
#define HDLC_INT_MASK 0xE0
#define HDLC_STAT_RME 0x01
#define HDLC_STAT_RDO 0x10
#define HDLC_STAT_CRCVFRRAB 0x0E
#define HDLC_STAT_CRCVFR 0x06
#define HDLC_STAT_RML_MASK_V1 0x3f00
#define HDLC_STAT_RML_MASK_V2 0x7f00
#define HDLC_CMD_XRS 0x80
#define HDLC_CMD_XME 0x01
#define HDLC_CMD_RRS 0x20
#define HDLC_CMD_XML_MASK 0x3f00
#define HDLC_FIFO_SIZE_V1 32
#define HDLC_FIFO_SIZE_V2 128
/* Fritz PCI v2.0 */
#define AVM_HDLC_FIFO_1 0x10
#define AVM_HDLC_FIFO_2 0x18
#define AVM_HDLC_STATUS_1 0x14
#define AVM_HDLC_STATUS_2 0x1c
#define AVM_ISACX_INDEX 0x04
#define AVM_ISACX_DATA 0x08
/* data struct */
#define LOG_SIZE 63
struct hdlc_stat_reg {
#ifdef __BIG_ENDIAN
u8 fill;
u8 mode;
u8 xml;
u8 cmd;
#else
u8 cmd;
u8 xml;
u8 mode;
u8 fill;
#endif
} __attribute__((packed));
struct hdlc_hw {
union {
u32 ctrl;
struct hdlc_stat_reg sr;
} ctrl;
u32 stat;
};
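/*
 * The ctrl union lets cmd/xml/mode be updated as individual bytes
 * while write_ctrl() pushes the whole 32-bit word with a single
 * outl(); the __BIG_ENDIAN variant of hdlc_stat_reg keeps the byte
 * positions matching the hardware register layout.
 */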
struct fritzcard {
struct list_head list;
struct pci_dev *pdev;
char name[MISDN_MAX_IDLEN];
u8 type;
u8 ctrlreg;
u16 irq;
u32 irqcnt;
u32 addr;
spinlock_t lock; /* hw lock */
struct isac_hw isac;
struct hdlc_hw hdlc[2];
struct bchannel bch[2];
char log[LOG_SIZE + 1];
};
static LIST_HEAD(Cards);
static DEFINE_RWLOCK(card_lock); /* protect Cards */
static void
_set_debug(struct fritzcard *card)
{
card->isac.dch.debug = debug;
card->bch[0].debug = debug;
card->bch[1].debug = debug;
}
static int
set_debug(const char *val, const struct kernel_param *kp)
{
int ret;
struct fritzcard *card;
ret = param_set_uint(val, kp);
if (!ret) {
read_lock(&card_lock);
list_for_each_entry(card, &Cards, list)
_set_debug(card);
read_unlock(&card_lock);
}
return ret;
}
MODULE_AUTHOR("Karsten Keil");
MODULE_LICENSE("GPL v2");
MODULE_VERSION(AVMFRITZ_REV);
module_param_call(debug, set_debug, param_get_uint, &debug, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(debug, "avmfritz debug mask");
/* Interface functions */
static u8
ReadISAC_V1(void *p, u8 offset)
{
struct fritzcard *fc = p;
u8 idx = (offset > 0x2f) ? AVM_ISAC_REG_HIGH : AVM_ISAC_REG_LOW;
outb(idx, fc->addr + CHIP_INDEX);
return inb(fc->addr + CHIP_WINDOW + (offset & 0xf));
}
static void
WriteISAC_V1(void *p, u8 offset, u8 value)
{
struct fritzcard *fc = p;
u8 idx = (offset > 0x2f) ? AVM_ISAC_REG_HIGH : AVM_ISAC_REG_LOW;
outb(idx, fc->addr + CHIP_INDEX);
outb(value, fc->addr + CHIP_WINDOW + (offset & 0xf));
}
static void
ReadFiFoISAC_V1(void *p, u8 off, u8 *data, int size)
{
struct fritzcard *fc = p;
outb(AVM_ISAC_FIFO, fc->addr + CHIP_INDEX);
insb(fc->addr + CHIP_WINDOW, data, size);
}
static void
WriteFiFoISAC_V1(void *p, u8 off, u8 *data, int size)
{
struct fritzcard *fc = p;
outb(AVM_ISAC_FIFO, fc->addr + CHIP_INDEX);
outsb(fc->addr + CHIP_WINDOW, data, size);
}
static u8
ReadISAC_V2(void *p, u8 offset)
{
struct fritzcard *fc = p;
outl(offset, fc->addr + AVM_ISACX_INDEX);
return 0xff & inl(fc->addr + AVM_ISACX_DATA);
}
static void
WriteISAC_V2(void *p, u8 offset, u8 value)
{
struct fritzcard *fc = p;
outl(offset, fc->addr + AVM_ISACX_INDEX);
outl(value, fc->addr + AVM_ISACX_DATA);
}
static void
ReadFiFoISAC_V2(void *p, u8 off, u8 *data, int size)
{
struct fritzcard *fc = p;
int i;
outl(off, fc->addr + AVM_ISACX_INDEX);
for (i = 0; i < size; i++)
data[i] = 0xff & inl(fc->addr + AVM_ISACX_DATA);
}
static void
WriteFiFoISAC_V2(void *p, u8 off, u8 *data, int size)
{
struct fritzcard *fc = p;
int i;
outl(off, fc->addr + AVM_ISACX_INDEX);
for (i = 0; i < size; i++)
outl(data[i], fc->addr + AVM_ISACX_DATA);
}
static struct bchannel *
Sel_BCS(struct fritzcard *fc, u32 channel)
{
if (test_bit(FLG_ACTIVE, &fc->bch[0].Flags) &&
(fc->bch[0].nr & channel))
return &fc->bch[0];
else if (test_bit(FLG_ACTIVE, &fc->bch[1].Flags) &&
(fc->bch[1].nr & channel))
return &fc->bch[1];
else
return NULL;
}
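/*
 * Register access differs per card generation: the original
 * Fritz!Card PCI reaches the ISAC and the HDLC controller through an
 * index/window pair (CHIP_INDEX selects, CHIP_WINDOW transfers),
 * while the v2 card exposes per-channel HDLC status/FIFO registers
 * and an ISAC-X index/data pair at fixed offsets.
 */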
static inline void
__write_ctrl_pci(struct fritzcard *fc, struct hdlc_hw *hdlc, u32 channel) {
u32 idx = channel == 2 ? AVM_HDLC_2 : AVM_HDLC_1;
outl(idx, fc->addr + CHIP_INDEX);
outl(hdlc->ctrl.ctrl, fc->addr + CHIP_WINDOW + HDLC_STATUS);
}
static inline void
__write_ctrl_pciv2(struct fritzcard *fc, struct hdlc_hw *hdlc, u32 channel) {
outl(hdlc->ctrl.ctrl, fc->addr + (channel == 2 ? AVM_HDLC_STATUS_2 :
AVM_HDLC_STATUS_1));
}
static void
write_ctrl(struct bchannel *bch, int which) {
struct fritzcard *fc = bch->hw;
struct hdlc_hw *hdlc;
hdlc = &fc->hdlc[(bch->nr - 1) & 1];
pr_debug("%s: hdlc %c wr%x ctrl %x\n", fc->name, '@' + bch->nr,
which, hdlc->ctrl.ctrl);
switch (fc->type) {
case AVM_FRITZ_PCIV2:
__write_ctrl_pciv2(fc, hdlc, bch->nr);
break;
case AVM_FRITZ_PCI:
__write_ctrl_pci(fc, hdlc, bch->nr);
break;
}
}
static inline u32
__read_status_pci(u_long addr, u32 channel)
{
outl(channel == 2 ? AVM_HDLC_2 : AVM_HDLC_1, addr + CHIP_INDEX);
return inl(addr + CHIP_WINDOW + HDLC_STATUS);
}
static inline u32
__read_status_pciv2(u_long addr, u32 channel)
{
return inl(addr + (channel == 2 ? AVM_HDLC_STATUS_2 :
AVM_HDLC_STATUS_1));
}
static u32
read_status(struct fritzcard *fc, u32 channel)
{
switch (fc->type) {
case AVM_FRITZ_PCIV2:
return __read_status_pciv2(fc->addr, channel);
case AVM_FRITZ_PCI:
return __read_status_pci(fc->addr, channel);
}
/* dummy */
return 0;
}
static void
enable_hwirq(struct fritzcard *fc)
{
fc->ctrlreg |= AVM_STATUS0_ENA_IRQ;
outb(fc->ctrlreg, fc->addr + 2);
}
static void
disable_hwirq(struct fritzcard *fc)
{
fc->ctrlreg &= ~AVM_STATUS0_ENA_IRQ;
outb(fc->ctrlreg, fc->addr + 2);
}
static int
modehdlc(struct bchannel *bch, int protocol)
{
struct fritzcard *fc = bch->hw;
struct hdlc_hw *hdlc;
u8 mode;
hdlc = &fc->hdlc[(bch->nr - 1) & 1];
pr_debug("%s: hdlc %c protocol %x-->%x ch %d\n", fc->name,
'@' + bch->nr, bch->state, protocol, bch->nr);
hdlc->ctrl.ctrl = 0;
mode = (fc->type == AVM_FRITZ_PCIV2) ? HDLC_FIFO_SIZE_128 : 0;
switch (protocol) {
case -1: /* used for init */
bch->state = -1;
fallthrough;
case ISDN_P_NONE:
if (bch->state == ISDN_P_NONE)
break;
hdlc->ctrl.sr.cmd = HDLC_CMD_XRS | HDLC_CMD_RRS;
hdlc->ctrl.sr.mode = mode | HDLC_MODE_TRANS;
write_ctrl(bch, 5);
bch->state = ISDN_P_NONE;
test_and_clear_bit(FLG_HDLC, &bch->Flags);
test_and_clear_bit(FLG_TRANSPARENT, &bch->Flags);
break;
case ISDN_P_B_RAW:
bch->state = protocol;
hdlc->ctrl.sr.cmd = HDLC_CMD_XRS | HDLC_CMD_RRS;
hdlc->ctrl.sr.mode = mode | HDLC_MODE_TRANS;
write_ctrl(bch, 5);
hdlc->ctrl.sr.cmd = HDLC_CMD_XRS;
write_ctrl(bch, 1);
hdlc->ctrl.sr.cmd = 0;
test_and_set_bit(FLG_TRANSPARENT, &bch->Flags);
break;
case ISDN_P_B_HDLC:
bch->state = protocol;
hdlc->ctrl.sr.cmd = HDLC_CMD_XRS | HDLC_CMD_RRS;
hdlc->ctrl.sr.mode = mode | HDLC_MODE_ITF_FLG;
write_ctrl(bch, 5);
hdlc->ctrl.sr.cmd = HDLC_CMD_XRS;
write_ctrl(bch, 1);
hdlc->ctrl.sr.cmd = 0;
test_and_set_bit(FLG_HDLC, &bch->Flags);
break;
default:
pr_info("%s: protocol not known %x\n", fc->name, protocol);
return -ENOPROTOOPT;
}
return 0;
}
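/*
 * The HDLC FIFOs are moved 32 bits at a time, so transfers are
 * always rounded up to a multiple of four bytes; with FLG_RX_OFF
 * set the FIFO is still drained but the data is only counted into
 * bch->dropcnt.
 */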
static void
hdlc_empty_fifo(struct bchannel *bch, int count)
{
u32 *ptr;
u8 *p;
u32 val, addr;
int cnt;
struct fritzcard *fc = bch->hw;
pr_debug("%s: %s %d\n", fc->name, __func__, count);
if (test_bit(FLG_RX_OFF, &bch->Flags)) {
p = NULL;
bch->dropcnt += count;
} else {
cnt = bchannel_get_rxbuf(bch, count);
if (cnt < 0) {
pr_warn("%s.B%d: No bufferspace for %d bytes\n",
fc->name, bch->nr, count);
return;
}
p = skb_put(bch->rx_skb, count);
}
ptr = (u32 *)p;
if (fc->type == AVM_FRITZ_PCIV2)
addr = fc->addr + (bch->nr == 2 ?
AVM_HDLC_FIFO_2 : AVM_HDLC_FIFO_1);
else {
addr = fc->addr + CHIP_WINDOW;
outl(bch->nr == 2 ? AVM_HDLC_2 : AVM_HDLC_1, fc->addr);
}
cnt = 0;
while (cnt < count) {
val = le32_to_cpu(inl(addr));
if (p) {
put_unaligned(val, ptr);
ptr++;
}
cnt += 4;
}
if (p && (debug & DEBUG_HW_BFIFO)) {
snprintf(fc->log, LOG_SIZE, "B%1d-recv %s %d ",
bch->nr, fc->name, count);
print_hex_dump_bytes(fc->log, DUMP_PREFIX_OFFSET, p, count);
}
}
static void
hdlc_fill_fifo(struct bchannel *bch)
{
struct fritzcard *fc = bch->hw;
struct hdlc_hw *hdlc;
int count, fs, cnt = 0, idx;
bool fillempty = false;
u8 *p;
u32 *ptr, val, addr;
idx = (bch->nr - 1) & 1;
hdlc = &fc->hdlc[idx];
fs = (fc->type == AVM_FRITZ_PCIV2) ?
HDLC_FIFO_SIZE_V2 : HDLC_FIFO_SIZE_V1;
if (!bch->tx_skb) {
if (!test_bit(FLG_TX_EMPTY, &bch->Flags))
return;
count = fs;
p = bch->fill;
fillempty = true;
} else {
count = bch->tx_skb->len - bch->tx_idx;
if (count <= 0)
return;
p = bch->tx_skb->data + bch->tx_idx;
}
hdlc->ctrl.sr.cmd &= ~HDLC_CMD_XME;
if (count > fs) {
count = fs;
} else {
if (test_bit(FLG_HDLC, &bch->Flags))
hdlc->ctrl.sr.cmd |= HDLC_CMD_XME;
}
ptr = (u32 *)p;
if (!fillempty) {
pr_debug("%s.B%d: %d/%d/%d", fc->name, bch->nr, count,
bch->tx_idx, bch->tx_skb->len);
bch->tx_idx += count;
} else {
pr_debug("%s.B%d: fillempty %d\n", fc->name, bch->nr, count);
}
hdlc->ctrl.sr.xml = ((count == fs) ? 0 : count);
if (fc->type == AVM_FRITZ_PCIV2) {
__write_ctrl_pciv2(fc, hdlc, bch->nr);
addr = fc->addr + (bch->nr == 2 ?
AVM_HDLC_FIFO_2 : AVM_HDLC_FIFO_1);
} else {
__write_ctrl_pci(fc, hdlc, bch->nr);
addr = fc->addr + CHIP_WINDOW;
}
if (fillempty) {
while (cnt < count) {
/* all bytes the same - no worry about endian */
outl(*ptr, addr);
cnt += 4;
}
} else {
while (cnt < count) {
val = get_unaligned(ptr);
outl(cpu_to_le32(val), addr);
ptr++;
cnt += 4;
}
}
if ((debug & DEBUG_HW_BFIFO) && !fillempty) {
snprintf(fc->log, LOG_SIZE, "B%1d-send %s %d ",
bch->nr, fc->name, count);
print_hex_dump_bytes(fc->log, DUMP_PREFIX_OFFSET, p, count);
}
}
static void
HDLC_irq_xpr(struct bchannel *bch)
{
if (bch->tx_skb && bch->tx_idx < bch->tx_skb->len) {
hdlc_fill_fifo(bch);
} else {
dev_kfree_skb(bch->tx_skb);
if (get_next_bframe(bch)) {
hdlc_fill_fifo(bch);
test_and_clear_bit(FLG_TX_EMPTY, &bch->Flags);
} else if (test_bit(FLG_TX_EMPTY, &bch->Flags)) {
hdlc_fill_fifo(bch);
}
}
}
static void
HDLC_irq(struct bchannel *bch, u32 stat)
{
struct fritzcard *fc = bch->hw;
int len, fs;
u32 rmlMask;
struct hdlc_hw *hdlc;
hdlc = &fc->hdlc[(bch->nr - 1) & 1];
pr_debug("%s: ch%d stat %#x\n", fc->name, bch->nr, stat);
if (fc->type == AVM_FRITZ_PCIV2) {
rmlMask = HDLC_STAT_RML_MASK_V2;
fs = HDLC_FIFO_SIZE_V2;
} else {
rmlMask = HDLC_STAT_RML_MASK_V1;
fs = HDLC_FIFO_SIZE_V1;
}
if (stat & HDLC_INT_RPR) {
if (stat & HDLC_STAT_RDO) {
pr_warn("%s: ch%d stat %x RDO\n",
fc->name, bch->nr, stat);
hdlc->ctrl.sr.xml = 0;
hdlc->ctrl.sr.cmd |= HDLC_CMD_RRS;
write_ctrl(bch, 1);
hdlc->ctrl.sr.cmd &= ~HDLC_CMD_RRS;
write_ctrl(bch, 1);
if (bch->rx_skb)
skb_trim(bch->rx_skb, 0);
} else {
len = (stat & rmlMask) >> 8;
if (!len)
len = fs;
hdlc_empty_fifo(bch, len);
if (!bch->rx_skb)
goto handle_tx;
if (test_bit(FLG_TRANSPARENT, &bch->Flags)) {
recv_Bchannel(bch, 0, false);
} else if (stat & HDLC_STAT_RME) {
if ((stat & HDLC_STAT_CRCVFRRAB) ==
HDLC_STAT_CRCVFR) {
recv_Bchannel(bch, 0, false);
} else {
pr_warn("%s: got invalid frame\n",
fc->name);
skb_trim(bch->rx_skb, 0);
}
}
}
}
handle_tx:
if (stat & HDLC_INT_XDU) {
/* We lost a TX interrupt here, so restart transmitting
 * the whole frame in HDLC mode; in transparent mode we
 * simply send the next data.
 */
pr_warn("%s: ch%d stat %x XDU %s\n", fc->name, bch->nr,
stat, bch->tx_skb ? "tx_skb" : "no tx_skb");
if (bch->tx_skb && bch->tx_skb->len) {
if (!test_bit(FLG_TRANSPARENT, &bch->Flags))
bch->tx_idx = 0;
} else if (test_bit(FLG_FILLEMPTY, &bch->Flags)) {
test_and_set_bit(FLG_TX_EMPTY, &bch->Flags);
}
hdlc->ctrl.sr.xml = 0;
hdlc->ctrl.sr.cmd |= HDLC_CMD_XRS;
write_ctrl(bch, 1);
hdlc->ctrl.sr.cmd &= ~HDLC_CMD_XRS;
HDLC_irq_xpr(bch);
return;
} else if (stat & HDLC_INT_XPR)
HDLC_irq_xpr(bch);
}
static inline void
HDLC_irq_main(struct fritzcard *fc)
{
u32 stat;
struct bchannel *bch;
stat = read_status(fc, 1);
if (stat & HDLC_INT_MASK) {
bch = Sel_BCS(fc, 1);
if (bch)
HDLC_irq(bch, stat);
else
pr_debug("%s: spurious ch1 IRQ\n", fc->name);
}
stat = read_status(fc, 2);
if (stat & HDLC_INT_MASK) {
bch = Sel_BCS(fc, 2);
if (bch)
HDLC_irq(bch, stat);
else
pr_debug("%s: spurious ch2 IRQ\n", fc->name);
}
}
static irqreturn_t
avm_fritz_interrupt(int intno, void *dev_id)
{
struct fritzcard *fc = dev_id;
u8 val;
u8 sval;
spin_lock(&fc->lock);
sval = inb(fc->addr + 2);
pr_debug("%s: irq stat0 %x\n", fc->name, sval);
if ((sval & AVM_STATUS0_IRQ_MASK) == AVM_STATUS0_IRQ_MASK) {
/* shared IRQ from other HW */
spin_unlock(&fc->lock);
return IRQ_NONE;
}
fc->irqcnt++;
if (!(sval & AVM_STATUS0_IRQ_ISAC)) {
val = ReadISAC_V1(fc, ISAC_ISTA);
mISDNisac_irq(&fc->isac, val);
}
if (!(sval & AVM_STATUS0_IRQ_HDLC))
HDLC_irq_main(fc);
spin_unlock(&fc->lock);
return IRQ_HANDLED;
}
static irqreturn_t
avm_fritzv2_interrupt(int intno, void *dev_id)
{
struct fritzcard *fc = dev_id;
u8 val;
u8 sval;
spin_lock(&fc->lock);
sval = inb(fc->addr + 2);
pr_debug("%s: irq stat0 %x\n", fc->name, sval);
if (!(sval & AVM_STATUS0_IRQ_MASK)) {
/* shared IRQ from other HW */
spin_unlock(&fc->lock);
return IRQ_NONE;
}
fc->irqcnt++;
if (sval & AVM_STATUS0_IRQ_HDLC)
HDLC_irq_main(fc);
if (sval & AVM_STATUS0_IRQ_ISAC) {
val = ReadISAC_V2(fc, ISACX_ISTA);
mISDNisac_irq(&fc->isac, val);
}
if (sval & AVM_STATUS0_IRQ_TIMER) {
pr_debug("%s: timer irq\n", fc->name);
outb(fc->ctrlreg | AVM_STATUS0_RES_TIMER, fc->addr + 2);
udelay(1);
outb(fc->ctrlreg, fc->addr + 2);
}
spin_unlock(&fc->lock);
return IRQ_HANDLED;
}
static int
avm_l2l1B(struct mISDNchannel *ch, struct sk_buff *skb)
{
struct bchannel *bch = container_of(ch, struct bchannel, ch);
struct fritzcard *fc = bch->hw;
int ret = -EINVAL;
struct mISDNhead *hh = mISDN_HEAD_P(skb);
unsigned long flags;
switch (hh->prim) {
case PH_DATA_REQ:
spin_lock_irqsave(&fc->lock, flags);
ret = bchannel_senddata(bch, skb);
if (ret > 0) { /* direct TX */
hdlc_fill_fifo(bch);
ret = 0;
}
spin_unlock_irqrestore(&fc->lock, flags);
return ret;
case PH_ACTIVATE_REQ:
spin_lock_irqsave(&fc->lock, flags);
if (!test_and_set_bit(FLG_ACTIVE, &bch->Flags))
ret = modehdlc(bch, ch->protocol);
else
ret = 0;
spin_unlock_irqrestore(&fc->lock, flags);
if (!ret)
_queue_data(ch, PH_ACTIVATE_IND, MISDN_ID_ANY, 0,
NULL, GFP_KERNEL);
break;
case PH_DEACTIVATE_REQ:
spin_lock_irqsave(&fc->lock, flags);
mISDN_clear_bchannel(bch);
modehdlc(bch, ISDN_P_NONE);
spin_unlock_irqrestore(&fc->lock, flags);
_queue_data(ch, PH_DEACTIVATE_IND, MISDN_ID_ANY, 0,
NULL, GFP_KERNEL);
ret = 0;
break;
}
if (!ret)
dev_kfree_skb(skb);
return ret;
}
static void
inithdlc(struct fritzcard *fc)
{
modehdlc(&fc->bch[0], -1);
modehdlc(&fc->bch[1], -1);
}
static void
clear_pending_hdlc_ints(struct fritzcard *fc)
{
u32 val;
val = read_status(fc, 1);
pr_debug("%s: HDLC 1 STA %x\n", fc->name, val);
val = read_status(fc, 2);
pr_debug("%s: HDLC 2 STA %x\n", fc->name, val);
}
static void
reset_avm(struct fritzcard *fc)
{
switch (fc->type) {
case AVM_FRITZ_PCI:
fc->ctrlreg = AVM_STATUS0_RESET | AVM_STATUS0_DIS_TIMER;
break;
case AVM_FRITZ_PCIV2:
fc->ctrlreg = AVM_STATUS0_RESET;
break;
}
if (debug & DEBUG_HW)
pr_notice("%s: reset\n", fc->name);
disable_hwirq(fc);
mdelay(5);
switch (fc->type) {
case AVM_FRITZ_PCI:
fc->ctrlreg = AVM_STATUS0_DIS_TIMER | AVM_STATUS0_RES_TIMER;
disable_hwirq(fc);
outb(AVM_STATUS1_ENA_IOM, fc->addr + 3);
break;
case AVM_FRITZ_PCIV2:
fc->ctrlreg = 0;
disable_hwirq(fc);
break;
}
mdelay(1);
if (debug & DEBUG_HW)
pr_notice("%s: S0/S1 %x/%x\n", fc->name,
inb(fc->addr + 2), inb(fc->addr + 3));
}
static int
init_card(struct fritzcard *fc)
{
int ret, cnt = 3;
u_long flags;
reset_avm(fc); /* disable IRQ */
if (fc->type == AVM_FRITZ_PCIV2)
ret = request_irq(fc->irq, avm_fritzv2_interrupt,
IRQF_SHARED, fc->name, fc);
else
ret = request_irq(fc->irq, avm_fritz_interrupt,
IRQF_SHARED, fc->name, fc);
if (ret) {
pr_info("%s: couldn't get interrupt %d\n",
fc->name, fc->irq);
return ret;
}
while (cnt--) {
spin_lock_irqsave(&fc->lock, flags);
ret = fc->isac.init(&fc->isac);
if (ret) {
spin_unlock_irqrestore(&fc->lock, flags);
pr_info("%s: ISAC init failed with %d\n",
fc->name, ret);
break;
}
clear_pending_hdlc_ints(fc);
inithdlc(fc);
enable_hwirq(fc);
/* RESET Receiver and Transmitter */
if (fc->type == AVM_FRITZ_PCIV2) {
WriteISAC_V2(fc, ISACX_MASK, 0);
WriteISAC_V2(fc, ISACX_CMDRD, 0x41);
} else {
WriteISAC_V1(fc, ISAC_MASK, 0);
WriteISAC_V1(fc, ISAC_CMDR, 0x41);
}
spin_unlock_irqrestore(&fc->lock, flags);
/* Timeout 10ms */
msleep_interruptible(10);
if (debug & DEBUG_HW)
pr_notice("%s: IRQ %d count %d\n", fc->name,
fc->irq, fc->irqcnt);
if (!fc->irqcnt) {
pr_info("%s: IRQ(%d) getting no IRQs during init %d\n",
fc->name, fc->irq, 3 - cnt);
reset_avm(fc);
} else
return 0;
}
free_irq(fc->irq, fc);
return -EIO;
}
static int
channel_bctrl(struct bchannel *bch, struct mISDN_ctrl_req *cq)
{
return mISDN_ctrl_bchannel(bch, cq);
}
static int
avm_bctrl(struct mISDNchannel *ch, u32 cmd, void *arg)
{
struct bchannel *bch = container_of(ch, struct bchannel, ch);
struct fritzcard *fc = bch->hw;
int ret = -EINVAL;
u_long flags;
pr_debug("%s: %s cmd:%x %p\n", fc->name, __func__, cmd, arg);
switch (cmd) {
case CLOSE_CHANNEL:
test_and_clear_bit(FLG_OPEN, &bch->Flags);
cancel_work_sync(&bch->workq);
spin_lock_irqsave(&fc->lock, flags);
mISDN_clear_bchannel(bch);
modehdlc(bch, ISDN_P_NONE);
spin_unlock_irqrestore(&fc->lock, flags);
ch->protocol = ISDN_P_NONE;
ch->peer = NULL;
module_put(THIS_MODULE);
ret = 0;
break;
case CONTROL_CHANNEL:
ret = channel_bctrl(bch, arg);
break;
default:
pr_info("%s: %s unknown prim(%x)\n", fc->name, __func__, cmd);
}
return ret;
}
static int
channel_ctrl(struct fritzcard *fc, struct mISDN_ctrl_req *cq)
{
int ret = 0;
switch (cq->op) {
case MISDN_CTRL_GETOP:
cq->op = MISDN_CTRL_LOOP | MISDN_CTRL_L1_TIMER3;
break;
case MISDN_CTRL_LOOP:
/* cq->channel: 0 disable, 1 B1 loop, 2 B2 loop, 3 both */
if (cq->channel < 0 || cq->channel > 3) {
ret = -EINVAL;
break;
}
ret = fc->isac.ctrl(&fc->isac, HW_TESTLOOP, cq->channel);
break;
case MISDN_CTRL_L1_TIMER3:
ret = fc->isac.ctrl(&fc->isac, HW_TIMER3_VALUE, cq->p1);
break;
default:
pr_info("%s: %s unknown Op %x\n", fc->name, __func__, cq->op);
ret = -EINVAL;
break;
}
return ret;
}
static int
open_bchannel(struct fritzcard *fc, struct channel_req *rq)
{
struct bchannel *bch;
if (rq->adr.channel == 0 || rq->adr.channel > 2)
return -EINVAL;
if (rq->protocol == ISDN_P_NONE)
return -EINVAL;
bch = &fc->bch[rq->adr.channel - 1];
if (test_and_set_bit(FLG_OPEN, &bch->Flags))
return -EBUSY; /* b-channel can be only open once */
bch->ch.protocol = rq->protocol;
rq->ch = &bch->ch;
return 0;
}
/*
* device control function
*/
static int
avm_dctrl(struct mISDNchannel *ch, u32 cmd, void *arg)
{
struct mISDNdevice *dev = container_of(ch, struct mISDNdevice, D);
struct dchannel *dch = container_of(dev, struct dchannel, dev);
struct fritzcard *fc = dch->hw;
struct channel_req *rq;
int err = 0;
pr_debug("%s: %s cmd:%x %p\n", fc->name, __func__, cmd, arg);
switch (cmd) {
case OPEN_CHANNEL:
rq = arg;
if (rq->protocol == ISDN_P_TE_S0)
err = fc->isac.open(&fc->isac, rq);
else
err = open_bchannel(fc, rq);
if (err)
break;
if (!try_module_get(THIS_MODULE))
pr_info("%s: cannot get module\n", fc->name);
break;
case CLOSE_CHANNEL:
pr_debug("%s: dev(%d) close from %p\n", fc->name, dch->dev.id,
__builtin_return_address(0));
module_put(THIS_MODULE);
break;
case CONTROL_CHANNEL:
err = channel_ctrl(fc, arg);
break;
default:
pr_debug("%s: %s unknown command %x\n",
fc->name, __func__, cmd);
return -EINVAL;
}
return err;
}
static int
setup_fritz(struct fritzcard *fc)
{
u32 val, ver;
if (!request_region(fc->addr, 32, fc->name)) {
pr_info("%s: AVM config port %x-%x already in use\n",
fc->name, fc->addr, fc->addr + 31);
return -EIO;
}
switch (fc->type) {
case AVM_FRITZ_PCI:
val = inl(fc->addr);
outl(AVM_HDLC_1, fc->addr + CHIP_INDEX);
ver = inl(fc->addr + CHIP_WINDOW + HDLC_STATUS) >> 24;
if (debug & DEBUG_HW) {
pr_notice("%s: PCI stat %#x\n", fc->name, val);
pr_notice("%s: PCI Class %X Rev %d\n", fc->name,
val & 0xff, (val >> 8) & 0xff);
pr_notice("%s: HDLC version %x\n", fc->name, ver & 0xf);
}
ASSIGN_FUNC(V1, ISAC, fc->isac);
fc->isac.type = IPAC_TYPE_ISAC;
break;
case AVM_FRITZ_PCIV2:
val = inl(fc->addr);
ver = inl(fc->addr + AVM_HDLC_STATUS_1) >> 24;
if (debug & DEBUG_HW) {
pr_notice("%s: PCI V2 stat %#x\n", fc->name, val);
pr_notice("%s: PCI V2 Class %X Rev %d\n", fc->name,
val & 0xff, (val >> 8) & 0xff);
pr_notice("%s: HDLC version %x\n", fc->name, ver & 0xf);
}
ASSIGN_FUNC(V2, ISAC, fc->isac);
fc->isac.type = IPAC_TYPE_ISACX;
break;
default:
release_region(fc->addr, 32);
pr_info("%s: AVM unknown type %d\n", fc->name, fc->type);
return -ENODEV;
}
pr_notice("%s: %s config irq:%d base:0x%X\n", fc->name,
(fc->type == AVM_FRITZ_PCI) ? "AVM Fritz!CARD PCI" :
"AVM Fritz!CARD PCIv2", fc->irq, fc->addr);
return 0;
}
static void
release_card(struct fritzcard *card)
{
u_long flags;
disable_hwirq(card);
spin_lock_irqsave(&card->lock, flags);
modehdlc(&card->bch[0], ISDN_P_NONE);
modehdlc(&card->bch[1], ISDN_P_NONE);
spin_unlock_irqrestore(&card->lock, flags);
card->isac.release(&card->isac);
free_irq(card->irq, card);
mISDN_freebchannel(&card->bch[1]);
mISDN_freebchannel(&card->bch[0]);
mISDN_unregister_device(&card->isac.dch.dev);
release_region(card->addr, 32);
pci_disable_device(card->pdev);
pci_set_drvdata(card->pdev, NULL);
write_lock_irqsave(&card_lock, flags);
list_del(&card->list);
write_unlock_irqrestore(&card_lock, flags);
kfree(card);
AVM_cnt--;
}
static int
setup_instance(struct fritzcard *card)
{
int i, err;
unsigned short minsize;
u_long flags;
snprintf(card->name, MISDN_MAX_IDLEN - 1, "AVM.%d", AVM_cnt + 1);
write_lock_irqsave(&card_lock, flags);
list_add_tail(&card->list, &Cards);
write_unlock_irqrestore(&card_lock, flags);
_set_debug(card);
card->isac.name = card->name;
spin_lock_init(&card->lock);
card->isac.hwlock = &card->lock;
mISDNisac_init(&card->isac, card);
card->isac.dch.dev.Bprotocols = (1 << (ISDN_P_B_RAW & ISDN_P_B_MASK)) |
(1 << (ISDN_P_B_HDLC & ISDN_P_B_MASK));
card->isac.dch.dev.D.ctrl = avm_dctrl;
for (i = 0; i < 2; i++) {
card->bch[i].nr = i + 1;
set_channelmap(i + 1, card->isac.dch.dev.channelmap);
if (AVM_FRITZ_PCIV2 == card->type)
minsize = HDLC_FIFO_SIZE_V2;
else
minsize = HDLC_FIFO_SIZE_V1;
mISDN_initbchannel(&card->bch[i], MAX_DATA_MEM, minsize);
card->bch[i].hw = card;
card->bch[i].ch.send = avm_l2l1B;
card->bch[i].ch.ctrl = avm_bctrl;
card->bch[i].ch.nr = i + 1;
list_add(&card->bch[i].ch.list, &card->isac.dch.dev.bchannels);
}
err = setup_fritz(card);
if (err)
goto error;
err = mISDN_register_device(&card->isac.dch.dev, &card->pdev->dev,
card->name);
if (err)
goto error_reg;
err = init_card(card);
if (!err) {
AVM_cnt++;
pr_notice("AVM %d cards installed DEBUG\n", AVM_cnt);
return 0;
}
mISDN_unregister_device(&card->isac.dch.dev);
error_reg:
release_region(card->addr, 32);
error:
card->isac.release(&card->isac);
mISDN_freebchannel(&card->bch[1]);
mISDN_freebchannel(&card->bch[0]);
write_lock_irqsave(&card_lock, flags);
list_del(&card->list);
write_unlock_irqrestore(&card_lock, flags);
kfree(card);
return err;
}
static int
fritzpci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
int err = -ENOMEM;
struct fritzcard *card;
card = kzalloc(sizeof(struct fritzcard), GFP_KERNEL);
if (!card) {
pr_info("No kmem for fritzcard\n");
return err;
}
if (pdev->device == PCI_DEVICE_ID_AVM_A1_V2)
card->type = AVM_FRITZ_PCIV2;
else
card->type = AVM_FRITZ_PCI;
card->pdev = pdev;
err = pci_enable_device(pdev);
if (err) {
kfree(card);
return err;
}
pr_notice("mISDN: found adapter %s at %s\n",
(char *) ent->driver_data, pci_name(pdev));
card->addr = pci_resource_start(pdev, 1);
card->irq = pdev->irq;
pci_set_drvdata(pdev, card);
err = setup_instance(card);
if (err)
pci_set_drvdata(pdev, NULL);
return err;
}
static void
fritz_remove_pci(struct pci_dev *pdev)
{
struct fritzcard *card = pci_get_drvdata(pdev);
if (card)
release_card(card);
else
if (debug)
pr_info("%s: drvdata already removed\n", __func__);
}
static const struct pci_device_id fcpci_ids[] = {
{ PCI_VENDOR_ID_AVM, PCI_DEVICE_ID_AVM_A1, PCI_ANY_ID, PCI_ANY_ID,
0, 0, (unsigned long) "Fritz!Card PCI"},
{ PCI_VENDOR_ID_AVM, PCI_DEVICE_ID_AVM_A1_V2, PCI_ANY_ID, PCI_ANY_ID,
0, 0, (unsigned long) "Fritz!Card PCI v2" },
{ }
};
MODULE_DEVICE_TABLE(pci, fcpci_ids);
static struct pci_driver fcpci_driver = {
.name = "fcpci",
.probe = fritzpci_probe,
.remove = fritz_remove_pci,
.id_table = fcpci_ids,
};
static int __init AVM_init(void)
{
int err;
pr_notice("AVM Fritz PCI driver Rev. %s\n", AVMFRITZ_REV);
err = pci_register_driver(&fcpci_driver);
return err;
}
static void __exit AVM_cleanup(void)
{
pci_unregister_driver(&fcpci_driver);
}
module_init(AVM_init);
module_exit(AVM_cleanup);
| linux-master | drivers/isdn/hardware/mISDN/avmfritz.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* NETJet mISDN driver
*
* Author Karsten Keil <[email protected]>
*
* Copyright 2009 by Karsten Keil <[email protected]>
*/
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/mISDNhw.h>
#include <linux/slab.h>
#include "ipac.h"
#include "iohelper.h"
#include "netjet.h"
#include "isdnhdlc.h"
#define NETJET_REV "2.0"
enum nj_types {
NETJET_S_TJ300,
NETJET_S_TJ320,
ENTERNOW__TJ320,
};
struct tiger_dma {
size_t size;
u32 *start;
int idx;
u32 dmastart;
u32 dmairq;
u32 dmaend;
u32 dmacur;
};
struct tiger_hw;
struct tiger_ch {
struct bchannel bch;
struct tiger_hw *nj;
int idx;
int free;
int lastrx;
u16 rxstate;
u16 txstate;
struct isdnhdlc_vars hsend;
struct isdnhdlc_vars hrecv;
u8 *hsbuf;
u8 *hrbuf;
};
#define TX_INIT 0x0001
#define TX_IDLE 0x0002
#define TX_RUN 0x0004
#define TX_UNDERRUN 0x0100
#define RX_OVERRUN 0x0100
#define LOG_SIZE 64
struct tiger_hw {
struct list_head list;
struct pci_dev *pdev;
char name[MISDN_MAX_IDLEN];
enum nj_types typ;
int irq;
u32 irqcnt;
u32 base;
size_t base_s;
dma_addr_t dma;
void *dma_p;
spinlock_t lock; /* lock HW */
struct isac_hw isac;
struct tiger_dma send;
struct tiger_dma recv;
struct tiger_ch bc[2];
u8 ctrlreg;
u8 dmactrl;
u8 auxd;
u8 last_is0;
u8 irqmask0;
char log[LOG_SIZE];
};
static LIST_HEAD(Cards);
static DEFINE_RWLOCK(card_lock); /* protect Cards */
static u32 debug;
static int nj_cnt;
static void
_set_debug(struct tiger_hw *card)
{
card->isac.dch.debug = debug;
card->bc[0].bch.debug = debug;
card->bc[1].bch.debug = debug;
}
static int
set_debug(const char *val, const struct kernel_param *kp)
{
int ret;
struct tiger_hw *card;
ret = param_set_uint(val, kp);
if (!ret) {
read_lock(&card_lock);
list_for_each_entry(card, &Cards, list)
_set_debug(card);
read_unlock(&card_lock);
}
return ret;
}
MODULE_AUTHOR("Karsten Keil");
MODULE_LICENSE("GPL v2");
MODULE_VERSION(NETJET_REV);
module_param_call(debug, set_debug, param_get_uint, &debug, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(debug, "Netjet debug mask");
static void
nj_disable_hwirq(struct tiger_hw *card)
{
outb(0, card->base + NJ_IRQMASK0);
outb(0, card->base + NJ_IRQMASK1);
}
static u8
ReadISAC_nj(void *p, u8 offset)
{
struct tiger_hw *card = p;
u8 ret;
card->auxd &= 0xfc;
card->auxd |= (offset >> 4) & 3;
outb(card->auxd, card->base + NJ_AUXDATA);
ret = inb(card->base + NJ_ISAC_OFF + ((offset & 0x0f) << 2));
return ret;
}
static void
WriteISAC_nj(void *p, u8 offset, u8 value)
{
struct tiger_hw *card = p;
card->auxd &= 0xfc;
card->auxd |= (offset >> 4) & 3;
outb(card->auxd, card->base + NJ_AUXDATA);
outb(value, card->base + NJ_ISAC_OFF + ((offset & 0x0f) << 2));
}
static void
ReadFiFoISAC_nj(void *p, u8 offset, u8 *data, int size)
{
struct tiger_hw *card = p;
card->auxd &= 0xfc;
outb(card->auxd, card->base + NJ_AUXDATA);
insb(card->base + NJ_ISAC_OFF, data, size);
}
static void
WriteFiFoISAC_nj(void *p, u8 offset, u8 *data, int size)
{
struct tiger_hw *card = p;
card->auxd &= 0xfc;
outb(card->auxd, card->base + NJ_AUXDATA);
outsb(card->base + NJ_ISAC_OFF, data, size);
}
static void
fill_mem(struct tiger_ch *bc, u32 idx, u32 cnt, u32 fill)
{
struct tiger_hw *card = bc->bch.hw;
u32 mask = 0xff, val;
pr_debug("%s: B%1d fill %02x len %d idx %d/%d\n", card->name,
bc->bch.nr, fill, cnt, idx, card->send.idx);
if (bc->bch.nr & 2) {
fill <<= 8;
mask <<= 8;
}
mask ^= 0xffffffff;
while (cnt--) {
val = card->send.start[idx];
val &= mask;
val |= fill;
card->send.start[idx++] = val;
if (idx >= card->send.size)
idx = 0;
}
}
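/*
 * Each u32 of the Tiger DMA rings carries one sample per B-channel:
 * B1 in the low byte, B2 in bits 8-15. fill_mem(), fill_dma() and
 * read_dma() therefore mask or shift only the byte that belongs to
 * the channel and leave the other one untouched.
 */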
static int
mode_tiger(struct tiger_ch *bc, u32 protocol)
{
struct tiger_hw *card = bc->bch.hw;
pr_debug("%s: B%1d protocol %x-->%x\n", card->name,
bc->bch.nr, bc->bch.state, protocol);
switch (protocol) {
case ISDN_P_NONE:
if (bc->bch.state == ISDN_P_NONE)
break;
fill_mem(bc, 0, card->send.size, 0xff);
bc->bch.state = protocol;
/* only stop dma and interrupts if both channels NULL */
if ((card->bc[0].bch.state == ISDN_P_NONE) &&
(card->bc[1].bch.state == ISDN_P_NONE)) {
card->dmactrl = 0;
outb(card->dmactrl, card->base + NJ_DMACTRL);
outb(0, card->base + NJ_IRQMASK0);
}
test_and_clear_bit(FLG_HDLC, &bc->bch.Flags);
test_and_clear_bit(FLG_TRANSPARENT, &bc->bch.Flags);
bc->txstate = 0;
bc->rxstate = 0;
bc->lastrx = -1;
break;
case ISDN_P_B_RAW:
test_and_set_bit(FLG_TRANSPARENT, &bc->bch.Flags);
bc->bch.state = protocol;
bc->idx = 0;
bc->free = card->send.size / 2;
bc->rxstate = 0;
bc->txstate = TX_INIT | TX_IDLE;
bc->lastrx = -1;
if (!card->dmactrl) {
card->dmactrl = 1;
outb(card->dmactrl, card->base + NJ_DMACTRL);
outb(0x0f, card->base + NJ_IRQMASK0);
}
break;
case ISDN_P_B_HDLC:
test_and_set_bit(FLG_HDLC, &bc->bch.Flags);
bc->bch.state = protocol;
bc->idx = 0;
bc->free = card->send.size / 2;
bc->rxstate = 0;
bc->txstate = TX_INIT | TX_IDLE;
isdnhdlc_rcv_init(&bc->hrecv, 0);
isdnhdlc_out_init(&bc->hsend, 0);
bc->lastrx = -1;
if (!card->dmactrl) {
card->dmactrl = 1;
outb(card->dmactrl, card->base + NJ_DMACTRL);
outb(0x0f, card->base + NJ_IRQMASK0);
}
break;
default:
pr_info("%s: %s protocol %x not handled\n", card->name,
__func__, protocol);
return -ENOPROTOOPT;
}
card->send.dmacur = inl(card->base + NJ_DMA_READ_ADR);
card->recv.dmacur = inl(card->base + NJ_DMA_WRITE_ADR);
card->send.idx = (card->send.dmacur - card->send.dmastart) >> 2;
card->recv.idx = (card->recv.dmacur - card->recv.dmastart) >> 2;
pr_debug("%s: %s ctrl %x irq %02x/%02x idx %d/%d\n",
card->name, __func__,
inb(card->base + NJ_DMACTRL),
inb(card->base + NJ_IRQMASK0),
inb(card->base + NJ_IRQSTAT0),
card->send.idx,
card->recv.idx);
return 0;
}
static void
nj_reset(struct tiger_hw *card)
{
outb(0xff, card->base + NJ_CTRL); /* Reset On */
mdelay(1);
/* now edge triggered for TJ320 GE 13/07/00 */
/* see comment in IRQ function */
if (card->typ == NETJET_S_TJ320) /* TJ320 */
card->ctrlreg = 0x40; /* Reset Off and status read clear */
else
card->ctrlreg = 0x00; /* Reset Off and status read clear */
outb(card->ctrlreg, card->base + NJ_CTRL);
mdelay(10);
/* configure AUX pins (all output except ISAC IRQ pin) */
card->auxd = 0;
card->dmactrl = 0;
outb(~NJ_ISACIRQ, card->base + NJ_AUXCTRL);
outb(NJ_ISACIRQ, card->base + NJ_IRQMASK1);
outb(card->auxd, card->base + NJ_AUXDATA);
}
static int
inittiger(struct tiger_hw *card)
{
int i;
card->dma_p = dma_alloc_coherent(&card->pdev->dev, NJ_DMA_SIZE,
&card->dma, GFP_ATOMIC);
if (!card->dma_p) {
pr_info("%s: No DMA memory\n", card->name);
return -ENOMEM;
}
if ((u64)card->dma > 0xffffffff) {
pr_info("%s: DMA outside 32 bit\n", card->name);
return -ENOMEM;
}
for (i = 0; i < 2; i++) {
card->bc[i].hsbuf = kmalloc(NJ_DMA_TXSIZE, GFP_ATOMIC);
if (!card->bc[i].hsbuf) {
pr_info("%s: no B%d send buffer\n", card->name, i + 1);
return -ENOMEM;
}
card->bc[i].hrbuf = kmalloc(NJ_DMA_RXSIZE, GFP_ATOMIC);
if (!card->bc[i].hrbuf) {
pr_info("%s: no B%d recv buffer\n", card->name, i + 1);
return -ENOMEM;
}
}
memset(card->dma_p, 0xff, NJ_DMA_SIZE);
card->send.start = card->dma_p;
card->send.dmastart = (u32)card->dma;
card->send.dmaend = card->send.dmastart +
(4 * (NJ_DMA_TXSIZE - 1));
card->send.dmairq = card->send.dmastart +
(4 * ((NJ_DMA_TXSIZE / 2) - 1));
card->send.size = NJ_DMA_TXSIZE;
if (debug & DEBUG_HW)
pr_notice("%s: send buffer phy %#x - %#x - %#x virt %p"
" size %zu u32\n", card->name,
card->send.dmastart, card->send.dmairq,
card->send.dmaend, card->send.start, card->send.size);
outl(card->send.dmastart, card->base + NJ_DMA_READ_START);
outl(card->send.dmairq, card->base + NJ_DMA_READ_IRQ);
outl(card->send.dmaend, card->base + NJ_DMA_READ_END);
card->recv.start = card->dma_p + (NJ_DMA_SIZE / 2);
card->recv.dmastart = (u32)card->dma + (NJ_DMA_SIZE / 2);
card->recv.dmaend = card->recv.dmastart +
(4 * (NJ_DMA_RXSIZE - 1));
card->recv.dmairq = card->recv.dmastart +
(4 * ((NJ_DMA_RXSIZE / 2) - 1));
card->recv.size = NJ_DMA_RXSIZE;
if (debug & DEBUG_HW)
pr_notice("%s: recv buffer phy %#x - %#x - %#x virt %p"
" size %zu u32\n", card->name,
card->recv.dmastart, card->recv.dmairq,
card->recv.dmaend, card->recv.start, card->recv.size);
outl(card->recv.dmastart, card->base + NJ_DMA_WRITE_START);
outl(card->recv.dmairq, card->base + NJ_DMA_WRITE_IRQ);
outl(card->recv.dmaend, card->base + NJ_DMA_WRITE_END);
return 0;
}
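/*
 * inittiger() splits the single coherent buffer in half: the first
 * half is the send (READ DMA) ring, the second the receive (WRITE
 * DMA) ring. Each ring is programmed with a start, an end and a
 * half-way IRQ address, so the chip interrupts twice per revolution.
 */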
static void
read_dma(struct tiger_ch *bc, u32 idx, int cnt)
{
struct tiger_hw *card = bc->bch.hw;
int i, stat;
u32 val;
u8 *p, *pn;
if (bc->lastrx == idx) {
bc->rxstate |= RX_OVERRUN;
pr_info("%s: B%1d overrun at idx %d\n", card->name,
bc->bch.nr, idx);
}
bc->lastrx = idx;
if (test_bit(FLG_RX_OFF, &bc->bch.Flags)) {
bc->bch.dropcnt += cnt;
return;
}
stat = bchannel_get_rxbuf(&bc->bch, cnt);
/* only transparent mode uses the count here; HDLC overrun is detected later */
if (stat == -ENOMEM) {
pr_warn("%s.B%d: No memory for %d bytes\n",
card->name, bc->bch.nr, cnt);
return;
}
if (test_bit(FLG_TRANSPARENT, &bc->bch.Flags))
p = skb_put(bc->bch.rx_skb, cnt);
else
p = bc->hrbuf;
for (i = 0; i < cnt; i++) {
val = card->recv.start[idx++];
if (bc->bch.nr & 2)
val >>= 8;
if (idx >= card->recv.size)
idx = 0;
p[i] = val & 0xff;
}
if (test_bit(FLG_TRANSPARENT, &bc->bch.Flags)) {
recv_Bchannel(&bc->bch, 0, false);
return;
}
pn = bc->hrbuf;
while (cnt > 0) {
stat = isdnhdlc_decode(&bc->hrecv, pn, cnt, &i,
bc->bch.rx_skb->data, bc->bch.maxlen);
if (stat > 0) { /* valid frame received */
p = skb_put(bc->bch.rx_skb, stat);
if (debug & DEBUG_HW_BFIFO) {
snprintf(card->log, LOG_SIZE,
"B%1d-recv %s %d ", bc->bch.nr,
card->name, stat);
print_hex_dump_bytes(card->log,
DUMP_PREFIX_OFFSET, p,
stat);
}
recv_Bchannel(&bc->bch, 0, false);
stat = bchannel_get_rxbuf(&bc->bch, bc->bch.maxlen);
if (stat < 0) {
pr_warn("%s.B%d: No memory for %d bytes\n",
card->name, bc->bch.nr, cnt);
return;
}
} else if (stat == -HDLC_CRC_ERROR) {
pr_info("%s: B%1d receive frame CRC error\n",
card->name, bc->bch.nr);
} else if (stat == -HDLC_FRAMING_ERROR) {
pr_info("%s: B%1d receive framing error\n",
card->name, bc->bch.nr);
} else if (stat == -HDLC_LENGTH_ERROR) {
pr_info("%s: B%1d receive frame too long (> %d)\n",
card->name, bc->bch.nr, bc->bch.maxlen);
}
pn += i;
cnt -= i;
}
}
static void
recv_tiger(struct tiger_hw *card, u8 irq_stat)
{
u32 idx;
int cnt = card->recv.size / 2;
/* Note receive is via the WRITE DMA channel */
card->last_is0 &= ~NJ_IRQM0_WR_MASK;
card->last_is0 |= (irq_stat & NJ_IRQM0_WR_MASK);
if (irq_stat & NJ_IRQM0_WR_END)
idx = cnt - 1;
else
idx = card->recv.size - 1;
if (test_bit(FLG_ACTIVE, &card->bc[0].bch.Flags))
read_dma(&card->bc[0], idx, cnt);
if (test_bit(FLG_ACTIVE, &card->bc[1].bch.Flags))
read_dma(&card->bc[1], idx, cnt);
}
/* sync with current DMA address at start or after exception */
static void
resync(struct tiger_ch *bc, struct tiger_hw *card)
{
card->send.dmacur = inl(card->base | NJ_DMA_READ_ADR);
card->send.idx = (card->send.dmacur - card->send.dmastart) >> 2;
if (bc->free > card->send.size / 2)
bc->free = card->send.size / 2;
/* Currently we simply sync to the next completely free area;
 * this has the advantage that we always have the maximum time
 * to handle the TX IRQ.
 */
if (card->send.idx < ((card->send.size / 2) - 1))
bc->idx = (card->recv.size / 2) - 1;
else
bc->idx = card->recv.size - 1;
bc->txstate = TX_RUN;
pr_debug("%s: %s B%1d free %d idx %d/%d\n", card->name,
__func__, bc->bch.nr, bc->free, bc->idx, card->send.idx);
}
static int bc_next_frame(struct tiger_ch *);
static void
fill_hdlc_flag(struct tiger_ch *bc)
{
struct tiger_hw *card = bc->bch.hw;
int count, i;
u32 m, v;
u8 *p;
if (bc->free == 0)
return;
pr_debug("%s: %s B%1d %d state %x idx %d/%d\n", card->name,
__func__, bc->bch.nr, bc->free, bc->txstate,
bc->idx, card->send.idx);
if (bc->txstate & (TX_IDLE | TX_INIT | TX_UNDERRUN))
resync(bc, card);
count = isdnhdlc_encode(&bc->hsend, NULL, 0, &i,
bc->hsbuf, bc->free);
pr_debug("%s: B%1d hdlc encoded %d flags\n", card->name,
bc->bch.nr, count);
bc->free -= count;
p = bc->hsbuf;
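/* write only this channel's byte lane of each DMA word (see fill_dma) */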
m = (bc->bch.nr & 1) ? 0xffffff00 : 0xffff00ff;
for (i = 0; i < count; i++) {
if (bc->idx >= card->send.size)
bc->idx = 0;
v = card->send.start[bc->idx];
v &= m;
v |= (bc->bch.nr & 1) ? (u32)(p[i]) : ((u32)(p[i])) << 8;
card->send.start[bc->idx++] = v;
}
if (debug & DEBUG_HW_BFIFO) {
snprintf(card->log, LOG_SIZE, "B%1d-send %s %d ",
bc->bch.nr, card->name, count);
print_hex_dump_bytes(card->log, DUMP_PREFIX_OFFSET, p, count);
}
}
static void
fill_dma(struct tiger_ch *bc)
{
struct tiger_hw *card = bc->bch.hw;
int count, i, fillempty = 0;
u32 m, v, n = 0;
u8 *p;
if (bc->free == 0)
return;
if (!bc->bch.tx_skb) {
if (!test_bit(FLG_TX_EMPTY, &bc->bch.Flags))
return;
fillempty = 1;
count = card->send.size >> 1;
p = bc->bch.fill;
} else {
count = bc->bch.tx_skb->len - bc->bch.tx_idx;
if (count <= 0)
return;
pr_debug("%s: %s B%1d %d/%d/%d/%d state %x idx %d/%d\n",
card->name, __func__, bc->bch.nr, count, bc->free,
bc->bch.tx_idx, bc->bch.tx_skb->len, bc->txstate,
bc->idx, card->send.idx);
p = bc->bch.tx_skb->data + bc->bch.tx_idx;
}
if (bc->txstate & (TX_IDLE | TX_INIT | TX_UNDERRUN))
resync(bc, card);
if (test_bit(FLG_HDLC, &bc->bch.Flags) && !fillempty) {
count = isdnhdlc_encode(&bc->hsend, p, count, &i,
bc->hsbuf, bc->free);
pr_debug("%s: B%1d hdlc encoded %d in %d\n", card->name,
bc->bch.nr, i, count);
bc->bch.tx_idx += i;
bc->free -= count;
p = bc->hsbuf;
} else {
if (count > bc->free)
count = bc->free;
if (!fillempty)
bc->bch.tx_idx += count;
bc->free -= count;
}
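/*
* the mask keeps the other channel's byte of each 32 bit DMA word and
* clears our own byte lane (B1 = low byte, B2 = bits 8-15) before the
* new data is or'ed in
*/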
m = (bc->bch.nr & 1) ? 0xffffff00 : 0xffff00ff;
if (fillempty) {
n = p[0];
if (!(bc->bch.nr & 1))
n <<= 8;
for (i = 0; i < count; i++) {
if (bc->idx >= card->send.size)
bc->idx = 0;
v = card->send.start[bc->idx];
v &= m;
v |= n;
card->send.start[bc->idx++] = v;
}
} else {
for (i = 0; i < count; i++) {
if (bc->idx >= card->send.size)
bc->idx = 0;
v = card->send.start[bc->idx];
v &= m;
n = p[i];
v |= (bc->bch.nr & 1) ? n : n << 8;
card->send.start[bc->idx++] = v;
}
}
if (debug & DEBUG_HW_BFIFO) {
snprintf(card->log, LOG_SIZE, "B%1d-send %s %d ",
bc->bch.nr, card->name, count);
print_hex_dump_bytes(card->log, DUMP_PREFIX_OFFSET, p, count);
}
if (bc->free)
bc_next_frame(bc);
}
static int
bc_next_frame(struct tiger_ch *bc)
{
int ret = 1;
if (bc->bch.tx_skb && bc->bch.tx_idx < bc->bch.tx_skb->len) {
fill_dma(bc);
} else {
dev_kfree_skb(bc->bch.tx_skb);
if (get_next_bframe(&bc->bch)) {
fill_dma(bc);
test_and_clear_bit(FLG_TX_EMPTY, &bc->bch.Flags);
} else if (test_bit(FLG_TX_EMPTY, &bc->bch.Flags)) {
fill_dma(bc);
} else if (test_bit(FLG_FILLEMPTY, &bc->bch.Flags)) {
test_and_set_bit(FLG_TX_EMPTY, &bc->bch.Flags);
ret = 0;
} else {
ret = 0;
}
}
return ret;
}
static void
send_tiger_bc(struct tiger_hw *card, struct tiger_ch *bc)
{
int ret;
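/*
* a TX DMA interrupt means the controller has consumed another half of
* the send buffer, so that half becomes free again; if the whole buffer
* is free we did not refill it in time and report a TX underrun below
*/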
bc->free += card->send.size / 2;
if (bc->free >= card->send.size) {
if (!(bc->txstate & (TX_UNDERRUN | TX_INIT))) {
pr_info("%s: B%1d TX underrun state %x\n", card->name,
bc->bch.nr, bc->txstate);
bc->txstate |= TX_UNDERRUN;
}
bc->free = card->send.size;
}
ret = bc_next_frame(bc);
if (!ret) {
if (test_bit(FLG_HDLC, &bc->bch.Flags)) {
fill_hdlc_flag(bc);
return;
}
pr_debug("%s: B%1d TX no data free %d idx %d/%d\n", card->name,
bc->bch.nr, bc->free, bc->idx, card->send.idx);
if (!(bc->txstate & (TX_IDLE | TX_INIT))) {
fill_mem(bc, bc->idx, bc->free, 0xff);
if (bc->free == card->send.size)
bc->txstate |= TX_IDLE;
}
}
}
static void
send_tiger(struct tiger_hw *card, u8 irq_stat)
{
int i;
/* Note send is via the READ DMA channel */
if ((irq_stat & card->last_is0) & NJ_IRQM0_RD_MASK) {
pr_info("%s: tiger warn write double dma %x/%x\n",
card->name, irq_stat, card->last_is0);
return;
} else {
card->last_is0 &= ~NJ_IRQM0_RD_MASK;
card->last_is0 |= (irq_stat & NJ_IRQM0_RD_MASK);
}
for (i = 0; i < 2; i++) {
if (test_bit(FLG_ACTIVE, &card->bc[i].bch.Flags))
send_tiger_bc(card, &card->bc[i]);
}
}
static irqreturn_t
nj_irq(int intno, void *dev_id)
{
struct tiger_hw *card = dev_id;
u8 val, s1val, s0val;
spin_lock(&card->lock);
s0val = inb(card->base | NJ_IRQSTAT0);
s1val = inb(card->base | NJ_IRQSTAT1);
if ((s1val & NJ_ISACIRQ) && (s0val == 0)) {
/* shared IRQ */
spin_unlock(&card->lock);
return IRQ_NONE;
}
pr_debug("%s: IRQSTAT0 %02x IRQSTAT1 %02x\n", card->name, s0val, s1val);
card->irqcnt++;
if (!(s1val & NJ_ISACIRQ)) {
val = ReadISAC_nj(card, ISAC_ISTA);
if (val)
mISDNisac_irq(&card->isac, val);
}
if (s0val)
/* write to clear */
outb(s0val, card->base | NJ_IRQSTAT0);
else
goto end;
s1val = s0val;
/* set bits in s0val to indicate which page is free */
card->recv.dmacur = inl(card->base | NJ_DMA_WRITE_ADR);
card->recv.idx = (card->recv.dmacur - card->recv.dmastart) >> 2;
if (card->recv.dmacur < card->recv.dmairq)
s0val = 0x08; /* the 2nd write area is free */
else
s0val = 0x04; /* the 1st write area is free */
card->send.dmacur = inl(card->base | NJ_DMA_READ_ADR);
card->send.idx = (card->send.dmacur - card->send.dmastart) >> 2;
if (card->send.dmacur < card->send.dmairq)
s0val |= 0x02; /* the 2nd read area is free */
else
s0val |= 0x01; /* the 1st read area is free */
pr_debug("%s: DMA Status %02x/%02x/%02x %d/%d\n", card->name,
s1val, s0val, card->last_is0,
card->recv.idx, card->send.idx);
/* test if we have a DMA interrupt */
if (s0val != card->last_is0) {
if ((s0val & NJ_IRQM0_RD_MASK) !=
(card->last_is0 & NJ_IRQM0_RD_MASK))
/* read DMA (send path) interrupt */
send_tiger(card, s0val);
if ((s0val & NJ_IRQM0_WR_MASK) !=
(card->last_is0 & NJ_IRQM0_WR_MASK))
/* write DMA (receive path) interrupt */
recv_tiger(card, s0val);
}
end:
spin_unlock(&card->lock);
return IRQ_HANDLED;
}
static int
nj_l2l1B(struct mISDNchannel *ch, struct sk_buff *skb)
{
int ret = -EINVAL;
struct bchannel *bch = container_of(ch, struct bchannel, ch);
struct tiger_ch *bc = container_of(bch, struct tiger_ch, bch);
struct tiger_hw *card = bch->hw;
struct mISDNhead *hh = mISDN_HEAD_P(skb);
unsigned long flags;
switch (hh->prim) {
case PH_DATA_REQ:
spin_lock_irqsave(&card->lock, flags);
ret = bchannel_senddata(bch, skb);
if (ret > 0) { /* direct TX */
fill_dma(bc);
ret = 0;
}
spin_unlock_irqrestore(&card->lock, flags);
return ret;
case PH_ACTIVATE_REQ:
spin_lock_irqsave(&card->lock, flags);
if (!test_and_set_bit(FLG_ACTIVE, &bch->Flags))
ret = mode_tiger(bc, ch->protocol);
else
ret = 0;
spin_unlock_irqrestore(&card->lock, flags);
if (!ret)
_queue_data(ch, PH_ACTIVATE_IND, MISDN_ID_ANY, 0,
NULL, GFP_KERNEL);
break;
case PH_DEACTIVATE_REQ:
spin_lock_irqsave(&card->lock, flags);
mISDN_clear_bchannel(bch);
mode_tiger(bc, ISDN_P_NONE);
spin_unlock_irqrestore(&card->lock, flags);
_queue_data(ch, PH_DEACTIVATE_IND, MISDN_ID_ANY, 0,
NULL, GFP_KERNEL);
ret = 0;
break;
}
if (!ret)
dev_kfree_skb(skb);
return ret;
}
static int
channel_bctrl(struct tiger_ch *bc, struct mISDN_ctrl_req *cq)
{
return mISDN_ctrl_bchannel(&bc->bch, cq);
}
static int
nj_bctrl(struct mISDNchannel *ch, u32 cmd, void *arg)
{
struct bchannel *bch = container_of(ch, struct bchannel, ch);
struct tiger_ch *bc = container_of(bch, struct tiger_ch, bch);
struct tiger_hw *card = bch->hw;
int ret = -EINVAL;
u_long flags;
pr_debug("%s: %s cmd:%x %p\n", card->name, __func__, cmd, arg);
switch (cmd) {
case CLOSE_CHANNEL:
test_and_clear_bit(FLG_OPEN, &bch->Flags);
cancel_work_sync(&bch->workq);
spin_lock_irqsave(&card->lock, flags);
mISDN_clear_bchannel(bch);
mode_tiger(bc, ISDN_P_NONE);
spin_unlock_irqrestore(&card->lock, flags);
ch->protocol = ISDN_P_NONE;
ch->peer = NULL;
module_put(THIS_MODULE);
ret = 0;
break;
case CONTROL_CHANNEL:
ret = channel_bctrl(bc, arg);
break;
default:
pr_info("%s: %s unknown prim(%x)\n", card->name, __func__, cmd);
}
return ret;
}
static int
channel_ctrl(struct tiger_hw *card, struct mISDN_ctrl_req *cq)
{
int ret = 0;
switch (cq->op) {
case MISDN_CTRL_GETOP:
cq->op = MISDN_CTRL_LOOP | MISDN_CTRL_L1_TIMER3;
break;
case MISDN_CTRL_LOOP:
/* cq->channel: 0 disable, 1 B1 loop, 2 B2 loop, 3 both */
if (cq->channel < 0 || cq->channel > 3) {
ret = -EINVAL;
break;
}
ret = card->isac.ctrl(&card->isac, HW_TESTLOOP, cq->channel);
break;
case MISDN_CTRL_L1_TIMER3:
ret = card->isac.ctrl(&card->isac, HW_TIMER3_VALUE, cq->p1);
break;
default:
pr_info("%s: %s unknown Op %x\n", card->name, __func__, cq->op);
ret = -EINVAL;
break;
}
return ret;
}
static int
open_bchannel(struct tiger_hw *card, struct channel_req *rq)
{
struct bchannel *bch;
if (rq->adr.channel == 0 || rq->adr.channel > 2)
return -EINVAL;
if (rq->protocol == ISDN_P_NONE)
return -EINVAL;
bch = &card->bc[rq->adr.channel - 1].bch;
if (test_and_set_bit(FLG_OPEN, &bch->Flags))
return -EBUSY; /* b-channel can only be opened once */
test_and_clear_bit(FLG_FILLEMPTY, &bch->Flags);
bch->ch.protocol = rq->protocol;
rq->ch = &bch->ch;
return 0;
}
/*
* device control function
*/
static int
nj_dctrl(struct mISDNchannel *ch, u32 cmd, void *arg)
{
struct mISDNdevice *dev = container_of(ch, struct mISDNdevice, D);
struct dchannel *dch = container_of(dev, struct dchannel, dev);
struct tiger_hw *card = dch->hw;
struct channel_req *rq;
int err = 0;
pr_debug("%s: %s cmd:%x %p\n", card->name, __func__, cmd, arg);
switch (cmd) {
case OPEN_CHANNEL:
rq = arg;
if (rq->protocol == ISDN_P_TE_S0)
err = card->isac.open(&card->isac, rq);
else
err = open_bchannel(card, rq);
if (err)
break;
if (!try_module_get(THIS_MODULE))
pr_info("%s: cannot get module\n", card->name);
break;
case CLOSE_CHANNEL:
pr_debug("%s: dev(%d) close from %p\n", card->name, dch->dev.id,
__builtin_return_address(0));
module_put(THIS_MODULE);
break;
case CONTROL_CHANNEL:
err = channel_ctrl(card, arg);
break;
default:
pr_debug("%s: %s unknown command %x\n",
card->name, __func__, cmd);
return -EINVAL;
}
return err;
}
static int
nj_init_card(struct tiger_hw *card)
{
u_long flags;
int ret;
spin_lock_irqsave(&card->lock, flags);
nj_disable_hwirq(card);
spin_unlock_irqrestore(&card->lock, flags);
card->irq = card->pdev->irq;
if (request_irq(card->irq, nj_irq, IRQF_SHARED, card->name, card)) {
pr_info("%s: couldn't get interrupt %d\n",
card->name, card->irq);
card->irq = -1;
return -EIO;
}
spin_lock_irqsave(&card->lock, flags);
nj_reset(card);
ret = card->isac.init(&card->isac);
if (ret)
goto error;
ret = inittiger(card);
if (ret)
goto error;
mode_tiger(&card->bc[0], ISDN_P_NONE);
mode_tiger(&card->bc[1], ISDN_P_NONE);
error:
spin_unlock_irqrestore(&card->lock, flags);
return ret;
}
static void
nj_release(struct tiger_hw *card)
{
u_long flags;
int i;
if (card->base_s) {
spin_lock_irqsave(&card->lock, flags);
nj_disable_hwirq(card);
mode_tiger(&card->bc[0], ISDN_P_NONE);
mode_tiger(&card->bc[1], ISDN_P_NONE);
spin_unlock_irqrestore(&card->lock, flags);
card->isac.release(&card->isac);
release_region(card->base, card->base_s);
card->base_s = 0;
}
if (card->irq > 0)
free_irq(card->irq, card);
if (device_is_registered(&card->isac.dch.dev.dev))
mISDN_unregister_device(&card->isac.dch.dev);
for (i = 0; i < 2; i++) {
mISDN_freebchannel(&card->bc[i].bch);
kfree(card->bc[i].hsbuf);
kfree(card->bc[i].hrbuf);
}
if (card->dma_p)
dma_free_coherent(&card->pdev->dev, NJ_DMA_SIZE, card->dma_p,
card->dma);
write_lock_irqsave(&card_lock, flags);
list_del(&card->list);
write_unlock_irqrestore(&card_lock, flags);
pci_disable_device(card->pdev);
pci_set_drvdata(card->pdev, NULL);
kfree(card);
}
static int
nj_setup(struct tiger_hw *card)
{
card->base = pci_resource_start(card->pdev, 0);
card->base_s = pci_resource_len(card->pdev, 0);
if (!request_region(card->base, card->base_s, card->name)) {
pr_info("%s: NETjet config port %#x-%#x already in use\n",
card->name, card->base,
(u32)(card->base + card->base_s - 1));
card->base_s = 0;
return -EIO;
}
ASSIGN_FUNC(nj, ISAC, card->isac);
return 0;
}
static int
setup_instance(struct tiger_hw *card)
{
int i, err;
u_long flags;
snprintf(card->name, MISDN_MAX_IDLEN - 1, "netjet.%d", nj_cnt + 1);
write_lock_irqsave(&card_lock, flags);
list_add_tail(&card->list, &Cards);
write_unlock_irqrestore(&card_lock, flags);
_set_debug(card);
card->isac.name = card->name;
spin_lock_init(&card->lock);
card->isac.hwlock = &card->lock;
mISDNisac_init(&card->isac, card);
card->isac.dch.dev.Bprotocols = (1 << (ISDN_P_B_RAW & ISDN_P_B_MASK)) |
(1 << (ISDN_P_B_HDLC & ISDN_P_B_MASK));
card->isac.dch.dev.D.ctrl = nj_dctrl;
for (i = 0; i < 2; i++) {
card->bc[i].bch.nr = i + 1;
set_channelmap(i + 1, card->isac.dch.dev.channelmap);
mISDN_initbchannel(&card->bc[i].bch, MAX_DATA_MEM,
NJ_DMA_RXSIZE >> 1);
card->bc[i].bch.hw = card;
card->bc[i].bch.ch.send = nj_l2l1B;
card->bc[i].bch.ch.ctrl = nj_bctrl;
card->bc[i].bch.ch.nr = i + 1;
list_add(&card->bc[i].bch.ch.list,
&card->isac.dch.dev.bchannels);
card->bc[i].bch.hw = card;
}
err = nj_setup(card);
if (err)
goto error;
err = mISDN_register_device(&card->isac.dch.dev, &card->pdev->dev,
card->name);
if (err)
goto error;
err = nj_init_card(card);
if (!err) {
nj_cnt++;
pr_notice("Netjet %d cards installed\n", nj_cnt);
return 0;
}
error:
nj_release(card);
return err;
}
static int
nj_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
int err = -ENOMEM;
u32 cfg;
struct tiger_hw *card;
if (pdev->subsystem_vendor == 0x8086 &&
pdev->subsystem_device == 0x0003) {
pr_notice("Netjet: Digium X100P/X101P not handled\n");
return -ENODEV;
}
if (pdev->subsystem_vendor == 0x55 &&
pdev->subsystem_device == 0x02) {
pr_notice("Netjet: Enter!Now not handled yet\n");
return -ENODEV;
}
if (pdev->subsystem_vendor == 0xb100 &&
pdev->subsystem_device == 0x0003) {
pr_notice("Netjet: Digium TDM400P not handled yet\n");
return -ENODEV;
}
card = kzalloc(sizeof(struct tiger_hw), GFP_KERNEL);
if (!card) {
pr_info("No kmem for Netjet\n");
return err;
}
card->pdev = pdev;
err = pci_enable_device(pdev);
if (err) {
kfree(card);
return err;
}
printk(KERN_INFO "nj_probe(mISDN): found adapter at %s\n",
pci_name(pdev));
pci_set_master(pdev);
/* the TJ300 and TJ320 must be distinguished because their IRQ handling
* differs; unfortunately the chips use the same device ID, but the TJ320
* has bit 20 set in the PCI status config register
*/
pci_read_config_dword(pdev, 0x04, &cfg);
if (cfg & 0x00100000)
card->typ = NETJET_S_TJ320;
else
card->typ = NETJET_S_TJ300;
card->base = pci_resource_start(pdev, 0);
pci_set_drvdata(pdev, card);
err = setup_instance(card);
if (err)
pci_set_drvdata(pdev, NULL);
return err;
}
static void nj_remove(struct pci_dev *pdev)
{
struct tiger_hw *card = pci_get_drvdata(pdev);
if (card)
nj_release(card);
else
pr_info("%s drvdata already removed\n", __func__);
}
/* We cannot select cards with PCI_SUB... IDs, since there are cards with
* SUB IDs set to PCI_ANY_ID, so we need to match all and reject the
* known other cards which do not work with this driver - see the probe function */
static const struct pci_device_id nj_pci_ids[] = {
{ PCI_VENDOR_ID_TIGERJET, PCI_DEVICE_ID_TIGERJET_300,
PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
{ }
};
MODULE_DEVICE_TABLE(pci, nj_pci_ids);
static struct pci_driver nj_driver = {
.name = "netjet",
.probe = nj_probe,
.remove = nj_remove,
.id_table = nj_pci_ids,
};
static int __init nj_init(void)
{
int err;
pr_notice("Netjet PCI driver Rev. %s\n", NETJET_REV);
err = pci_register_driver(&nj_driver);
return err;
}
static void __exit nj_cleanup(void)
{
pci_unregister_driver(&nj_driver);
}
module_init(nj_init);
module_exit(nj_cleanup);
| linux-master | drivers/isdn/hardware/mISDN/netjet.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* hfcmulti.c low level driver for hfc-4s/hfc-8s/hfc-e1 based cards
*
* Author Andreas Eversberg ([email protected])
* ported to mqueue mechanism:
* Peter Sprenger (sprengermoving-bytes.de)
*
* inspired by existing hfc-pci driver:
* Copyright 1999 by Werner Cornelius ([email protected])
* Copyright 2008 by Karsten Keil ([email protected])
* Copyright 2008 by Andreas Eversberg ([email protected])
*
* Thanks to Cologne Chip AG for this great controller!
*/
/*
* module parameters:
* type:
* By default (0), the card is automatically detected.
* Or use the following combinations:
* Bit 0-7 = 0x00001 = HFC-E1 (1 port)
* or Bit 0-7 = 0x00004 = HFC-4S (4 ports)
* or Bit 0-7 = 0x00008 = HFC-8S (8 ports)
* Bit 8 = 0x00100 = uLaw (instead of aLaw)
* Bit 9 = 0x00200 = Disable DTMF detect on all B-channels via hardware
* Bit 10 = spare
* Bit 11 = 0x00800 = Force PCM bus into slave mode. (otherwise auto)
* or Bit 12 = 0x01000 = Force PCM bus into master mode. (otherwise auto)
* Bit 13 = spare
* Bit 14 = 0x04000 = Use external ram (128K)
* Bit 15 = 0x08000 = Use external ram (512K)
* Bit 16 = 0x10000 = Use 64 timeslots instead of 32
* or Bit 17 = 0x20000 = Use 128 timeslots instead of anything else
* Bit 18 = spare
* Bit 19 = 0x80000 = Send the Watchdog a Signal (Dual E1 with Watchdog)
* (all other bits are reserved and shall be 0)
* example: 0x20204 one HFC-4S with dtmf detection and 128 timeslots on PCM
* bus (PCM master)
*
* port: (optional or required for all ports on all installed cards)
* HFC-4S/HFC-8S only bits:
* Bit 0 = 0x001 = Use master clock for this S/T interface
* (only once per chip).
* Bit 1 = 0x002 = transmitter line setup (non-capacitive mode)
* Don't use this unless you know what you are doing!
* Bit 2 = 0x004 = Disable E-channel. (No E-channel processing)
* example: 0x0001,0x0000,0x0000,0x0000 one HFC-4S with master clock
* received from port 1
*
* HFC-E1 only bits:
* Bit 0 = 0x0001 = interface: 0=copper, 1=optical
* Bit 1 = 0x0002 = reserved (later for 32 B-channels transparent mode)
* Bit 2 = 0x0004 = Report LOS
* Bit 3 = 0x0008 = Report AIS
* Bit 4 = 0x0010 = Report SLIP
* Bit 5 = 0x0020 = Report RDI
* Bit 8 = 0x0100 = Turn off CRC-4 Multiframe Mode, use double frame
* mode instead.
* Bit 9 = 0x0200 = Force get clock from interface, even in NT mode.
* or Bit 10 = 0x0400 = Force put clock to interface, even in TE mode.
* Bit 11 = 0x0800 = Use direct RX clock for PCM sync rather than PLL.
* (E1 only)
* Bit 12-13 = 0xX000 = elastic jitter buffer (1-3), Set both bits to 0
* for default.
* (all other bits are reserved and shall be 0)
*
* debug:
* NOTE: only one debug value must be given for all cards
* enable debugging (see hfc_multi.h for debug options)
*
* poll:
* NOTE: only one poll value must be given for all cards
* Give the number of samples for each fifo process.
* By default 128 is used. Decrease to reduce delay, increase to
* reduce cpu load. If unsure, don't mess with it!
* Valid values are 8, 16, 32, 64, 128, 256.
*
* pcm:
* NOTE: only one pcm value must be given for every card.
* The PCM bus id tells the mISDNdsp module about the connected PCM bus.
* By default (0), the PCM bus id is 100 for the card that is PCM master.
* If multiple cards are PCM master (because they are not interconnected),
* each PCM master card will get an increasing PCM id.
* All PCM busses with the same ID are expected to be connected and have
* common time slots.
* Only one chip of the PCM bus must be master, the others slave.
* -1 means no PCM bus support at all.
* Omit this value, if all cards are interconnected or none is connected.
* If unsure, don't give this parameter.
*
* dmask and bmask:
* NOTE: One dmask value must be given for every HFC-E1 card.
* If omitted, the E1 card has D-channel on time slot 16, which is default.
* dmask is a 32 bit mask. The bit must be set for an alternate time slot.
* If multiple bits are set, multiple virtual card fragments are created.
* For each bit set, a bmask value must be given. Each bit on the bmask
* value stands for a B-channel. The bmask may not overlap with dmask or
* with other bmask values for that card.
* Example: dmask=0x00020002 bmask=0x0000fffc,0xfffc0000
* This will create one fragment with D-channel on slot 1 with
* B-channels on slots 2..15, and a second fragment with D-channel
* on slot 17 with B-channels on slot 18..31. Slot 16 is unused.
* If bit 0 is set (dmask=0x00000001) the D-channel is on slot 0 and will
* not function.
* Example: dmask=0x00000001 bmask=0xfffffffe
* This will create a port with all 31 usable timeslots as
* B-channels.
* If no bits are set on bmask, no B-channel is created for that fragment.
* Example: dmask=0xfffffffe bmask=0,0,0,0.... (31 0-values for bmask)
* This will create 31 ports with one D-channel only.
* If you don't know how to use it, you don't need it!
*
* iomode:
* NOTE: only one mode value must be given for every card.
* -> See hfc_multi.h for HFC_IO_MODE_* values
* By default, the IO mode is pci memory IO (MEMIO).
* Some cards require specific IO mode, so it cannot be changed.
* It may be useful to set IO mode to register io (REGIO) to solve
* PCI bridge problems.
* If unsure, don't give this parameter.
*
* clockdelay_nt:
* NOTE: only one clockdelay_nt value must be given once for all cards.
* Give the value of the clock control register (A_ST_CLK_DLY)
* of the S/T interfaces in NT mode.
* This register is needed for the TBR3 certification, so don't change it.
*
* clockdelay_te:
* NOTE: only one clockdelay_te value must be given once
* Give the value of the clock control register (A_ST_CLK_DLY)
* of the S/T interfaces in TE mode.
* This register is needed for the TBR3 certification, so don't change it.
*
* clock:
* NOTE: only one clock value must be given once
* Selects interface with clock source for mISDN and applications.
* Set to card number starting with 1. Set to -1 to disable.
* By default, the first card is used as clock source.
*
* hwid:
* NOTE: only one hwid value must be given once
* Enable special embedded devices with XHFC controllers.
*/
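/*
* Usage sketch (illustrative only, derived from the descriptions above):
* a single HFC-4S card taking its master clock from port 1 could be
* loaded roughly as
*      modprobe hfcmulti type=0x4 port=0x1,0x0,0x0,0x0
* assuming the module is built as "hfcmulti"; all other parameters keep
* their defaults.
*/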
/*
* debug register access (never use this, it will flood your system log)
* #define HFC_REGISTER_DEBUG
*/
#define HFC_MULTI_VERSION "2.03"
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/mISDNhw.h>
#include <linux/mISDNdsp.h>
/*
#define IRQCOUNT_DEBUG
#define IRQ_DEBUG
*/
#include "hfc_multi.h"
#ifdef ECHOPREP
#include "gaintab.h"
#endif
#define MAX_CARDS 8
#define MAX_PORTS (8 * MAX_CARDS)
#define MAX_FRAGS (32 * MAX_CARDS)
static LIST_HEAD(HFClist);
static DEFINE_SPINLOCK(HFClock); /* global hfc list lock */
static void ph_state_change(struct dchannel *);
static struct hfc_multi *syncmaster;
static int plxsd_master; /* if we have a master card (yet) */
static DEFINE_SPINLOCK(plx_lock); /* may not acquire other lock inside */
#define TYP_E1 1
#define TYP_4S 4
#define TYP_8S 8
static int poll_timer = 6; /* default = 128 samples = 16ms */
/* number of POLL_TIMER interrupts for G2 timeout (ca 1s) */
static int nt_t1_count[] = { 3840, 1920, 960, 480, 240, 120, 60, 30 };
#define CLKDEL_TE 0x0f /* CLKDEL in TE mode */
#define CLKDEL_NT 0x6c /* CLKDEL in NT mode
(0x60 MUST be included!) */
#define DIP_4S 0x1 /* DIP Switches for Beronet 1S/2S/4S cards */
#define DIP_8S 0x2 /* DIP Switches for Beronet 8S+ cards */
#define DIP_E1 0x3 /* DIP Switches for Beronet E1 cards */
/*
* module stuff
*/
static uint type[MAX_CARDS];
static int pcm[MAX_CARDS];
static uint dmask[MAX_CARDS];
static uint bmask[MAX_FRAGS];
static uint iomode[MAX_CARDS];
static uint port[MAX_PORTS];
static uint debug;
static uint poll;
static int clock;
static uint timer;
static uint clockdelay_te = CLKDEL_TE;
static uint clockdelay_nt = CLKDEL_NT;
#define HWID_NONE 0
#define HWID_MINIP4 1
#define HWID_MINIP8 2
#define HWID_MINIP16 3
static uint hwid = HWID_NONE;
static int HFC_cnt, E1_cnt, bmask_cnt, Port_cnt, PCM_cnt = 99;
MODULE_AUTHOR("Andreas Eversberg");
MODULE_LICENSE("GPL");
MODULE_VERSION(HFC_MULTI_VERSION);
module_param(debug, uint, S_IRUGO | S_IWUSR);
module_param(poll, uint, S_IRUGO | S_IWUSR);
module_param(clock, int, S_IRUGO | S_IWUSR);
module_param(timer, uint, S_IRUGO | S_IWUSR);
module_param(clockdelay_te, uint, S_IRUGO | S_IWUSR);
module_param(clockdelay_nt, uint, S_IRUGO | S_IWUSR);
module_param_array(type, uint, NULL, S_IRUGO | S_IWUSR);
module_param_array(pcm, int, NULL, S_IRUGO | S_IWUSR);
module_param_array(dmask, uint, NULL, S_IRUGO | S_IWUSR);
module_param_array(bmask, uint, NULL, S_IRUGO | S_IWUSR);
module_param_array(iomode, uint, NULL, S_IRUGO | S_IWUSR);
module_param_array(port, uint, NULL, S_IRUGO | S_IWUSR);
module_param(hwid, uint, S_IRUGO | S_IWUSR); /* The hardware ID */
#ifdef HFC_REGISTER_DEBUG
#define HFC_outb(hc, reg, val) \
(hc->HFC_outb(hc, reg, val, __func__, __LINE__))
#define HFC_outb_nodebug(hc, reg, val) \
(hc->HFC_outb_nodebug(hc, reg, val, __func__, __LINE__))
#define HFC_inb(hc, reg) \
(hc->HFC_inb(hc, reg, __func__, __LINE__))
#define HFC_inb_nodebug(hc, reg) \
(hc->HFC_inb_nodebug(hc, reg, __func__, __LINE__))
#define HFC_inw(hc, reg) \
(hc->HFC_inw(hc, reg, __func__, __LINE__))
#define HFC_inw_nodebug(hc, reg) \
(hc->HFC_inw_nodebug(hc, reg, __func__, __LINE__))
#define HFC_wait(hc) \
(hc->HFC_wait(hc, __func__, __LINE__))
#define HFC_wait_nodebug(hc) \
(hc->HFC_wait_nodebug(hc, __func__, __LINE__))
#else
#define HFC_outb(hc, reg, val) (hc->HFC_outb(hc, reg, val))
#define HFC_outb_nodebug(hc, reg, val) (hc->HFC_outb_nodebug(hc, reg, val))
#define HFC_inb(hc, reg) (hc->HFC_inb(hc, reg))
#define HFC_inb_nodebug(hc, reg) (hc->HFC_inb_nodebug(hc, reg))
#define HFC_inw(hc, reg) (hc->HFC_inw(hc, reg))
#define HFC_inw_nodebug(hc, reg) (hc->HFC_inw_nodebug(hc, reg))
#define HFC_wait(hc) (hc->HFC_wait(hc))
#define HFC_wait_nodebug(hc) (hc->HFC_wait_nodebug(hc))
#endif
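/*
* The HFC_* wrappers above dispatch through per-card function pointers, so
* the same code works with both memory mapped (PCIMEM) and register IO
* (REGIO) access modes; the *_debug variants additionally log register
* name, value and calling function.
*/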
#ifdef CONFIG_MISDN_HFCMULTI_8xx
#include "hfc_multi_8xx.h"
#endif
/* HFC_IO_MODE_PCIMEM */
static void
#ifdef HFC_REGISTER_DEBUG
HFC_outb_pcimem(struct hfc_multi *hc, u_char reg, u_char val,
const char *function, int line)
#else
HFC_outb_pcimem(struct hfc_multi *hc, u_char reg, u_char val)
#endif
{
writeb(val, hc->pci_membase + reg);
}
static u_char
#ifdef HFC_REGISTER_DEBUG
HFC_inb_pcimem(struct hfc_multi *hc, u_char reg, const char *function, int line)
#else
HFC_inb_pcimem(struct hfc_multi *hc, u_char reg)
#endif
{
return readb(hc->pci_membase + reg);
}
static u_short
#ifdef HFC_REGISTER_DEBUG
HFC_inw_pcimem(struct hfc_multi *hc, u_char reg, const char *function, int line)
#else
HFC_inw_pcimem(struct hfc_multi *hc, u_char reg)
#endif
{
return readw(hc->pci_membase + reg);
}
static void
#ifdef HFC_REGISTER_DEBUG
HFC_wait_pcimem(struct hfc_multi *hc, const char *function, int line)
#else
HFC_wait_pcimem(struct hfc_multi *hc)
#endif
{
while (readb(hc->pci_membase + R_STATUS) & V_BUSY)
cpu_relax();
}
/* HFC_IO_MODE_REGIO */
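/*
* In REGIO mode the target register address is first written to the CIP
* port at pci_iobase + 4, then the data is transferred through pci_iobase.
*/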
static void
#ifdef HFC_REGISTER_DEBUG
HFC_outb_regio(struct hfc_multi *hc, u_char reg, u_char val,
const char *function, int line)
#else
HFC_outb_regio(struct hfc_multi *hc, u_char reg, u_char val)
#endif
{
outb(reg, hc->pci_iobase + 4);
outb(val, hc->pci_iobase);
}
static u_char
#ifdef HFC_REGISTER_DEBUG
HFC_inb_regio(struct hfc_multi *hc, u_char reg, const char *function, int line)
#else
HFC_inb_regio(struct hfc_multi *hc, u_char reg)
#endif
{
outb(reg, hc->pci_iobase + 4);
return inb(hc->pci_iobase);
}
static u_short
#ifdef HFC_REGISTER_DEBUG
HFC_inw_regio(struct hfc_multi *hc, u_char reg, const char *function, int line)
#else
HFC_inw_regio(struct hfc_multi *hc, u_char reg)
#endif
{
outb(reg, hc->pci_iobase + 4);
return inw(hc->pci_iobase);
}
static void
#ifdef HFC_REGISTER_DEBUG
HFC_wait_regio(struct hfc_multi *hc, const char *function, int line)
#else
HFC_wait_regio(struct hfc_multi *hc)
#endif
{
outb(R_STATUS, hc->pci_iobase + 4);
while (inb(hc->pci_iobase) & V_BUSY)
cpu_relax();
}
#ifdef HFC_REGISTER_DEBUG
static void
HFC_outb_debug(struct hfc_multi *hc, u_char reg, u_char val,
const char *function, int line)
{
char regname[256] = "", bits[9] = "xxxxxxxx";
int i;
i = -1;
while (hfc_register_names[++i].name) {
if (hfc_register_names[i].reg == reg)
strcat(regname, hfc_register_names[i].name);
}
if (regname[0] == '\0')
strcpy(regname, "register");
bits[7] = '0' + (!!(val & 1));
bits[6] = '0' + (!!(val & 2));
bits[5] = '0' + (!!(val & 4));
bits[4] = '0' + (!!(val & 8));
bits[3] = '0' + (!!(val & 16));
bits[2] = '0' + (!!(val & 32));
bits[1] = '0' + (!!(val & 64));
bits[0] = '0' + (!!(val & 128));
printk(KERN_DEBUG
"HFC_outb(chip %d, %02x=%s, 0x%02x=%s); in %s() line %d\n",
hc->id, reg, regname, val, bits, function, line);
HFC_outb_nodebug(hc, reg, val);
}
static u_char
HFC_inb_debug(struct hfc_multi *hc, u_char reg, const char *function, int line)
{
char regname[256] = "", bits[9] = "xxxxxxxx";
u_char val = HFC_inb_nodebug(hc, reg);
int i;
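/*
* hfc_register_names contains two sections separated by a NULL name entry;
* the first loop skips the first section (the one walked by HFC_outb_debug
* above) so that the lookup below searches the read register names
*/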
i = 0;
while (hfc_register_names[i++].name)
;
while (hfc_register_names[++i].name) {
if (hfc_register_names[i].reg == reg)
strcat(regname, hfc_register_names[i].name);
}
if (regname[0] == '\0')
strcpy(regname, "register");
bits[7] = '0' + (!!(val & 1));
bits[6] = '0' + (!!(val & 2));
bits[5] = '0' + (!!(val & 4));
bits[4] = '0' + (!!(val & 8));
bits[3] = '0' + (!!(val & 16));
bits[2] = '0' + (!!(val & 32));
bits[1] = '0' + (!!(val & 64));
bits[0] = '0' + (!!(val & 128));
printk(KERN_DEBUG
"HFC_inb(chip %d, %02x=%s) = 0x%02x=%s; in %s() line %d\n",
hc->id, reg, regname, val, bits, function, line);
return val;
}
static u_short
HFC_inw_debug(struct hfc_multi *hc, u_char reg, const char *function, int line)
{
char regname[256] = "";
u_short val = HFC_inw_nodebug(hc, reg);
int i;
i = 0;
while (hfc_register_names[i++].name)
;
while (hfc_register_names[++i].name) {
if (hfc_register_names[i].reg == reg)
strcat(regname, hfc_register_names[i].name);
}
if (regname[0] == '\0')
strcpy(regname, "register");
printk(KERN_DEBUG
"HFC_inw(chip %d, %02x=%s) = 0x%04x; in %s() line %d\n",
hc->id, reg, regname, val, function, line);
return val;
}
static void
HFC_wait_debug(struct hfc_multi *hc, const char *function, int line)
{
printk(KERN_DEBUG "HFC_wait(chip %d); in %s() line %d\n",
hc->id, function, line);
HFC_wait_nodebug(hc);
}
#endif
/* write fifo data (REGIO) */
static void
write_fifo_regio(struct hfc_multi *hc, u_char *data, int len)
{
outb(A_FIFO_DATA0, (hc->pci_iobase) + 4);
while (len >> 2) {
outl(cpu_to_le32(*(u32 *)data), hc->pci_iobase);
data += 4;
len -= 4;
}
while (len >> 1) {
outw(cpu_to_le16(*(u16 *)data), hc->pci_iobase);
data += 2;
len -= 2;
}
while (len) {
outb(*data, hc->pci_iobase);
data++;
len--;
}
}
/* write fifo data (PCIMEM) */
static void
write_fifo_pcimem(struct hfc_multi *hc, u_char *data, int len)
{
while (len >> 2) {
writel(cpu_to_le32(*(u32 *)data),
hc->pci_membase + A_FIFO_DATA0);
data += 4;
len -= 4;
}
while (len >> 1) {
writew(cpu_to_le16(*(u16 *)data),
hc->pci_membase + A_FIFO_DATA0);
data += 2;
len -= 2;
}
while (len) {
writeb(*data, hc->pci_membase + A_FIFO_DATA0);
data++;
len--;
}
}
/* read fifo data (REGIO) */
static void
read_fifo_regio(struct hfc_multi *hc, u_char *data, int len)
{
outb(A_FIFO_DATA0, (hc->pci_iobase) + 4);
while (len >> 2) {
*(u32 *)data = le32_to_cpu(inl(hc->pci_iobase));
data += 4;
len -= 4;
}
while (len >> 1) {
*(u16 *)data = le16_to_cpu(inw(hc->pci_iobase));
data += 2;
len -= 2;
}
while (len) {
*data = inb(hc->pci_iobase);
data++;
len--;
}
}
/* read fifo data (PCIMEM) */
static void
read_fifo_pcimem(struct hfc_multi *hc, u_char *data, int len)
{
while (len >> 2) {
*(u32 *)data =
le32_to_cpu(readl(hc->pci_membase + A_FIFO_DATA0));
data += 4;
len -= 4;
}
while (len >> 1) {
*(u16 *)data =
le16_to_cpu(readw(hc->pci_membase + A_FIFO_DATA0));
data += 2;
len -= 2;
}
while (len) {
*data = readb(hc->pci_membase + A_FIFO_DATA0);
data++;
len--;
}
}
static void
enable_hwirq(struct hfc_multi *hc)
{
hc->hw.r_irq_ctrl |= V_GLOB_IRQ_EN;
HFC_outb(hc, R_IRQ_CTRL, hc->hw.r_irq_ctrl);
}
static void
disable_hwirq(struct hfc_multi *hc)
{
hc->hw.r_irq_ctrl &= ~((u_char)V_GLOB_IRQ_EN);
HFC_outb(hc, R_IRQ_CTRL, hc->hw.r_irq_ctrl);
}
#define NUM_EC 2
#define MAX_TDM_CHAN 32
static inline void
enablepcibridge(struct hfc_multi *c)
{
HFC_outb(c, R_BRG_PCM_CFG, (0x0 << 6) | 0x3); /* was _io before */
}
static inline void
disablepcibridge(struct hfc_multi *c)
{
HFC_outb(c, R_BRG_PCM_CFG, (0x0 << 6) | 0x2); /* was _io before */
}
static inline unsigned char
readpcibridge(struct hfc_multi *hc, unsigned char address)
{
unsigned short cipv;
unsigned char data;
if (!hc->pci_iobase)
return 0;
/* slow down a PCI read access by 1 PCI clock cycle */
HFC_outb(hc, R_CTRL, 0x4); /*was _io before*/
if (address == 0)
cipv = 0x4000;
else
cipv = 0x5800;
/* select local bridge port address by writing to CIP port */
/* data = HFC_inb(c, cipv); * was _io before */
outw(cipv, hc->pci_iobase + 4);
data = inb(hc->pci_iobase);
/* restore R_CTRL for normal PCI read cycle speed */
HFC_outb(hc, R_CTRL, 0x0); /* was _io before */
return data;
}
static inline void
writepcibridge(struct hfc_multi *hc, unsigned char address, unsigned char data)
{
unsigned short cipv;
unsigned int datav;
if (!hc->pci_iobase)
return;
if (address == 0)
cipv = 0x4000;
else
cipv = 0x5800;
/* select local bridge port address by writing to CIP port */
outw(cipv, hc->pci_iobase + 4);
/* define a 32 bit dword with 4 identical bytes for write sequence */
datav = data | ((__u32) data << 8) | ((__u32) data << 16) |
((__u32) data << 24);
/*
* write this 32 bit dword to the bridge data port
* this will initiate a write sequence of up to 4 writes to the same
* address on the local bus interface the number of write accesses
* is undefined but >=1 and depends on the next PCI transaction
* during write sequence on the local bus
*/
outl(datav, hc->pci_iobase);
}
static inline void
cpld_set_reg(struct hfc_multi *hc, unsigned char reg)
{
/* Do data pin read low byte */
HFC_outb(hc, R_GPIO_OUT1, reg);
}
static inline void
cpld_write_reg(struct hfc_multi *hc, unsigned char reg, unsigned char val)
{
cpld_set_reg(hc, reg);
enablepcibridge(hc);
writepcibridge(hc, 1, val);
disablepcibridge(hc);
return;
}
static inline void
vpm_write_address(struct hfc_multi *hc, unsigned short addr)
{
cpld_write_reg(hc, 0, 0xff & addr);
cpld_write_reg(hc, 1, 0x01 & (addr >> 8));
}
static inline unsigned char
vpm_in(struct hfc_multi *c, int which, unsigned short addr)
{
unsigned char res;
vpm_write_address(c, addr);
if (!which)
cpld_set_reg(c, 2);
else
cpld_set_reg(c, 3);
enablepcibridge(c);
res = readpcibridge(c, 1);
disablepcibridge(c);
cpld_set_reg(c, 0);
return res;
}
static inline void
vpm_out(struct hfc_multi *c, int which, unsigned short addr,
unsigned char data)
{
vpm_write_address(c, addr);
enablepcibridge(c);
if (!which)
cpld_set_reg(c, 2);
else
cpld_set_reg(c, 3);
writepcibridge(c, 1, data);
cpld_set_reg(c, 0);
disablepcibridge(c);
{
unsigned char regin;
regin = vpm_in(c, which, addr);
if (regin != data)
printk(KERN_DEBUG "Wrote 0x%x to register 0x%x but got back "
"0x%x\n", data, addr, regin);
}
}
static void
vpm_init(struct hfc_multi *wc)
{
unsigned char reg;
unsigned int mask;
unsigned int i, x, y;
unsigned int ver;
for (x = 0; x < NUM_EC; x++) {
/* Setup GPIO's */
if (!x) {
ver = vpm_in(wc, x, 0x1a0);
printk(KERN_DEBUG "VPM: Chip %d: ver %02x\n", x, ver);
}
for (y = 0; y < 4; y++) {
vpm_out(wc, x, 0x1a8 + y, 0x00); /* GPIO out */
vpm_out(wc, x, 0x1ac + y, 0x00); /* GPIO dir */
vpm_out(wc, x, 0x1b0 + y, 0x00); /* GPIO sel */
}
/* Setup TDM path - sets fsync and tdm_clk as inputs */
reg = vpm_in(wc, x, 0x1a3); /* misc_con */
vpm_out(wc, x, 0x1a3, reg & ~2);
/* Setup Echo length (256 taps) */
vpm_out(wc, x, 0x022, 1);
vpm_out(wc, x, 0x023, 0xff);
/* Setup timeslots */
vpm_out(wc, x, 0x02f, 0x00);
mask = 0x02020202 << (x * 4);
/* Setup the tdm channel masks for all chips */
for (i = 0; i < 4; i++)
vpm_out(wc, x, 0x33 - i, (mask >> (i << 3)) & 0xff);
/* Setup convergence rate */
printk(KERN_DEBUG "VPM: A-law mode\n");
reg = 0x00 | 0x10 | 0x01;
vpm_out(wc, x, 0x20, reg);
printk(KERN_DEBUG "VPM reg 0x20 is %x\n", reg);
/*vpm_out(wc, x, 0x20, (0x00 | 0x08 | 0x20 | 0x10)); */
vpm_out(wc, x, 0x24, 0x02);
reg = vpm_in(wc, x, 0x24);
printk(KERN_DEBUG "NLP Thresh is set to %d (0x%x)\n", reg, reg);
/* Initialize echo cans */
for (i = 0; i < MAX_TDM_CHAN; i++) {
if (mask & (0x00000001 << i))
vpm_out(wc, x, i, 0x00);
}
/*
* ARM arch at least disallows a udelay of
* more than 2ms... it gives a fake "__bad_udelay"
* reference at link-time.
* Long delays in kernel code are undesirable anyway, so for now
* work around it using 5 x 2ms instead of 1 x 10ms
*/
udelay(2000);
udelay(2000);
udelay(2000);
udelay(2000);
udelay(2000);
/* Put in bypass mode */
for (i = 0; i < MAX_TDM_CHAN; i++) {
if (mask & (0x00000001 << i))
vpm_out(wc, x, i, 0x01);
}
/* Enable bypass */
for (i = 0; i < MAX_TDM_CHAN; i++) {
if (mask & (0x00000001 << i))
vpm_out(wc, x, 0x78 + i, 0x01);
}
}
}
#ifdef UNUSED
static void
vpm_check(struct hfc_multi *hctmp)
{
unsigned char gpi2;
gpi2 = HFC_inb(hctmp, R_GPI_IN2);
if ((gpi2 & 0x3) != 0x3)
printk(KERN_DEBUG "Got interrupt 0x%x from VPM!\n", gpi2);
}
#endif /* UNUSED */
/*
* Interface to enable/disable the HW Echocan
*
* these functions are called within a spin_lock_irqsave on
* the channel instance lock, so we are not disturbed by irqs
*
* we can later easily change the interface to make other
* things configurable, for now we configure the taps
*
*/
static void
vpm_echocan_on(struct hfc_multi *hc, int ch, int taps)
{
unsigned int timeslot;
unsigned int unit;
struct bchannel *bch = hc->chan[ch].bch;
#ifdef TXADJ
int txadj = -4;
struct sk_buff *skb;
#endif
if (hc->chan[ch].protocol != ISDN_P_B_RAW)
return;
if (!bch)
return;
#ifdef TXADJ
skb = _alloc_mISDN_skb(PH_CONTROL_IND, HFC_VOL_CHANGE_TX,
sizeof(int), &txadj, GFP_ATOMIC);
if (skb)
recv_Bchannel_skb(bch, skb);
#endif
timeslot = ((ch / 4) * 8) + ((ch % 4) * 4) + 1;
unit = ch % 4;
printk(KERN_NOTICE "vpm_echocan_on called taps [%d] on timeslot %d\n",
taps, timeslot);
vpm_out(hc, unit, timeslot, 0x7e);
}
static void
vpm_echocan_off(struct hfc_multi *hc, int ch)
{
unsigned int timeslot;
unsigned int unit;
struct bchannel *bch = hc->chan[ch].bch;
#ifdef TXADJ
int txadj = 0;
struct sk_buff *skb;
#endif
if (hc->chan[ch].protocol != ISDN_P_B_RAW)
return;
if (!bch)
return;
#ifdef TXADJ
skb = _alloc_mISDN_skb(PH_CONTROL_IND, HFC_VOL_CHANGE_TX,
sizeof(int), &txadj, GFP_ATOMIC);
if (skb)
recv_Bchannel_skb(bch, skb);
#endif
timeslot = ((ch / 4) * 8) + ((ch % 4) * 4) + 1;
unit = ch % 4;
printk(KERN_NOTICE "vpm_echocan_off called on timeslot %d\n",
timeslot);
/* FILLME */
vpm_out(hc, unit, timeslot, 0x01);
}
/*
* Speech Design resync feature
* NOTE: This is sometimes called outside the interrupt handler.
* We must lock irqsave, so no other interrupt (other card) will occur!
* Also multiple interrupts may nest, so we must lock each access (lists, card)!
*/
static inline void
hfcmulti_resync(struct hfc_multi *locked, struct hfc_multi *newmaster, int rm)
{
struct hfc_multi *hc, *next, *pcmmaster = NULL;
void __iomem *plx_acc_32;
u_int pv;
u_long flags;
spin_lock_irqsave(&HFClock, flags);
spin_lock(&plx_lock); /* must be locked inside other locks */
if (debug & DEBUG_HFCMULTI_PLXSD)
printk(KERN_DEBUG "%s: RESYNC(syncmaster=0x%p)\n",
__func__, syncmaster);
/* select new master */
if (newmaster) {
if (debug & DEBUG_HFCMULTI_PLXSD)
printk(KERN_DEBUG "using provided controller\n");
} else {
list_for_each_entry_safe(hc, next, &HFClist, list) {
if (test_bit(HFC_CHIP_PLXSD, &hc->chip)) {
if (hc->syncronized) {
newmaster = hc;
break;
}
}
}
}
/* Disable sync of all cards */
list_for_each_entry_safe(hc, next, &HFClist, list) {
if (test_bit(HFC_CHIP_PLXSD, &hc->chip)) {
plx_acc_32 = hc->plx_membase + PLX_GPIOC;
pv = readl(plx_acc_32);
pv &= ~PLX_SYNC_O_EN;
writel(pv, plx_acc_32);
if (test_bit(HFC_CHIP_PCM_MASTER, &hc->chip)) {
pcmmaster = hc;
if (hc->ctype == HFC_TYPE_E1) {
if (debug & DEBUG_HFCMULTI_PLXSD)
printk(KERN_DEBUG
"Schedule SYNC_I\n");
hc->e1_resync |= 1; /* get SYNC_I */
}
}
}
}
if (newmaster) {
hc = newmaster;
if (debug & DEBUG_HFCMULTI_PLXSD)
printk(KERN_DEBUG "id=%d (0x%p) = syncronized with "
"interface.\n", hc->id, hc);
/* Enable new sync master */
plx_acc_32 = hc->plx_membase + PLX_GPIOC;
pv = readl(plx_acc_32);
pv |= PLX_SYNC_O_EN;
writel(pv, plx_acc_32);
/* switch to jatt PLL, if not disabled by RX_SYNC */
if (hc->ctype == HFC_TYPE_E1
&& !test_bit(HFC_CHIP_RX_SYNC, &hc->chip)) {
if (debug & DEBUG_HFCMULTI_PLXSD)
printk(KERN_DEBUG "Schedule jatt PLL\n");
hc->e1_resync |= 2; /* switch to jatt */
}
} else {
if (pcmmaster) {
hc = pcmmaster;
if (debug & DEBUG_HFCMULTI_PLXSD)
printk(KERN_DEBUG
"id=%d (0x%p) = PCM master syncronized "
"with QUARTZ\n", hc->id, hc);
if (hc->ctype == HFC_TYPE_E1) {
/* Use the crystal clock for the PCM
master card */
if (debug & DEBUG_HFCMULTI_PLXSD)
printk(KERN_DEBUG
"Schedule QUARTZ for HFC-E1\n");
hc->e1_resync |= 4; /* switch quartz */
} else {
if (debug & DEBUG_HFCMULTI_PLXSD)
printk(KERN_DEBUG
"QUARTZ is automatically "
"enabled by HFC-%dS\n", hc->ctype);
}
plx_acc_32 = hc->plx_membase + PLX_GPIOC;
pv = readl(plx_acc_32);
pv |= PLX_SYNC_O_EN;
writel(pv, plx_acc_32);
} else
if (!rm)
printk(KERN_ERR "%s no pcm master, this MUST "
"not happen!\n", __func__);
}
syncmaster = newmaster;
spin_unlock(&plx_lock);
spin_unlock_irqrestore(&HFClock, flags);
}
/* This must be called AND hc must be locked irqsave!!! */
static inline void
plxsd_checksync(struct hfc_multi *hc, int rm)
{
if (hc->syncronized) {
if (syncmaster == NULL) {
if (debug & DEBUG_HFCMULTI_PLXSD)
printk(KERN_DEBUG "%s: GOT sync on card %d"
" (id=%d)\n", __func__, hc->id + 1,
hc->id);
hfcmulti_resync(hc, hc, rm);
}
} else {
if (syncmaster == hc) {
if (debug & DEBUG_HFCMULTI_PLXSD)
printk(KERN_DEBUG "%s: LOST sync on card %d"
" (id=%d)\n", __func__, hc->id + 1,
hc->id);
hfcmulti_resync(hc, NULL, rm);
}
}
}
/*
* free hardware resources used by driver
*/
static void
release_io_hfcmulti(struct hfc_multi *hc)
{
void __iomem *plx_acc_32;
u_int pv;
u_long plx_flags;
if (debug & DEBUG_HFCMULTI_INIT)
printk(KERN_DEBUG "%s: entered\n", __func__);
/* soft reset also masks all interrupts */
hc->hw.r_cirm |= V_SRES;
HFC_outb(hc, R_CIRM, hc->hw.r_cirm);
udelay(1000);
hc->hw.r_cirm &= ~V_SRES;
HFC_outb(hc, R_CIRM, hc->hw.r_cirm);
udelay(1000); /* instead of 'wait' that may cause locking */
/* release Speech Design card, if PLX was initialized */
if (test_bit(HFC_CHIP_PLXSD, &hc->chip) && hc->plx_membase) {
if (debug & DEBUG_HFCMULTI_PLXSD)
printk(KERN_DEBUG "%s: release PLXSD card %d\n",
__func__, hc->id + 1);
spin_lock_irqsave(&plx_lock, plx_flags);
plx_acc_32 = hc->plx_membase + PLX_GPIOC;
writel(PLX_GPIOC_INIT, plx_acc_32);
pv = readl(plx_acc_32);
/* Termination off */
pv &= ~PLX_TERM_ON;
/* Disconnect the PCM */
pv |= PLX_SLAVE_EN_N;
pv &= ~PLX_MASTER_EN;
pv &= ~PLX_SYNC_O_EN;
/* Put the DSP in Reset */
pv &= ~PLX_DSP_RES_N;
writel(pv, plx_acc_32);
if (debug & DEBUG_HFCMULTI_INIT)
printk(KERN_DEBUG "%s: PCM off: PLX_GPIO=%x\n",
__func__, pv);
spin_unlock_irqrestore(&plx_lock, plx_flags);
}
/* disable memory mapped ports / io ports */
test_and_clear_bit(HFC_CHIP_PLXSD, &hc->chip); /* prevent resync */
if (hc->pci_dev)
pci_write_config_word(hc->pci_dev, PCI_COMMAND, 0);
if (hc->pci_membase)
iounmap(hc->pci_membase);
if (hc->plx_membase)
iounmap(hc->plx_membase);
if (hc->pci_iobase)
release_region(hc->pci_iobase, 8);
if (hc->xhfc_membase)
iounmap((void *)hc->xhfc_membase);
if (hc->pci_dev) {
pci_disable_device(hc->pci_dev);
pci_set_drvdata(hc->pci_dev, NULL);
}
if (debug & DEBUG_HFCMULTI_INIT)
printk(KERN_DEBUG "%s: done\n", __func__);
}
/*
* function called to reset the HFC chip. A complete software reset of chip
* and fifos is done. All configuration of the chip is done.
*/
static int
init_chip(struct hfc_multi *hc)
{
u_long flags, val, val2 = 0, rev;
int i, err = 0;
u_char r_conf_en, rval;
void __iomem *plx_acc_32;
u_int pv;
u_long plx_flags, hfc_flags;
int plx_count;
struct hfc_multi *pos, *next, *plx_last_hc;
spin_lock_irqsave(&hc->lock, flags);
/* reset all registers */
memset(&hc->hw, 0, sizeof(struct hfcm_hw));
/* revision check */
if (debug & DEBUG_HFCMULTI_INIT)
printk(KERN_DEBUG "%s: entered\n", __func__);
val = HFC_inb(hc, R_CHIP_ID);
if ((val >> 4) != 0x8 && (val >> 4) != 0xc && (val >> 4) != 0xe &&
(val >> 1) != 0x31) {
printk(KERN_INFO "HFC_multi: unknown CHIP_ID:%x\n", (u_int)val);
err = -EIO;
goto out;
}
rev = HFC_inb(hc, R_CHIP_RV);
printk(KERN_INFO
"HFC_multi: detected HFC with chip ID=0x%lx revision=%ld%s\n",
val, rev, (rev == 0 && (hc->ctype != HFC_TYPE_XHFC)) ?
" (old FIFO handling)" : "");
if (hc->ctype != HFC_TYPE_XHFC && rev == 0) {
test_and_set_bit(HFC_CHIP_REVISION0, &hc->chip);
printk(KERN_WARNING
"HFC_multi: NOTE: Your chip is revision 0, "
"ask Cologne Chip for update. Newer chips "
"have a better FIFO handling. Old chips "
"still work but may have slightly lower "
"HDLC transmit performance.\n");
}
if (rev > 1) {
printk(KERN_WARNING "HFC_multi: WARNING: This driver doesn't "
"consider chip revision = %ld. The chip / "
"bridge may not work.\n", rev);
}
/* set s-ram size */
hc->Flen = 0x10;
hc->Zmin = 0x80;
hc->Zlen = 384;
hc->DTMFbase = 0x1000;
if (test_bit(HFC_CHIP_EXRAM_128, &hc->chip)) {
if (debug & DEBUG_HFCMULTI_INIT)
printk(KERN_DEBUG "%s: changing to 128K external RAM\n",
__func__);
hc->hw.r_ctrl |= V_EXT_RAM;
hc->hw.r_ram_sz = 1;
hc->Flen = 0x20;
hc->Zmin = 0xc0;
hc->Zlen = 1856;
hc->DTMFbase = 0x2000;
}
if (test_bit(HFC_CHIP_EXRAM_512, &hc->chip)) {
if (debug & DEBUG_HFCMULTI_INIT)
printk(KERN_DEBUG "%s: changing to 512K external RAM\n",
__func__);
hc->hw.r_ctrl |= V_EXT_RAM;
hc->hw.r_ram_sz = 2;
hc->Flen = 0x20;
hc->Zmin = 0xc0;
hc->Zlen = 8000;
hc->DTMFbase = 0x2000;
}
if (hc->ctype == HFC_TYPE_XHFC) {
hc->Flen = 0x8;
hc->Zmin = 0x0;
hc->Zlen = 64;
hc->DTMFbase = 0x0;
}
hc->max_trans = poll << 1;
if (hc->max_trans > hc->Zlen)
hc->max_trans = hc->Zlen;
/* Speech Design PLX bridge */
if (test_bit(HFC_CHIP_PLXSD, &hc->chip)) {
if (debug & DEBUG_HFCMULTI_PLXSD)
printk(KERN_DEBUG "%s: initializing PLXSD card %d\n",
__func__, hc->id + 1);
spin_lock_irqsave(&plx_lock, plx_flags);
plx_acc_32 = hc->plx_membase + PLX_GPIOC;
writel(PLX_GPIOC_INIT, plx_acc_32);
pv = readl(plx_acc_32);
/* The first and the last cards are terminating the PCM bus */
pv |= PLX_TERM_ON; /* hc is currently the last */
/* Disconnect the PCM */
pv |= PLX_SLAVE_EN_N;
pv &= ~PLX_MASTER_EN;
pv &= ~PLX_SYNC_O_EN;
/* Put the DSP in Reset */
pv &= ~PLX_DSP_RES_N;
writel(pv, plx_acc_32);
spin_unlock_irqrestore(&plx_lock, plx_flags);
if (debug & DEBUG_HFCMULTI_INIT)
printk(KERN_DEBUG "%s: slave/term: PLX_GPIO=%x\n",
__func__, pv);
/*
* If we are the 3rd PLXSD card or higher, we must turn off the
* termination of the last PLXSD card.
*/
spin_lock_irqsave(&HFClock, hfc_flags);
plx_count = 0;
plx_last_hc = NULL;
list_for_each_entry_safe(pos, next, &HFClist, list) {
if (test_bit(HFC_CHIP_PLXSD, &pos->chip)) {
plx_count++;
if (pos != hc)
plx_last_hc = pos;
}
}
if (plx_count >= 3) {
if (debug & DEBUG_HFCMULTI_PLXSD)
printk(KERN_DEBUG "%s: card %d is between, so "
"we disable termination\n",
__func__, plx_last_hc->id + 1);
spin_lock_irqsave(&plx_lock, plx_flags);
plx_acc_32 = plx_last_hc->plx_membase + PLX_GPIOC;
pv = readl(plx_acc_32);
pv &= ~PLX_TERM_ON;
writel(pv, plx_acc_32);
spin_unlock_irqrestore(&plx_lock, plx_flags);
if (debug & DEBUG_HFCMULTI_INIT)
printk(KERN_DEBUG
"%s: term off: PLX_GPIO=%x\n",
__func__, pv);
}
spin_unlock_irqrestore(&HFClock, hfc_flags);
hc->hw.r_pcm_md0 = V_F0_LEN; /* shift clock for DSP */
}
if (test_bit(HFC_CHIP_EMBSD, &hc->chip))
hc->hw.r_pcm_md0 = V_F0_LEN; /* shift clock for DSP */
/* we only want the real Z2 read-pointer for revision > 0 */
if (!test_bit(HFC_CHIP_REVISION0, &hc->chip))
hc->hw.r_ram_sz |= V_FZ_MD;
/* select pcm mode */
if (test_bit(HFC_CHIP_PCM_SLAVE, &hc->chip)) {
if (debug & DEBUG_HFCMULTI_INIT)
printk(KERN_DEBUG "%s: setting PCM into slave mode\n",
__func__);
} else
if (test_bit(HFC_CHIP_PCM_MASTER, &hc->chip) && !plxsd_master) {
if (debug & DEBUG_HFCMULTI_INIT)
printk(KERN_DEBUG "%s: setting PCM into master mode\n",
__func__);
hc->hw.r_pcm_md0 |= V_PCM_MD;
} else {
if (debug & DEBUG_HFCMULTI_INIT)
printk(KERN_DEBUG "%s: performing PCM auto detect\n",
__func__);
}
/* soft reset */
HFC_outb(hc, R_CTRL, hc->hw.r_ctrl);
if (hc->ctype == HFC_TYPE_XHFC)
HFC_outb(hc, 0x0C /* R_FIFO_THRES */,
0x11 /* 16 Bytes TX/RX */);
else
HFC_outb(hc, R_RAM_SZ, hc->hw.r_ram_sz);
HFC_outb(hc, R_FIFO_MD, 0);
if (hc->ctype == HFC_TYPE_XHFC)
hc->hw.r_cirm = V_SRES | V_HFCRES | V_PCMRES | V_STRES;
else
hc->hw.r_cirm = V_SRES | V_HFCRES | V_PCMRES | V_STRES
| V_RLD_EPR;
HFC_outb(hc, R_CIRM, hc->hw.r_cirm);
udelay(100);
hc->hw.r_cirm = 0;
HFC_outb(hc, R_CIRM, hc->hw.r_cirm);
udelay(100);
if (hc->ctype != HFC_TYPE_XHFC)
HFC_outb(hc, R_RAM_SZ, hc->hw.r_ram_sz);
/* Speech Design PLX bridge pcm and sync mode */
if (test_bit(HFC_CHIP_PLXSD, &hc->chip)) {
spin_lock_irqsave(&plx_lock, plx_flags);
plx_acc_32 = hc->plx_membase + PLX_GPIOC;
pv = readl(plx_acc_32);
/* Connect PCM */
if (hc->hw.r_pcm_md0 & V_PCM_MD) {
pv |= PLX_MASTER_EN | PLX_SLAVE_EN_N;
pv |= PLX_SYNC_O_EN;
if (debug & DEBUG_HFCMULTI_INIT)
printk(KERN_DEBUG "%s: master: PLX_GPIO=%x\n",
__func__, pv);
} else {
pv &= ~(PLX_MASTER_EN | PLX_SLAVE_EN_N);
pv &= ~PLX_SYNC_O_EN;
if (debug & DEBUG_HFCMULTI_INIT)
printk(KERN_DEBUG "%s: slave: PLX_GPIO=%x\n",
__func__, pv);
}
writel(pv, plx_acc_32);
spin_unlock_irqrestore(&plx_lock, plx_flags);
}
/* PCM setup */
HFC_outb(hc, R_PCM_MD0, hc->hw.r_pcm_md0 | 0x90);
if (hc->slots == 32)
HFC_outb(hc, R_PCM_MD1, 0x00);
if (hc->slots == 64)
HFC_outb(hc, R_PCM_MD1, 0x10);
if (hc->slots == 128)
HFC_outb(hc, R_PCM_MD1, 0x20);
HFC_outb(hc, R_PCM_MD0, hc->hw.r_pcm_md0 | 0xa0);
if (test_bit(HFC_CHIP_PLXSD, &hc->chip))
HFC_outb(hc, R_PCM_MD2, V_SYNC_SRC); /* sync via SYNC_I / O */
else if (test_bit(HFC_CHIP_EMBSD, &hc->chip))
HFC_outb(hc, R_PCM_MD2, 0x10); /* V_C2O_EN */
else
HFC_outb(hc, R_PCM_MD2, 0x00); /* sync from interface */
HFC_outb(hc, R_PCM_MD0, hc->hw.r_pcm_md0 | 0x00);
for (i = 0; i < 256; i++) {
HFC_outb_nodebug(hc, R_SLOT, i);
HFC_outb_nodebug(hc, A_SL_CFG, 0);
if (hc->ctype != HFC_TYPE_XHFC)
HFC_outb_nodebug(hc, A_CONF, 0);
hc->slot_owner[i] = -1;
}
/* set clock speed */
if (test_bit(HFC_CHIP_CLOCK2, &hc->chip)) {
if (debug & DEBUG_HFCMULTI_INIT)
printk(KERN_DEBUG
"%s: setting double clock\n", __func__);
HFC_outb(hc, R_BRG_PCM_CFG, V_PCM_CLK);
}
if (test_bit(HFC_CHIP_EMBSD, &hc->chip))
HFC_outb(hc, 0x02 /* R_CLK_CFG */, 0x40 /* V_CLKO_OFF */);
/* B410P GPIO */
if (test_bit(HFC_CHIP_B410P, &hc->chip)) {
printk(KERN_NOTICE "Setting GPIOs\n");
HFC_outb(hc, R_GPIO_SEL, 0x30);
HFC_outb(hc, R_GPIO_EN1, 0x3);
udelay(1000);
printk(KERN_NOTICE "calling vpm_init\n");
vpm_init(hc);
}
/* check if R_F0_CNT counts (8 kHz frame count) */
val = HFC_inb(hc, R_F0_CNTL);
val += HFC_inb(hc, R_F0_CNTH) << 8;
if (debug & DEBUG_HFCMULTI_INIT)
printk(KERN_DEBUG
"HFC_multi F0_CNT %ld after reset\n", val);
spin_unlock_irqrestore(&hc->lock, flags);
set_current_state(TASK_UNINTERRUPTIBLE);
schedule_timeout((HZ / 100) ? : 1); /* Timeout minimum 10ms */
spin_lock_irqsave(&hc->lock, flags);
val2 = HFC_inb(hc, R_F0_CNTL);
val2 += HFC_inb(hc, R_F0_CNTH) << 8;
if (debug & DEBUG_HFCMULTI_INIT)
printk(KERN_DEBUG
"HFC_multi F0_CNT %ld after 10 ms (1st try)\n",
val2);
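/*
* R_F0_CNT counts 8 kHz frames, so with a working PCM clock it should
* advance by roughly 80 during the 10 ms sleep above; at least 8 counts
* (1 ms worth) are required to consider the clock present
*/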
if (val2 >= val + 8) { /* 1 ms */
/* it counts, so we keep the pcm mode */
if (test_bit(HFC_CHIP_PCM_MASTER, &hc->chip))
printk(KERN_INFO "controller is PCM bus MASTER\n");
else
if (test_bit(HFC_CHIP_PCM_SLAVE, &hc->chip))
printk(KERN_INFO "controller is PCM bus SLAVE\n");
else {
test_and_set_bit(HFC_CHIP_PCM_SLAVE, &hc->chip);
printk(KERN_INFO "controller is PCM bus SLAVE "
"(auto detected)\n");
}
} else {
/* does not count */
if (test_bit(HFC_CHIP_PCM_MASTER, &hc->chip)) {
controller_fail:
printk(KERN_ERR "HFC_multi ERROR, getting no 125us "
"pulse. Seems that controller fails.\n");
err = -EIO;
goto out;
}
if (test_bit(HFC_CHIP_PCM_SLAVE, &hc->chip)) {
printk(KERN_INFO "controller is PCM bus SLAVE "
"(ignoring missing PCM clock)\n");
} else {
/* only one pcm master */
if (test_bit(HFC_CHIP_PLXSD, &hc->chip)
&& plxsd_master) {
printk(KERN_ERR "HFC_multi ERROR, no clock "
"on another Speech Design card found. "
"Please be sure to connect PCM cable.\n");
err = -EIO;
goto out;
}
/* retry with master clock */
if (test_bit(HFC_CHIP_PLXSD, &hc->chip)) {
spin_lock_irqsave(&plx_lock, plx_flags);
plx_acc_32 = hc->plx_membase + PLX_GPIOC;
pv = readl(plx_acc_32);
pv |= PLX_MASTER_EN | PLX_SLAVE_EN_N;
pv |= PLX_SYNC_O_EN;
writel(pv, plx_acc_32);
spin_unlock_irqrestore(&plx_lock, plx_flags);
if (debug & DEBUG_HFCMULTI_INIT)
printk(KERN_DEBUG "%s: master: "
"PLX_GPIO=%x\n", __func__, pv);
}
hc->hw.r_pcm_md0 |= V_PCM_MD;
HFC_outb(hc, R_PCM_MD0, hc->hw.r_pcm_md0 | 0x00);
spin_unlock_irqrestore(&hc->lock, flags);
set_current_state(TASK_UNINTERRUPTIBLE);
schedule_timeout((HZ / 100) ?: 1); /* Timeout min. 10ms */
spin_lock_irqsave(&hc->lock, flags);
val2 = HFC_inb(hc, R_F0_CNTL);
val2 += HFC_inb(hc, R_F0_CNTH) << 8;
if (debug & DEBUG_HFCMULTI_INIT)
printk(KERN_DEBUG "HFC_multi F0_CNT %ld after "
"10 ms (2nd try)\n", val2);
if (val2 >= val + 8) { /* 1 ms */
test_and_set_bit(HFC_CHIP_PCM_MASTER,
&hc->chip);
printk(KERN_INFO "controller is PCM bus MASTER "
"(auto detected)\n");
} else
goto controller_fail;
}
}
/* Release the DSP Reset */
if (test_bit(HFC_CHIP_PLXSD, &hc->chip)) {
if (test_bit(HFC_CHIP_PCM_MASTER, &hc->chip))
plxsd_master = 1;
spin_lock_irqsave(&plx_lock, plx_flags);
plx_acc_32 = hc->plx_membase + PLX_GPIOC;
pv = readl(plx_acc_32);
pv |= PLX_DSP_RES_N;
writel(pv, plx_acc_32);
spin_unlock_irqrestore(&plx_lock, plx_flags);
if (debug & DEBUG_HFCMULTI_INIT)
printk(KERN_DEBUG "%s: reset off: PLX_GPIO=%x\n",
__func__, pv);
}
/* pcm id */
if (hc->pcm)
printk(KERN_INFO "controller has given PCM BUS ID %d\n",
hc->pcm);
else {
if (test_bit(HFC_CHIP_PCM_MASTER, &hc->chip)
|| test_bit(HFC_CHIP_PLXSD, &hc->chip)) {
PCM_cnt++; /* SD has proprietary bridging */
}
hc->pcm = PCM_cnt;
printk(KERN_INFO "controller has PCM BUS ID %d "
"(auto selected)\n", hc->pcm);
}
/* set up timer */
HFC_outb(hc, R_TI_WD, poll_timer);
hc->hw.r_irqmsk_misc |= V_TI_IRQMSK;
/* set E1 state machine IRQ */
if (hc->ctype == HFC_TYPE_E1)
hc->hw.r_irqmsk_misc |= V_STA_IRQMSK;
/* set DTMF detection */
if (test_bit(HFC_CHIP_DTMF, &hc->chip)) {
if (debug & DEBUG_HFCMULTI_INIT)
printk(KERN_DEBUG "%s: enabling DTMF detection "
"for all B-channel\n", __func__);
hc->hw.r_dtmf = V_DTMF_EN | V_DTMF_STOP;
if (test_bit(HFC_CHIP_ULAW, &hc->chip))
hc->hw.r_dtmf |= V_ULAW_SEL;
HFC_outb(hc, R_DTMF_N, 102 - 1);
hc->hw.r_irqmsk_misc |= V_DTMF_IRQMSK;
}
/* conference engine */
if (test_bit(HFC_CHIP_ULAW, &hc->chip))
r_conf_en = V_CONF_EN | V_ULAW;
else
r_conf_en = V_CONF_EN;
if (hc->ctype != HFC_TYPE_XHFC)
HFC_outb(hc, R_CONF_EN, r_conf_en);
/* setting leds */
switch (hc->leds) {
case 1: /* HFC-E1 OEM */
if (test_bit(HFC_CHIP_WATCHDOG, &hc->chip))
HFC_outb(hc, R_GPIO_SEL, 0x32);
else
HFC_outb(hc, R_GPIO_SEL, 0x30);
HFC_outb(hc, R_GPIO_EN1, 0x0f);
HFC_outb(hc, R_GPIO_OUT1, 0x00);
HFC_outb(hc, R_GPIO_EN0, V_GPIO_EN2 | V_GPIO_EN3);
break;
case 2: /* HFC-4S OEM */
case 3:
HFC_outb(hc, R_GPIO_SEL, 0xf0);
HFC_outb(hc, R_GPIO_EN1, 0xff);
HFC_outb(hc, R_GPIO_OUT1, 0x00);
break;
}
if (test_bit(HFC_CHIP_EMBSD, &hc->chip)) {
hc->hw.r_st_sync = 0x10; /* V_AUTO_SYNCI */
HFC_outb(hc, R_ST_SYNC, hc->hw.r_st_sync);
}
/* set master clock */
if (hc->masterclk >= 0) {
if (debug & DEBUG_HFCMULTI_INIT)
printk(KERN_DEBUG "%s: setting ST master clock "
"to port %d (0..%d)\n",
__func__, hc->masterclk, hc->ports - 1);
hc->hw.r_st_sync |= (hc->masterclk | V_AUTO_SYNC);
HFC_outb(hc, R_ST_SYNC, hc->hw.r_st_sync);
}
/* setting misc irq */
HFC_outb(hc, R_IRQMSK_MISC, hc->hw.r_irqmsk_misc);
if (debug & DEBUG_HFCMULTI_INIT)
printk(KERN_DEBUG "r_irqmsk_misc.2: 0x%x\n",
hc->hw.r_irqmsk_misc);
/* RAM access test */
HFC_outb(hc, R_RAM_ADDR0, 0);
HFC_outb(hc, R_RAM_ADDR1, 0);
HFC_outb(hc, R_RAM_ADDR2, 0);
for (i = 0; i < 256; i++) {
HFC_outb_nodebug(hc, R_RAM_ADDR0, i);
HFC_outb_nodebug(hc, R_RAM_DATA, ((i * 3) & 0xff));
}
for (i = 0; i < 256; i++) {
HFC_outb_nodebug(hc, R_RAM_ADDR0, i);
HFC_inb_nodebug(hc, R_RAM_DATA);
rval = HFC_inb_nodebug(hc, R_INT_DATA);
if (rval != ((i * 3) & 0xff)) {
printk(KERN_DEBUG
"addr:%x val:%x should:%x\n", i, rval,
(i * 3) & 0xff);
err++;
}
}
if (err) {
printk(KERN_DEBUG "aborting - %d RAM access errors\n", err);
err = -EIO;
goto out;
}
if (debug & DEBUG_HFCMULTI_INIT)
printk(KERN_DEBUG "%s: done\n", __func__);
out:
spin_unlock_irqrestore(&hc->lock, flags);
return err;
}
/*
* control the watchdog
*/
static void
hfcmulti_watchdog(struct hfc_multi *hc)
{
hc->wdcount++;
if (hc->wdcount > 10) {
hc->wdcount = 0;
hc->wdbyte = hc->wdbyte == V_GPIO_OUT2 ?
V_GPIO_OUT3 : V_GPIO_OUT2;
/* printk("Sending Watchdog Kill %x\n",hc->wdbyte); */
HFC_outb(hc, R_GPIO_EN0, V_GPIO_EN2 | V_GPIO_EN3);
HFC_outb(hc, R_GPIO_OUT0, hc->wdbyte);
}
}
/*
* output leds
*/
static void
hfcmulti_leds(struct hfc_multi *hc)
{
unsigned long lled;
unsigned long leddw;
int i, state, active, leds;
struct dchannel *dch;
int led[4];
switch (hc->leds) {
case 1: /* HFC-E1 OEM */
/* 2 red steady: LOS
* 1 red steady: L1 not active
* 2 green steady: L1 active
* 1st green flashing: activity on TX
* 2nd green flashing: activity on RX
*/
led[0] = 0;
led[1] = 0;
led[2] = 0;
led[3] = 0;
dch = hc->chan[hc->dnum[0]].dch;
if (dch) {
if (hc->chan[hc->dnum[0]].los)
led[1] = 1;
if (hc->e1_state != 1) {
led[0] = 1;
hc->flash[2] = 0;
hc->flash[3] = 0;
} else {
led[2] = 1;
led[3] = 1;
if (!hc->flash[2] && hc->activity_tx)
hc->flash[2] = poll;
if (!hc->flash[3] && hc->activity_rx)
hc->flash[3] = poll;
if (hc->flash[2] && hc->flash[2] < 1024)
led[2] = 0;
if (hc->flash[3] && hc->flash[3] < 1024)
led[3] = 0;
if (hc->flash[2] >= 2048)
hc->flash[2] = 0;
if (hc->flash[3] >= 2048)
hc->flash[3] = 0;
if (hc->flash[2])
hc->flash[2] += poll;
if (hc->flash[3])
hc->flash[3] += poll;
}
}
leds = (led[0] | (led[1]<<2) | (led[2]<<1) | (led[3]<<3))^0xF;
/* leds are inverted */
if (leds != (int)hc->ledstate) {
HFC_outb_nodebug(hc, R_GPIO_OUT1, leds);
hc->ledstate = leds;
}
break;
case 2: /* HFC-4S OEM */
/* red steady: PH_DEACTIVATE
* green steady: PH_ACTIVATE
* green flashing: activity on TX
*/
for (i = 0; i < 4; i++) {
state = 0;
active = -1;
dch = hc->chan[(i << 2) | 2].dch;
if (dch) {
state = dch->state;
if (dch->dev.D.protocol == ISDN_P_NT_S0)
active = 3;
else
active = 7;
}
if (state) {
if (state == active) {
led[i] = 1; /* led green */
hc->activity_tx |= hc->activity_rx;
if (!hc->flash[i] &&
(hc->activity_tx & (1 << i)))
hc->flash[i] = poll;
if (hc->flash[i] && hc->flash[i] < 1024)
led[i] = 0; /* led off */
if (hc->flash[i] >= 2048)
hc->flash[i] = 0;
if (hc->flash[i])
hc->flash[i] += poll;
} else {
led[i] = 2; /* led red */
hc->flash[i] = 0;
}
} else
led[i] = 0; /* led off */
}
if (test_bit(HFC_CHIP_B410P, &hc->chip)) {
leds = 0;
for (i = 0; i < 4; i++) {
if (led[i] == 1) {
/*green*/
leds |= (0x2 << (i * 2));
} else if (led[i] == 2) {
/*red*/
leds |= (0x1 << (i * 2));
}
}
if (leds != (int)hc->ledstate) {
vpm_out(hc, 0, 0x1a8 + 3, leds);
hc->ledstate = leds;
}
} else {
leds = ((led[3] > 0) << 0) | ((led[1] > 0) << 1) |
((led[0] > 0) << 2) | ((led[2] > 0) << 3) |
((led[3] & 1) << 4) | ((led[1] & 1) << 5) |
((led[0] & 1) << 6) | ((led[2] & 1) << 7);
if (leds != (int)hc->ledstate) {
HFC_outb_nodebug(hc, R_GPIO_EN1, leds & 0x0F);
HFC_outb_nodebug(hc, R_GPIO_OUT1, leds >> 4);
hc->ledstate = leds;
}
}
break;
case 3: /* HFC 1S/2S Beronet */
/* red steady: PH_DEACTIVATE
* green steady: PH_ACTIVATE
* green flashing: activity on TX
*/
for (i = 0; i < 2; i++) {
state = 0;
active = -1;
dch = hc->chan[(i << 2) | 2].dch;
if (dch) {
state = dch->state;
if (dch->dev.D.protocol == ISDN_P_NT_S0)
active = 3;
else
active = 7;
}
if (state) {
if (state == active) {
led[i] = 1; /* led green */
hc->activity_tx |= hc->activity_rx;
if (!hc->flash[i] &&
(hc->activity_tx & (1 << i)))
hc->flash[i] = poll;
if (hc->flash[i] < 1024)
led[i] = 0; /* led off */
if (hc->flash[i] >= 2048)
hc->flash[i] = 0;
if (hc->flash[i])
hc->flash[i] += poll;
} else {
led[i] = 2; /* led red */
hc->flash[i] = 0;
}
} else
led[i] = 0; /* led off */
}
leds = (led[0] > 0) | ((led[1] > 0) << 1) | ((led[0]&1) << 2)
| ((led[1]&1) << 3);
if (leds != (int)hc->ledstate) {
HFC_outb_nodebug(hc, R_GPIO_EN1,
((led[0] > 0) << 2) | ((led[1] > 0) << 3));
HFC_outb_nodebug(hc, R_GPIO_OUT1,
((led[0] & 1) << 2) | ((led[1] & 1) << 3));
hc->ledstate = leds;
}
break;
case 8: /* HFC 8S+ Beronet */
/* off: PH_DEACTIVATE
* steady: PH_ACTIVATE
* flashing: activity on TX
*/
lled = 0xff; /* leds off */
for (i = 0; i < 8; i++) {
state = 0;
active = -1;
dch = hc->chan[(i << 2) | 2].dch;
if (dch) {
state = dch->state;
if (dch->dev.D.protocol == ISDN_P_NT_S0)
active = 3;
else
active = 7;
}
if (state) {
if (state == active) {
lled &= ~(1 << i); /* led on */
hc->activity_tx |= hc->activity_rx;
if (!hc->flash[i] &&
(hc->activity_tx & (1 << i)))
hc->flash[i] = poll;
if (hc->flash[i] < 1024)
lled |= 1 << i; /* led off */
if (hc->flash[i] >= 2048)
hc->flash[i] = 0;
if (hc->flash[i])
hc->flash[i] += poll;
} else
hc->flash[i] = 0;
}
}
leddw = lled << 24 | lled << 16 | lled << 8 | lled;
if (leddw != hc->ledstate) {
/* HFC_outb(hc, R_BRG_PCM_CFG, 1);
HFC_outb(hc, R_BRG_PCM_CFG, (0x0 << 6) | 0x3); */
/* was _io before */
HFC_outb_nodebug(hc, R_BRG_PCM_CFG, 1 | V_PCM_CLK);
outw(0x4000, hc->pci_iobase + 4);
outl(leddw, hc->pci_iobase);
HFC_outb_nodebug(hc, R_BRG_PCM_CFG, V_PCM_CLK);
hc->ledstate = leddw;
}
break;
}
hc->activity_tx = 0;
hc->activity_rx = 0;
}
/*
* read dtmf coefficients
*/
static void
hfcmulti_dtmf(struct hfc_multi *hc)
{
s32 *coeff;
u_int mantissa;
int co, ch;
struct bchannel *bch = NULL;
u8 exponent;
int dtmf = 0;
int addr;
u16 w_float;
struct sk_buff *skb;
struct mISDNhead *hh;
if (debug & DEBUG_HFCMULTI_DTMF)
printk(KERN_DEBUG "%s: dtmf detection irq\n", __func__);
for (ch = 0; ch <= 31; ch++) {
/* only process enabled B-channels */
bch = hc->chan[ch].bch;
if (!bch)
continue;
if (!hc->created[hc->chan[ch].port])
continue;
if (!test_bit(FLG_TRANSPARENT, &bch->Flags))
continue;
if (debug & DEBUG_HFCMULTI_DTMF)
printk(KERN_DEBUG "%s: dtmf channel %d:",
__func__, ch);
coeff = &(hc->chan[ch].coeff[hc->chan[ch].coeff_count * 16]);
dtmf = 1;
for (co = 0; co < 8; co++) {
/* read W(n-1) coefficient */
addr = hc->DTMFbase + ((co << 7) | (ch << 2));
HFC_outb_nodebug(hc, R_RAM_ADDR0, addr);
HFC_outb_nodebug(hc, R_RAM_ADDR1, addr >> 8);
HFC_outb_nodebug(hc, R_RAM_ADDR2, (addr >> 16)
| V_ADDR_INC);
w_float = HFC_inb_nodebug(hc, R_RAM_DATA);
w_float |= (HFC_inb_nodebug(hc, R_RAM_DATA) << 8);
if (debug & DEBUG_HFCMULTI_DTMF)
printk(" %04x", w_float);
/* decode float (see chip doc) */
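/* sign in bit 15, 3-bit exponent in bits 14..12, 12-bit mantissa;
a nonzero exponent toggles the implied bit 12 and shifts by exponent-1 */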
mantissa = w_float & 0x0fff;
if (w_float & 0x8000)
mantissa |= 0xfffff000;
exponent = (w_float >> 12) & 0x7;
if (exponent) {
mantissa ^= 0x1000;
mantissa <<= (exponent - 1);
}
/* store coefficient */
coeff[co << 1] = mantissa;
/* read W(n) coefficient */
w_float = HFC_inb_nodebug(hc, R_RAM_DATA);
w_float |= (HFC_inb_nodebug(hc, R_RAM_DATA) << 8);
if (debug & DEBUG_HFCMULTI_DTMF)
printk(" %04x", w_float);
/* decode float (see chip doc) */
mantissa = w_float & 0x0fff;
if (w_float & 0x8000)
mantissa |= 0xfffff000;
exponent = (w_float >> 12) & 0x7;
if (exponent) {
mantissa ^= 0x1000;
mantissa <<= (exponent - 1);
}
/* store coefficient */
coeff[(co << 1) | 1] = mantissa;
}
if (debug & DEBUG_HFCMULTI_DTMF)
printk(" DTMF ready %08x %08x %08x %08x "
"%08x %08x %08x %08x\n",
coeff[0], coeff[1], coeff[2], coeff[3],
coeff[4], coeff[5], coeff[6], coeff[7]);
hc->chan[ch].coeff_count++;
if (hc->chan[ch].coeff_count == 8) {
hc->chan[ch].coeff_count = 0;
skb = mI_alloc_skb(512, GFP_ATOMIC);
if (!skb) {
printk(KERN_DEBUG "%s: No memory for skb\n",
__func__);
continue;
}
hh = mISDN_HEAD_P(skb);
hh->prim = PH_CONTROL_IND;
hh->id = DTMF_HFC_COEF;
skb_put_data(skb, hc->chan[ch].coeff, 512);
recv_Bchannel_skb(bch, skb);
}
}
/* restart DTMF processing */
hc->dtmf = dtmf;
if (dtmf)
HFC_outb_nodebug(hc, R_DTMF, hc->hw.r_dtmf | V_RST_DTMF);
}
/*
* fill fifo as much as possible
*/
static void
hfcmulti_tx(struct hfc_multi *hc, int ch)
{
int i, ii, temp, len = 0;
int Zspace, z1, z2; /* must be int for calculation */
int Fspace, f1, f2;
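/* Z counters track byte space in the fifo, F counters track HDLC frame slots */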
u_char *d;
int *txpending, slot_tx;
struct bchannel *bch;
struct dchannel *dch;
struct sk_buff **sp = NULL;
int *idxp;
bch = hc->chan[ch].bch;
dch = hc->chan[ch].dch;
if ((!dch) && (!bch))
return;
txpending = &hc->chan[ch].txpending;
slot_tx = hc->chan[ch].slot_tx;
if (dch) {
if (!test_bit(FLG_ACTIVE, &dch->Flags))
return;
sp = &dch->tx_skb;
idxp = &dch->tx_idx;
} else {
if (!test_bit(FLG_ACTIVE, &bch->Flags))
return;
sp = &bch->tx_skb;
idxp = &bch->tx_idx;
}
if (*sp)
len = (*sp)->len;
if ((!len) && *txpending != 1)
return; /* no data */
if (test_bit(HFC_CHIP_B410P, &hc->chip) &&
(hc->chan[ch].protocol == ISDN_P_B_RAW) &&
(hc->chan[ch].slot_rx < 0) &&
(hc->chan[ch].slot_tx < 0))
HFC_outb_nodebug(hc, R_FIFO, 0x20 | (ch << 1));
else
HFC_outb_nodebug(hc, R_FIFO, ch << 1);
HFC_wait_nodebug(hc);
if (*txpending == 2) {
/* reset fifo */
HFC_outb_nodebug(hc, R_INC_RES_FIFO, V_RES_F);
HFC_wait_nodebug(hc);
HFC_outb(hc, A_SUBCH_CFG, 0);
*txpending = 1;
}
next_frame:
if (dch || test_bit(FLG_HDLC, &bch->Flags)) {
f1 = HFC_inb_nodebug(hc, A_F1);
f2 = HFC_inb_nodebug(hc, A_F2);
while (f2 != (temp = HFC_inb_nodebug(hc, A_F2))) {
if (debug & DEBUG_HFCMULTI_FIFO)
printk(KERN_DEBUG
"%s(card %d): reread f2 because %d!=%d\n",
__func__, hc->id + 1, temp, f2);
f2 = temp; /* repeat until F2 is equal */
}
Fspace = f2 - f1 - 1;
if (Fspace < 0)
Fspace += hc->Flen;
/*
* Old FIFO handling doesn't give us the current Z2 read
* pointer, so we cannot send the next frame before the fifo
* is empty. It makes no difference except for slightly
* lower performance.
*/
if (test_bit(HFC_CHIP_REVISION0, &hc->chip)) {
if (f1 != f2)
Fspace = 0;
else
Fspace = 1;
}
/* one frame only for ST D-channels, to allow resending */
if (hc->ctype != HFC_TYPE_E1 && dch) {
if (f1 != f2)
Fspace = 0;
}
/* F-counter full condition */
if (Fspace == 0)
return;
}
z1 = HFC_inw_nodebug(hc, A_Z1) - hc->Zmin;
z2 = HFC_inw_nodebug(hc, A_Z2) - hc->Zmin;
while (z2 != (temp = (HFC_inw_nodebug(hc, A_Z2) - hc->Zmin))) {
if (debug & DEBUG_HFCMULTI_FIFO)
printk(KERN_DEBUG "%s(card %d): reread z2 because "
"%d!=%d\n", __func__, hc->id + 1, temp, z2);
z2 = temp; /* repeat until Z2 is equal */
}
hc->chan[ch].Zfill = z1 - z2;
if (hc->chan[ch].Zfill < 0)
hc->chan[ch].Zfill += hc->Zlen;
Zspace = z2 - z1;
if (Zspace <= 0)
Zspace += hc->Zlen;
Zspace -= 4; /* keep not too full, so pointers will not overrun */
/* fill transparent data only up to the maximum transparent load (minus 4) */
if (bch && test_bit(FLG_TRANSPARENT, &bch->Flags))
Zspace = Zspace - hc->Zlen + hc->max_trans;
if (Zspace <= 0) /* no space of 4 bytes */
return;
/* if no data */
if (!len) {
if (z1 == z2) { /* empty */
/* if done with FIFO audio data during PCM connection */
if (bch && (!test_bit(FLG_HDLC, &bch->Flags)) &&
*txpending && slot_tx >= 0) {
if (debug & DEBUG_HFCMULTI_MODE)
printk(KERN_DEBUG
"%s: reconnecting PCM due to no "
"more FIFO data: channel %d "
"slot_tx %d\n",
__func__, ch, slot_tx);
/* connect slot */
if (hc->ctype == HFC_TYPE_XHFC)
HFC_outb(hc, A_CON_HDLC, 0xc0
| 0x07 << 2 | V_HDLC_TRP | V_IFF);
/* Enable FIFO, no interrupt */
else
HFC_outb(hc, A_CON_HDLC, 0xc0 | 0x00 |
V_HDLC_TRP | V_IFF);
HFC_outb_nodebug(hc, R_FIFO, ch << 1 | 1);
HFC_wait_nodebug(hc);
if (hc->ctype == HFC_TYPE_XHFC)
HFC_outb(hc, A_CON_HDLC, 0xc0
| 0x07 << 2 | V_HDLC_TRP | V_IFF);
/* Enable FIFO, no interrupt */
else
HFC_outb(hc, A_CON_HDLC, 0xc0 | 0x00 |
V_HDLC_TRP | V_IFF);
HFC_outb_nodebug(hc, R_FIFO, ch << 1);
HFC_wait_nodebug(hc);
}
*txpending = 0;
}
return; /* no data */
}
/* "fill fifo if empty" feature */
if (bch && test_bit(FLG_FILLEMPTY, &bch->Flags)
&& !test_bit(FLG_HDLC, &bch->Flags) && z2 == z1) {
if (debug & DEBUG_HFCMULTI_FILL)
printk(KERN_DEBUG "%s: buffer empty, so we have "
"underrun\n", __func__);
/* fill buffer, to prevent future underrun */
hc->write_fifo(hc, hc->silence_data, poll >> 1);
Zspace -= (poll >> 1);
}
/* if audio data and connected slot */
if (bch && (!test_bit(FLG_HDLC, &bch->Flags)) && (!*txpending)
&& slot_tx >= 0) {
if (debug & DEBUG_HFCMULTI_MODE)
printk(KERN_DEBUG "%s: disconnecting PCM due to "
"FIFO data: channel %d slot_tx %d\n",
__func__, ch, slot_tx);
/* disconnect slot */
if (hc->ctype == HFC_TYPE_XHFC)
HFC_outb(hc, A_CON_HDLC, 0x80
| 0x07 << 2 | V_HDLC_TRP | V_IFF);
/* Enable FIFO, no interrupt */
else
HFC_outb(hc, A_CON_HDLC, 0x80 | 0x00 |
V_HDLC_TRP | V_IFF);
HFC_outb_nodebug(hc, R_FIFO, ch << 1 | 1);
HFC_wait_nodebug(hc);
if (hc->ctype == HFC_TYPE_XHFC)
HFC_outb(hc, A_CON_HDLC, 0x80
| 0x07 << 2 | V_HDLC_TRP | V_IFF);
/* Enable FIFO, no interrupt */
else
HFC_outb(hc, A_CON_HDLC, 0x80 | 0x00 |
V_HDLC_TRP | V_IFF);
HFC_outb_nodebug(hc, R_FIFO, ch << 1);
HFC_wait_nodebug(hc);
}
*txpending = 1;
/* show activity */
if (dch)
hc->activity_tx |= 1 << hc->chan[ch].port;
/* fill fifo to what we have left */
ii = len;
if (dch || test_bit(FLG_HDLC, &bch->Flags))
temp = 1;
else
temp = 0;
i = *idxp;
d = (*sp)->data + i;
if (ii - i > Zspace)
ii = Zspace + i;
if (debug & DEBUG_HFCMULTI_FIFO)
printk(KERN_DEBUG "%s(card %d): fifo(%d) has %d bytes space "
"left (z1=%04x, z2=%04x) sending %d of %d bytes %s\n",
__func__, hc->id + 1, ch, Zspace, z1, z2, ii-i, len-i,
temp ? "HDLC" : "TRANS");
/* Have to prep the audio data */
hc->write_fifo(hc, d, ii - i);
hc->chan[ch].Zfill += ii - i;
*idxp = ii;
/* if not all data has been written */
if (ii != len) {
/* NOTE: fifo is started by the calling function */
return;
}
/* if all data has been written, terminate frame */
if (dch || test_bit(FLG_HDLC, &bch->Flags)) {
/* increment f-counter */
HFC_outb_nodebug(hc, R_INC_RES_FIFO, V_INC_F);
HFC_wait_nodebug(hc);
}
dev_kfree_skb(*sp);
/* check for next frame */
if (bch && get_next_bframe(bch)) {
len = (*sp)->len;
goto next_frame;
}
if (dch && get_next_dframe(dch)) {
len = (*sp)->len;
goto next_frame;
}
/*
* No more data is available now. In transparent mode, set the last
* byte in the fifo to 'silence' in case no more data arrives at all;
* this prevents sending an undefined value.
*/
if (bch && test_bit(FLG_TRANSPARENT, &bch->Flags))
HFC_outb_nodebug(hc, A_FIFO_DATA0_NOINC, hc->silence);
}
/* NOTE: only called if E1 card is in active state */
static void
hfcmulti_rx(struct hfc_multi *hc, int ch)
{
int temp;
int Zsize, z1, z2 = 0; /* = 0, to make GCC happy */
int f1 = 0, f2 = 0; /* = 0, to make GCC happy */
int again = 0;
struct bchannel *bch;
struct dchannel *dch = NULL;
struct sk_buff *skb, **sp = NULL;
int maxlen;
bch = hc->chan[ch].bch;
if (bch) {
if (!test_bit(FLG_ACTIVE, &bch->Flags))
return;
} else if (hc->chan[ch].dch) {
dch = hc->chan[ch].dch;
if (!test_bit(FLG_ACTIVE, &dch->Flags))
return;
} else {
return;
}
next_frame:
/* R_FIFO must be written before the first frame and again before each
next valid frame. */
if (test_bit(HFC_CHIP_B410P, &hc->chip) &&
(hc->chan[ch].protocol == ISDN_P_B_RAW) &&
(hc->chan[ch].slot_rx < 0) &&
(hc->chan[ch].slot_tx < 0))
HFC_outb_nodebug(hc, R_FIFO, 0x20 | (ch << 1) | 1);
else
HFC_outb_nodebug(hc, R_FIFO, (ch << 1) | 1);
HFC_wait_nodebug(hc);
/* ignore if rx is off BUT change fifo (above) to start pending TX */
if (hc->chan[ch].rx_off) {
if (bch)
bch->dropcnt += poll; /* not exact but fair enough */
return;
}
if (dch || test_bit(FLG_HDLC, &bch->Flags)) {
f1 = HFC_inb_nodebug(hc, A_F1);
while (f1 != (temp = HFC_inb_nodebug(hc, A_F1))) {
if (debug & DEBUG_HFCMULTI_FIFO)
printk(KERN_DEBUG
"%s(card %d): reread f1 because %d!=%d\n",
__func__, hc->id + 1, temp, f1);
f1 = temp; /* repeat until F1 is equal */
}
f2 = HFC_inb_nodebug(hc, A_F2);
}
z1 = HFC_inw_nodebug(hc, A_Z1) - hc->Zmin;
while (z1 != (temp = (HFC_inw_nodebug(hc, A_Z1) - hc->Zmin))) {
if (debug & DEBUG_HFCMULTI_FIFO)
printk(KERN_DEBUG "%s(card %d): reread z2 because "
"%d!=%d\n", __func__, hc->id + 1, temp, z2);
z1 = temp; /* repeat until Z1 is equal */
}
z2 = HFC_inw_nodebug(hc, A_Z2) - hc->Zmin;
Zsize = z1 - z2;
if ((dch || test_bit(FLG_HDLC, &bch->Flags)) && f1 != f2)
/* complete hdlc frame */
Zsize++;
if (Zsize < 0)
Zsize += hc->Zlen;
/* if buffer is empty */
if (Zsize <= 0)
return;
if (bch) {
maxlen = bchannel_get_rxbuf(bch, Zsize);
if (maxlen < 0) {
pr_warn("card%d.B%d: No bufferspace for %d bytes\n",
hc->id + 1, bch->nr, Zsize);
return;
}
sp = &bch->rx_skb;
maxlen = bch->maxlen;
} else { /* Dchannel */
sp = &dch->rx_skb;
maxlen = dch->maxlen + 3;
if (*sp == NULL) {
*sp = mI_alloc_skb(maxlen, GFP_ATOMIC);
if (*sp == NULL) {
pr_warn("card%d: No mem for dch rx_skb\n",
hc->id + 1);
return;
}
}
}
/* show activity */
if (dch)
hc->activity_rx |= 1 << hc->chan[ch].port;
/* empty fifo with what we have */
if (dch || test_bit(FLG_HDLC, &bch->Flags)) {
if (debug & DEBUG_HFCMULTI_FIFO)
printk(KERN_DEBUG "%s(card %d): fifo(%d) reading %d "
"bytes (z1=%04x, z2=%04x) HDLC %s (f1=%d, f2=%d) "
"got=%d (again %d)\n", __func__, hc->id + 1, ch,
Zsize, z1, z2, (f1 == f2) ? "fragment" : "COMPLETE",
f1, f2, Zsize + (*sp)->len, again);
/* HDLC */
if ((Zsize + (*sp)->len) > maxlen) {
if (debug & DEBUG_HFCMULTI_FIFO)
printk(KERN_DEBUG
"%s(card %d): hdlc-frame too large.\n",
__func__, hc->id + 1);
skb_trim(*sp, 0);
HFC_outb_nodebug(hc, R_INC_RES_FIFO, V_RES_F);
HFC_wait_nodebug(hc);
return;
}
hc->read_fifo(hc, skb_put(*sp, Zsize), Zsize);
if (f1 != f2) {
/* increment Z2,F2-counter */
HFC_outb_nodebug(hc, R_INC_RES_FIFO, V_INC_F);
HFC_wait_nodebug(hc);
/* check size */
if ((*sp)->len < 4) {
if (debug & DEBUG_HFCMULTI_FIFO)
printk(KERN_DEBUG
"%s(card %d): Frame below minimum "
"size\n", __func__, hc->id + 1);
skb_trim(*sp, 0);
goto next_frame;
}
/* there is at least one complete frame, check crc */
if ((*sp)->data[(*sp)->len - 1]) {
if (debug & DEBUG_HFCMULTI_CRC)
printk(KERN_DEBUG
"%s: CRC-error\n", __func__);
skb_trim(*sp, 0);
goto next_frame;
}
skb_trim(*sp, (*sp)->len - 3);
if ((*sp)->len < MISDN_COPY_SIZE) {
skb = *sp;
*sp = mI_alloc_skb(skb->len, GFP_ATOMIC);
if (*sp) {
skb_put_data(*sp, skb->data, skb->len);
skb_trim(skb, 0);
} else {
printk(KERN_DEBUG "%s: No mem\n",
__func__);
*sp = skb;
skb = NULL;
}
} else {
skb = NULL;
}
if (debug & DEBUG_HFCMULTI_FIFO) {
printk(KERN_DEBUG "%s(card %d):",
__func__, hc->id + 1);
temp = 0;
while (temp < (*sp)->len)
printk(" %02x", (*sp)->data[temp++]);
printk("\n");
}
if (dch)
recv_Dchannel(dch);
else
recv_Bchannel(bch, MISDN_ID_ANY, false);
*sp = skb;
again++;
goto next_frame;
}
/* there is an incomplete frame */
} else {
/* transparent */
hc->read_fifo(hc, skb_put(*sp, Zsize), Zsize);
if (debug & DEBUG_HFCMULTI_FIFO)
printk(KERN_DEBUG
"%s(card %d): fifo(%d) reading %d bytes "
"(z1=%04x, z2=%04x) TRANS\n",
__func__, hc->id + 1, ch, Zsize, z1, z2);
/* only bch is transparent */
recv_Bchannel(bch, hc->chan[ch].Zfill, false);
}
}
/*
* Interrupt handler
*/
static void
signal_state_up(struct dchannel *dch, int info, char *msg)
{
struct sk_buff *skb;
int id, data = info;
if (debug & DEBUG_HFCMULTI_STATE)
printk(KERN_DEBUG "%s: %s\n", __func__, msg);
id = TEI_SAPI | (GROUP_TEI << 8); /* manager address */
skb = _alloc_mISDN_skb(MPH_INFORMATION_IND, id, sizeof(data), &data,
GFP_ATOMIC);
if (!skb)
return;
recv_Dchannel_skb(dch, skb);
}
static inline void
handle_timer_irq(struct hfc_multi *hc)
{
int ch, temp;
struct dchannel *dch;
u_long flags;
/* process queued resync jobs */
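/* e1_resync bits: 1 = sync to SYNC_I, 2 = enable JATT PLL, 4 = sync to quartz */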
if (hc->e1_resync) {
/* lock so that e1_resync is not changed concurrently */
spin_lock_irqsave(&HFClock, flags);
if (hc->e1_resync & 1) {
if (debug & DEBUG_HFCMULTI_PLXSD)
printk(KERN_DEBUG "Enable SYNC_I\n");
HFC_outb(hc, R_SYNC_CTRL, V_EXT_CLK_SYNC);
/* disable JATT, if RX_SYNC is set */
if (test_bit(HFC_CHIP_RX_SYNC, &hc->chip))
HFC_outb(hc, R_SYNC_OUT, V_SYNC_E1_RX);
}
if (hc->e1_resync & 2) {
if (debug & DEBUG_HFCMULTI_PLXSD)
printk(KERN_DEBUG "Enable jatt PLL\n");
HFC_outb(hc, R_SYNC_CTRL, V_SYNC_OFFS);
}
if (hc->e1_resync & 4) {
if (debug & DEBUG_HFCMULTI_PLXSD)
printk(KERN_DEBUG
"Enable QUARTZ for HFC-E1\n");
/* set jatt to quartz */
HFC_outb(hc, R_SYNC_CTRL, V_EXT_CLK_SYNC
| V_JATT_OFF);
/* switch to JATT, in case it is not already */
HFC_outb(hc, R_SYNC_OUT, 0);
}
hc->e1_resync = 0;
spin_unlock_irqrestore(&HFClock, flags);
}
if (hc->ctype != HFC_TYPE_E1 || hc->e1_state == 1)
for (ch = 0; ch <= 31; ch++) {
if (hc->created[hc->chan[ch].port]) {
hfcmulti_tx(hc, ch);
/* fifo is started when switching to rx-fifo */
hfcmulti_rx(hc, ch);
if (hc->chan[ch].dch &&
hc->chan[ch].nt_timer > -1) {
dch = hc->chan[ch].dch;
if (!(--hc->chan[ch].nt_timer)) {
schedule_event(dch,
FLG_PHCHANGE);
if (debug &
DEBUG_HFCMULTI_STATE)
printk(KERN_DEBUG
"%s: nt_timer at "
"state %x\n",
__func__,
dch->state);
}
}
}
}
if (hc->ctype == HFC_TYPE_E1 && hc->created[0]) {
dch = hc->chan[hc->dnum[0]].dch;
/* LOS */
temp = HFC_inb_nodebug(hc, R_SYNC_STA) & V_SIG_LOS;
if (test_bit(HFC_CFG_REPORT_LOS, &hc->chan[hc->dnum[0]].cfg)) {
if (!temp && hc->chan[hc->dnum[0]].los)
signal_state_up(dch, L1_SIGNAL_LOS_ON,
"LOS detected");
if (temp && !hc->chan[hc->dnum[0]].los)
signal_state_up(dch, L1_SIGNAL_LOS_OFF,
"LOS gone");
}
/* store the new state only after the edge checks, as done for AIS/SLIP/RDI */
hc->chan[hc->dnum[0]].los = temp;
if (test_bit(HFC_CFG_REPORT_AIS, &hc->chan[hc->dnum[0]].cfg)) {
/* AIS */
temp = HFC_inb_nodebug(hc, R_SYNC_STA) & V_AIS;
if (!temp && hc->chan[hc->dnum[0]].ais)
signal_state_up(dch, L1_SIGNAL_AIS_ON,
"AIS detected");
if (temp && !hc->chan[hc->dnum[0]].ais)
signal_state_up(dch, L1_SIGNAL_AIS_OFF,
"AIS gone");
hc->chan[hc->dnum[0]].ais = temp;
}
if (test_bit(HFC_CFG_REPORT_SLIP, &hc->chan[hc->dnum[0]].cfg)) {
/* SLIP */
temp = HFC_inb_nodebug(hc, R_SLIP) & V_FOSLIP_RX;
if (!temp && hc->chan[hc->dnum[0]].slip_rx)
signal_state_up(dch, L1_SIGNAL_SLIP_RX,
" bit SLIP detected RX");
hc->chan[hc->dnum[0]].slip_rx = temp;
temp = HFC_inb_nodebug(hc, R_SLIP) & V_FOSLIP_TX;
if (!temp && hc->chan[hc->dnum[0]].slip_tx)
signal_state_up(dch, L1_SIGNAL_SLIP_TX,
" bit SLIP detected TX");
hc->chan[hc->dnum[0]].slip_tx = temp;
}
if (test_bit(HFC_CFG_REPORT_RDI, &hc->chan[hc->dnum[0]].cfg)) {
/* RDI */
temp = HFC_inb_nodebug(hc, R_RX_SL0_0) & V_A;
if (!temp && hc->chan[hc->dnum[0]].rdi)
signal_state_up(dch, L1_SIGNAL_RDI_ON,
"RDI detected");
if (temp && !hc->chan[hc->dnum[0]].rdi)
signal_state_up(dch, L1_SIGNAL_RDI_OFF,
"RDI gone");
hc->chan[hc->dnum[0]].rdi = temp;
}
temp = HFC_inb_nodebug(hc, R_JATT_DIR);
switch (hc->chan[hc->dnum[0]].sync) {
case 0:
if ((temp & 0x60) == 0x60) {
if (debug & DEBUG_HFCMULTI_SYNC)
printk(KERN_DEBUG
"%s: (id=%d) E1 now "
"in clock sync\n",
__func__, hc->id);
HFC_outb(hc, R_RX_OFF,
hc->chan[hc->dnum[0]].jitter | V_RX_INIT);
HFC_outb(hc, R_TX_OFF,
hc->chan[hc->dnum[0]].jitter | V_RX_INIT);
hc->chan[hc->dnum[0]].sync = 1;
goto check_framesync;
}
break;
case 1:
if ((temp & 0x60) != 0x60) {
if (debug & DEBUG_HFCMULTI_SYNC)
printk(KERN_DEBUG
"%s: (id=%d) E1 "
"lost clock sync\n",
__func__, hc->id);
hc->chan[hc->dnum[0]].sync = 0;
break;
}
check_framesync:
temp = HFC_inb_nodebug(hc, R_SYNC_STA);
if (temp == 0x27) {
if (debug & DEBUG_HFCMULTI_SYNC)
printk(KERN_DEBUG
"%s: (id=%d) E1 "
"now in frame sync\n",
__func__, hc->id);
hc->chan[hc->dnum[0]].sync = 2;
}
break;
case 2:
if ((temp & 0x60) != 0x60) {
if (debug & DEBUG_HFCMULTI_SYNC)
printk(KERN_DEBUG
"%s: (id=%d) E1 lost "
"clock & frame sync\n",
__func__, hc->id);
hc->chan[hc->dnum[0]].sync = 0;
break;
}
temp = HFC_inb_nodebug(hc, R_SYNC_STA);
if (temp != 0x27) {
if (debug & DEBUG_HFCMULTI_SYNC)
printk(KERN_DEBUG
"%s: (id=%d) E1 "
"lost frame sync\n",
__func__, hc->id);
hc->chan[hc->dnum[0]].sync = 1;
}
break;
}
}
if (test_bit(HFC_CHIP_WATCHDOG, &hc->chip))
hfcmulti_watchdog(hc);
if (hc->leds)
hfcmulti_leds(hc);
}
static void
ph_state_irq(struct hfc_multi *hc, u_char r_irq_statech)
{
struct dchannel *dch;
int ch;
int active;
u_char st_status, temp;
/* state machine */
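/* r_irq_statech carries one state-change bit per S/T D-channel; it is
shifted right as the channels are scanned */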
for (ch = 0; ch <= 31; ch++) {
if (hc->chan[ch].dch) {
dch = hc->chan[ch].dch;
if (r_irq_statech & 1) {
HFC_outb_nodebug(hc, R_ST_SEL,
hc->chan[ch].port);
/* undocumented: delay after R_ST_SEL */
udelay(1);
/* undocumented: status changes during read */
st_status = HFC_inb_nodebug(hc, A_ST_RD_STATE);
while (st_status != (temp =
HFC_inb_nodebug(hc, A_ST_RD_STATE))) {
if (debug & DEBUG_HFCMULTI_STATE)
printk(KERN_DEBUG "%s: reread "
"STATE because %d!=%d\n",
__func__, temp,
st_status);
st_status = temp; /* repeat */
}
/* Speech Design TE-sync indication */
if (test_bit(HFC_CHIP_PLXSD, &hc->chip) &&
dch->dev.D.protocol == ISDN_P_TE_S0) {
if (st_status & V_FR_SYNC_ST)
hc->syncronized |=
(1 << hc->chan[ch].port);
else
hc->syncronized &=
~(1 << hc->chan[ch].port);
}
dch->state = st_status & 0x0f;
if (dch->dev.D.protocol == ISDN_P_NT_S0)
active = 3;
else
active = 7;
if (dch->state == active) {
HFC_outb_nodebug(hc, R_FIFO,
(ch << 1) | 1);
HFC_wait_nodebug(hc);
HFC_outb_nodebug(hc,
R_INC_RES_FIFO, V_RES_F);
HFC_wait_nodebug(hc);
dch->tx_idx = 0;
}
schedule_event(dch, FLG_PHCHANGE);
if (debug & DEBUG_HFCMULTI_STATE)
printk(KERN_DEBUG
"%s: S/T newstate %x port %d\n",
__func__, dch->state,
hc->chan[ch].port);
}
r_irq_statech >>= 1;
}
}
if (test_bit(HFC_CHIP_PLXSD, &hc->chip))
plxsd_checksync(hc, 0);
}
static void
fifo_irq(struct hfc_multi *hc, int block)
{
int ch, j;
struct dchannel *dch;
struct bchannel *bch;
u_char r_irq_fifo_bl;
r_irq_fifo_bl = HFC_inb_nodebug(hc, R_IRQ_FIFO_BL0 + block);
j = 0;
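/* each block covers 4 channels; even bits of r_irq_fifo_bl flag TX fifos,
odd bits flag RX fifos */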
while (j < 8) {
ch = (block << 2) + (j >> 1);
dch = hc->chan[ch].dch;
bch = hc->chan[ch].bch;
if (((!dch) && (!bch)) || (!hc->created[hc->chan[ch].port])) {
j += 2;
continue;
}
if (dch && (r_irq_fifo_bl & (1 << j)) &&
test_bit(FLG_ACTIVE, &dch->Flags)) {
hfcmulti_tx(hc, ch);
/* start fifo */
HFC_outb_nodebug(hc, R_FIFO, 0);
HFC_wait_nodebug(hc);
}
if (bch && (r_irq_fifo_bl & (1 << j)) &&
test_bit(FLG_ACTIVE, &bch->Flags)) {
hfcmulti_tx(hc, ch);
/* start fifo */
HFC_outb_nodebug(hc, R_FIFO, 0);
HFC_wait_nodebug(hc);
}
j++;
if (dch && (r_irq_fifo_bl & (1 << j)) &&
test_bit(FLG_ACTIVE, &dch->Flags)) {
hfcmulti_rx(hc, ch);
}
if (bch && (r_irq_fifo_bl & (1 << j)) &&
test_bit(FLG_ACTIVE, &bch->Flags)) {
hfcmulti_rx(hc, ch);
}
j++;
}
}
#ifdef IRQ_DEBUG
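/* id (+1) of the card whose interrupt handler is currently running, 0 when idle */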
int irqsem;
#endif
static irqreturn_t
hfcmulti_interrupt(int intno, void *dev_id)
{
#ifdef IRQCOUNT_DEBUG
static int iq1 = 0, iq2 = 0, iq3 = 0, iq4 = 0,
iq5 = 0, iq6 = 0, iqcnt = 0;
#endif
struct hfc_multi *hc = dev_id;
struct dchannel *dch;
u_char r_irq_statech, status, r_irq_misc, r_irq_oview;
int i;
void __iomem *plx_acc;
u_short wval;
u_char e1_syncsta, temp, temp2;
u_long flags;
if (!hc) {
printk(KERN_ERR "HFC-multi: Spurious interrupt!\n");
return IRQ_NONE;
}
spin_lock(&hc->lock);
#ifdef IRQ_DEBUG
if (irqsem)
printk(KERN_ERR "irq for card %d during irq from "
"card %d, this is no bug.\n", hc->id + 1, irqsem);
irqsem = hc->id + 1;
#endif
#ifdef CONFIG_MISDN_HFCMULTI_8xx
if (hc->immap->im_cpm.cp_pbdat & hc->pb_irqmsk)
goto irq_notforus;
#endif
if (test_bit(HFC_CHIP_PLXSD, &hc->chip)) {
spin_lock_irqsave(&plx_lock, flags);
plx_acc = hc->plx_membase + PLX_INTCSR;
wval = readw(plx_acc);
spin_unlock_irqrestore(&plx_lock, flags);
if (!(wval & PLX_INTCSR_LINTI1_STATUS))
goto irq_notforus;
}
status = HFC_inb_nodebug(hc, R_STATUS);
r_irq_statech = HFC_inb_nodebug(hc, R_IRQ_STATECH);
#ifdef IRQCOUNT_DEBUG
if (r_irq_statech)
iq1++;
if (status & V_DTMF_STA)
iq2++;
if (status & V_LOST_STA)
iq3++;
if (status & V_EXT_IRQSTA)
iq4++;
if (status & V_MISC_IRQSTA)
iq5++;
if (status & V_FR_IRQSTA)
iq6++;
if (iqcnt++ > 5000) {
printk(KERN_ERR "iq1:%x iq2:%x iq3:%x iq4:%x iq5:%x iq6:%x\n",
iq1, iq2, iq3, iq4, iq5, iq6);
iqcnt = 0;
}
#endif
if (!r_irq_statech &&
!(status & (V_DTMF_STA | V_LOST_STA | V_EXT_IRQSTA |
V_MISC_IRQSTA | V_FR_IRQSTA))) {
/* irq is not for us */
goto irq_notforus;
}
hc->irqcnt++;
if (r_irq_statech) {
if (hc->ctype != HFC_TYPE_E1)
ph_state_irq(hc, r_irq_statech);
}
if (status & V_LOST_STA) {
/* LOST IRQ */
HFC_outb(hc, R_INC_RES_FIFO, V_RES_LOST); /* clear irq! */
}
if (status & V_MISC_IRQSTA) {
/* misc IRQ */
r_irq_misc = HFC_inb_nodebug(hc, R_IRQ_MISC);
r_irq_misc &= hc->hw.r_irqmsk_misc; /* ignore disabled irqs */
if (r_irq_misc & V_STA_IRQ) {
if (hc->ctype == HFC_TYPE_E1) {
/* state machine */
dch = hc->chan[hc->dnum[0]].dch;
e1_syncsta = HFC_inb_nodebug(hc, R_SYNC_STA);
if (test_bit(HFC_CHIP_PLXSD, &hc->chip)
&& hc->e1_getclock) {
if (e1_syncsta & V_FR_SYNC_E1)
hc->syncronized = 1;
else
hc->syncronized = 0;
}
/* undocumented: status changes during read */
temp = HFC_inb_nodebug(hc, R_E1_RD_STA);
while (temp != (temp2 =
HFC_inb_nodebug(hc, R_E1_RD_STA))) {
if (debug & DEBUG_HFCMULTI_STATE)
printk(KERN_DEBUG "%s: reread "
"STATE because %d!=%d\n",
__func__, temp, temp2);
temp = temp2; /* repeat */
}
/* broadcast state change to all fragments */
if (debug & DEBUG_HFCMULTI_STATE)
printk(KERN_DEBUG
"%s: E1 (id=%d) newstate %x\n",
__func__, hc->id, temp & 0x7);
for (i = 0; i < hc->ports; i++) {
dch = hc->chan[hc->dnum[i]].dch;
dch->state = temp & 0x7;
schedule_event(dch, FLG_PHCHANGE);
}
if (test_bit(HFC_CHIP_PLXSD, &hc->chip))
plxsd_checksync(hc, 0);
}
}
if (r_irq_misc & V_TI_IRQ) {
if (hc->iclock_on)
mISDN_clock_update(hc->iclock, poll, NULL);
handle_timer_irq(hc);
}
if (r_irq_misc & V_DTMF_IRQ)
hfcmulti_dtmf(hc);
if (r_irq_misc & V_IRQ_PROC) {
static int irq_proc_cnt;
if (!irq_proc_cnt++)
printk(KERN_DEBUG "%s: got V_IRQ_PROC -"
" this should not happen\n", __func__);
}
}
if (status & V_FR_IRQSTA) {
/* FIFO IRQ */
r_irq_oview = HFC_inb_nodebug(hc, R_IRQ_OVIEW);
for (i = 0; i < 8; i++) {
if (r_irq_oview & (1 << i))
fifo_irq(hc, i);
}
}
#ifdef IRQ_DEBUG
irqsem = 0;
#endif
spin_unlock(&hc->lock);
return IRQ_HANDLED;
irq_notforus:
#ifdef IRQ_DEBUG
irqsem = 0;
#endif
spin_unlock(&hc->lock);
return IRQ_NONE;
}
/*
* timer callback for D-channel busy resolution; currently it does nothing
*/
static void
hfcmulti_dbusy_timer(struct timer_list *t)
{
}
/*
* activate/deactivate hardware for selected channels and mode
*
* configure B-channel with the given protocol
* ch equals the HFC-channel (0-31)
* ch is the number of the channel (0-3, 4-7, 8-11, 12-15, 16-19, 20-23,
* 24-27, 28-31 for S/T; 1-31 for E1)
* the hdlc interrupts will be set/unset
*/
static int
mode_hfcmulti(struct hfc_multi *hc, int ch, int protocol, int slot_tx,
int bank_tx, int slot_rx, int bank_rx)
{
int flow_tx = 0, flow_rx = 0, routing = 0;
int oslot_tx, oslot_rx;
int conf;
if (ch < 0 || ch > 31)
return -EINVAL;
oslot_tx = hc->chan[ch].slot_tx;
oslot_rx = hc->chan[ch].slot_rx;
conf = hc->chan[ch].conf;
if (debug & DEBUG_HFCMULTI_MODE)
printk(KERN_DEBUG
"%s: card %d channel %d protocol %x slot old=%d new=%d "
"bank new=%d (TX) slot old=%d new=%d bank new=%d (RX)\n",
__func__, hc->id, ch, protocol, oslot_tx, slot_tx,
bank_tx, oslot_rx, slot_rx, bank_rx);
if (oslot_tx >= 0 && slot_tx != oslot_tx) {
/* remove from slot */
if (debug & DEBUG_HFCMULTI_MODE)
printk(KERN_DEBUG "%s: remove from slot %d (TX)\n",
__func__, oslot_tx);
if (hc->slot_owner[oslot_tx << 1] == ch) {
HFC_outb(hc, R_SLOT, oslot_tx << 1);
HFC_outb(hc, A_SL_CFG, 0);
if (hc->ctype != HFC_TYPE_XHFC)
HFC_outb(hc, A_CONF, 0);
hc->slot_owner[oslot_tx << 1] = -1;
} else {
if (debug & DEBUG_HFCMULTI_MODE)
printk(KERN_DEBUG
"%s: we are not owner of this tx slot "
"anymore, channel %d is.\n",
__func__, hc->slot_owner[oslot_tx << 1]);
}
}
if (oslot_rx >= 0 && slot_rx != oslot_rx) {
/* remove from slot */
if (debug & DEBUG_HFCMULTI_MODE)
printk(KERN_DEBUG
"%s: remove from slot %d (RX)\n",
__func__, oslot_rx);
if (hc->slot_owner[(oslot_rx << 1) | 1] == ch) {
HFC_outb(hc, R_SLOT, (oslot_rx << 1) | V_SL_DIR);
HFC_outb(hc, A_SL_CFG, 0);
hc->slot_owner[(oslot_rx << 1) | 1] = -1;
} else {
if (debug & DEBUG_HFCMULTI_MODE)
printk(KERN_DEBUG
"%s: we are not owner of this rx slot "
"anymore, channel %d is.\n",
__func__,
hc->slot_owner[(oslot_rx << 1) | 1]);
}
}
if (slot_tx < 0) {
flow_tx = 0x80; /* FIFO->ST */
/* disable pcm slot */
hc->chan[ch].slot_tx = -1;
hc->chan[ch].bank_tx = 0;
} else {
/* set pcm slot */
if (hc->chan[ch].txpending)
flow_tx = 0x80; /* FIFO->ST */
else
flow_tx = 0xc0; /* PCM->ST */
/* put on slot */
routing = bank_tx ? 0xc0 : 0x80;
if (conf >= 0 || bank_tx > 1)
routing = 0x40; /* loop */
if (debug & DEBUG_HFCMULTI_MODE)
printk(KERN_DEBUG "%s: put channel %d to slot %d bank"
" %d flow %02x routing %02x conf %d (TX)\n",
__func__, ch, slot_tx, bank_tx,
flow_tx, routing, conf);
HFC_outb(hc, R_SLOT, slot_tx << 1);
HFC_outb(hc, A_SL_CFG, (ch << 1) | routing);
if (hc->ctype != HFC_TYPE_XHFC)
HFC_outb(hc, A_CONF,
(conf < 0) ? 0 : (conf | V_CONF_SL));
hc->slot_owner[slot_tx << 1] = ch;
hc->chan[ch].slot_tx = slot_tx;
hc->chan[ch].bank_tx = bank_tx;
}
if (slot_rx < 0) {
/* disable pcm slot */
flow_rx = 0x80; /* ST->FIFO */
hc->chan[ch].slot_rx = -1;
hc->chan[ch].bank_rx = 0;
} else {
/* set pcm slot */
if (hc->chan[ch].txpending)
flow_rx = 0x80; /* ST->FIFO */
else
flow_rx = 0xc0; /* ST->(FIFO,PCM) */
/* put on slot */
routing = bank_rx ? 0x80 : 0xc0; /* reversed */
if (conf >= 0 || bank_rx > 1)
routing = 0x40; /* loop */
if (debug & DEBUG_HFCMULTI_MODE)
printk(KERN_DEBUG "%s: put channel %d to slot %d bank"
" %d flow %02x routing %02x conf %d (RX)\n",
__func__, ch, slot_rx, bank_rx,
flow_rx, routing, conf);
HFC_outb(hc, R_SLOT, (slot_rx << 1) | V_SL_DIR);
HFC_outb(hc, A_SL_CFG, (ch << 1) | V_CH_DIR | routing);
hc->slot_owner[(slot_rx << 1) | 1] = ch;
hc->chan[ch].slot_rx = slot_rx;
hc->chan[ch].bank_rx = bank_rx;
}
switch (protocol) {
case (ISDN_P_NONE):
/* disable TX fifo */
HFC_outb(hc, R_FIFO, ch << 1);
HFC_wait(hc);
HFC_outb(hc, A_CON_HDLC, flow_tx | 0x00 | V_IFF);
HFC_outb(hc, A_SUBCH_CFG, 0);
HFC_outb(hc, A_IRQ_MSK, 0);
HFC_outb(hc, R_INC_RES_FIFO, V_RES_F);
HFC_wait(hc);
/* disable RX fifo */
HFC_outb(hc, R_FIFO, (ch << 1) | 1);
HFC_wait(hc);
HFC_outb(hc, A_CON_HDLC, flow_rx | 0x00);
HFC_outb(hc, A_SUBCH_CFG, 0);
HFC_outb(hc, A_IRQ_MSK, 0);
HFC_outb(hc, R_INC_RES_FIFO, V_RES_F);
HFC_wait(hc);
if (hc->chan[ch].bch && hc->ctype != HFC_TYPE_E1) {
hc->hw.a_st_ctrl0[hc->chan[ch].port] &=
((ch & 0x3) == 0) ? ~V_B1_EN : ~V_B2_EN;
HFC_outb(hc, R_ST_SEL, hc->chan[ch].port);
/* undocumented: delay after R_ST_SEL */
udelay(1);
HFC_outb(hc, A_ST_CTRL0,
hc->hw.a_st_ctrl0[hc->chan[ch].port]);
}
if (hc->chan[ch].bch) {
test_and_clear_bit(FLG_HDLC, &hc->chan[ch].bch->Flags);
test_and_clear_bit(FLG_TRANSPARENT,
&hc->chan[ch].bch->Flags);
}
break;
case (ISDN_P_B_RAW): /* B-channel */
if (test_bit(HFC_CHIP_B410P, &hc->chip) &&
(hc->chan[ch].slot_rx < 0) &&
(hc->chan[ch].slot_tx < 0)) {
printk(KERN_DEBUG
"Setting B-channel %d to echo cancelable "
"state on PCM slot %d\n", ch,
((ch / 4) * 8) + ((ch % 4) * 4) + 1);
printk(KERN_DEBUG
"Enabling pass through for channel\n");
vpm_out(hc, ch, ((ch / 4) * 8) +
((ch % 4) * 4) + 1, 0x01);
/* rx path */
/* S/T -> PCM */
HFC_outb(hc, R_FIFO, (ch << 1));
HFC_wait(hc);
HFC_outb(hc, A_CON_HDLC, 0xc0 | V_HDLC_TRP | V_IFF);
HFC_outb(hc, R_SLOT, (((ch / 4) * 8) +
((ch % 4) * 4) + 1) << 1);
HFC_outb(hc, A_SL_CFG, 0x80 | (ch << 1));
/* PCM -> FIFO */
HFC_outb(hc, R_FIFO, 0x20 | (ch << 1) | 1);
HFC_wait(hc);
HFC_outb(hc, A_CON_HDLC, 0x20 | V_HDLC_TRP | V_IFF);
HFC_outb(hc, A_SUBCH_CFG, 0);
HFC_outb(hc, A_IRQ_MSK, 0);
if (hc->chan[ch].protocol != protocol) {
HFC_outb(hc, R_INC_RES_FIFO, V_RES_F);
HFC_wait(hc);
}
HFC_outb(hc, R_SLOT, ((((ch / 4) * 8) +
((ch % 4) * 4) + 1) << 1) | 1);
HFC_outb(hc, A_SL_CFG, 0x80 | 0x20 | (ch << 1) | 1);
/* tx path */
/* PCM -> S/T */
HFC_outb(hc, R_FIFO, (ch << 1) | 1);
HFC_wait(hc);
HFC_outb(hc, A_CON_HDLC, 0xc0 | V_HDLC_TRP | V_IFF);
HFC_outb(hc, R_SLOT, ((((ch / 4) * 8) +
((ch % 4) * 4)) << 1) | 1);
HFC_outb(hc, A_SL_CFG, 0x80 | 0x40 | (ch << 1) | 1);
/* FIFO -> PCM */
HFC_outb(hc, R_FIFO, 0x20 | (ch << 1));
HFC_wait(hc);
HFC_outb(hc, A_CON_HDLC, 0x20 | V_HDLC_TRP | V_IFF);
HFC_outb(hc, A_SUBCH_CFG, 0);
HFC_outb(hc, A_IRQ_MSK, 0);
if (hc->chan[ch].protocol != protocol) {
HFC_outb(hc, R_INC_RES_FIFO, V_RES_F);
HFC_wait(hc);
}
/* tx silence */
HFC_outb_nodebug(hc, A_FIFO_DATA0_NOINC, hc->silence);
HFC_outb(hc, R_SLOT, (((ch / 4) * 8) +
((ch % 4) * 4)) << 1);
HFC_outb(hc, A_SL_CFG, 0x80 | 0x20 | (ch << 1));
} else {
/* enable TX fifo */
HFC_outb(hc, R_FIFO, ch << 1);
HFC_wait(hc);
if (hc->ctype == HFC_TYPE_XHFC)
HFC_outb(hc, A_CON_HDLC, flow_tx | 0x07 << 2 |
V_HDLC_TRP | V_IFF);
/* Enable FIFO, no interrupt */
else
HFC_outb(hc, A_CON_HDLC, flow_tx | 0x00 |
V_HDLC_TRP | V_IFF);
HFC_outb(hc, A_SUBCH_CFG, 0);
HFC_outb(hc, A_IRQ_MSK, 0);
if (hc->chan[ch].protocol != protocol) {
HFC_outb(hc, R_INC_RES_FIFO, V_RES_F);
HFC_wait(hc);
}
/* tx silence */
HFC_outb_nodebug(hc, A_FIFO_DATA0_NOINC, hc->silence);
/* enable RX fifo */
HFC_outb(hc, R_FIFO, (ch << 1) | 1);
HFC_wait(hc);
if (hc->ctype == HFC_TYPE_XHFC)
HFC_outb(hc, A_CON_HDLC, flow_rx | 0x07 << 2 |
V_HDLC_TRP);
/* Enable FIFO, no interrupt*/
else
HFC_outb(hc, A_CON_HDLC, flow_rx | 0x00 |
V_HDLC_TRP);
HFC_outb(hc, A_SUBCH_CFG, 0);
HFC_outb(hc, A_IRQ_MSK, 0);
if (hc->chan[ch].protocol != protocol) {
HFC_outb(hc, R_INC_RES_FIFO, V_RES_F);
HFC_wait(hc);
}
}
if (hc->ctype != HFC_TYPE_E1) {
hc->hw.a_st_ctrl0[hc->chan[ch].port] |=
((ch & 0x3) == 0) ? V_B1_EN : V_B2_EN;
HFC_outb(hc, R_ST_SEL, hc->chan[ch].port);
/* undocumented: delay after R_ST_SEL */
udelay(1);
HFC_outb(hc, A_ST_CTRL0,
hc->hw.a_st_ctrl0[hc->chan[ch].port]);
}
if (hc->chan[ch].bch)
test_and_set_bit(FLG_TRANSPARENT,
&hc->chan[ch].bch->Flags);
break;
case (ISDN_P_B_HDLC): /* B-channel */
case (ISDN_P_TE_S0): /* D-channel */
case (ISDN_P_NT_S0):
case (ISDN_P_TE_E1):
case (ISDN_P_NT_E1):
/* enable TX fifo */
HFC_outb(hc, R_FIFO, ch << 1);
HFC_wait(hc);
if (hc->ctype == HFC_TYPE_E1 || hc->chan[ch].bch) {
/* E1 or B-channel */
HFC_outb(hc, A_CON_HDLC, flow_tx | 0x04);
HFC_outb(hc, A_SUBCH_CFG, 0);
} else {
/* D-Channel without HDLC fill flags */
HFC_outb(hc, A_CON_HDLC, flow_tx | 0x04 | V_IFF);
HFC_outb(hc, A_SUBCH_CFG, 2);
}
HFC_outb(hc, A_IRQ_MSK, V_IRQ);
HFC_outb(hc, R_INC_RES_FIFO, V_RES_F);
HFC_wait(hc);
/* enable RX fifo */
HFC_outb(hc, R_FIFO, (ch << 1) | 1);
HFC_wait(hc);
HFC_outb(hc, A_CON_HDLC, flow_rx | 0x04);
if (hc->ctype == HFC_TYPE_E1 || hc->chan[ch].bch)
HFC_outb(hc, A_SUBCH_CFG, 0); /* full 8 bits */
else
HFC_outb(hc, A_SUBCH_CFG, 2); /* 2 bits dchannel */
HFC_outb(hc, A_IRQ_MSK, V_IRQ);
HFC_outb(hc, R_INC_RES_FIFO, V_RES_F);
HFC_wait(hc);
if (hc->chan[ch].bch) {
test_and_set_bit(FLG_HDLC, &hc->chan[ch].bch->Flags);
if (hc->ctype != HFC_TYPE_E1) {
hc->hw.a_st_ctrl0[hc->chan[ch].port] |=
((ch & 0x3) == 0) ? V_B1_EN : V_B2_EN;
HFC_outb(hc, R_ST_SEL, hc->chan[ch].port);
/* undocumented: delay after R_ST_SEL */
udelay(1);
HFC_outb(hc, A_ST_CTRL0,
hc->hw.a_st_ctrl0[hc->chan[ch].port]);
}
}
break;
default:
printk(KERN_DEBUG "%s: protocol not known %x\n",
__func__, protocol);
hc->chan[ch].protocol = ISDN_P_NONE;
return -ENOPROTOOPT;
}
hc->chan[ch].protocol = protocol;
return 0;
}
/*
* connect/disconnect PCM
*/
static void
hfcmulti_pcm(struct hfc_multi *hc, int ch, int slot_tx, int bank_tx,
int slot_rx, int bank_rx)
{
if (slot_tx < 0 || slot_rx < 0 || bank_tx < 0 || bank_rx < 0) {
/* disable PCM */
mode_hfcmulti(hc, ch, hc->chan[ch].protocol, -1, 0, -1, 0);
return;
}
/* enable pcm */
mode_hfcmulti(hc, ch, hc->chan[ch].protocol, slot_tx, bank_tx,
slot_rx, bank_rx);
}
/*
* set/disable conference
*/
static void
hfcmulti_conf(struct hfc_multi *hc, int ch, int num)
{
if (num >= 0 && num <= 7)
hc->chan[ch].conf = num;
else
hc->chan[ch].conf = -1;
mode_hfcmulti(hc, ch, hc->chan[ch].protocol, hc->chan[ch].slot_tx,
hc->chan[ch].bank_tx, hc->chan[ch].slot_rx,
hc->chan[ch].bank_rx);
}
/*
* set/disable sample loop
*/
/* NOTE: this function is experimental and therefore disabled */
/*
* Layer 1 callback function
*/
static int
hfcm_l1callback(struct dchannel *dch, u_int cmd)
{
struct hfc_multi *hc = dch->hw;
struct sk_buff_head free_queue;
u_long flags;
switch (cmd) {
case INFO3_P8:
case INFO3_P10:
break;
case HW_RESET_REQ:
/* start activation */
spin_lock_irqsave(&hc->lock, flags);
if (hc->ctype == HFC_TYPE_E1) {
if (debug & DEBUG_HFCMULTI_MSG)
printk(KERN_DEBUG
"%s: HW_RESET_REQ no BRI\n",
__func__);
} else {
HFC_outb(hc, R_ST_SEL, hc->chan[dch->slot].port);
/* undocumented: delay after R_ST_SEL */
udelay(1);
HFC_outb(hc, A_ST_WR_STATE, V_ST_LD_STA | 3); /* F3 */
udelay(6); /* wait at least 5.21 us */
HFC_outb(hc, A_ST_WR_STATE, 3);
HFC_outb(hc, A_ST_WR_STATE, 3 | (V_ST_ACT * 3));
/* activate */
}
spin_unlock_irqrestore(&hc->lock, flags);
l1_event(dch->l1, HW_POWERUP_IND);
break;
case HW_DEACT_REQ:
__skb_queue_head_init(&free_queue);
/* start deactivation */
spin_lock_irqsave(&hc->lock, flags);
if (hc->ctype == HFC_TYPE_E1) {
if (debug & DEBUG_HFCMULTI_MSG)
printk(KERN_DEBUG
"%s: HW_DEACT_REQ no BRI\n",
__func__);
} else {
HFC_outb(hc, R_ST_SEL, hc->chan[dch->slot].port);
/* undocumented: delay after R_ST_SEL */
udelay(1);
HFC_outb(hc, A_ST_WR_STATE, V_ST_ACT * 2);
/* deactivate */
if (test_bit(HFC_CHIP_PLXSD, &hc->chip)) {
hc->syncronized &=
~(1 << hc->chan[dch->slot].port);
plxsd_checksync(hc, 0);
}
}
skb_queue_splice_init(&dch->squeue, &free_queue);
if (dch->tx_skb) {
__skb_queue_tail(&free_queue, dch->tx_skb);
dch->tx_skb = NULL;
}
dch->tx_idx = 0;
if (dch->rx_skb) {
__skb_queue_tail(&free_queue, dch->rx_skb);
dch->rx_skb = NULL;
}
test_and_clear_bit(FLG_TX_BUSY, &dch->Flags);
if (test_and_clear_bit(FLG_BUSY_TIMER, &dch->Flags))
del_timer(&dch->timer);
spin_unlock_irqrestore(&hc->lock, flags);
__skb_queue_purge(&free_queue);
break;
case HW_POWERUP_REQ:
spin_lock_irqsave(&hc->lock, flags);
if (hc->ctype == HFC_TYPE_E1) {
if (debug & DEBUG_HFCMULTI_MSG)
printk(KERN_DEBUG
"%s: HW_POWERUP_REQ no BRI\n",
__func__);
} else {
HFC_outb(hc, R_ST_SEL, hc->chan[dch->slot].port);
/* undocumented: delay after R_ST_SEL */
udelay(1);
HFC_outb(hc, A_ST_WR_STATE, 3 | 0x10); /* activate */
udelay(6); /* wait at least 5.21 us */
HFC_outb(hc, A_ST_WR_STATE, 3); /* activate */
}
spin_unlock_irqrestore(&hc->lock, flags);
break;
case PH_ACTIVATE_IND:
test_and_set_bit(FLG_ACTIVE, &dch->Flags);
_queue_data(&dch->dev.D, cmd, MISDN_ID_ANY, 0, NULL,
GFP_ATOMIC);
break;
case PH_DEACTIVATE_IND:
test_and_clear_bit(FLG_ACTIVE, &dch->Flags);
_queue_data(&dch->dev.D, cmd, MISDN_ID_ANY, 0, NULL,
GFP_ATOMIC);
break;
default:
if (dch->debug & DEBUG_HW)
printk(KERN_DEBUG "%s: unknown command %x\n",
__func__, cmd);
return -1;
}
return 0;
}
/*
* Layer2 -> Layer 1 Transfer
*/
static int
handle_dmsg(struct mISDNchannel *ch, struct sk_buff *skb)
{
struct mISDNdevice *dev = container_of(ch, struct mISDNdevice, D);
struct dchannel *dch = container_of(dev, struct dchannel, dev);
struct hfc_multi *hc = dch->hw;
struct mISDNhead *hh = mISDN_HEAD_P(skb);
int ret = -EINVAL;
unsigned int id;
u_long flags;
switch (hh->prim) {
case PH_DATA_REQ:
if (skb->len < 1)
break;
spin_lock_irqsave(&hc->lock, flags);
ret = dchannel_senddata(dch, skb);
if (ret > 0) { /* direct TX */
id = hh->id; /* skb can be freed */
hfcmulti_tx(hc, dch->slot);
ret = 0;
/* start fifo */
HFC_outb(hc, R_FIFO, 0);
HFC_wait(hc);
spin_unlock_irqrestore(&hc->lock, flags);
queue_ch_frame(ch, PH_DATA_CNF, id, NULL);
} else
spin_unlock_irqrestore(&hc->lock, flags);
return ret;
case PH_ACTIVATE_REQ:
if (dch->dev.D.protocol != ISDN_P_TE_S0) {
spin_lock_irqsave(&hc->lock, flags);
ret = 0;
if (debug & DEBUG_HFCMULTI_MSG)
printk(KERN_DEBUG
"%s: PH_ACTIVATE port %d (0..%d)\n",
__func__, hc->chan[dch->slot].port,
hc->ports - 1);
/* start activation */
if (hc->ctype == HFC_TYPE_E1) {
ph_state_change(dch);
if (debug & DEBUG_HFCMULTI_STATE)
printk(KERN_DEBUG
"%s: E1 report state %x \n",
__func__, dch->state);
} else {
HFC_outb(hc, R_ST_SEL,
hc->chan[dch->slot].port);
/* undocumented: delay after R_ST_SEL */
udelay(1);
HFC_outb(hc, A_ST_WR_STATE, V_ST_LD_STA | 1);
/* G1 */
udelay(6); /* wait at least 5.21 us */
HFC_outb(hc, A_ST_WR_STATE, 1);
HFC_outb(hc, A_ST_WR_STATE, 1 |
(V_ST_ACT * 3)); /* activate */
dch->state = 1;
}
spin_unlock_irqrestore(&hc->lock, flags);
} else
ret = l1_event(dch->l1, hh->prim);
break;
case PH_DEACTIVATE_REQ:
test_and_clear_bit(FLG_L2_ACTIVATED, &dch->Flags);
if (dch->dev.D.protocol != ISDN_P_TE_S0) {
struct sk_buff_head free_queue;
__skb_queue_head_init(&free_queue);
spin_lock_irqsave(&hc->lock, flags);
if (debug & DEBUG_HFCMULTI_MSG)
printk(KERN_DEBUG
"%s: PH_DEACTIVATE port %d (0..%d)\n",
__func__, hc->chan[dch->slot].port,
hc->ports - 1);
/* start deactivation */
if (hc->ctype == HFC_TYPE_E1) {
if (debug & DEBUG_HFCMULTI_MSG)
printk(KERN_DEBUG
"%s: PH_DEACTIVATE no BRI\n",
__func__);
} else {
HFC_outb(hc, R_ST_SEL,
hc->chan[dch->slot].port);
/* undocumented: delay after R_ST_SEL */
udelay(1);
HFC_outb(hc, A_ST_WR_STATE, V_ST_ACT * 2);
/* deactivate */
dch->state = 1;
}
skb_queue_splice_init(&dch->squeue, &free_queue);
if (dch->tx_skb) {
__skb_queue_tail(&free_queue, dch->tx_skb);
dch->tx_skb = NULL;
}
dch->tx_idx = 0;
if (dch->rx_skb) {
__skb_queue_tail(&free_queue, dch->rx_skb);
dch->rx_skb = NULL;
}
test_and_clear_bit(FLG_TX_BUSY, &dch->Flags);
if (test_and_clear_bit(FLG_BUSY_TIMER, &dch->Flags))
del_timer(&dch->timer);
#ifdef FIXME
if (test_and_clear_bit(FLG_L1_BUSY, &dch->Flags))
dchannel_sched_event(&hc->dch, D_CLEARBUSY);
#endif
ret = 0;
spin_unlock_irqrestore(&hc->lock, flags);
__skb_queue_purge(&free_queue);
} else
ret = l1_event(dch->l1, hh->prim);
break;
}
if (!ret)
dev_kfree_skb(skb);
return ret;
}
static void
deactivate_bchannel(struct bchannel *bch)
{
struct hfc_multi *hc = bch->hw;
u_long flags;
spin_lock_irqsave(&hc->lock, flags);
mISDN_clear_bchannel(bch);
hc->chan[bch->slot].coeff_count = 0;
hc->chan[bch->slot].rx_off = 0;
hc->chan[bch->slot].conf = -1;
mode_hfcmulti(hc, bch->slot, ISDN_P_NONE, -1, 0, -1, 0);
spin_unlock_irqrestore(&hc->lock, flags);
}
static int
handle_bmsg(struct mISDNchannel *ch, struct sk_buff *skb)
{
struct bchannel *bch = container_of(ch, struct bchannel, ch);
struct hfc_multi *hc = bch->hw;
int ret = -EINVAL;
struct mISDNhead *hh = mISDN_HEAD_P(skb);
unsigned long flags;
switch (hh->prim) {
case PH_DATA_REQ:
if (!skb->len)
break;
spin_lock_irqsave(&hc->lock, flags);
ret = bchannel_senddata(bch, skb);
if (ret > 0) { /* direct TX */
hfcmulti_tx(hc, bch->slot);
ret = 0;
/* start fifo */
HFC_outb_nodebug(hc, R_FIFO, 0);
HFC_wait_nodebug(hc);
}
spin_unlock_irqrestore(&hc->lock, flags);
return ret;
case PH_ACTIVATE_REQ:
if (debug & DEBUG_HFCMULTI_MSG)
printk(KERN_DEBUG "%s: PH_ACTIVATE ch %d (0..32)\n",
__func__, bch->slot);
spin_lock_irqsave(&hc->lock, flags);
/* activate B-channel if not already activated */
if (!test_and_set_bit(FLG_ACTIVE, &bch->Flags)) {
hc->chan[bch->slot].txpending = 0;
ret = mode_hfcmulti(hc, bch->slot,
ch->protocol,
hc->chan[bch->slot].slot_tx,
hc->chan[bch->slot].bank_tx,
hc->chan[bch->slot].slot_rx,
hc->chan[bch->slot].bank_rx);
if (!ret) {
if (ch->protocol == ISDN_P_B_RAW && !hc->dtmf
&& test_bit(HFC_CHIP_DTMF, &hc->chip)) {
/* start decoder */
hc->dtmf = 1;
if (debug & DEBUG_HFCMULTI_DTMF)
printk(KERN_DEBUG
"%s: start dtmf decoder\n",
__func__);
HFC_outb(hc, R_DTMF, hc->hw.r_dtmf |
V_RST_DTMF);
}
}
} else
ret = 0;
spin_unlock_irqrestore(&hc->lock, flags);
if (!ret)
_queue_data(ch, PH_ACTIVATE_IND, MISDN_ID_ANY, 0, NULL,
GFP_KERNEL);
break;
case PH_CONTROL_REQ:
spin_lock_irqsave(&hc->lock, flags);
switch (hh->id) {
case HFC_SPL_LOOP_ON: /* set sample loop */
if (debug & DEBUG_HFCMULTI_MSG)
printk(KERN_DEBUG
"%s: HFC_SPL_LOOP_ON (len = %d)\n",
__func__, skb->len);
ret = 0;
break;
case HFC_SPL_LOOP_OFF: /* set silence */
if (debug & DEBUG_HFCMULTI_MSG)
printk(KERN_DEBUG "%s: HFC_SPL_LOOP_OFF\n",
__func__);
ret = 0;
break;
default:
printk(KERN_ERR
"%s: unknown PH_CONTROL_REQ info %x\n",
__func__, hh->id);
ret = -EINVAL;
}
spin_unlock_irqrestore(&hc->lock, flags);
break;
case PH_DEACTIVATE_REQ:
deactivate_bchannel(bch); /* locked there */
_queue_data(ch, PH_DEACTIVATE_IND, MISDN_ID_ANY, 0, NULL,
GFP_KERNEL);
ret = 0;
break;
}
if (!ret)
dev_kfree_skb(skb);
return ret;
}
/*
* bchannel control function
*/
static int
channel_bctrl(struct bchannel *bch, struct mISDN_ctrl_req *cq)
{
int ret = 0;
struct dsp_features *features =
(struct dsp_features *)(*((u_long *)&cq->p1));
struct hfc_multi *hc = bch->hw;
int slot_tx;
int bank_tx;
int slot_rx;
int bank_rx;
int num;
switch (cq->op) {
case MISDN_CTRL_GETOP:
ret = mISDN_ctrl_bchannel(bch, cq);
cq->op |= MISDN_CTRL_HFC_OP | MISDN_CTRL_HW_FEATURES_OP;
break;
case MISDN_CTRL_RX_OFF: /* turn off / on rx stream */
ret = mISDN_ctrl_bchannel(bch, cq);
hc->chan[bch->slot].rx_off = !!cq->p1;
if (!hc->chan[bch->slot].rx_off) {
/* reset fifo on rx on */
HFC_outb_nodebug(hc, R_FIFO, (bch->slot << 1) | 1);
HFC_wait_nodebug(hc);
HFC_outb_nodebug(hc, R_INC_RES_FIFO, V_RES_F);
HFC_wait_nodebug(hc);
}
if (debug & DEBUG_HFCMULTI_MSG)
printk(KERN_DEBUG "%s: RX_OFF request (nr=%d off=%d)\n",
__func__, bch->nr, hc->chan[bch->slot].rx_off);
break;
case MISDN_CTRL_FILL_EMPTY:
ret = mISDN_ctrl_bchannel(bch, cq);
hc->silence = bch->fill[0];
memset(hc->silence_data, hc->silence, sizeof(hc->silence_data));
break;
case MISDN_CTRL_HW_FEATURES: /* fill features structure */
if (debug & DEBUG_HFCMULTI_MSG)
printk(KERN_DEBUG "%s: HW_FEATURE request\n",
__func__);
/* create confirm */
features->hfc_id = hc->id;
if (test_bit(HFC_CHIP_DTMF, &hc->chip))
features->hfc_dtmf = 1;
if (test_bit(HFC_CHIP_CONF, &hc->chip))
features->hfc_conf = 1;
features->hfc_loops = 0;
if (test_bit(HFC_CHIP_B410P, &hc->chip)) {
features->hfc_echocanhw = 1;
} else {
features->pcm_id = hc->pcm;
features->pcm_slots = hc->slots;
features->pcm_banks = 2;
}
break;
case MISDN_CTRL_HFC_PCM_CONN: /* connect to pcm timeslot (0..N) */
slot_tx = cq->p1 & 0xff;
bank_tx = cq->p1 >> 8;
slot_rx = cq->p2 & 0xff;
bank_rx = cq->p2 >> 8;
if (debug & DEBUG_HFCMULTI_MSG)
printk(KERN_DEBUG
"%s: HFC_PCM_CONN slot %d bank %d (TX) "
"slot %d bank %d (RX)\n",
__func__, slot_tx, bank_tx,
slot_rx, bank_rx);
if (slot_tx < hc->slots && bank_tx <= 2 &&
slot_rx < hc->slots && bank_rx <= 2)
hfcmulti_pcm(hc, bch->slot,
slot_tx, bank_tx, slot_rx, bank_rx);
else {
printk(KERN_WARNING
"%s: HFC_PCM_CONN slot %d bank %d (TX) "
"slot %d bank %d (RX) out of range\n",
__func__, slot_tx, bank_tx,
slot_rx, bank_rx);
ret = -EINVAL;
}
break;
case MISDN_CTRL_HFC_PCM_DISC: /* release interface from pcm timeslot */
if (debug & DEBUG_HFCMULTI_MSG)
printk(KERN_DEBUG "%s: HFC_PCM_DISC\n",
__func__);
hfcmulti_pcm(hc, bch->slot, -1, 0, -1, 0);
break;
case MISDN_CTRL_HFC_CONF_JOIN: /* join conference (0..7) */
num = cq->p1 & 0xff;
if (debug & DEBUG_HFCMULTI_MSG)
printk(KERN_DEBUG "%s: HFC_CONF_JOIN conf %d\n",
__func__, num);
if (num <= 7)
hfcmulti_conf(hc, bch->slot, num);
else {
printk(KERN_WARNING
"%s: HW_CONF_JOIN conf %d out of range\n",
__func__, num);
ret = -EINVAL;
}
break;
case MISDN_CTRL_HFC_CONF_SPLIT: /* split conference */
if (debug & DEBUG_HFCMULTI_MSG)
printk(KERN_DEBUG "%s: HFC_CONF_SPLIT\n", __func__);
hfcmulti_conf(hc, bch->slot, -1);
break;
case MISDN_CTRL_HFC_ECHOCAN_ON:
if (debug & DEBUG_HFCMULTI_MSG)
printk(KERN_DEBUG "%s: HFC_ECHOCAN_ON\n", __func__);
if (test_bit(HFC_CHIP_B410P, &hc->chip))
vpm_echocan_on(hc, bch->slot, cq->p1);
else
ret = -EINVAL;
break;
case MISDN_CTRL_HFC_ECHOCAN_OFF:
if (debug & DEBUG_HFCMULTI_MSG)
printk(KERN_DEBUG "%s: HFC_ECHOCAN_OFF\n",
__func__);
if (test_bit(HFC_CHIP_B410P, &hc->chip))
vpm_echocan_off(hc, bch->slot);
else
ret = -EINVAL;
break;
default:
ret = mISDN_ctrl_bchannel(bch, cq);
break;
}
return ret;
}
static int
hfcm_bctrl(struct mISDNchannel *ch, u_int cmd, void *arg)
{
struct bchannel *bch = container_of(ch, struct bchannel, ch);
struct hfc_multi *hc = bch->hw;
int err = -EINVAL;
u_long flags;
if (bch->debug & DEBUG_HW)
printk(KERN_DEBUG "%s: cmd:%x %p\n",
__func__, cmd, arg);
switch (cmd) {
case CLOSE_CHANNEL:
test_and_clear_bit(FLG_OPEN, &bch->Flags);
deactivate_bchannel(bch); /* locked there */
ch->protocol = ISDN_P_NONE;
ch->peer = NULL;
module_put(THIS_MODULE);
err = 0;
break;
case CONTROL_CHANNEL:
spin_lock_irqsave(&hc->lock, flags);
err = channel_bctrl(bch, arg);
spin_unlock_irqrestore(&hc->lock, flags);
break;
default:
printk(KERN_WARNING "%s: unknown prim(%x)\n",
__func__, cmd);
}
return err;
}
/*
* handle D-channel events
*
* handle state change event
*/
static void
ph_state_change(struct dchannel *dch)
{
struct hfc_multi *hc;
int ch, i;
if (!dch) {
printk(KERN_WARNING "%s: ERROR given dch is NULL\n", __func__);
return;
}
hc = dch->hw;
ch = dch->slot;
if (hc->ctype == HFC_TYPE_E1) {
if (dch->dev.D.protocol == ISDN_P_TE_E1) {
if (debug & DEBUG_HFCMULTI_STATE)
printk(KERN_DEBUG
"%s: E1 TE (id=%d) newstate %x\n",
__func__, hc->id, dch->state);
} else {
if (debug & DEBUG_HFCMULTI_STATE)
printk(KERN_DEBUG
"%s: E1 NT (id=%d) newstate %x\n",
__func__, hc->id, dch->state);
}
switch (dch->state) {
case (1):
if (hc->e1_state != 1) {
for (i = 1; i <= 31; i++) {
/* reset fifos on e1 activation */
HFC_outb_nodebug(hc, R_FIFO,
(i << 1) | 1);
HFC_wait_nodebug(hc);
HFC_outb_nodebug(hc, R_INC_RES_FIFO,
V_RES_F);
HFC_wait_nodebug(hc);
}
}
test_and_set_bit(FLG_ACTIVE, &dch->Flags);
_queue_data(&dch->dev.D, PH_ACTIVATE_IND,
MISDN_ID_ANY, 0, NULL, GFP_ATOMIC);
break;
default:
if (hc->e1_state != 1)
return;
test_and_clear_bit(FLG_ACTIVE, &dch->Flags);
_queue_data(&dch->dev.D, PH_DEACTIVATE_IND,
MISDN_ID_ANY, 0, NULL, GFP_ATOMIC);
}
hc->e1_state = dch->state;
} else {
if (dch->dev.D.protocol == ISDN_P_TE_S0) {
if (debug & DEBUG_HFCMULTI_STATE)
printk(KERN_DEBUG
"%s: S/T TE newstate %x\n",
__func__, dch->state);
switch (dch->state) {
case (0):
l1_event(dch->l1, HW_RESET_IND);
break;
case (3):
l1_event(dch->l1, HW_DEACT_IND);
break;
case (5):
case (8):
l1_event(dch->l1, ANYSIGNAL);
break;
case (6):
l1_event(dch->l1, INFO2);
break;
case (7):
l1_event(dch->l1, INFO4_P8);
break;
}
} else {
if (debug & DEBUG_HFCMULTI_STATE)
printk(KERN_DEBUG "%s: S/T NT newstate %x\n",
__func__, dch->state);
switch (dch->state) {
case (2):
if (hc->chan[ch].nt_timer == 0) {
hc->chan[ch].nt_timer = -1;
HFC_outb(hc, R_ST_SEL,
hc->chan[ch].port);
/* undocumented: delay after R_ST_SEL */
udelay(1);
HFC_outb(hc, A_ST_WR_STATE, 4 |
V_ST_LD_STA); /* G4 */
udelay(6); /* wait at least 5.21 us */
HFC_outb(hc, A_ST_WR_STATE, 4);
dch->state = 4;
} else {
/* one extra count for the next event */
hc->chan[ch].nt_timer =
nt_t1_count[poll_timer] + 1;
HFC_outb(hc, R_ST_SEL,
hc->chan[ch].port);
/* undocumented: delay after R_ST_SEL */
udelay(1);
/* allow G2 -> G3 transition */
HFC_outb(hc, A_ST_WR_STATE, 2 |
V_SET_G2_G3);
}
break;
case (1):
hc->chan[ch].nt_timer = -1;
test_and_clear_bit(FLG_ACTIVE, &dch->Flags);
_queue_data(&dch->dev.D, PH_DEACTIVATE_IND,
MISDN_ID_ANY, 0, NULL, GFP_ATOMIC);
break;
case (4):
hc->chan[ch].nt_timer = -1;
break;
case (3):
hc->chan[ch].nt_timer = -1;
test_and_set_bit(FLG_ACTIVE, &dch->Flags);
_queue_data(&dch->dev.D, PH_ACTIVATE_IND,
MISDN_ID_ANY, 0, NULL, GFP_ATOMIC);
break;
}
}
}
}
/*
* called for card mode init message
*/
static void
hfcmulti_initmode(struct dchannel *dch)
{
struct hfc_multi *hc = dch->hw;
u_char a_st_wr_state, r_e1_wr_sta;
int i, pt;
if (debug & DEBUG_HFCMULTI_INIT)
printk(KERN_DEBUG "%s: entered\n", __func__);
i = dch->slot;
pt = hc->chan[i].port;
if (hc->ctype == HFC_TYPE_E1) {
/* E1 */
hc->chan[hc->dnum[pt]].slot_tx = -1;
hc->chan[hc->dnum[pt]].slot_rx = -1;
hc->chan[hc->dnum[pt]].conf = -1;
if (hc->dnum[pt]) {
mode_hfcmulti(hc, dch->slot, dch->dev.D.protocol,
-1, 0, -1, 0);
timer_setup(&dch->timer, hfcmulti_dbusy_timer, 0);
}
for (i = 1; i <= 31; i++) {
if (!((1 << i) & hc->bmask[pt])) /* skip unused chan */
continue;
hc->chan[i].slot_tx = -1;
hc->chan[i].slot_rx = -1;
hc->chan[i].conf = -1;
mode_hfcmulti(hc, i, ISDN_P_NONE, -1, 0, -1, 0);
}
}
if (hc->ctype == HFC_TYPE_E1 && pt == 0) {
/* E1, port 0 */
dch = hc->chan[hc->dnum[0]].dch;
if (test_bit(HFC_CFG_REPORT_LOS, &hc->chan[hc->dnum[0]].cfg)) {
HFC_outb(hc, R_LOS0, 255); /* 2 ms */
HFC_outb(hc, R_LOS1, 255); /* 512 ms */
}
if (test_bit(HFC_CFG_OPTICAL, &hc->chan[hc->dnum[0]].cfg)) {
HFC_outb(hc, R_RX0, 0);
hc->hw.r_tx0 = 0 | V_OUT_EN;
} else {
HFC_outb(hc, R_RX0, 1);
hc->hw.r_tx0 = 1 | V_OUT_EN;
}
hc->hw.r_tx1 = V_ATX | V_NTRI;
HFC_outb(hc, R_TX0, hc->hw.r_tx0);
HFC_outb(hc, R_TX1, hc->hw.r_tx1);
HFC_outb(hc, R_TX_FR0, 0x00);
HFC_outb(hc, R_TX_FR1, 0xf8);
if (test_bit(HFC_CFG_CRC4, &hc->chan[hc->dnum[0]].cfg))
HFC_outb(hc, R_TX_FR2, V_TX_MF | V_TX_E | V_NEG_E);
HFC_outb(hc, R_RX_FR0, V_AUTO_RESYNC | V_AUTO_RECO | 0);
if (test_bit(HFC_CFG_CRC4, &hc->chan[hc->dnum[0]].cfg))
HFC_outb(hc, R_RX_FR1, V_RX_MF | V_RX_MF_SYNC);
if (dch->dev.D.protocol == ISDN_P_NT_E1) {
if (debug & DEBUG_HFCMULTI_INIT)
printk(KERN_DEBUG "%s: E1 port is NT-mode\n",
__func__);
r_e1_wr_sta = 0; /* G0 */
hc->e1_getclock = 0;
} else {
if (debug & DEBUG_HFCMULTI_INIT)
printk(KERN_DEBUG "%s: E1 port is TE-mode\n",
__func__);
r_e1_wr_sta = 0; /* F0 */
hc->e1_getclock = 1;
}
if (test_bit(HFC_CHIP_RX_SYNC, &hc->chip))
HFC_outb(hc, R_SYNC_OUT, V_SYNC_E1_RX);
else
HFC_outb(hc, R_SYNC_OUT, 0);
if (test_bit(HFC_CHIP_E1CLOCK_GET, &hc->chip))
hc->e1_getclock = 1;
if (test_bit(HFC_CHIP_E1CLOCK_PUT, &hc->chip))
hc->e1_getclock = 0;
if (test_bit(HFC_CHIP_PCM_SLAVE, &hc->chip)) {
/* SLAVE (clock master) */
if (debug & DEBUG_HFCMULTI_INIT)
printk(KERN_DEBUG
"%s: E1 port is clock master "
"(clock from PCM)\n", __func__);
HFC_outb(hc, R_SYNC_CTRL, V_EXT_CLK_SYNC | V_PCM_SYNC);
} else {
if (hc->e1_getclock) {
/* MASTER (clock slave) */
if (debug & DEBUG_HFCMULTI_INIT)
printk(KERN_DEBUG
"%s: E1 port is clock slave "
"(clock to PCM)\n", __func__);
HFC_outb(hc, R_SYNC_CTRL, V_SYNC_OFFS);
} else {
/* MASTER (clock master) */
if (debug & DEBUG_HFCMULTI_INIT)
printk(KERN_DEBUG "%s: E1 port is "
"clock master "
"(clock from QUARTZ)\n",
__func__);
HFC_outb(hc, R_SYNC_CTRL, V_EXT_CLK_SYNC |
V_PCM_SYNC | V_JATT_OFF);
HFC_outb(hc, R_SYNC_OUT, 0);
}
}
HFC_outb(hc, R_JATT_ATT, 0x9c); /* undoc register */
HFC_outb(hc, R_PWM_MD, V_PWM0_MD);
HFC_outb(hc, R_PWM0, 0x50);
HFC_outb(hc, R_PWM1, 0xff);
/* state machine setup */
HFC_outb(hc, R_E1_WR_STA, r_e1_wr_sta | V_E1_LD_STA);
udelay(6); /* wait at least 5.21 us */
HFC_outb(hc, R_E1_WR_STA, r_e1_wr_sta);
if (test_bit(HFC_CHIP_PLXSD, &hc->chip)) {
hc->syncronized = 0;
plxsd_checksync(hc, 0);
}
}
if (hc->ctype != HFC_TYPE_E1) {
/* ST */
hc->chan[i].slot_tx = -1;
hc->chan[i].slot_rx = -1;
hc->chan[i].conf = -1;
mode_hfcmulti(hc, i, dch->dev.D.protocol, -1, 0, -1, 0);
timer_setup(&dch->timer, hfcmulti_dbusy_timer, 0);
hc->chan[i - 2].slot_tx = -1;
hc->chan[i - 2].slot_rx = -1;
hc->chan[i - 2].conf = -1;
mode_hfcmulti(hc, i - 2, ISDN_P_NONE, -1, 0, -1, 0);
hc->chan[i - 1].slot_tx = -1;
hc->chan[i - 1].slot_rx = -1;
hc->chan[i - 1].conf = -1;
mode_hfcmulti(hc, i - 1, ISDN_P_NONE, -1, 0, -1, 0);
/* select interface */
HFC_outb(hc, R_ST_SEL, pt);
/* undocumented: delay after R_ST_SEL */
udelay(1);
if (dch->dev.D.protocol == ISDN_P_NT_S0) {
if (debug & DEBUG_HFCMULTI_INIT)
printk(KERN_DEBUG
"%s: ST port %d is NT-mode\n",
__func__, pt);
/* clock delay */
HFC_outb(hc, A_ST_CLK_DLY, clockdelay_nt);
a_st_wr_state = 1; /* G1 */
hc->hw.a_st_ctrl0[pt] = V_ST_MD;
} else {
if (debug & DEBUG_HFCMULTI_INIT)
printk(KERN_DEBUG
"%s: ST port %d is TE-mode\n",
__func__, pt);
/* clock delay */
HFC_outb(hc, A_ST_CLK_DLY, clockdelay_te);
a_st_wr_state = 2; /* F2 */
hc->hw.a_st_ctrl0[pt] = 0;
}
if (!test_bit(HFC_CFG_NONCAP_TX, &hc->chan[i].cfg))
hc->hw.a_st_ctrl0[pt] |= V_TX_LI;
if (hc->ctype == HFC_TYPE_XHFC) {
hc->hw.a_st_ctrl0[pt] |= 0x40 /* V_ST_PU_CTRL */;
HFC_outb(hc, 0x35 /* A_ST_CTRL3 */,
0x7c << 1 /* V_ST_PULSE */);
}
/* line setup */
HFC_outb(hc, A_ST_CTRL0, hc->hw.a_st_ctrl0[pt]);
/* disable E-channel */
if ((dch->dev.D.protocol == ISDN_P_NT_S0) ||
test_bit(HFC_CFG_DIS_ECHANNEL, &hc->chan[i].cfg))
HFC_outb(hc, A_ST_CTRL1, V_E_IGNO);
else
HFC_outb(hc, A_ST_CTRL1, 0);
/* enable B-channel receive */
HFC_outb(hc, A_ST_CTRL2, V_B1_RX_EN | V_B2_RX_EN);
/* state machine setup */
HFC_outb(hc, A_ST_WR_STATE, a_st_wr_state | V_ST_LD_STA);
udelay(6); /* wait at least 5.21 us */
HFC_outb(hc, A_ST_WR_STATE, a_st_wr_state);
hc->hw.r_sci_msk |= 1 << pt;
/* state machine interrupts */
HFC_outb(hc, R_SCI_MSK, hc->hw.r_sci_msk);
/* unset sync on port */
if (test_bit(HFC_CHIP_PLXSD, &hc->chip)) {
hc->syncronized &=
~(1 << hc->chan[dch->slot].port);
plxsd_checksync(hc, 0);
}
}
if (debug & DEBUG_HFCMULTI_INIT)
printk("%s: done\n", __func__);
}
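/*
 * called when a D-channel is opened; creates layer 1 for TE mode
 * and (re)initializes the port mode if the protocol changes
 */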
static int
open_dchannel(struct hfc_multi *hc, struct dchannel *dch,
struct channel_req *rq)
{
int err = 0;
u_long flags;
if (debug & DEBUG_HW_OPEN)
printk(KERN_DEBUG "%s: dev(%d) open from %p\n", __func__,
dch->dev.id, __builtin_return_address(0));
if (rq->protocol == ISDN_P_NONE)
return -EINVAL;
if ((dch->dev.D.protocol != ISDN_P_NONE) &&
(dch->dev.D.protocol != rq->protocol)) {
if (debug & DEBUG_HFCMULTI_MODE)
printk(KERN_DEBUG "%s: change protocol %x to %x\n",
__func__, dch->dev.D.protocol, rq->protocol);
}
if ((dch->dev.D.protocol == ISDN_P_TE_S0) &&
(rq->protocol != ISDN_P_TE_S0))
l1_event(dch->l1, CLOSE_CHANNEL);
if (dch->dev.D.protocol != rq->protocol) {
if (rq->protocol == ISDN_P_TE_S0) {
err = create_l1(dch, hfcm_l1callback);
if (err)
return err;
}
dch->dev.D.protocol = rq->protocol;
spin_lock_irqsave(&hc->lock, flags);
hfcmulti_initmode(dch);
spin_unlock_irqrestore(&hc->lock, flags);
}
if (test_bit(FLG_ACTIVE, &dch->Flags))
_queue_data(&dch->dev.D, PH_ACTIVATE_IND, MISDN_ID_ANY,
0, NULL, GFP_KERNEL);
rq->ch = &dch->dev.D;
if (!try_module_get(THIS_MODULE))
printk(KERN_WARNING "%s:cannot get module\n", __func__);
return 0;
}
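/*
 * called when a B-channel is opened; maps the requested channel
 * number to the internal channel slot (E1 direct, S0 relative to
 * the D-channel slot)
 */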
static int
open_bchannel(struct hfc_multi *hc, struct dchannel *dch,
struct channel_req *rq)
{
struct bchannel *bch;
int ch;
if (!test_channelmap(rq->adr.channel, dch->dev.channelmap))
return -EINVAL;
if (rq->protocol == ISDN_P_NONE)
return -EINVAL;
if (hc->ctype == HFC_TYPE_E1)
ch = rq->adr.channel;
else
ch = (rq->adr.channel - 1) + (dch->slot - 2);
bch = hc->chan[ch].bch;
if (!bch) {
printk(KERN_ERR "%s:internal error ch %d has no bch\n",
__func__, ch);
return -EINVAL;
}
if (test_and_set_bit(FLG_OPEN, &bch->Flags))
return -EBUSY; /* B-channel can only be opened once */
bch->ch.protocol = rq->protocol;
hc->chan[ch].rx_off = 0;
rq->ch = &bch->ch;
if (!try_module_get(THIS_MODULE))
printk(KERN_WARNING "%s:cannot get module\n", __func__);
return 0;
}
/*
* device control function
*/
static int
channel_dctrl(struct dchannel *dch, struct mISDN_ctrl_req *cq)
{
struct hfc_multi *hc = dch->hw;
int ret = 0;
int wd_mode, wd_cnt;
switch (cq->op) {
case MISDN_CTRL_GETOP:
cq->op = MISDN_CTRL_HFC_OP | MISDN_CTRL_L1_TIMER3;
break;
case MISDN_CTRL_HFC_WD_INIT: /* init the watchdog */
wd_cnt = cq->p1 & 0xf;
wd_mode = !!(cq->p1 >> 4);
if (debug & DEBUG_HFCMULTI_MSG)
printk(KERN_DEBUG "%s: MISDN_CTRL_HFC_WD_INIT mode %s"
", counter 0x%x\n", __func__,
wd_mode ? "AUTO" : "MANUAL", wd_cnt);
/* set the watchdog timer */
HFC_outb(hc, R_TI_WD, poll_timer | (wd_cnt << 4));
hc->hw.r_bert_wd_md = (wd_mode ? V_AUTO_WD_RES : 0);
if (hc->ctype == HFC_TYPE_XHFC)
hc->hw.r_bert_wd_md |= 0x40 /* V_WD_EN */;
/* init the watchdog register and reset the counter */
HFC_outb(hc, R_BERT_WD_MD, hc->hw.r_bert_wd_md | V_WD_RES);
if (test_bit(HFC_CHIP_PLXSD, &hc->chip)) {
/* enable the watchdog output for Speech-Design */
HFC_outb(hc, R_GPIO_SEL, V_GPIO_SEL7);
HFC_outb(hc, R_GPIO_EN1, V_GPIO_EN15);
HFC_outb(hc, R_GPIO_OUT1, 0);
HFC_outb(hc, R_GPIO_OUT1, V_GPIO_OUT15);
}
break;
case MISDN_CTRL_HFC_WD_RESET: /* reset the watchdog counter */
if (debug & DEBUG_HFCMULTI_MSG)
printk(KERN_DEBUG "%s: MISDN_CTRL_HFC_WD_RESET\n",
__func__);
HFC_outb(hc, R_BERT_WD_MD, hc->hw.r_bert_wd_md | V_WD_RES);
break;
case MISDN_CTRL_L1_TIMER3:
ret = l1_event(dch->l1, HW_TIMER3_VALUE | (cq->p1 & 0xff));
break;
default:
printk(KERN_WARNING "%s: unknown Op %x\n",
__func__, cq->op);
ret = -EINVAL;
break;
}
return ret;
}
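/*
 * D-channel control from the mISDN stack
 * (OPEN_CHANNEL, CLOSE_CHANNEL and CONTROL_CHANNEL)
 */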
static int
hfcm_dctrl(struct mISDNchannel *ch, u_int cmd, void *arg)
{
struct mISDNdevice *dev = container_of(ch, struct mISDNdevice, D);
struct dchannel *dch = container_of(dev, struct dchannel, dev);
struct hfc_multi *hc = dch->hw;
struct channel_req *rq;
int err = 0;
u_long flags;
if (dch->debug & DEBUG_HW)
printk(KERN_DEBUG "%s: cmd:%x %p\n",
__func__, cmd, arg);
switch (cmd) {
case OPEN_CHANNEL:
rq = arg;
switch (rq->protocol) {
case ISDN_P_TE_S0:
case ISDN_P_NT_S0:
if (hc->ctype == HFC_TYPE_E1) {
err = -EINVAL;
break;
}
err = open_dchannel(hc, dch, rq); /* locked there */
break;
case ISDN_P_TE_E1:
case ISDN_P_NT_E1:
if (hc->ctype != HFC_TYPE_E1) {
err = -EINVAL;
break;
}
err = open_dchannel(hc, dch, rq); /* locked there */
break;
default:
spin_lock_irqsave(&hc->lock, flags);
err = open_bchannel(hc, dch, rq);
spin_unlock_irqrestore(&hc->lock, flags);
}
break;
case CLOSE_CHANNEL:
if (debug & DEBUG_HW_OPEN)
printk(KERN_DEBUG "%s: dev(%d) close from %p\n",
__func__, dch->dev.id,
__builtin_return_address(0));
module_put(THIS_MODULE);
break;
case CONTROL_CHANNEL:
spin_lock_irqsave(&hc->lock, flags);
err = channel_dctrl(dch, arg);
spin_unlock_irqrestore(&hc->lock, flags);
break;
default:
if (dch->debug & DEBUG_HW)
printk(KERN_DEBUG "%s: unknown command %x\n",
__func__, cmd);
err = -EINVAL;
}
return err;
}
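/*
 * callback of the registered mISDN clock source; only records
 * whether the clock is currently in use
 */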
static int
clockctl(void *priv, int enable)
{
struct hfc_multi *hc = priv;
hc->iclock_on = enable;
return 0;
}
/*
* initialize the card
*/
/*
* start timer irq, wait some time and check if we have interrupts.
* if not, reset chip and try again.
*/
static int
init_card(struct hfc_multi *hc)
{
int err = -EIO;
u_long flags;
void __iomem *plx_acc;
u_long plx_flags;
if (debug & DEBUG_HFCMULTI_INIT)
printk(KERN_DEBUG "%s: entered\n", __func__);
spin_lock_irqsave(&hc->lock, flags);
/* set interrupts but leave global interrupt disabled */
hc->hw.r_irq_ctrl = V_FIFO_IRQ;
disable_hwirq(hc);
spin_unlock_irqrestore(&hc->lock, flags);
if (request_irq(hc->irq, hfcmulti_interrupt, IRQF_SHARED,
"HFC-multi", hc)) {
printk(KERN_WARNING "mISDN: Could not get interrupt %d.\n",
hc->irq);
hc->irq = 0;
return -EIO;
}
if (test_bit(HFC_CHIP_PLXSD, &hc->chip)) {
spin_lock_irqsave(&plx_lock, plx_flags);
plx_acc = hc->plx_membase + PLX_INTCSR;
writew((PLX_INTCSR_PCIINT_ENABLE | PLX_INTCSR_LINTI1_ENABLE),
plx_acc); /* enable PCI & LINT1 irq */
spin_unlock_irqrestore(&plx_lock, plx_flags);
}
if (debug & DEBUG_HFCMULTI_INIT)
printk(KERN_DEBUG "%s: IRQ %d count %d\n",
__func__, hc->irq, hc->irqcnt);
err = init_chip(hc);
if (err)
goto error;
/*
* Finally enable IRQ output
* this is only allowed, if an IRQ routine is already
* established for this HFC, so don't do that earlier
*/
spin_lock_irqsave(&hc->lock, flags);
enable_hwirq(hc);
spin_unlock_irqrestore(&hc->lock, flags);
/* printk(KERN_DEBUG "no master irq set!!!\n"); */
set_current_state(TASK_UNINTERRUPTIBLE);
schedule_timeout((100 * HZ) / 1000); /* Timeout 100ms */
/* turn IRQ off until chip is completely initialized */
spin_lock_irqsave(&hc->lock, flags);
disable_hwirq(hc);
spin_unlock_irqrestore(&hc->lock, flags);
if (debug & DEBUG_HFCMULTI_INIT)
printk(KERN_DEBUG "%s: IRQ %d count %d\n",
__func__, hc->irq, hc->irqcnt);
if (hc->irqcnt) {
if (debug & DEBUG_HFCMULTI_INIT)
printk(KERN_DEBUG "%s: done\n", __func__);
return 0;
}
if (test_bit(HFC_CHIP_PCM_SLAVE, &hc->chip)) {
printk(KERN_INFO "ignoring missing interrupts\n");
return 0;
}
printk(KERN_ERR "HFC PCI: IRQ(%d) getting no interrupts during init.\n",
hc->irq);
err = -EIO;
error:
if (test_bit(HFC_CHIP_PLXSD, &hc->chip)) {
spin_lock_irqsave(&plx_lock, plx_flags);
plx_acc = hc->plx_membase + PLX_INTCSR;
writew(0x00, plx_acc); /*disable IRQs*/
spin_unlock_irqrestore(&plx_lock, plx_flags);
}
if (debug & DEBUG_HFCMULTI_INIT)
printk(KERN_DEBUG "%s: free irq %d\n", __func__, hc->irq);
if (hc->irq) {
free_irq(hc->irq, hc);
hc->irq = 0;
}
if (debug & DEBUG_HFCMULTI_INIT)
printk(KERN_DEBUG "%s: done (err=%d)\n", __func__, err);
return err;
}
/*
* find pci device and set it up
*/
static int
setup_pci(struct hfc_multi *hc, struct pci_dev *pdev,
const struct pci_device_id *ent)
{
struct hm_map *m = (struct hm_map *)ent->driver_data;
printk(KERN_INFO
"HFC-multi: card manufacturer: '%s' card name: '%s' clock: %s\n",
m->vendor_name, m->card_name, m->clock2 ? "double" : "normal");
hc->pci_dev = pdev;
if (m->clock2)
test_and_set_bit(HFC_CHIP_CLOCK2, &hc->chip);
if (ent->vendor == PCI_VENDOR_ID_DIGIUM &&
ent->device == PCI_DEVICE_ID_DIGIUM_HFC4S) {
test_and_set_bit(HFC_CHIP_B410P, &hc->chip);
test_and_set_bit(HFC_CHIP_PCM_MASTER, &hc->chip);
test_and_clear_bit(HFC_CHIP_PCM_SLAVE, &hc->chip);
hc->slots = 32;
}
if (hc->pci_dev->irq <= 0) {
printk(KERN_WARNING "HFC-multi: No IRQ for PCI card found.\n");
return -EIO;
}
if (pci_enable_device(hc->pci_dev)) {
printk(KERN_WARNING "HFC-multi: Error enabling PCI card.\n");
return -EIO;
}
hc->leds = m->leds;
hc->ledstate = 0xAFFEAFFE;
hc->opticalsupport = m->opticalsupport;
hc->pci_iobase = 0;
hc->pci_membase = NULL;
hc->plx_membase = NULL;
/* set memory access methods */
if (m->io_mode) /* use mode from card config */
hc->io_mode = m->io_mode;
switch (hc->io_mode) {
case HFC_IO_MODE_PLXSD:
test_and_set_bit(HFC_CHIP_PLXSD, &hc->chip);
hc->slots = 128; /* required */
hc->HFC_outb = HFC_outb_pcimem;
hc->HFC_inb = HFC_inb_pcimem;
hc->HFC_inw = HFC_inw_pcimem;
hc->HFC_wait = HFC_wait_pcimem;
hc->read_fifo = read_fifo_pcimem;
hc->write_fifo = write_fifo_pcimem;
hc->plx_origmembase = hc->pci_dev->resource[0].start;
/* MEMBASE 1 is PLX PCI Bridge */
if (!hc->plx_origmembase) {
printk(KERN_WARNING
"HFC-multi: No IO-Memory for PCI PLX bridge found\n");
pci_disable_device(hc->pci_dev);
return -EIO;
}
hc->plx_membase = ioremap(hc->plx_origmembase, 0x80);
if (!hc->plx_membase) {
printk(KERN_WARNING
"HFC-multi: failed to remap plx address space. "
"(internal error)\n");
pci_disable_device(hc->pci_dev);
return -EIO;
}
printk(KERN_INFO
"HFC-multi: plx_membase:%#lx plx_origmembase:%#lx\n",
(u_long)hc->plx_membase, hc->plx_origmembase);
hc->pci_origmembase = hc->pci_dev->resource[2].start;
/* MEMBASE 2 is the HFC-multi chip */
if (!hc->pci_origmembase) {
printk(KERN_WARNING
"HFC-multi: No IO-Memory for PCI card found\n");
pci_disable_device(hc->pci_dev);
return -EIO;
}
hc->pci_membase = ioremap(hc->pci_origmembase, 0x400);
if (!hc->pci_membase) {
printk(KERN_WARNING "HFC-multi: failed to remap io "
"address space. (internal error)\n");
pci_disable_device(hc->pci_dev);
return -EIO;
}
printk(KERN_INFO
"card %d: defined at MEMBASE %#lx (%#lx) IRQ %d HZ %d "
"leds-type %d\n",
hc->id, (u_long)hc->pci_membase, hc->pci_origmembase,
hc->pci_dev->irq, HZ, hc->leds);
pci_write_config_word(hc->pci_dev, PCI_COMMAND, PCI_ENA_MEMIO);
break;
case HFC_IO_MODE_PCIMEM:
hc->HFC_outb = HFC_outb_pcimem;
hc->HFC_inb = HFC_inb_pcimem;
hc->HFC_inw = HFC_inw_pcimem;
hc->HFC_wait = HFC_wait_pcimem;
hc->read_fifo = read_fifo_pcimem;
hc->write_fifo = write_fifo_pcimem;
hc->pci_origmembase = hc->pci_dev->resource[1].start;
if (!hc->pci_origmembase) {
printk(KERN_WARNING
"HFC-multi: No IO-Memory for PCI card found\n");
pci_disable_device(hc->pci_dev);
return -EIO;
}
hc->pci_membase = ioremap(hc->pci_origmembase, 256);
if (!hc->pci_membase) {
printk(KERN_WARNING
"HFC-multi: failed to remap io address space. "
"(internal error)\n");
pci_disable_device(hc->pci_dev);
return -EIO;
}
printk(KERN_INFO "card %d: defined at MEMBASE %#lx (%#lx) IRQ "
"%d HZ %d leds-type %d\n", hc->id, (u_long)hc->pci_membase,
hc->pci_origmembase, hc->pci_dev->irq, HZ, hc->leds);
pci_write_config_word(hc->pci_dev, PCI_COMMAND, PCI_ENA_MEMIO);
break;
case HFC_IO_MODE_REGIO:
hc->HFC_outb = HFC_outb_regio;
hc->HFC_inb = HFC_inb_regio;
hc->HFC_inw = HFC_inw_regio;
hc->HFC_wait = HFC_wait_regio;
hc->read_fifo = read_fifo_regio;
hc->write_fifo = write_fifo_regio;
hc->pci_iobase = (u_int) hc->pci_dev->resource[0].start;
if (!hc->pci_iobase) {
printk(KERN_WARNING
"HFC-multi: No IO for PCI card found\n");
pci_disable_device(hc->pci_dev);
return -EIO;
}
if (!request_region(hc->pci_iobase, 8, "hfcmulti")) {
printk(KERN_WARNING "HFC-multi: failed to request "
"address space at 0x%08lx (internal error)\n",
hc->pci_iobase);
pci_disable_device(hc->pci_dev);
return -EIO;
}
printk(KERN_INFO
"%s %s: defined at IOBASE %#x IRQ %d HZ %d leds-type %d\n",
m->vendor_name, m->card_name, (u_int) hc->pci_iobase,
hc->pci_dev->irq, HZ, hc->leds);
pci_write_config_word(hc->pci_dev, PCI_COMMAND, PCI_ENA_REGIO);
break;
default:
printk(KERN_WARNING "HFC-multi: Invalid IO mode.\n");
pci_disable_device(hc->pci_dev);
return -EIO;
}
pci_set_drvdata(hc->pci_dev, hc);
/* At this point the needed PCI config is done */
/* fifos are still not enabled */
return 0;
}
/*
* remove port
*/
static void
release_port(struct hfc_multi *hc, struct dchannel *dch)
{
int pt, ci, i = 0;
u_long flags;
struct bchannel *pb;
ci = dch->slot;
pt = hc->chan[ci].port;
if (debug & DEBUG_HFCMULTI_INIT)
printk(KERN_DEBUG "%s: entered for port %d\n",
__func__, pt + 1);
if (pt >= hc->ports) {
printk(KERN_WARNING "%s: ERROR port out of range (%d).\n",
__func__, pt + 1);
return;
}
if (debug & DEBUG_HFCMULTI_INIT)
printk(KERN_DEBUG "%s: releasing port=%d\n",
__func__, pt + 1);
if (dch->dev.D.protocol == ISDN_P_TE_S0)
l1_event(dch->l1, CLOSE_CHANNEL);
hc->chan[ci].dch = NULL;
if (hc->created[pt]) {
hc->created[pt] = 0;
mISDN_unregister_device(&dch->dev);
}
spin_lock_irqsave(&hc->lock, flags);
if (dch->timer.function) {
del_timer(&dch->timer);
dch->timer.function = NULL;
}
if (hc->ctype == HFC_TYPE_E1) { /* E1 */
/* remove sync */
if (test_bit(HFC_CHIP_PLXSD, &hc->chip)) {
hc->syncronized = 0;
plxsd_checksync(hc, 1);
}
/* free channels */
for (i = 0; i <= 31; i++) {
if (!((1 << i) & hc->bmask[pt])) /* skip unused chan */
continue;
if (hc->chan[i].bch) {
if (debug & DEBUG_HFCMULTI_INIT)
printk(KERN_DEBUG
"%s: free port %d channel %d\n",
__func__, hc->chan[i].port + 1, i);
pb = hc->chan[i].bch;
hc->chan[i].bch = NULL;
spin_unlock_irqrestore(&hc->lock, flags);
mISDN_freebchannel(pb);
kfree(pb);
kfree(hc->chan[i].coeff);
spin_lock_irqsave(&hc->lock, flags);
}
}
} else {
/* remove sync */
if (test_bit(HFC_CHIP_PLXSD, &hc->chip)) {
hc->syncronized &=
~(1 << hc->chan[ci].port);
plxsd_checksync(hc, 1);
}
/* free channels */
if (hc->chan[ci - 2].bch) {
if (debug & DEBUG_HFCMULTI_INIT)
printk(KERN_DEBUG
"%s: free port %d channel %d\n",
__func__, hc->chan[ci - 2].port + 1,
ci - 2);
pb = hc->chan[ci - 2].bch;
hc->chan[ci - 2].bch = NULL;
spin_unlock_irqrestore(&hc->lock, flags);
mISDN_freebchannel(pb);
kfree(pb);
kfree(hc->chan[ci - 2].coeff);
spin_lock_irqsave(&hc->lock, flags);
}
if (hc->chan[ci - 1].bch) {
if (debug & DEBUG_HFCMULTI_INIT)
printk(KERN_DEBUG
"%s: free port %d channel %d\n",
__func__, hc->chan[ci - 1].port + 1,
ci - 1);
pb = hc->chan[ci - 1].bch;
hc->chan[ci - 1].bch = NULL;
spin_unlock_irqrestore(&hc->lock, flags);
mISDN_freebchannel(pb);
kfree(pb);
kfree(hc->chan[ci - 1].coeff);
spin_lock_irqsave(&hc->lock, flags);
}
}
spin_unlock_irqrestore(&hc->lock, flags);
if (debug & DEBUG_HFCMULTI_INIT)
printk(KERN_DEBUG "%s: free port %d channel D(%d)\n", __func__,
pt+1, ci);
mISDN_freedchannel(dch);
kfree(dch);
if (debug & DEBUG_HFCMULTI_INIT)
printk(KERN_DEBUG "%s: done!\n", __func__);
}
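/*
 * release complete card: disable IRQ, release all ports and
 * resources, remove the instance from the card list
 */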
static void
release_card(struct hfc_multi *hc)
{
u_long flags;
int ch;
if (debug & DEBUG_HFCMULTI_INIT)
printk(KERN_DEBUG "%s: release card (%d) entered\n",
__func__, hc->id);
/* unregister clock source */
if (hc->iclock)
mISDN_unregister_clock(hc->iclock);
/* disable and free irq */
spin_lock_irqsave(&hc->lock, flags);
disable_hwirq(hc);
spin_unlock_irqrestore(&hc->lock, flags);
udelay(1000);
if (hc->irq) {
if (debug & DEBUG_HFCMULTI_INIT)
printk(KERN_DEBUG "%s: free irq %d (hc=%p)\n",
__func__, hc->irq, hc);
free_irq(hc->irq, hc);
hc->irq = 0;
}
/* disable D-channels & B-channels */
if (debug & DEBUG_HFCMULTI_INIT)
printk(KERN_DEBUG "%s: disable all channels (d and b)\n",
__func__);
for (ch = 0; ch <= 31; ch++) {
if (hc->chan[ch].dch)
release_port(hc, hc->chan[ch].dch);
}
/* dimm leds */
if (hc->leds)
hfcmulti_leds(hc);
/* release hardware */
release_io_hfcmulti(hc);
if (debug & DEBUG_HFCMULTI_INIT)
printk(KERN_DEBUG "%s: remove instance from list\n",
__func__);
list_del(&hc->list);
if (debug & DEBUG_HFCMULTI_INIT)
printk(KERN_DEBUG "%s: delete instance\n", __func__);
if (hc == syncmaster)
syncmaster = NULL;
kfree(hc);
if (debug & DEBUG_HFCMULTI_INIT)
printk(KERN_DEBUG "%s: card successfully removed\n",
__func__);
}
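/*
 * evaluate the 'port' module parameter bits of an E1 port
 * (optical mode, LOS/AIS/SLIP/RDI reports, CRC4, clock source
 * and elastic jitter buffer)
 */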
static void
init_e1_port_hw(struct hfc_multi *hc, struct hm_map *m)
{
/* set optical line type */
if (port[Port_cnt] & 0x001) {
if (!m->opticalsupport) {
printk(KERN_INFO
"This board has no optical "
"support\n");
} else {
if (debug & DEBUG_HFCMULTI_INIT)
printk(KERN_DEBUG
"%s: PORT set optical "
"interfacs: card(%d) "
"port(%d)\n",
__func__,
HFC_cnt + 1, 1);
test_and_set_bit(HFC_CFG_OPTICAL,
&hc->chan[hc->dnum[0]].cfg);
}
}
/* set LOS report */
if (port[Port_cnt] & 0x004) {
if (debug & DEBUG_HFCMULTI_INIT)
printk(KERN_DEBUG "%s: PORT set "
"LOS report: card(%d) port(%d)\n",
__func__, HFC_cnt + 1, 1);
test_and_set_bit(HFC_CFG_REPORT_LOS,
&hc->chan[hc->dnum[0]].cfg);
}
/* set AIS report */
if (port[Port_cnt] & 0x008) {
if (debug & DEBUG_HFCMULTI_INIT)
printk(KERN_DEBUG "%s: PORT set "
"AIS report: card(%d) port(%d)\n",
__func__, HFC_cnt + 1, 1);
test_and_set_bit(HFC_CFG_REPORT_AIS,
&hc->chan[hc->dnum[0]].cfg);
}
/* set SLIP report */
if (port[Port_cnt] & 0x010) {
if (debug & DEBUG_HFCMULTI_INIT)
printk(KERN_DEBUG
"%s: PORT set SLIP report: "
"card(%d) port(%d)\n",
__func__, HFC_cnt + 1, 1);
test_and_set_bit(HFC_CFG_REPORT_SLIP,
&hc->chan[hc->dnum[0]].cfg);
}
/* set RDI report */
if (port[Port_cnt] & 0x020) {
if (debug & DEBUG_HFCMULTI_INIT)
printk(KERN_DEBUG
"%s: PORT set RDI report: "
"card(%d) port(%d)\n",
__func__, HFC_cnt + 1, 1);
test_and_set_bit(HFC_CFG_REPORT_RDI,
&hc->chan[hc->dnum[0]].cfg);
}
/* set CRC-4 Mode */
if (!(port[Port_cnt] & 0x100)) {
if (debug & DEBUG_HFCMULTI_INIT)
printk(KERN_DEBUG "%s: PORT turn on CRC4 report:"
" card(%d) port(%d)\n",
__func__, HFC_cnt + 1, 1);
test_and_set_bit(HFC_CFG_CRC4,
&hc->chan[hc->dnum[0]].cfg);
} else {
if (debug & DEBUG_HFCMULTI_INIT)
printk(KERN_DEBUG "%s: PORT turn off CRC4"
" report: card(%d) port(%d)\n",
__func__, HFC_cnt + 1, 1);
}
/* set forced clock */
if (port[Port_cnt] & 0x0200) {
if (debug & DEBUG_HFCMULTI_INIT)
printk(KERN_DEBUG "%s: PORT force getting clock from "
"E1: card(%d) port(%d)\n",
__func__, HFC_cnt + 1, 1);
test_and_set_bit(HFC_CHIP_E1CLOCK_GET, &hc->chip);
} else
if (port[Port_cnt] & 0x0400) {
if (debug & DEBUG_HFCMULTI_INIT)
printk(KERN_DEBUG "%s: PORT force putting clock to "
"E1: card(%d) port(%d)\n",
__func__, HFC_cnt + 1, 1);
test_and_set_bit(HFC_CHIP_E1CLOCK_PUT, &hc->chip);
}
/* set JATT PLL */
if (port[Port_cnt] & 0x0800) {
if (debug & DEBUG_HFCMULTI_INIT)
printk(KERN_DEBUG "%s: PORT disable JATT PLL on "
"E1: card(%d) port(%d)\n",
__func__, HFC_cnt + 1, 1);
test_and_set_bit(HFC_CHIP_RX_SYNC, &hc->chip);
}
/* set elastic jitter buffer */
if (port[Port_cnt] & 0x3000) {
hc->chan[hc->dnum[0]].jitter = (port[Port_cnt]>>12) & 0x3;
if (debug & DEBUG_HFCMULTI_INIT)
printk(KERN_DEBUG
"%s: PORT set elastic "
"buffer to %d: card(%d) port(%d)\n",
__func__, hc->chan[hc->dnum[0]].jitter,
HFC_cnt + 1, 1);
} else
hc->chan[hc->dnum[0]].jitter = 2; /* default */
}
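/*
 * allocate and register the D-channel and B-channels of an E1 port
 */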
static int
init_e1_port(struct hfc_multi *hc, struct hm_map *m, int pt)
{
struct dchannel *dch;
struct bchannel *bch;
int ch, ret = 0;
char name[MISDN_MAX_IDLEN];
int bcount = 0;
dch = kzalloc(sizeof(struct dchannel), GFP_KERNEL);
if (!dch)
return -ENOMEM;
dch->debug = debug;
mISDN_initdchannel(dch, MAX_DFRAME_LEN_L1, ph_state_change);
dch->hw = hc;
dch->dev.Dprotocols = (1 << ISDN_P_TE_E1) | (1 << ISDN_P_NT_E1);
dch->dev.Bprotocols = (1 << (ISDN_P_B_RAW & ISDN_P_B_MASK)) |
(1 << (ISDN_P_B_HDLC & ISDN_P_B_MASK));
dch->dev.D.send = handle_dmsg;
dch->dev.D.ctrl = hfcm_dctrl;
dch->slot = hc->dnum[pt];
hc->chan[hc->dnum[pt]].dch = dch;
hc->chan[hc->dnum[pt]].port = pt;
hc->chan[hc->dnum[pt]].nt_timer = -1;
for (ch = 1; ch <= 31; ch++) {
if (!((1 << ch) & hc->bmask[pt])) /* skip unused channel */
continue;
bch = kzalloc(sizeof(struct bchannel), GFP_KERNEL);
if (!bch) {
printk(KERN_ERR "%s: no memory for bchannel\n",
__func__);
ret = -ENOMEM;
goto free_chan;
}
hc->chan[ch].coeff = kzalloc(512, GFP_KERNEL);
if (!hc->chan[ch].coeff) {
printk(KERN_ERR "%s: no memory for coeffs\n",
__func__);
ret = -ENOMEM;
kfree(bch);
goto free_chan;
}
bch->nr = ch;
bch->slot = ch;
bch->debug = debug;
mISDN_initbchannel(bch, MAX_DATA_MEM, poll >> 1);
bch->hw = hc;
bch->ch.send = handle_bmsg;
bch->ch.ctrl = hfcm_bctrl;
bch->ch.nr = ch;
list_add(&bch->ch.list, &dch->dev.bchannels);
hc->chan[ch].bch = bch;
hc->chan[ch].port = pt;
set_channelmap(bch->nr, dch->dev.channelmap);
bcount++;
}
dch->dev.nrbchan = bcount;
if (pt == 0)
init_e1_port_hw(hc, m);
if (hc->ports > 1)
snprintf(name, MISDN_MAX_IDLEN - 1, "hfc-e1.%d-%d",
HFC_cnt + 1, pt+1);
else
snprintf(name, MISDN_MAX_IDLEN - 1, "hfc-e1.%d", HFC_cnt + 1);
ret = mISDN_register_device(&dch->dev, &hc->pci_dev->dev, name);
if (ret)
goto free_chan;
hc->created[pt] = 1;
return ret;
free_chan:
release_port(hc, dch);
return ret;
}
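/*
 * allocate and register the D-channel and both B-channels of an S0 port
 */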
static int
init_multi_port(struct hfc_multi *hc, int pt)
{
struct dchannel *dch;
struct bchannel *bch;
int ch, i, ret = 0;
char name[MISDN_MAX_IDLEN];
dch = kzalloc(sizeof(struct dchannel), GFP_KERNEL);
if (!dch)
return -ENOMEM;
dch->debug = debug;
mISDN_initdchannel(dch, MAX_DFRAME_LEN_L1, ph_state_change);
dch->hw = hc;
dch->dev.Dprotocols = (1 << ISDN_P_TE_S0) | (1 << ISDN_P_NT_S0);
dch->dev.Bprotocols = (1 << (ISDN_P_B_RAW & ISDN_P_B_MASK)) |
(1 << (ISDN_P_B_HDLC & ISDN_P_B_MASK));
dch->dev.D.send = handle_dmsg;
dch->dev.D.ctrl = hfcm_dctrl;
dch->dev.nrbchan = 2;
i = pt << 2;
dch->slot = i + 2;
hc->chan[i + 2].dch = dch;
hc->chan[i + 2].port = pt;
hc->chan[i + 2].nt_timer = -1;
for (ch = 0; ch < dch->dev.nrbchan; ch++) {
bch = kzalloc(sizeof(struct bchannel), GFP_KERNEL);
if (!bch) {
printk(KERN_ERR "%s: no memory for bchannel\n",
__func__);
ret = -ENOMEM;
goto free_chan;
}
hc->chan[i + ch].coeff = kzalloc(512, GFP_KERNEL);
if (!hc->chan[i + ch].coeff) {
printk(KERN_ERR "%s: no memory for coeffs\n",
__func__);
ret = -ENOMEM;
kfree(bch);
goto free_chan;
}
bch->nr = ch + 1;
bch->slot = i + ch;
bch->debug = debug;
mISDN_initbchannel(bch, MAX_DATA_MEM, poll >> 1);
bch->hw = hc;
bch->ch.send = handle_bmsg;
bch->ch.ctrl = hfcm_bctrl;
bch->ch.nr = ch + 1;
list_add(&bch->ch.list, &dch->dev.bchannels);
hc->chan[i + ch].bch = bch;
hc->chan[i + ch].port = pt;
set_channelmap(bch->nr, dch->dev.channelmap);
}
/* set master clock */
if (port[Port_cnt] & 0x001) {
if (debug & DEBUG_HFCMULTI_INIT)
printk(KERN_DEBUG
"%s: PROTOCOL set master clock: "
"card(%d) port(%d)\n",
__func__, HFC_cnt + 1, pt + 1);
if (dch->dev.D.protocol != ISDN_P_TE_S0) {
printk(KERN_ERR "Error: Master clock "
"for port(%d) of card(%d) is only"
" possible with TE-mode\n",
pt + 1, HFC_cnt + 1);
ret = -EINVAL;
goto free_chan;
}
if (hc->masterclk >= 0) {
printk(KERN_ERR "Error: Master clock "
"for port(%d) of card(%d) already "
"defined for port(%d)\n",
pt + 1, HFC_cnt + 1, hc->masterclk + 1);
ret = -EINVAL;
goto free_chan;
}
hc->masterclk = pt;
}
/* set transmitter line to non-capacitive */
if (port[Port_cnt] & 0x002) {
if (debug & DEBUG_HFCMULTI_INIT)
printk(KERN_DEBUG
"%s: PROTOCOL set non capacitive "
"transmitter: card(%d) port(%d)\n",
__func__, HFC_cnt + 1, pt + 1);
test_and_set_bit(HFC_CFG_NONCAP_TX,
&hc->chan[i + 2].cfg);
}
/* disable E-channel */
if (port[Port_cnt] & 0x004) {
if (debug & DEBUG_HFCMULTI_INIT)
printk(KERN_DEBUG
"%s: PROTOCOL disable E-channel: "
"card(%d) port(%d)\n",
__func__, HFC_cnt + 1, pt + 1);
test_and_set_bit(HFC_CFG_DIS_ECHANNEL,
&hc->chan[i + 2].cfg);
}
if (hc->ctype == HFC_TYPE_XHFC) {
snprintf(name, MISDN_MAX_IDLEN - 1, "xhfc.%d-%d",
HFC_cnt + 1, pt + 1);
ret = mISDN_register_device(&dch->dev, NULL, name);
} else {
snprintf(name, MISDN_MAX_IDLEN - 1, "hfc-%ds.%d-%d",
hc->ctype, HFC_cnt + 1, pt + 1);
ret = mISDN_register_device(&dch->dev, &hc->pci_dev->dev, name);
}
if (ret)
goto free_chan;
hc->created[pt] = 1;
return ret;
free_chan:
release_port(hc, dch);
return ret;
}
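/*
 * initialize one card: allocate the hfc_multi structure, set up
 * PCI or embedded IO, create all ports and start the hardware
 */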
static int
hfcmulti_init(struct hm_map *m, struct pci_dev *pdev,
const struct pci_device_id *ent)
{
int ret_err = 0;
int pt;
struct hfc_multi *hc;
u_long flags;
u_char dips = 0, pmj = 0; /* dip settings, port mode Jumpers */
int i, ch;
u_int maskcheck;
if (HFC_cnt >= MAX_CARDS) {
printk(KERN_ERR "too many cards (max=%d).\n",
MAX_CARDS);
return -EINVAL;
}
if ((type[HFC_cnt] & 0xff) && (type[HFC_cnt] & 0xff) != m->type) {
printk(KERN_WARNING "HFC-MULTI: Card '%s:%s' type %d found but "
"type[%d] %d was supplied as module parameter\n",
m->vendor_name, m->card_name, m->type, HFC_cnt,
type[HFC_cnt] & 0xff);
printk(KERN_WARNING "HFC-MULTI: Load module without parameters "
"first, to see cards and their types.");
return -EINVAL;
}
if (debug & DEBUG_HFCMULTI_INIT)
printk(KERN_DEBUG "%s: Registering %s:%s chip type %d (0x%x)\n",
__func__, m->vendor_name, m->card_name, m->type,
type[HFC_cnt]);
/* allocate card+fifo structure */
hc = kzalloc(sizeof(struct hfc_multi), GFP_KERNEL);
if (!hc) {
printk(KERN_ERR "No kmem for HFC-Multi card\n");
return -ENOMEM;
}
spin_lock_init(&hc->lock);
hc->mtyp = m;
hc->ctype = m->type;
hc->ports = m->ports;
hc->id = HFC_cnt;
hc->pcm = pcm[HFC_cnt];
hc->io_mode = iomode[HFC_cnt];
if (hc->ctype == HFC_TYPE_E1 && dmask[E1_cnt]) {
/* fragment card */
pt = 0;
maskcheck = 0;
for (ch = 0; ch <= 31; ch++) {
if (!((1 << ch) & dmask[E1_cnt]))
continue;
hc->dnum[pt] = ch;
hc->bmask[pt] = bmask[bmask_cnt++];
if ((maskcheck & hc->bmask[pt])
|| (dmask[E1_cnt] & hc->bmask[pt])) {
printk(KERN_INFO
"HFC-E1 #%d has overlapping B-channels on fragment #%d\n",
E1_cnt + 1, pt);
kfree(hc);
return -EINVAL;
}
maskcheck |= hc->bmask[pt];
printk(KERN_INFO
"HFC-E1 #%d uses D-channel on slot %d and a B-channel map of 0x%08x\n",
E1_cnt + 1, ch, hc->bmask[pt]);
pt++;
}
hc->ports = pt;
}
if (hc->ctype == HFC_TYPE_E1 && !dmask[E1_cnt]) {
/* default card layout */
hc->dnum[0] = 16;
hc->bmask[0] = 0xfffefffe;
hc->ports = 1;
}
/* set chip specific features */
hc->masterclk = -1;
if (type[HFC_cnt] & 0x100) {
test_and_set_bit(HFC_CHIP_ULAW, &hc->chip);
hc->silence = 0xff; /* ulaw silence */
} else
hc->silence = 0x2a; /* alaw silence */
if ((poll >> 1) > sizeof(hc->silence_data)) {
printk(KERN_ERR "HFCMULTI error: silence_data too small, "
"please fix\n");
kfree(hc);
return -EINVAL;
}
for (i = 0; i < (poll >> 1); i++)
hc->silence_data[i] = hc->silence;
if (hc->ctype != HFC_TYPE_XHFC) {
if (!(type[HFC_cnt] & 0x200))
test_and_set_bit(HFC_CHIP_DTMF, &hc->chip);
test_and_set_bit(HFC_CHIP_CONF, &hc->chip);
}
if (type[HFC_cnt] & 0x800)
test_and_set_bit(HFC_CHIP_PCM_SLAVE, &hc->chip);
if (type[HFC_cnt] & 0x1000) {
test_and_set_bit(HFC_CHIP_PCM_MASTER, &hc->chip);
test_and_clear_bit(HFC_CHIP_PCM_SLAVE, &hc->chip);
}
if (type[HFC_cnt] & 0x4000)
test_and_set_bit(HFC_CHIP_EXRAM_128, &hc->chip);
if (type[HFC_cnt] & 0x8000)
test_and_set_bit(HFC_CHIP_EXRAM_512, &hc->chip);
hc->slots = 32;
if (type[HFC_cnt] & 0x10000)
hc->slots = 64;
if (type[HFC_cnt] & 0x20000)
hc->slots = 128;
if (type[HFC_cnt] & 0x80000) {
test_and_set_bit(HFC_CHIP_WATCHDOG, &hc->chip);
hc->wdcount = 0;
hc->wdbyte = V_GPIO_OUT2;
printk(KERN_NOTICE "Watchdog enabled\n");
}
if (pdev && ent)
/* setup pci, hc->slots may change due to PLXSD */
ret_err = setup_pci(hc, pdev, ent);
else
#ifdef CONFIG_MISDN_HFCMULTI_8xx
ret_err = setup_embedded(hc, m);
#else
{
printk(KERN_WARNING "Embedded IO Mode not selected\n");
ret_err = -EIO;
}
#endif
if (ret_err) {
if (hc == syncmaster)
syncmaster = NULL;
kfree(hc);
return ret_err;
}
hc->HFC_outb_nodebug = hc->HFC_outb;
hc->HFC_inb_nodebug = hc->HFC_inb;
hc->HFC_inw_nodebug = hc->HFC_inw;
hc->HFC_wait_nodebug = hc->HFC_wait;
#ifdef HFC_REGISTER_DEBUG
hc->HFC_outb = HFC_outb_debug;
hc->HFC_inb = HFC_inb_debug;
hc->HFC_inw = HFC_inw_debug;
hc->HFC_wait = HFC_wait_debug;
#endif
/* create channels */
for (pt = 0; pt < hc->ports; pt++) {
if (Port_cnt >= MAX_PORTS) {
printk(KERN_ERR "too many ports (max=%d).\n",
MAX_PORTS);
ret_err = -EINVAL;
goto free_card;
}
if (hc->ctype == HFC_TYPE_E1)
ret_err = init_e1_port(hc, m, pt);
else
ret_err = init_multi_port(hc, pt);
if (debug & DEBUG_HFCMULTI_INIT)
printk(KERN_DEBUG
"%s: Registering D-channel, card(%d) port(%d) "
"result %d\n",
__func__, HFC_cnt + 1, pt + 1, ret_err);
if (ret_err) {
while (pt) { /* release already registered ports */
pt--;
if (hc->ctype == HFC_TYPE_E1)
release_port(hc,
hc->chan[hc->dnum[pt]].dch);
else
release_port(hc,
hc->chan[(pt << 2) + 2].dch);
}
goto free_card;
}
if (hc->ctype != HFC_TYPE_E1)
Port_cnt++; /* for each S0 port */
}
if (hc->ctype == HFC_TYPE_E1) {
Port_cnt++; /* for each E1 port */
E1_cnt++;
}
/* disp switches */
switch (m->dip_type) {
case DIP_4S:
/*
* Get DIP setting for beroNet 1S/2S/4S cards
* DIP Setting: (collect GPIO 13/14/15 (R_GPIO_IN1) +
* GPI 19/23 (R_GPI_IN2))
*/
dips = ((~HFC_inb(hc, R_GPIO_IN1) & 0xE0) >> 5) |
((~HFC_inb(hc, R_GPI_IN2) & 0x80) >> 3) |
(~HFC_inb(hc, R_GPI_IN2) & 0x08);
/* Port mode (TE/NT) jumpers */
pmj = ((HFC_inb(hc, R_GPI_IN3) >> 4) & 0xf);
if (test_bit(HFC_CHIP_B410P, &hc->chip))
pmj = ~pmj & 0xf;
printk(KERN_INFO "%s: %s DIPs(0x%x) jumpers(0x%x)\n",
m->vendor_name, m->card_name, dips, pmj);
break;
case DIP_8S:
/*
* Get DIP Setting for beroNet 8S0+ cards
* Enable PCI auxbridge function
*/
HFC_outb(hc, R_BRG_PCM_CFG, 1 | V_PCM_CLK);
/* prepare access to auxport */
outw(0x4000, hc->pci_iobase + 4);
/*
* some dummy reads are required to
* read valid DIP switch data
*/
dips = inb(hc->pci_iobase);
dips = inb(hc->pci_iobase);
dips = inb(hc->pci_iobase);
dips = ~inb(hc->pci_iobase) & 0x3F;
outw(0x0, hc->pci_iobase + 4);
/* disable PCI auxbridge function */
HFC_outb(hc, R_BRG_PCM_CFG, V_PCM_CLK);
printk(KERN_INFO "%s: %s DIPs(0x%x)\n",
m->vendor_name, m->card_name, dips);
break;
case DIP_E1:
/*
* get DIP Setting for beroNet E1 cards
* DIP Setting: collect GPI 4/5/6/7 (R_GPI_IN0)
*/
dips = (~HFC_inb(hc, R_GPI_IN0) & 0xF0) >> 4;
printk(KERN_INFO "%s: %s DIPs(0x%x)\n",
m->vendor_name, m->card_name, dips);
break;
}
/* add to list */
spin_lock_irqsave(&HFClock, flags);
list_add_tail(&hc->list, &HFClist);
spin_unlock_irqrestore(&HFClock, flags);
/* use as clock source */
if (clock == HFC_cnt + 1)
hc->iclock = mISDN_register_clock("HFCMulti", 0, clockctl, hc);
/* initialize hardware */
hc->irq = (m->irq) ? : hc->pci_dev->irq;
ret_err = init_card(hc);
if (ret_err) {
printk(KERN_ERR "init card returns %d\n", ret_err);
release_card(hc);
return ret_err;
}
/* start IRQ and return */
spin_lock_irqsave(&hc->lock, flags);
enable_hwirq(hc);
spin_unlock_irqrestore(&hc->lock, flags);
return 0;
free_card:
release_io_hfcmulti(hc);
if (hc == syncmaster)
syncmaster = NULL;
kfree(hc);
return ret_err;
}
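/*
 * PCI remove callback: release the card if driver data is still set
 */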
static void hfc_remove_pci(struct pci_dev *pdev)
{
struct hfc_multi *card = pci_get_drvdata(pdev);
u_long flags;
if (debug)
printk(KERN_INFO "removing hfc_multi card vendor:%x "
"device:%x subvendor:%x subdevice:%x\n",
pdev->vendor, pdev->device,
pdev->subsystem_vendor, pdev->subsystem_device);
if (card) {
spin_lock_irqsave(&HFClock, flags);
release_card(card);
spin_unlock_irqrestore(&HFClock, flags);
} else {
if (debug)
printk(KERN_DEBUG "%s: drvdata already removed\n",
__func__);
}
}
#define VENDOR_CCD "Cologne Chip AG"
#define VENDOR_BN "beroNet GmbH"
#define VENDOR_DIG "Digium Inc."
#define VENDOR_JH "Junghanns.NET GmbH"
#define VENDOR_PRIM "PrimuX"
static const struct hm_map hfcm_map[] = {
/*0*/ {VENDOR_BN, "HFC-1S Card (mini PCI)", 4, 1, 1, 3, 0, DIP_4S, 0, 0},
/*1*/ {VENDOR_BN, "HFC-2S Card", 4, 2, 1, 3, 0, DIP_4S, 0, 0},
/*2*/ {VENDOR_BN, "HFC-2S Card (mini PCI)", 4, 2, 1, 3, 0, DIP_4S, 0, 0},
/*3*/ {VENDOR_BN, "HFC-4S Card", 4, 4, 1, 2, 0, DIP_4S, 0, 0},
/*4*/ {VENDOR_BN, "HFC-4S Card (mini PCI)", 4, 4, 1, 2, 0, 0, 0, 0},
/*5*/ {VENDOR_CCD, "HFC-4S Eval (old)", 4, 4, 0, 0, 0, 0, 0, 0},
/*6*/ {VENDOR_CCD, "HFC-4S IOB4ST", 4, 4, 1, 2, 0, DIP_4S, 0, 0},
/*7*/ {VENDOR_CCD, "HFC-4S", 4, 4, 1, 2, 0, 0, 0, 0},
/*8*/ {VENDOR_DIG, "HFC-4S Card", 4, 4, 0, 2, 0, 0, HFC_IO_MODE_REGIO, 0},
/*9*/ {VENDOR_CCD, "HFC-4S Swyx 4xS0 SX2 QuadBri", 4, 4, 1, 2, 0, 0, 0, 0},
/*10*/ {VENDOR_JH, "HFC-4S (junghanns 2.0)", 4, 4, 1, 2, 0, 0, 0, 0},
/*11*/ {VENDOR_PRIM, "HFC-2S Primux Card", 4, 2, 0, 0, 0, 0, 0, 0},
/*12*/ {VENDOR_BN, "HFC-8S Card", 8, 8, 1, 0, 0, 0, 0, 0},
/*13*/ {VENDOR_BN, "HFC-8S Card (+)", 8, 8, 1, 8, 0, DIP_8S,
HFC_IO_MODE_REGIO, 0},
/*14*/ {VENDOR_CCD, "HFC-8S Eval (old)", 8, 8, 0, 0, 0, 0, 0, 0},
/*15*/ {VENDOR_CCD, "HFC-8S IOB4ST Recording", 8, 8, 1, 0, 0, 0, 0, 0},
/*16*/ {VENDOR_CCD, "HFC-8S IOB8ST", 8, 8, 1, 0, 0, 0, 0, 0},
/*17*/ {VENDOR_CCD, "HFC-8S", 8, 8, 1, 0, 0, 0, 0, 0},
/*18*/ {VENDOR_CCD, "HFC-8S", 8, 8, 1, 0, 0, 0, 0, 0},
/*19*/ {VENDOR_BN, "HFC-E1 Card", 1, 1, 0, 1, 0, DIP_E1, 0, 0},
/*20*/ {VENDOR_BN, "HFC-E1 Card (mini PCI)", 1, 1, 0, 1, 0, 0, 0, 0},
/*21*/ {VENDOR_BN, "HFC-E1+ Card (Dual)", 1, 1, 0, 1, 0, DIP_E1, 0, 0},
/*22*/ {VENDOR_BN, "HFC-E1 Card (Dual)", 1, 1, 0, 1, 0, DIP_E1, 0, 0},
/*23*/ {VENDOR_CCD, "HFC-E1 Eval (old)", 1, 1, 0, 0, 0, 0, 0, 0},
/*24*/ {VENDOR_CCD, "HFC-E1 IOB1E1", 1, 1, 0, 1, 0, 0, 0, 0},
/*25*/ {VENDOR_CCD, "HFC-E1", 1, 1, 0, 1, 0, 0, 0, 0},
/*26*/ {VENDOR_CCD, "HFC-4S Speech Design", 4, 4, 0, 0, 0, 0,
HFC_IO_MODE_PLXSD, 0},
/*27*/ {VENDOR_CCD, "HFC-E1 Speech Design", 1, 1, 0, 0, 0, 0,
HFC_IO_MODE_PLXSD, 0},
/*28*/ {VENDOR_CCD, "HFC-4S OpenVox", 4, 4, 1, 0, 0, 0, 0, 0},
/*29*/ {VENDOR_CCD, "HFC-2S OpenVox", 4, 2, 1, 0, 0, 0, 0, 0},
/*30*/ {VENDOR_CCD, "HFC-8S OpenVox", 8, 8, 1, 0, 0, 0, 0, 0},
/*31*/ {VENDOR_CCD, "XHFC-4S Speech Design", 5, 4, 0, 0, 0, 0,
HFC_IO_MODE_EMBSD, XHFC_IRQ},
/*32*/ {VENDOR_JH, "HFC-8S (junghanns)", 8, 8, 1, 0, 0, 0, 0, 0},
/*33*/ {VENDOR_BN, "HFC-2S Beronet Card PCIe", 4, 2, 1, 3, 0, DIP_4S, 0, 0},
/*34*/ {VENDOR_BN, "HFC-4S Beronet Card PCIe", 4, 4, 1, 2, 0, DIP_4S, 0, 0},
};
#undef H
#define H(x) ((unsigned long)&hfcm_map[x])
static const struct pci_device_id hfmultipci_ids[] = {
/* Cards with HFC-4S Chip */
{ PCI_VENDOR_ID_CCD, PCI_DEVICE_ID_CCD_HFC4S, PCI_VENDOR_ID_CCD,
PCI_SUBDEVICE_ID_CCD_BN1SM, 0, 0, H(0)}, /* BN1S mini PCI */
{ PCI_VENDOR_ID_CCD, PCI_DEVICE_ID_CCD_HFC4S, PCI_VENDOR_ID_CCD,
PCI_SUBDEVICE_ID_CCD_BN2S, 0, 0, H(1)}, /* BN2S */
{ PCI_VENDOR_ID_CCD, PCI_DEVICE_ID_CCD_HFC4S, PCI_VENDOR_ID_CCD,
PCI_SUBDEVICE_ID_CCD_BN2SM, 0, 0, H(2)}, /* BN2S mini PCI */
{ PCI_VENDOR_ID_CCD, PCI_DEVICE_ID_CCD_HFC4S, PCI_VENDOR_ID_CCD,
PCI_SUBDEVICE_ID_CCD_BN4S, 0, 0, H(3)}, /* BN4S */
{ PCI_VENDOR_ID_CCD, PCI_DEVICE_ID_CCD_HFC4S, PCI_VENDOR_ID_CCD,
PCI_SUBDEVICE_ID_CCD_BN4SM, 0, 0, H(4)}, /* BN4S mini PCI */
{ PCI_VENDOR_ID_CCD, PCI_DEVICE_ID_CCD_HFC4S, PCI_VENDOR_ID_CCD,
PCI_DEVICE_ID_CCD_HFC4S, 0, 0, H(5)}, /* Old Eval */
{ PCI_VENDOR_ID_CCD, PCI_DEVICE_ID_CCD_HFC4S, PCI_VENDOR_ID_CCD,
PCI_SUBDEVICE_ID_CCD_IOB4ST, 0, 0, H(6)}, /* IOB4ST */
{ PCI_VENDOR_ID_CCD, PCI_DEVICE_ID_CCD_HFC4S, PCI_VENDOR_ID_CCD,
PCI_SUBDEVICE_ID_CCD_HFC4S, 0, 0, H(7)}, /* 4S */
{ PCI_VENDOR_ID_DIGIUM, PCI_DEVICE_ID_DIGIUM_HFC4S,
PCI_VENDOR_ID_DIGIUM, PCI_DEVICE_ID_DIGIUM_HFC4S, 0, 0, H(8)},
{ PCI_VENDOR_ID_CCD, PCI_DEVICE_ID_CCD_HFC4S, PCI_VENDOR_ID_CCD,
PCI_SUBDEVICE_ID_CCD_SWYX4S, 0, 0, H(9)}, /* 4S Swyx */
{ PCI_VENDOR_ID_CCD, PCI_DEVICE_ID_CCD_HFC4S, PCI_VENDOR_ID_CCD,
PCI_SUBDEVICE_ID_CCD_JH4S20, 0, 0, H(10)},
{ PCI_VENDOR_ID_CCD, PCI_DEVICE_ID_CCD_HFC4S, PCI_VENDOR_ID_CCD,
PCI_SUBDEVICE_ID_CCD_PMX2S, 0, 0, H(11)}, /* Primux */
{ PCI_VENDOR_ID_CCD, PCI_DEVICE_ID_CCD_HFC4S, PCI_VENDOR_ID_CCD,
PCI_SUBDEVICE_ID_CCD_OV4S, 0, 0, H(28)}, /* OpenVox 4 */
{ PCI_VENDOR_ID_CCD, PCI_DEVICE_ID_CCD_HFC4S, PCI_VENDOR_ID_CCD,
PCI_SUBDEVICE_ID_CCD_OV2S, 0, 0, H(29)}, /* OpenVox 2 */
{ PCI_VENDOR_ID_CCD, PCI_DEVICE_ID_CCD_HFC4S, PCI_VENDOR_ID_CCD,
0xb761, 0, 0, H(33)}, /* BN2S PCIe */
{ PCI_VENDOR_ID_CCD, PCI_DEVICE_ID_CCD_HFC4S, PCI_VENDOR_ID_CCD,
0xb762, 0, 0, H(34)}, /* BN4S PCIe */
/* Cards with HFC-8S Chip */
{ PCI_VENDOR_ID_CCD, PCI_DEVICE_ID_CCD_HFC8S, PCI_VENDOR_ID_CCD,
PCI_SUBDEVICE_ID_CCD_BN8S, 0, 0, H(12)}, /* BN8S */
{ PCI_VENDOR_ID_CCD, PCI_DEVICE_ID_CCD_HFC8S, PCI_VENDOR_ID_CCD,
PCI_SUBDEVICE_ID_CCD_BN8SP, 0, 0, H(13)}, /* BN8S+ */
{ PCI_VENDOR_ID_CCD, PCI_DEVICE_ID_CCD_HFC8S, PCI_VENDOR_ID_CCD,
PCI_DEVICE_ID_CCD_HFC8S, 0, 0, H(14)}, /* old Eval */
{ PCI_VENDOR_ID_CCD, PCI_DEVICE_ID_CCD_HFC8S, PCI_VENDOR_ID_CCD,
PCI_SUBDEVICE_ID_CCD_IOB8STR, 0, 0, H(15)}, /* IOB8ST Recording */
{ PCI_VENDOR_ID_CCD, PCI_DEVICE_ID_CCD_HFC8S, PCI_VENDOR_ID_CCD,
PCI_SUBDEVICE_ID_CCD_IOB8ST, 0, 0, H(16)}, /* IOB8ST */
{ PCI_VENDOR_ID_CCD, PCI_DEVICE_ID_CCD_HFC8S, PCI_VENDOR_ID_CCD,
PCI_SUBDEVICE_ID_CCD_IOB8ST_1, 0, 0, H(17)}, /* IOB8ST */
{ PCI_VENDOR_ID_CCD, PCI_DEVICE_ID_CCD_HFC8S, PCI_VENDOR_ID_CCD,
PCI_SUBDEVICE_ID_CCD_HFC8S, 0, 0, H(18)}, /* 8S */
{ PCI_VENDOR_ID_CCD, PCI_DEVICE_ID_CCD_HFC8S, PCI_VENDOR_ID_CCD,
PCI_SUBDEVICE_ID_CCD_OV8S, 0, 0, H(30)}, /* OpenVox 8 */
{ PCI_VENDOR_ID_CCD, PCI_DEVICE_ID_CCD_HFC8S, PCI_VENDOR_ID_CCD,
PCI_SUBDEVICE_ID_CCD_JH8S, 0, 0, H(32)}, /* Junghanns 8S */
/* Cards with HFC-E1 Chip */
{ PCI_VENDOR_ID_CCD, PCI_DEVICE_ID_CCD_HFCE1, PCI_VENDOR_ID_CCD,
PCI_SUBDEVICE_ID_CCD_BNE1, 0, 0, H(19)}, /* BNE1 */
{ PCI_VENDOR_ID_CCD, PCI_DEVICE_ID_CCD_HFCE1, PCI_VENDOR_ID_CCD,
PCI_SUBDEVICE_ID_CCD_BNE1M, 0, 0, H(20)}, /* BNE1 mini PCI */
{ PCI_VENDOR_ID_CCD, PCI_DEVICE_ID_CCD_HFCE1, PCI_VENDOR_ID_CCD,
PCI_SUBDEVICE_ID_CCD_BNE1DP, 0, 0, H(21)}, /* BNE1 + (Dual) */
{ PCI_VENDOR_ID_CCD, PCI_DEVICE_ID_CCD_HFCE1, PCI_VENDOR_ID_CCD,
PCI_SUBDEVICE_ID_CCD_BNE1D, 0, 0, H(22)}, /* BNE1 (Dual) */
{ PCI_VENDOR_ID_CCD, PCI_DEVICE_ID_CCD_HFCE1, PCI_VENDOR_ID_CCD,
PCI_DEVICE_ID_CCD_HFCE1, 0, 0, H(23)}, /* Old Eval */
{ PCI_VENDOR_ID_CCD, PCI_DEVICE_ID_CCD_HFCE1, PCI_VENDOR_ID_CCD,
PCI_SUBDEVICE_ID_CCD_IOB1E1, 0, 0, H(24)}, /* IOB1E1 */
{ PCI_VENDOR_ID_CCD, PCI_DEVICE_ID_CCD_HFCE1, PCI_VENDOR_ID_CCD,
PCI_SUBDEVICE_ID_CCD_HFCE1, 0, 0, H(25)}, /* E1 */
{ PCI_VENDOR_ID_PLX, PCI_DEVICE_ID_PLX_9030, PCI_VENDOR_ID_CCD,
PCI_SUBDEVICE_ID_CCD_SPD4S, 0, 0, H(26)}, /* PLX PCI Bridge */
{ PCI_VENDOR_ID_PLX, PCI_DEVICE_ID_PLX_9030, PCI_VENDOR_ID_CCD,
PCI_SUBDEVICE_ID_CCD_SPDE1, 0, 0, H(27)}, /* PLX PCI Bridge */
{ PCI_VENDOR_ID_CCD, PCI_DEVICE_ID_CCD_HFCE1, PCI_VENDOR_ID_CCD,
PCI_SUBDEVICE_ID_CCD_JHSE1, 0, 0, H(25)}, /* Junghanns E1 */
{ PCI_VDEVICE(CCD, PCI_DEVICE_ID_CCD_HFC4S), 0 },
{ PCI_VDEVICE(CCD, PCI_DEVICE_ID_CCD_HFC8S), 0 },
{ PCI_VDEVICE(CCD, PCI_DEVICE_ID_CCD_HFCE1), 0 },
{0, }
};
#undef H
MODULE_DEVICE_TABLE(pci, hfmultipci_ids);
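/*
 * PCI probe: known cards carry their hm_map in driver_data;
 * unknown subvendor/subdevice combinations are rejected
 */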
static int
hfcmulti_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
struct hm_map *m = (struct hm_map *)ent->driver_data;
int ret;
if (m == NULL && ent->vendor == PCI_VENDOR_ID_CCD && (
ent->device == PCI_DEVICE_ID_CCD_HFC4S ||
ent->device == PCI_DEVICE_ID_CCD_HFC8S ||
ent->device == PCI_DEVICE_ID_CCD_HFCE1)) {
printk(KERN_ERR
"Unknown HFC multiport controller (vendor:%04x device:%04x "
"subvendor:%04x subdevice:%04x)\n", pdev->vendor,
pdev->device, pdev->subsystem_vendor,
pdev->subsystem_device);
printk(KERN_ERR
"Please contact the driver maintainer for support.\n");
return -ENODEV;
}
ret = hfcmulti_init(m, pdev, ent);
if (ret)
return ret;
HFC_cnt++;
printk(KERN_INFO "%d devices registered\n", HFC_cnt);
return 0;
}
static struct pci_driver hfcmultipci_driver = {
.name = "hfc_multi",
.probe = hfcmulti_probe,
.remove = hfc_remove_pci,
.id_table = hfmultipci_ids,
};
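/*
 * module cleanup: release all remaining cards and unregister
 * the PCI driver
 */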
static void __exit
HFCmulti_cleanup(void)
{
struct hfc_multi *card, *next;
/* get rid of all devices of this driver */
list_for_each_entry_safe(card, next, &HFClist, list)
release_card(card);
pci_unregister_driver(&hfcmultipci_driver);
}
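/*
 * module init: validate the poll parameter, register embedded
 * XHFC devices (if selected by hwid) and the PCI driver
 */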
static int __init
HFCmulti_init(void)
{
int err;
int i, xhfc = 0;
struct hm_map m;
printk(KERN_INFO "mISDN: HFC-multi driver %s\n", HFC_MULTI_VERSION);
#ifdef IRQ_DEBUG
printk(KERN_DEBUG "%s: IRQ_DEBUG IS ENABLED!\n", __func__);
#endif
if (debug & DEBUG_HFCMULTI_INIT)
printk(KERN_DEBUG "%s: init entered\n", __func__);
switch (poll) {
case 0:
poll_timer = 6;
poll = 128;
break;
case 8:
poll_timer = 2;
break;
case 16:
poll_timer = 3;
break;
case 32:
poll_timer = 4;
break;
case 64:
poll_timer = 5;
break;
case 128:
poll_timer = 6;
break;
case 256:
poll_timer = 7;
break;
default:
printk(KERN_ERR
"%s: Wrong poll value (%d).\n", __func__, poll);
err = -EINVAL;
return err;
}
if (!clock)
clock = 1;
/* Register the embedded devices.
* This should be done before the PCI card registration */
switch (hwid) {
case HWID_MINIP4:
xhfc = 1;
m = hfcm_map[31];
break;
case HWID_MINIP8:
xhfc = 2;
m = hfcm_map[31];
break;
case HWID_MINIP16:
xhfc = 4;
m = hfcm_map[31];
break;
default:
xhfc = 0;
}
for (i = 0; i < xhfc; ++i) {
err = hfcmulti_init(&m, NULL, NULL);
if (err) {
printk(KERN_ERR "error registering embedded driver: "
"%x\n", err);
return err;
}
HFC_cnt++;
printk(KERN_INFO "%d devices registered\n", HFC_cnt);
}
/* Register the PCI cards */
err = pci_register_driver(&hfcmultipci_driver);
if (err < 0) {
printk(KERN_ERR "error registering pci driver: %x\n", err);
return err;
}
return 0;
}
module_init(HFCmulti_init);
module_exit(HFCmulti_cleanup);
| linux-master | drivers/isdn/hardware/mISDN/hfcmulti.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* mISDNinfineon.c
* Support for cards based on following Infineon ISDN chipsets
* - ISAC + HSCX
* - IPAC and IPAC-X
* - ISAC-SX + HSCX
*
* Supported cards:
* - Dialogic Diva 2.0
* - Dialogic Diva 2.0U
* - Dialogic Diva 2.01
* - Dialogic Diva 2.02
* - Sedlbauer Speedwin
* - HST Saphir3
* - Develo (formerly ELSA) Microlink PCI (Quickstep 1000)
* - Develo (formerly ELSA) Quickstep 3000
* - Berkom Scitel BRIX Quadro
* - Dr.Neuhaus (Sagem) Niccy
*
* Author Karsten Keil <[email protected]>
*
* Copyright 2009 by Karsten Keil <[email protected]>
*/
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/mISDNhw.h>
#include <linux/slab.h>
#include "ipac.h"
#define INFINEON_REV "1.0"
static int inf_cnt;
static u32 debug;
static u32 irqloops = 4;
enum inf_types {
INF_NONE,
INF_DIVA20,
INF_DIVA20U,
INF_DIVA201,
INF_DIVA202,
INF_SPEEDWIN,
INF_SAPHIR3,
INF_QS1000,
INF_QS3000,
INF_NICCY,
INF_SCT_1,
INF_SCT_2,
INF_SCT_3,
INF_SCT_4,
INF_GAZEL_R685,
INF_GAZEL_R753
};
enum addr_mode {
AM_NONE = 0,
AM_IO,
AM_MEMIO,
AM_IND_IO,
};
struct inf_cinfo {
enum inf_types typ;
const char *full;
const char *name;
enum addr_mode cfg_mode;
enum addr_mode addr_mode;
u8 cfg_bar;
u8 addr_bar;
void *irqfunc;
};
struct _ioaddr {
enum addr_mode mode;
union {
void __iomem *p;
struct _ioport io;
} a;
};
struct _iohandle {
enum addr_mode mode;
resource_size_t size;
resource_size_t start;
void __iomem *p;
};
struct inf_hw {
struct list_head list;
struct pci_dev *pdev;
const struct inf_cinfo *ci;
char name[MISDN_MAX_IDLEN];
u32 irq;
u32 irqcnt;
struct _iohandle cfg;
struct _iohandle addr;
struct _ioaddr isac;
struct _ioaddr hscx;
spinlock_t lock; /* HW access lock */
struct ipac_hw ipac;
struct inf_hw *sc[3]; /* slave cards */
};
#define PCI_SUBVENDOR_HST_SAPHIR3 0x52
#define PCI_SUBVENDOR_SEDLBAUER_PCI 0x53
#define PCI_SUB_ID_SEDLBAUER 0x01
static struct pci_device_id infineon_ids[] = {
{ PCI_VDEVICE(EICON, PCI_DEVICE_ID_EICON_DIVA20), INF_DIVA20 },
{ PCI_VDEVICE(EICON, PCI_DEVICE_ID_EICON_DIVA20_U), INF_DIVA20U },
{ PCI_VDEVICE(EICON, PCI_DEVICE_ID_EICON_DIVA201), INF_DIVA201 },
{ PCI_VDEVICE(EICON, PCI_DEVICE_ID_EICON_DIVA202), INF_DIVA202 },
{ PCI_VENDOR_ID_TIGERJET, PCI_DEVICE_ID_TIGERJET_100,
PCI_SUBVENDOR_SEDLBAUER_PCI, PCI_SUB_ID_SEDLBAUER, 0, 0,
INF_SPEEDWIN },
{ PCI_VENDOR_ID_TIGERJET, PCI_DEVICE_ID_TIGERJET_100,
PCI_SUBVENDOR_HST_SAPHIR3, PCI_SUB_ID_SEDLBAUER, 0, 0, INF_SAPHIR3 },
{ PCI_VDEVICE(ELSA, PCI_DEVICE_ID_ELSA_MICROLINK), INF_QS1000 },
{ PCI_VDEVICE(ELSA, PCI_DEVICE_ID_ELSA_QS3000), INF_QS3000 },
{ PCI_VDEVICE(SATSAGEM, PCI_DEVICE_ID_SATSAGEM_NICCY), INF_NICCY },
{ PCI_VENDOR_ID_PLX, PCI_DEVICE_ID_PLX_9050,
PCI_VENDOR_ID_BERKOM, PCI_DEVICE_ID_BERKOM_SCITEL_QUADRO, 0, 0,
INF_SCT_1 },
{ PCI_VDEVICE(PLX, PCI_DEVICE_ID_PLX_R685), INF_GAZEL_R685 },
{ PCI_VDEVICE(PLX, PCI_DEVICE_ID_PLX_R753), INF_GAZEL_R753 },
{ PCI_VDEVICE(PLX, PCI_DEVICE_ID_PLX_DJINN_ITOO), INF_GAZEL_R753 },
{ PCI_VDEVICE(PLX, PCI_DEVICE_ID_PLX_OLITEC), INF_GAZEL_R753 },
{ }
};
MODULE_DEVICE_TABLE(pci, infineon_ids);
/* PCI interface specific defines */
/* Diva 2.0/2.0U */
#define DIVA_HSCX_PORT 0x00
#define DIVA_HSCX_ALE 0x04
#define DIVA_ISAC_PORT 0x08
#define DIVA_ISAC_ALE 0x0C
#define DIVA_PCI_CTRL 0x10
/* DIVA_PCI_CTRL bits */
#define DIVA_IRQ_BIT 0x01
#define DIVA_RESET_BIT 0x08
#define DIVA_EEPROM_CLK 0x40
#define DIVA_LED_A 0x10
#define DIVA_LED_B 0x20
#define DIVA_IRQ_CLR 0x80
/* Diva 2.01/2.02 */
/* Siemens PITA */
#define PITA_ICR_REG 0x00
#define PITA_INT0_STATUS 0x02
#define PITA_MISC_REG 0x1c
#define PITA_PARA_SOFTRESET 0x01000000
#define PITA_SER_SOFTRESET 0x02000000
#define PITA_PARA_MPX_MODE 0x04000000
#define PITA_INT0_ENABLE 0x00020000
/* TIGER 100 Registers */
#define TIGER_RESET_ADDR 0x00
#define TIGER_EXTERN_RESET 0x01
#define TIGER_AUX_CTRL 0x02
#define TIGER_AUX_DATA 0x03
#define TIGER_AUX_IRQMASK 0x05
#define TIGER_AUX_STATUS 0x07
/* Tiger AUX BITs */
#define TIGER_IOMASK 0xdd /* 1 and 5 are inputs */
#define TIGER_IRQ_BIT 0x02
#define TIGER_IPAC_ALE 0xC0
#define TIGER_IPAC_PORT 0xC8
/* ELSA (now Develo) PCI cards */
#define ELSA_IRQ_ADDR 0x4c
#define ELSA_IRQ_MASK 0x04
#define QS1000_IRQ_OFF 0x01
#define QS3000_IRQ_OFF 0x03
#define QS1000_IRQ_ON 0x41
#define QS3000_IRQ_ON 0x43
/* Dr Neuhaus/Sagem Niccy */
#define NICCY_ISAC_PORT 0x00
#define NICCY_HSCX_PORT 0x01
#define NICCY_ISAC_ALE 0x02
#define NICCY_HSCX_ALE 0x03
#define NICCY_IRQ_CTRL_REG 0x38
#define NICCY_IRQ_ENABLE 0x001f00
#define NICCY_IRQ_DISABLE 0xff0000
#define NICCY_IRQ_BIT 0x800000
/* Scitel PLX */
#define SCT_PLX_IRQ_ADDR 0x4c
#define SCT_PLX_RESET_ADDR 0x50
#define SCT_PLX_IRQ_ENABLE 0x41
#define SCT_PLX_RESET_BIT 0x04
/* Gazel */
#define GAZEL_IPAC_DATA_PORT 0x04
/* Gazel PLX */
#define GAZEL_CNTRL 0x50
#define GAZEL_RESET 0x04
#define GAZEL_RESET_9050 0x40000000
#define GAZEL_INCSR 0x4C
#define GAZEL_ISAC_EN 0x08
#define GAZEL_INT_ISAC 0x20
#define GAZEL_HSCX_EN 0x01
#define GAZEL_INT_HSCX 0x04
#define GAZEL_PCI_EN 0x40
#define GAZEL_IPAC_EN 0x03
static LIST_HEAD(Cards);
static DEFINE_RWLOCK(card_lock); /* protect Cards */
static void
_set_debug(struct inf_hw *card)
{
card->ipac.isac.dch.debug = debug;
card->ipac.hscx[0].bch.debug = debug;
card->ipac.hscx[1].bch.debug = debug;
}
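/* module parameter handler: update debug setting of all cards */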
static int
set_debug(const char *val, const struct kernel_param *kp)
{
int ret;
struct inf_hw *card;
ret = param_set_uint(val, kp);
if (!ret) {
read_lock(&card_lock);
list_for_each_entry(card, &Cards, list)
_set_debug(card);
read_unlock(&card_lock);
}
return ret;
}
MODULE_AUTHOR("Karsten Keil");
MODULE_LICENSE("GPL v2");
MODULE_VERSION(INFINEON_REV);
module_param_call(debug, set_debug, param_get_uint, &debug, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(debug, "infineon debug mask");
module_param(irqloops, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(irqloops, "infineon maximal irqloops (default 4)");
/* Interface functions */
IOFUNC_IO(ISAC, inf_hw, isac.a.io)
IOFUNC_IO(IPAC, inf_hw, hscx.a.io)
IOFUNC_IND(ISAC, inf_hw, isac.a.io)
IOFUNC_IND(IPAC, inf_hw, hscx.a.io)
IOFUNC_MEMIO(ISAC, inf_hw, u32, isac.a.p)
IOFUNC_MEMIO(IPAC, inf_hw, u32, hscx.a.p)
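/*
 * Card specific interrupt handlers. Most check a card level status
 * bit first, since the IRQ line may be shared, before running the
 * common IPAC/ISAC interrupt processing.
 */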
static irqreturn_t
diva_irq(int intno, void *dev_id)
{
struct inf_hw *hw = dev_id;
u8 val;
spin_lock(&hw->lock);
val = inb((u32)hw->cfg.start + DIVA_PCI_CTRL);
if (!(val & DIVA_IRQ_BIT)) { /* for us or shared ? */
spin_unlock(&hw->lock);
return IRQ_NONE; /* shared */
}
hw->irqcnt++;
mISDNipac_irq(&hw->ipac, irqloops);
spin_unlock(&hw->lock);
return IRQ_HANDLED;
}
static irqreturn_t
diva20x_irq(int intno, void *dev_id)
{
struct inf_hw *hw = dev_id;
u8 val;
spin_lock(&hw->lock);
val = readb(hw->cfg.p);
if (!(val & PITA_INT0_STATUS)) { /* for us or shared ? */
spin_unlock(&hw->lock);
return IRQ_NONE; /* shared */
}
hw->irqcnt++;
mISDNipac_irq(&hw->ipac, irqloops);
writeb(PITA_INT0_STATUS, hw->cfg.p); /* ACK PITA INT0 */
spin_unlock(&hw->lock);
return IRQ_HANDLED;
}
static irqreturn_t
tiger_irq(int intno, void *dev_id)
{
struct inf_hw *hw = dev_id;
u8 val;
spin_lock(&hw->lock);
val = inb((u32)hw->cfg.start + TIGER_AUX_STATUS);
if (val & TIGER_IRQ_BIT) { /* for us or shared ? */
spin_unlock(&hw->lock);
return IRQ_NONE; /* shared */
}
hw->irqcnt++;
mISDNipac_irq(&hw->ipac, irqloops);
spin_unlock(&hw->lock);
return IRQ_HANDLED;
}
static irqreturn_t
elsa_irq(int intno, void *dev_id)
{
struct inf_hw *hw = dev_id;
u8 val;
spin_lock(&hw->lock);
val = inb((u32)hw->cfg.start + ELSA_IRQ_ADDR);
if (!(val & ELSA_IRQ_MASK)) {
spin_unlock(&hw->lock);
return IRQ_NONE; /* shared */
}
hw->irqcnt++;
mISDNipac_irq(&hw->ipac, irqloops);
spin_unlock(&hw->lock);
return IRQ_HANDLED;
}
static irqreturn_t
niccy_irq(int intno, void *dev_id)
{
struct inf_hw *hw = dev_id;
u32 val;
spin_lock(&hw->lock);
val = inl((u32)hw->cfg.start + NICCY_IRQ_CTRL_REG);
if (!(val & NICCY_IRQ_BIT)) { /* for us or shared ? */
spin_unlock(&hw->lock);
return IRQ_NONE; /* shared */
}
outl(val, (u32)hw->cfg.start + NICCY_IRQ_CTRL_REG);
hw->irqcnt++;
mISDNipac_irq(&hw->ipac, irqloops);
spin_unlock(&hw->lock);
return IRQ_HANDLED;
}
static irqreturn_t
gazel_irq(int intno, void *dev_id)
{
struct inf_hw *hw = dev_id;
irqreturn_t ret;
spin_lock(&hw->lock);
ret = mISDNipac_irq(&hw->ipac, irqloops);
spin_unlock(&hw->lock);
return ret;
}
static irqreturn_t
ipac_irq(int intno, void *dev_id)
{
struct inf_hw *hw = dev_id;
u8 val;
spin_lock(&hw->lock);
val = hw->ipac.read_reg(hw, IPAC_ISTA);
if (!(val & 0x3f)) {
spin_unlock(&hw->lock);
return IRQ_NONE; /* shared */
}
hw->irqcnt++;
mISDNipac_irq(&hw->ipac, irqloops);
spin_unlock(&hw->lock);
return IRQ_HANDLED;
}
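/* enable the card level interrupt output (card type specific) */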
static void
enable_hwirq(struct inf_hw *hw)
{
u16 w;
u32 val;
switch (hw->ci->typ) {
case INF_DIVA201:
case INF_DIVA202:
writel(PITA_INT0_ENABLE, hw->cfg.p);
break;
case INF_SPEEDWIN:
case INF_SAPHIR3:
outb(TIGER_IRQ_BIT, (u32)hw->cfg.start + TIGER_AUX_IRQMASK);
break;
case INF_QS1000:
outb(QS1000_IRQ_ON, (u32)hw->cfg.start + ELSA_IRQ_ADDR);
break;
case INF_QS3000:
outb(QS3000_IRQ_ON, (u32)hw->cfg.start + ELSA_IRQ_ADDR);
break;
case INF_NICCY:
val = inl((u32)hw->cfg.start + NICCY_IRQ_CTRL_REG);
val |= NICCY_IRQ_ENABLE;
outl(val, (u32)hw->cfg.start + NICCY_IRQ_CTRL_REG);
break;
case INF_SCT_1:
w = inw((u32)hw->cfg.start + SCT_PLX_IRQ_ADDR);
w |= SCT_PLX_IRQ_ENABLE;
outw(w, (u32)hw->cfg.start + SCT_PLX_IRQ_ADDR);
break;
case INF_GAZEL_R685:
outb(GAZEL_ISAC_EN + GAZEL_HSCX_EN + GAZEL_PCI_EN,
(u32)hw->cfg.start + GAZEL_INCSR);
break;
case INF_GAZEL_R753:
outb(GAZEL_IPAC_EN + GAZEL_PCI_EN,
(u32)hw->cfg.start + GAZEL_INCSR);
break;
default:
break;
}
}
static void
disable_hwirq(struct inf_hw *hw)
{
u16 w;
u32 val;
switch (hw->ci->typ) {
case INF_DIVA201:
case INF_DIVA202:
writel(0, hw->cfg.p);
break;
case INF_SPEEDWIN:
case INF_SAPHIR3:
outb(0, (u32)hw->cfg.start + TIGER_AUX_IRQMASK);
break;
case INF_QS1000:
outb(QS1000_IRQ_OFF, (u32)hw->cfg.start + ELSA_IRQ_ADDR);
break;
case INF_QS3000:
outb(QS3000_IRQ_OFF, (u32)hw->cfg.start + ELSA_IRQ_ADDR);
break;
case INF_NICCY:
val = inl((u32)hw->cfg.start + NICCY_IRQ_CTRL_REG);
val &= NICCY_IRQ_DISABLE;
outl(val, (u32)hw->cfg.start + NICCY_IRQ_CTRL_REG);
break;
case INF_SCT_1:
w = inw((u32)hw->cfg.start + SCT_PLX_IRQ_ADDR);
w &= (~SCT_PLX_IRQ_ENABLE);
outw(w, (u32)hw->cfg.start + SCT_PLX_IRQ_ADDR);
break;
case INF_GAZEL_R685:
case INF_GAZEL_R753:
outb(0, (u32)hw->cfg.start + GAZEL_INCSR);
break;
default:
break;
}
}
static void
ipac_chip_reset(struct inf_hw *hw)
{
hw->ipac.write_reg(hw, IPAC_POTA2, 0x20);
mdelay(5);
hw->ipac.write_reg(hw, IPAC_POTA2, 0x00);
mdelay(5);
hw->ipac.write_reg(hw, IPAC_CONF, hw->ipac.conf);
hw->ipac.write_reg(hw, IPAC_MASK, 0xc0);
}
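/* card type specific hardware reset; re-enables the IRQ at the end */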
static void
reset_inf(struct inf_hw *hw)
{
u16 w;
u32 val;
if (debug & DEBUG_HW)
pr_notice("%s: resetting card\n", hw->name);
switch (hw->ci->typ) {
case INF_DIVA20:
case INF_DIVA20U:
outb(0, (u32)hw->cfg.start + DIVA_PCI_CTRL);
mdelay(10);
outb(DIVA_RESET_BIT, (u32)hw->cfg.start + DIVA_PCI_CTRL);
mdelay(10);
/* Workaround PCI9060 */
outb(9, (u32)hw->cfg.start + 0x69);
outb(DIVA_RESET_BIT | DIVA_LED_A,
(u32)hw->cfg.start + DIVA_PCI_CTRL);
break;
case INF_DIVA201:
writel(PITA_PARA_SOFTRESET | PITA_PARA_MPX_MODE,
hw->cfg.p + PITA_MISC_REG);
mdelay(1);
writel(PITA_PARA_MPX_MODE, hw->cfg.p + PITA_MISC_REG);
mdelay(10);
break;
case INF_DIVA202:
writel(PITA_PARA_SOFTRESET | PITA_PARA_MPX_MODE,
hw->cfg.p + PITA_MISC_REG);
mdelay(1);
writel(PITA_PARA_MPX_MODE | PITA_SER_SOFTRESET,
hw->cfg.p + PITA_MISC_REG);
mdelay(10);
break;
case INF_SPEEDWIN:
case INF_SAPHIR3:
ipac_chip_reset(hw);
hw->ipac.write_reg(hw, IPAC_ACFG, 0xff);
hw->ipac.write_reg(hw, IPAC_AOE, 0x00);
hw->ipac.write_reg(hw, IPAC_PCFG, 0x12);
break;
case INF_QS1000:
case INF_QS3000:
ipac_chip_reset(hw);
hw->ipac.write_reg(hw, IPAC_ACFG, 0x00);
hw->ipac.write_reg(hw, IPAC_AOE, 0x3c);
hw->ipac.write_reg(hw, IPAC_ATX, 0xff);
break;
case INF_NICCY:
break;
case INF_SCT_1:
w = inw((u32)hw->cfg.start + SCT_PLX_RESET_ADDR);
w &= (~SCT_PLX_RESET_BIT);
outw(w, (u32)hw->cfg.start + SCT_PLX_RESET_ADDR);
mdelay(10);
w = inw((u32)hw->cfg.start + SCT_PLX_RESET_ADDR);
w |= SCT_PLX_RESET_BIT;
outw(w, (u32)hw->cfg.start + SCT_PLX_RESET_ADDR);
mdelay(10);
break;
case INF_GAZEL_R685:
val = inl((u32)hw->cfg.start + GAZEL_CNTRL);
val |= (GAZEL_RESET_9050 + GAZEL_RESET);
outl(val, (u32)hw->cfg.start + GAZEL_CNTRL);
val &= ~(GAZEL_RESET_9050 + GAZEL_RESET);
mdelay(4);
outl(val, (u32)hw->cfg.start + GAZEL_CNTRL);
mdelay(10);
hw->ipac.isac.adf2 = 0x87;
hw->ipac.hscx[0].slot = 0x1f;
hw->ipac.hscx[1].slot = 0x23;
break;
case INF_GAZEL_R753:
val = inl((u32)hw->cfg.start + GAZEL_CNTRL);
val |= (GAZEL_RESET_9050 + GAZEL_RESET);
outl(val, (u32)hw->cfg.start + GAZEL_CNTRL);
val &= ~(GAZEL_RESET_9050 + GAZEL_RESET);
mdelay(4);
outl(val, (u32)hw->cfg.start + GAZEL_CNTRL);
mdelay(10);
ipac_chip_reset(hw);
hw->ipac.write_reg(hw, IPAC_ACFG, 0xff);
hw->ipac.write_reg(hw, IPAC_AOE, 0x00);
hw->ipac.conf = 0x01; /* IOM off */
break;
default:
return;
}
enable_hwirq(hw);
}
static int
inf_ctrl(struct inf_hw *hw, u32 cmd, u_long arg)
{
int ret = 0;
switch (cmd) {
case HW_RESET_REQ:
reset_inf(hw);
break;
default:
pr_info("%s: %s unknown command %x %lx\n",
hw->name, __func__, cmd, arg);
ret = -EINVAL;
break;
}
return ret;
}
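/*
 * request the IRQ and initialize the chip; verify that interrupts
 * are actually seen (up to 3 attempts)
 */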
static int
init_irq(struct inf_hw *hw)
{
int ret, cnt = 3;
u_long flags;
if (!hw->ci->irqfunc)
return -EINVAL;
ret = request_irq(hw->irq, hw->ci->irqfunc, IRQF_SHARED, hw->name, hw);
if (ret) {
pr_info("%s: couldn't get interrupt %d\n", hw->name, hw->irq);
return ret;
}
while (cnt--) {
spin_lock_irqsave(&hw->lock, flags);
reset_inf(hw);
ret = hw->ipac.init(&hw->ipac);
if (ret) {
spin_unlock_irqrestore(&hw->lock, flags);
pr_info("%s: ISAC init failed with %d\n",
hw->name, ret);
break;
}
spin_unlock_irqrestore(&hw->lock, flags);
msleep_interruptible(10);
if (debug & DEBUG_HW)
pr_notice("%s: IRQ %d count %d\n", hw->name,
hw->irq, hw->irqcnt);
if (!hw->irqcnt) {
pr_info("%s: IRQ(%d) got no requests during init %d\n",
hw->name, hw->irq, 3 - cnt);
} else
return 0;
}
free_irq(hw->irq, hw);
return -EIO;
}
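/* release the config/address regions and unmap any MMIO mappings */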
static void
release_io(struct inf_hw *hw)
{
if (hw->cfg.mode) {
if (hw->cfg.mode == AM_MEMIO) {
release_mem_region(hw->cfg.start, hw->cfg.size);
if (hw->cfg.p)
iounmap(hw->cfg.p);
} else
release_region(hw->cfg.start, hw->cfg.size);
hw->cfg.mode = AM_NONE;
}
if (hw->addr.mode) {
if (hw->addr.mode == AM_MEMIO) {
release_mem_region(hw->addr.start, hw->addr.size);
if (hw->addr.p)
iounmap(hw->addr.p);
} else
release_region(hw->addr.start, hw->addr.size);
hw->addr.mode = AM_NONE;
}
}
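/* map the PCI resources and set up ISAC/HSCX register access per card type */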
static int
setup_io(struct inf_hw *hw)
{
int err = 0;
if (hw->ci->cfg_mode) {
hw->cfg.start = pci_resource_start(hw->pdev, hw->ci->cfg_bar);
hw->cfg.size = pci_resource_len(hw->pdev, hw->ci->cfg_bar);
if (hw->ci->cfg_mode == AM_MEMIO) {
if (!request_mem_region(hw->cfg.start, hw->cfg.size,
hw->name))
err = -EBUSY;
} else {
if (!request_region(hw->cfg.start, hw->cfg.size,
hw->name))
err = -EBUSY;
}
if (err) {
pr_info("mISDN: %s config port %lx (%lu bytes)"
"already in use\n", hw->name,
(ulong)hw->cfg.start, (ulong)hw->cfg.size);
return err;
}
hw->cfg.mode = hw->ci->cfg_mode;
if (hw->ci->cfg_mode == AM_MEMIO) {
hw->cfg.p = ioremap(hw->cfg.start, hw->cfg.size);
if (!hw->cfg.p)
return -ENOMEM;
}
if (debug & DEBUG_HW)
pr_notice("%s: IO cfg %lx (%lu bytes) mode%d\n",
hw->name, (ulong)hw->cfg.start,
(ulong)hw->cfg.size, hw->ci->cfg_mode);
}
if (hw->ci->addr_mode) {
hw->addr.start = pci_resource_start(hw->pdev, hw->ci->addr_bar);
hw->addr.size = pci_resource_len(hw->pdev, hw->ci->addr_bar);
if (hw->ci->addr_mode == AM_MEMIO) {
if (!request_mem_region(hw->addr.start, hw->addr.size,
hw->name))
err = -EBUSY;
} else {
if (!request_region(hw->addr.start, hw->addr.size,
hw->name))
err = -EBUSY;
}
if (err) {
pr_info("mISDN: %s address port %lx (%lu bytes)"
"already in use\n", hw->name,
(ulong)hw->addr.start, (ulong)hw->addr.size);
return err;
}
hw->addr.mode = hw->ci->addr_mode;
if (hw->ci->addr_mode == AM_MEMIO) {
hw->addr.p = ioremap(hw->addr.start, hw->addr.size);
if (!hw->addr.p)
return -ENOMEM;
}
if (debug & DEBUG_HW)
pr_notice("%s: IO addr %lx (%lu bytes) mode%d\n",
hw->name, (ulong)hw->addr.start,
(ulong)hw->addr.size, hw->ci->addr_mode);
}
switch (hw->ci->typ) {
case INF_DIVA20:
case INF_DIVA20U:
hw->ipac.type = IPAC_TYPE_ISAC | IPAC_TYPE_HSCX;
hw->isac.mode = hw->cfg.mode;
hw->isac.a.io.ale = (u32)hw->cfg.start + DIVA_ISAC_ALE;
hw->isac.a.io.port = (u32)hw->cfg.start + DIVA_ISAC_PORT;
hw->hscx.mode = hw->cfg.mode;
hw->hscx.a.io.ale = (u32)hw->cfg.start + DIVA_HSCX_ALE;
hw->hscx.a.io.port = (u32)hw->cfg.start + DIVA_HSCX_PORT;
break;
case INF_DIVA201:
hw->ipac.type = IPAC_TYPE_IPAC;
hw->ipac.isac.off = 0x80;
hw->isac.mode = hw->addr.mode;
hw->isac.a.p = hw->addr.p;
hw->hscx.mode = hw->addr.mode;
hw->hscx.a.p = hw->addr.p;
break;
case INF_DIVA202:
hw->ipac.type = IPAC_TYPE_IPACX;
hw->isac.mode = hw->addr.mode;
hw->isac.a.p = hw->addr.p;
hw->hscx.mode = hw->addr.mode;
hw->hscx.a.p = hw->addr.p;
break;
case INF_SPEEDWIN:
case INF_SAPHIR3:
hw->ipac.type = IPAC_TYPE_IPAC;
hw->ipac.isac.off = 0x80;
hw->isac.mode = hw->cfg.mode;
hw->isac.a.io.ale = (u32)hw->cfg.start + TIGER_IPAC_ALE;
hw->isac.a.io.port = (u32)hw->cfg.start + TIGER_IPAC_PORT;
hw->hscx.mode = hw->cfg.mode;
hw->hscx.a.io.ale = (u32)hw->cfg.start + TIGER_IPAC_ALE;
hw->hscx.a.io.port = (u32)hw->cfg.start + TIGER_IPAC_PORT;
outb(0xff, (ulong)hw->cfg.start);
mdelay(1);
outb(0x00, (ulong)hw->cfg.start);
mdelay(1);
outb(TIGER_IOMASK, (ulong)hw->cfg.start + TIGER_AUX_CTRL);
break;
case INF_QS1000:
case INF_QS3000:
hw->ipac.type = IPAC_TYPE_IPAC;
hw->ipac.isac.off = 0x80;
hw->isac.a.io.ale = (u32)hw->addr.start;
hw->isac.a.io.port = (u32)hw->addr.start + 1;
hw->isac.mode = hw->addr.mode;
hw->hscx.a.io.ale = (u32)hw->addr.start;
hw->hscx.a.io.port = (u32)hw->addr.start + 1;
hw->hscx.mode = hw->addr.mode;
break;
case INF_NICCY:
hw->ipac.type = IPAC_TYPE_ISAC | IPAC_TYPE_HSCX;
hw->isac.mode = hw->addr.mode;
hw->isac.a.io.ale = (u32)hw->addr.start + NICCY_ISAC_ALE;
hw->isac.a.io.port = (u32)hw->addr.start + NICCY_ISAC_PORT;
hw->hscx.mode = hw->addr.mode;
hw->hscx.a.io.ale = (u32)hw->addr.start + NICCY_HSCX_ALE;
hw->hscx.a.io.port = (u32)hw->addr.start + NICCY_HSCX_PORT;
break;
case INF_SCT_1:
hw->ipac.type = IPAC_TYPE_IPAC;
hw->ipac.isac.off = 0x80;
hw->isac.a.io.ale = (u32)hw->addr.start;
hw->isac.a.io.port = hw->isac.a.io.ale + 4;
hw->isac.mode = hw->addr.mode;
hw->hscx.a.io.ale = hw->isac.a.io.ale;
hw->hscx.a.io.port = hw->isac.a.io.port;
hw->hscx.mode = hw->addr.mode;
break;
case INF_SCT_2:
hw->ipac.type = IPAC_TYPE_IPAC;
hw->ipac.isac.off = 0x80;
hw->isac.a.io.ale = (u32)hw->addr.start + 0x08;
hw->isac.a.io.port = hw->isac.a.io.ale + 4;
hw->isac.mode = hw->addr.mode;
hw->hscx.a.io.ale = hw->isac.a.io.ale;
hw->hscx.a.io.port = hw->isac.a.io.port;
hw->hscx.mode = hw->addr.mode;
break;
case INF_SCT_3:
hw->ipac.type = IPAC_TYPE_IPAC;
hw->ipac.isac.off = 0x80;
hw->isac.a.io.ale = (u32)hw->addr.start + 0x10;
hw->isac.a.io.port = hw->isac.a.io.ale + 4;
hw->isac.mode = hw->addr.mode;
hw->hscx.a.io.ale = hw->isac.a.io.ale;
hw->hscx.a.io.port = hw->isac.a.io.port;
hw->hscx.mode = hw->addr.mode;
break;
case INF_SCT_4:
hw->ipac.type = IPAC_TYPE_IPAC;
hw->ipac.isac.off = 0x80;
hw->isac.a.io.ale = (u32)hw->addr.start + 0x20;
hw->isac.a.io.port = hw->isac.a.io.ale + 4;
hw->isac.mode = hw->addr.mode;
hw->hscx.a.io.ale = hw->isac.a.io.ale;
hw->hscx.a.io.port = hw->isac.a.io.port;
hw->hscx.mode = hw->addr.mode;
break;
case INF_GAZEL_R685:
hw->ipac.type = IPAC_TYPE_ISAC | IPAC_TYPE_HSCX;
hw->ipac.isac.off = 0x80;
hw->isac.mode = hw->addr.mode;
hw->isac.a.io.port = (u32)hw->addr.start;
hw->hscx.mode = hw->addr.mode;
hw->hscx.a.io.port = hw->isac.a.io.port;
break;
case INF_GAZEL_R753:
hw->ipac.type = IPAC_TYPE_IPAC;
hw->ipac.isac.off = 0x80;
hw->isac.mode = hw->addr.mode;
hw->isac.a.io.ale = (u32)hw->addr.start;
hw->isac.a.io.port = (u32)hw->addr.start + GAZEL_IPAC_DATA_PORT;
hw->hscx.mode = hw->addr.mode;
hw->hscx.a.io.ale = hw->isac.a.io.ale;
hw->hscx.a.io.port = hw->isac.a.io.port;
break;
default:
return -EINVAL;
}
switch (hw->isac.mode) {
case AM_MEMIO:
ASSIGN_FUNC_IPAC(MIO, hw->ipac);
break;
case AM_IND_IO:
ASSIGN_FUNC_IPAC(IND, hw->ipac);
break;
case AM_IO:
ASSIGN_FUNC_IPAC(IO, hw->ipac);
break;
default:
return -EINVAL;
}
return 0;
}
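/* release one card, the first SciTel Quadro port also frees its sub-cards */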
static void
release_card(struct inf_hw *card) {
ulong flags;
int i;
spin_lock_irqsave(&card->lock, flags);
disable_hwirq(card);
spin_unlock_irqrestore(&card->lock, flags);
card->ipac.isac.release(&card->ipac.isac);
free_irq(card->irq, card);
mISDN_unregister_device(&card->ipac.isac.dch.dev);
release_io(card);
write_lock_irqsave(&card_lock, flags);
list_del(&card->list);
write_unlock_irqrestore(&card_lock, flags);
switch (card->ci->typ) {
case INF_SCT_2:
case INF_SCT_3:
case INF_SCT_4:
break;
case INF_SCT_1:
for (i = 0; i < 3; i++) {
if (card->sc[i])
release_card(card->sc[i]);
card->sc[i] = NULL;
}
fallthrough;
default:
pci_disable_device(card->pdev);
pci_set_drvdata(card->pdev, NULL);
break;
}
kfree(card);
inf_cnt--;
}
static int
setup_instance(struct inf_hw *card)
{
int err;
ulong flags;
snprintf(card->name, MISDN_MAX_IDLEN - 1, "%s.%d", card->ci->name,
inf_cnt + 1);
write_lock_irqsave(&card_lock, flags);
list_add_tail(&card->list, &Cards);
write_unlock_irqrestore(&card_lock, flags);
_set_debug(card);
card->ipac.isac.name = card->name;
card->ipac.name = card->name;
card->ipac.owner = THIS_MODULE;
spin_lock_init(&card->lock);
card->ipac.isac.hwlock = &card->lock;
card->ipac.hwlock = &card->lock;
card->ipac.ctrl = (void *)&inf_ctrl;
err = setup_io(card);
if (err)
goto error_setup;
card->ipac.isac.dch.dev.Bprotocols =
mISDNipac_init(&card->ipac, card);
if (card->ipac.isac.dch.dev.Bprotocols == 0)
goto error_setup;
err = mISDN_register_device(&card->ipac.isac.dch.dev,
&card->pdev->dev, card->name);
if (err)
goto error;
err = init_irq(card);
if (!err) {
inf_cnt++;
pr_notice("Infineon %d cards installed\n", inf_cnt);
return 0;
}
mISDN_unregister_device(&card->ipac.isac.dch.dev);
error:
card->ipac.release(&card->ipac);
error_setup:
release_io(card);
write_lock_irqsave(&card_lock, flags);
list_del(&card->list);
write_unlock_irqrestore(&card_lock, flags);
return err;
}
static const struct inf_cinfo inf_card_info[] = {
{
INF_DIVA20,
"Dialogic Diva 2.0",
"diva20",
AM_IND_IO, AM_NONE, 2, 0,
&diva_irq
},
{
INF_DIVA20U,
"Dialogic Diva 2.0U",
"diva20U",
AM_IND_IO, AM_NONE, 2, 0,
&diva_irq
},
{
INF_DIVA201,
"Dialogic Diva 2.01",
"diva201",
AM_MEMIO, AM_MEMIO, 0, 1,
&diva20x_irq
},
{
INF_DIVA202,
"Dialogic Diva 2.02",
"diva202",
AM_MEMIO, AM_MEMIO, 0, 1,
&diva20x_irq
},
{
INF_SPEEDWIN,
"Sedlbauer SpeedWin PCI",
"speedwin",
AM_IND_IO, AM_NONE, 0, 0,
&tiger_irq
},
{
INF_SAPHIR3,
"HST Saphir 3",
"saphir",
AM_IND_IO, AM_NONE, 0, 0,
&tiger_irq
},
{
INF_QS1000,
"Develo Microlink PCI",
"qs1000",
AM_IO, AM_IND_IO, 1, 3,
&elsa_irq
},
{
INF_QS3000,
"Develo QuickStep 3000",
"qs3000",
AM_IO, AM_IND_IO, 1, 3,
&elsa_irq
},
{
INF_NICCY,
"Sagem NICCY",
"niccy",
AM_IO, AM_IND_IO, 0, 1,
&niccy_irq
},
{
INF_SCT_1,
"SciTel Quadro",
"p1_scitel",
AM_IO, AM_IND_IO, 1, 5,
&ipac_irq
},
{
INF_SCT_2,
"SciTel Quadro",
"p2_scitel",
AM_NONE, AM_IND_IO, 0, 4,
&ipac_irq
},
{
INF_SCT_3,
"SciTel Quadro",
"p3_scitel",
AM_NONE, AM_IND_IO, 0, 3,
&ipac_irq
},
{
INF_SCT_4,
"SciTel Quadro",
"p4_scitel",
AM_NONE, AM_IND_IO, 0, 2,
&ipac_irq
},
{
INF_GAZEL_R685,
"Gazel R685",
"gazel685",
AM_IO, AM_IO, 1, 2,
&gazel_irq
},
{
INF_GAZEL_R753,
"Gazel R753",
"gazel753",
AM_IO, AM_IND_IO, 1, 2,
&ipac_irq
},
{
INF_NONE,
}
};
static const struct inf_cinfo *
get_card_info(enum inf_types typ)
{
const struct inf_cinfo *ci = inf_card_info;
while (ci->typ != INF_NONE) {
if (ci->typ == typ)
return ci;
ci++;
}
return NULL;
}
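/* PCI probe, the SciTel Quadro needs three additional sub-card instances */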
static int
inf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
int err = -ENOMEM;
struct inf_hw *card;
card = kzalloc(sizeof(struct inf_hw), GFP_KERNEL);
if (!card) {
pr_info("No memory for Infineon ISDN card\n");
return err;
}
card->pdev = pdev;
err = pci_enable_device(pdev);
if (err) {
kfree(card);
return err;
}
card->ci = get_card_info(ent->driver_data);
if (!card->ci) {
pr_info("mISDN: do not have information about adapter at %s\n",
pci_name(pdev));
kfree(card);
pci_disable_device(pdev);
return -EINVAL;
} else
pr_notice("mISDN: found adapter %s at %s\n",
card->ci->full, pci_name(pdev));
card->irq = pdev->irq;
pci_set_drvdata(pdev, card);
err = setup_instance(card);
if (err) {
pci_disable_device(pdev);
kfree(card);
pci_set_drvdata(pdev, NULL);
} else if (ent->driver_data == INF_SCT_1) {
int i;
struct inf_hw *sc;
for (i = 1; i < 4; i++) {
sc = kzalloc(sizeof(struct inf_hw), GFP_KERNEL);
if (!sc) {
release_card(card);
pci_disable_device(pdev);
return -ENOMEM;
}
sc->irq = card->irq;
sc->pdev = card->pdev;
sc->ci = card->ci + i;
err = setup_instance(sc);
if (err) {
pci_disable_device(pdev);
kfree(sc);
release_card(card);
break;
} else
card->sc[i - 1] = sc;
}
}
return err;
}
static void
inf_remove(struct pci_dev *pdev)
{
struct inf_hw *card = pci_get_drvdata(pdev);
if (card)
release_card(card);
else
pr_debug("%s: drvdata already removed\n", __func__);
}
static struct pci_driver infineon_driver = {
.name = "ISDN Infineon pci",
.probe = inf_probe,
.remove = inf_remove,
.id_table = infineon_ids,
};
static int __init
infineon_init(void)
{
int err;
pr_notice("Infineon ISDN Driver Rev. %s\n", INFINEON_REV);
err = pci_register_driver(&infineon_driver);
return err;
}
static void __exit
infineon_cleanup(void)
{
pci_unregister_driver(&infineon_driver);
}
module_init(infineon_init);
module_exit(infineon_cleanup);
| linux-master | drivers/isdn/hardware/mISDN/mISDNinfineon.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* w6692.c mISDN driver for Winbond w6692 based cards
*
* Author Karsten Keil <[email protected]>
* based on the w6692 I4L driver from Petr Novak <[email protected]>
*
* Copyright 2009 by Karsten Keil <[email protected]>
*/
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/mISDNhw.h>
#include <linux/slab.h>
#include "w6692.h"
#define W6692_REV "2.0"
#define DBUSY_TIMER_VALUE 80
enum {
W6692_ASUS,
W6692_WINBOND,
W6692_USR
};
/* private data in the PCI devices list */
struct w6692map {
u_int subtype;
char *name;
};
static const struct w6692map w6692_map[] =
{
{W6692_ASUS, "Dynalink/AsusCom IS64PH"},
{W6692_WINBOND, "Winbond W6692"},
{W6692_USR, "USR W6692"}
};
#define PCI_DEVICE_ID_USR_6692 0x3409
struct w6692_ch {
struct bchannel bch;
u32 addr;
struct timer_list timer;
u8 b_mode;
};
struct w6692_hw {
struct list_head list;
struct pci_dev *pdev;
char name[MISDN_MAX_IDLEN];
u32 irq;
u32 irqcnt;
u32 addr;
u32 fmask; /* feature mask - bit set per card nr */
int subtype;
spinlock_t lock; /* hw lock */
u8 imask;
u8 pctl;
u8 xaddr;
u8 xdata;
u8 state;
struct w6692_ch bc[2];
struct dchannel dch;
char log[64];
};
static LIST_HEAD(Cards);
static DEFINE_RWLOCK(card_lock); /* protect Cards */
static int w6692_cnt;
static int debug;
static u32 led;
static u32 pots;
static void
_set_debug(struct w6692_hw *card)
{
card->dch.debug = debug;
card->bc[0].bch.debug = debug;
card->bc[1].bch.debug = debug;
}
static int
set_debug(const char *val, const struct kernel_param *kp)
{
int ret;
struct w6692_hw *card;
ret = param_set_uint(val, kp);
if (!ret) {
read_lock(&card_lock);
list_for_each_entry(card, &Cards, list)
_set_debug(card);
read_unlock(&card_lock);
}
return ret;
}
MODULE_AUTHOR("Karsten Keil");
MODULE_LICENSE("GPL v2");
MODULE_VERSION(W6692_REV);
module_param_call(debug, set_debug, param_get_uint, &debug, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(debug, "W6692 debug mask");
module_param(led, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(led, "W6692 LED support bitmask (one bit per card)");
module_param(pots, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(pots, "W6692 POTS support bitmask (one bit per card)");
static inline u8
ReadW6692(struct w6692_hw *card, u8 offset)
{
return inb(card->addr + offset);
}
static inline void
WriteW6692(struct w6692_hw *card, u8 offset, u8 value)
{
outb(value, card->addr + offset);
}
static inline u8
ReadW6692B(struct w6692_ch *bc, u8 offset)
{
return inb(bc->addr + offset);
}
static inline void
WriteW6692B(struct w6692_ch *bc, u8 offset, u8 value)
{
outb(value, bc->addr + offset);
}
static void
enable_hwirq(struct w6692_hw *card)
{
WriteW6692(card, W_IMASK, card->imask);
}
static void
disable_hwirq(struct w6692_hw *card)
{
WriteW6692(card, W_IMASK, 0xff);
}
static const char *W6692Ver[] = {"V00", "V01", "V10", "V11"};
static void
W6692Version(struct w6692_hw *card)
{
int val;
val = ReadW6692(card, W_D_RBCH);
pr_notice("%s: Winbond W6692 version: %s\n", card->name,
W6692Ver[(val >> 6) & 3]);
}
static void
w6692_led_handler(struct w6692_hw *card, int on)
{
if ((!(card->fmask & led)) || card->subtype == W6692_USR)
return;
if (on) {
card->xdata &= 0xfb; /* LED ON */
WriteW6692(card, W_XDATA, card->xdata);
} else {
card->xdata |= 0x04; /* LED OFF */
WriteW6692(card, W_XDATA, card->xdata);
}
}
static void
ph_command(struct w6692_hw *card, u8 cmd)
{
pr_debug("%s: ph_command %x\n", card->name, cmd);
WriteW6692(card, W_CIX, cmd);
}
static void
W6692_new_ph(struct w6692_hw *card)
{
if (card->state == W_L1CMD_RST)
ph_command(card, W_L1CMD_DRC);
schedule_event(&card->dch, FLG_PHCHANGE);
}
static void
W6692_ph_bh(struct dchannel *dch)
{
struct w6692_hw *card = dch->hw;
switch (card->state) {
case W_L1CMD_RST:
dch->state = 0;
l1_event(dch->l1, HW_RESET_IND);
break;
case W_L1IND_CD:
dch->state = 3;
l1_event(dch->l1, HW_DEACT_CNF);
break;
case W_L1IND_DRD:
dch->state = 3;
l1_event(dch->l1, HW_DEACT_IND);
break;
case W_L1IND_CE:
dch->state = 4;
l1_event(dch->l1, HW_POWERUP_IND);
break;
case W_L1IND_LD:
if (dch->state <= 5) {
dch->state = 5;
l1_event(dch->l1, ANYSIGNAL);
} else {
dch->state = 8;
l1_event(dch->l1, LOSTFRAMING);
}
break;
case W_L1IND_ARD:
dch->state = 6;
l1_event(dch->l1, INFO2);
break;
case W_L1IND_AI8:
dch->state = 7;
l1_event(dch->l1, INFO4_P8);
break;
case W_L1IND_AI10:
dch->state = 7;
l1_event(dch->l1, INFO4_P10);
break;
default:
pr_debug("%s: TE unknown state %02x dch state %02x\n",
card->name, card->state, dch->state);
break;
}
pr_debug("%s: TE newstate %02x\n", card->name, dch->state);
}
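/* read count bytes from the D-channel RFIFO into rx_skb and acknowledge */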
static void
W6692_empty_Dfifo(struct w6692_hw *card, int count)
{
struct dchannel *dch = &card->dch;
u8 *ptr;
pr_debug("%s: empty_Dfifo %d\n", card->name, count);
if (!dch->rx_skb) {
dch->rx_skb = mI_alloc_skb(card->dch.maxlen, GFP_ATOMIC);
if (!dch->rx_skb) {
pr_info("%s: D receive out of memory\n", card->name);
WriteW6692(card, W_D_CMDR, W_D_CMDR_RACK);
return;
}
}
if ((dch->rx_skb->len + count) >= dch->maxlen) {
pr_debug("%s: empty_Dfifo overrun %d\n", card->name,
dch->rx_skb->len + count);
WriteW6692(card, W_D_CMDR, W_D_CMDR_RACK);
return;
}
ptr = skb_put(dch->rx_skb, count);
insb(card->addr + W_D_RFIFO, ptr, count);
WriteW6692(card, W_D_CMDR, W_D_CMDR_RACK);
if (debug & DEBUG_HW_DFIFO) {
snprintf(card->log, 63, "D-recv %s %d ",
card->name, count);
print_hex_dump_bytes(card->log, DUMP_PREFIX_OFFSET, ptr, count);
}
}
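/* write the next D-channel chunk to the XFIFO and start the busy timer */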
static void
W6692_fill_Dfifo(struct w6692_hw *card)
{
struct dchannel *dch = &card->dch;
int count;
u8 *ptr;
u8 cmd = W_D_CMDR_XMS;
pr_debug("%s: fill_Dfifo\n", card->name);
if (!dch->tx_skb)
return;
count = dch->tx_skb->len - dch->tx_idx;
if (count <= 0)
return;
if (count > W_D_FIFO_THRESH)
count = W_D_FIFO_THRESH;
else
cmd |= W_D_CMDR_XME;
ptr = dch->tx_skb->data + dch->tx_idx;
dch->tx_idx += count;
outsb(card->addr + W_D_XFIFO, ptr, count);
WriteW6692(card, W_D_CMDR, cmd);
if (test_and_set_bit(FLG_BUSY_TIMER, &dch->Flags)) {
pr_debug("%s: fill_Dfifo dbusytimer running\n", card->name);
del_timer(&dch->timer);
}
dch->timer.expires = jiffies + ((DBUSY_TIMER_VALUE * HZ) / 1000);
add_timer(&dch->timer);
if (debug & DEBUG_HW_DFIFO) {
snprintf(card->log, 63, "D-send %s %d ",
card->name, count);
print_hex_dump_bytes(card->log, DUMP_PREFIX_OFFSET, ptr, count);
}
}
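/* restart the current D-channel frame after transmit underrun/collision */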
static void
d_retransmit(struct w6692_hw *card)
{
struct dchannel *dch = &card->dch;
if (test_and_clear_bit(FLG_BUSY_TIMER, &dch->Flags))
del_timer(&dch->timer);
#ifdef FIXME
if (test_and_clear_bit(FLG_L1_BUSY, &dch->Flags))
dchannel_sched_event(dch, D_CLEARBUSY);
#endif
if (test_bit(FLG_TX_BUSY, &dch->Flags)) {
/* Restart frame */
dch->tx_idx = 0;
W6692_fill_Dfifo(card);
} else if (dch->tx_skb) { /* should not happen */
pr_info("%s: %s without TX_BUSY\n", card->name, __func__);
test_and_set_bit(FLG_TX_BUSY, &dch->Flags);
dch->tx_idx = 0;
W6692_fill_Dfifo(card);
} else {
pr_info("%s: XDU no TX_BUSY\n", card->name);
if (get_next_dframe(dch))
W6692_fill_Dfifo(card);
}
}
static void
handle_rxD(struct w6692_hw *card) {
u8 stat;
int count;
stat = ReadW6692(card, W_D_RSTA);
if (stat & (W_D_RSTA_RDOV | W_D_RSTA_CRCE | W_D_RSTA_RMB)) {
if (stat & W_D_RSTA_RDOV) {
pr_debug("%s: D-channel RDOV\n", card->name);
#ifdef ERROR_STATISTIC
card->dch.err_rx++;
#endif
}
if (stat & W_D_RSTA_CRCE) {
pr_debug("%s: D-channel CRC error\n", card->name);
#ifdef ERROR_STATISTIC
card->dch.err_crc++;
#endif
}
if (stat & W_D_RSTA_RMB) {
pr_debug("%s: D-channel ABORT\n", card->name);
#ifdef ERROR_STATISTIC
card->dch.err_rx++;
#endif
}
dev_kfree_skb(card->dch.rx_skb);
card->dch.rx_skb = NULL;
WriteW6692(card, W_D_CMDR, W_D_CMDR_RACK | W_D_CMDR_RRST);
} else {
count = ReadW6692(card, W_D_RBCL) & (W_D_FIFO_THRESH - 1);
if (count == 0)
count = W_D_FIFO_THRESH;
W6692_empty_Dfifo(card, count);
recv_Dchannel(&card->dch);
}
}
static void
handle_txD(struct w6692_hw *card) {
if (test_and_clear_bit(FLG_BUSY_TIMER, &card->dch.Flags))
del_timer(&card->dch.timer);
if (card->dch.tx_skb && card->dch.tx_idx < card->dch.tx_skb->len) {
W6692_fill_Dfifo(card);
} else {
dev_kfree_skb(card->dch.tx_skb);
if (get_next_dframe(&card->dch))
W6692_fill_Dfifo(card);
}
}
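/* D-channel EXIR events: TX errors, receive overflow, layer 1 state changes */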
static void
handle_statusD(struct w6692_hw *card)
{
struct dchannel *dch = &card->dch;
u8 exval, v1, cir;
exval = ReadW6692(card, W_D_EXIR);
pr_debug("%s: D_EXIR %02x\n", card->name, exval);
if (exval & (W_D_EXI_XDUN | W_D_EXI_XCOL)) {
/* Transmit underrun/collision */
pr_debug("%s: D-channel underrun/collision\n", card->name);
#ifdef ERROR_STATISTIC
dch->err_tx++;
#endif
d_retransmit(card);
}
if (exval & W_D_EXI_RDOV) { /* RDOV */
pr_debug("%s: D-channel RDOV\n", card->name);
WriteW6692(card, W_D_CMDR, W_D_CMDR_RRST);
}
if (exval & W_D_EXI_TIN2) /* TIN2 - never */
pr_debug("%s: spurious TIN2 interrupt\n", card->name);
if (exval & W_D_EXI_MOC) { /* MOC - not supported */
v1 = ReadW6692(card, W_MOSR);
pr_debug("%s: spurious MOC interrupt MOSR %02x\n",
card->name, v1);
}
if (exval & W_D_EXI_ISC) { /* ISC - Level1 change */
cir = ReadW6692(card, W_CIR);
pr_debug("%s: ISC CIR %02X\n", card->name, cir);
if (cir & W_CIR_ICC) {
v1 = cir & W_CIR_COD_MASK;
pr_debug("%s: ph_state_change %x -> %x\n", card->name,
dch->state, v1);
card->state = v1;
if (card->fmask & led) {
switch (v1) {
case W_L1IND_AI8:
case W_L1IND_AI10:
w6692_led_handler(card, 1);
break;
default:
w6692_led_handler(card, 0);
break;
}
}
W6692_new_ph(card);
}
if (cir & W_CIR_SCC) {
v1 = ReadW6692(card, W_SQR);
pr_debug("%s: SCC SQR %02X\n", card->name, v1);
}
}
if (exval & W_D_EXI_WEXP)
pr_debug("%s: spurious WEXP interrupt!\n", card->name);
if (exval & W_D_EXI_TEXP)
pr_debug("%s: spurious TEXP interrupt!\n", card->name);
}
static void
W6692_empty_Bfifo(struct w6692_ch *wch, int count)
{
struct w6692_hw *card = wch->bch.hw;
u8 *ptr;
int maxlen;
pr_debug("%s: empty_Bfifo %d\n", card->name, count);
if (unlikely(wch->bch.state == ISDN_P_NONE)) {
pr_debug("%s: empty_Bfifo ISDN_P_NONE\n", card->name);
WriteW6692B(wch, W_B_CMDR, W_B_CMDR_RACK | W_B_CMDR_RACT);
if (wch->bch.rx_skb)
skb_trim(wch->bch.rx_skb, 0);
return;
}
if (test_bit(FLG_RX_OFF, &wch->bch.Flags)) {
wch->bch.dropcnt += count;
WriteW6692B(wch, W_B_CMDR, W_B_CMDR_RACK | W_B_CMDR_RACT);
return;
}
maxlen = bchannel_get_rxbuf(&wch->bch, count);
if (maxlen < 0) {
WriteW6692B(wch, W_B_CMDR, W_B_CMDR_RACK | W_B_CMDR_RACT);
if (wch->bch.rx_skb)
skb_trim(wch->bch.rx_skb, 0);
pr_warn("%s.B%d: No bufferspace for %d bytes\n",
card->name, wch->bch.nr, count);
return;
}
ptr = skb_put(wch->bch.rx_skb, count);
insb(wch->addr + W_B_RFIFO, ptr, count);
WriteW6692B(wch, W_B_CMDR, W_B_CMDR_RACK | W_B_CMDR_RACT);
if (debug & DEBUG_HW_DFIFO) {
snprintf(card->log, 63, "B%1d-recv %s %d ",
wch->bch.nr, card->name, count);
print_hex_dump_bytes(card->log, DUMP_PREFIX_OFFSET, ptr, count);
}
}
static void
W6692_fill_Bfifo(struct w6692_ch *wch)
{
struct w6692_hw *card = wch->bch.hw;
int count, fillempty = 0;
u8 *ptr, cmd = W_B_CMDR_RACT | W_B_CMDR_XMS;
pr_debug("%s: fill Bfifo\n", card->name);
if (!wch->bch.tx_skb) {
if (!test_bit(FLG_TX_EMPTY, &wch->bch.Flags))
return;
ptr = wch->bch.fill;
count = W_B_FIFO_THRESH;
fillempty = 1;
} else {
count = wch->bch.tx_skb->len - wch->bch.tx_idx;
if (count <= 0)
return;
ptr = wch->bch.tx_skb->data + wch->bch.tx_idx;
}
if (count > W_B_FIFO_THRESH)
count = W_B_FIFO_THRESH;
else if (test_bit(FLG_HDLC, &wch->bch.Flags))
cmd |= W_B_CMDR_XME;
pr_debug("%s: fill Bfifo%d/%d\n", card->name,
count, wch->bch.tx_idx);
wch->bch.tx_idx += count;
if (fillempty) {
while (count > 0) {
outsb(wch->addr + W_B_XFIFO, ptr, MISDN_BCH_FILL_SIZE);
count -= MISDN_BCH_FILL_SIZE;
}
} else {
outsb(wch->addr + W_B_XFIFO, ptr, count);
}
WriteW6692B(wch, W_B_CMDR, cmd);
if ((debug & DEBUG_HW_BFIFO) && !fillempty) {
snprintf(card->log, 63, "B%1d-send %s %d ",
wch->bch.nr, card->name, count);
print_hex_dump_bytes(card->log, DUMP_PREFIX_OFFSET, ptr, count);
}
}
#if 0
static int
setvolume(struct w6692_ch *wch, int mic, struct sk_buff *skb)
{
struct w6692_hw *card = wch->bch.hw;
u16 *vol = (u16 *)skb->data;
u8 val;
if ((!(card->fmask & pots)) ||
!test_bit(FLG_TRANSPARENT, &wch->bch.Flags))
return -ENODEV;
if (skb->len < 2)
return -EINVAL;
if (*vol > 7)
return -EINVAL;
val = *vol & 7;
val = 7 - val;
if (mic) {
val <<= 3;
card->xaddr &= 0xc7;
} else {
card->xaddr &= 0xf8;
}
card->xaddr |= val;
WriteW6692(card, W_XADDR, card->xaddr);
return 0;
}
static int
enable_pots(struct w6692_ch *wch)
{
struct w6692_hw *card = wch->bch.hw;
if ((!(card->fmask & pots)) ||
!test_bit(FLG_TRANSPARENT, &wch->bch.Flags))
return -ENODEV;
wch->b_mode |= W_B_MODE_EPCM | W_B_MODE_BSW0;
WriteW6692B(wch, W_B_MODE, wch->b_mode);
WriteW6692B(wch, W_B_CMDR, W_B_CMDR_RRST | W_B_CMDR_XRST);
card->pctl |= ((wch->bch.nr & 2) ? W_PCTL_PCX : 0);
WriteW6692(card, W_PCTL, card->pctl);
return 0;
}
#endif
static int
disable_pots(struct w6692_ch *wch)
{
struct w6692_hw *card = wch->bch.hw;
if (!(card->fmask & pots))
return -ENODEV;
wch->b_mode &= ~(W_B_MODE_EPCM | W_B_MODE_BSW0);
WriteW6692B(wch, W_B_MODE, wch->b_mode);
WriteW6692B(wch, W_B_CMDR, W_B_CMDR_RRST | W_B_CMDR_RACT |
W_B_CMDR_XRST);
return 0;
}
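/* configure a B-channel for the requested protocol (off, raw or HDLC) */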
static int
w6692_mode(struct w6692_ch *wch, u32 pr)
{
struct w6692_hw *card;
card = wch->bch.hw;
pr_debug("%s: B%d protocol %x-->%x\n", card->name,
wch->bch.nr, wch->bch.state, pr);
switch (pr) {
case ISDN_P_NONE:
if ((card->fmask & pots) && (wch->b_mode & W_B_MODE_EPCM))
disable_pots(wch);
wch->b_mode = 0;
mISDN_clear_bchannel(&wch->bch);
WriteW6692B(wch, W_B_MODE, wch->b_mode);
WriteW6692B(wch, W_B_CMDR, W_B_CMDR_RRST | W_B_CMDR_XRST);
test_and_clear_bit(FLG_HDLC, &wch->bch.Flags);
test_and_clear_bit(FLG_TRANSPARENT, &wch->bch.Flags);
break;
case ISDN_P_B_RAW:
wch->b_mode = W_B_MODE_MMS;
WriteW6692B(wch, W_B_MODE, wch->b_mode);
WriteW6692B(wch, W_B_EXIM, 0);
WriteW6692B(wch, W_B_CMDR, W_B_CMDR_RRST | W_B_CMDR_RACT |
W_B_CMDR_XRST);
test_and_set_bit(FLG_TRANSPARENT, &wch->bch.Flags);
break;
case ISDN_P_B_HDLC:
wch->b_mode = W_B_MODE_ITF;
WriteW6692B(wch, W_B_MODE, wch->b_mode);
WriteW6692B(wch, W_B_ADM1, 0xff);
WriteW6692B(wch, W_B_ADM2, 0xff);
WriteW6692B(wch, W_B_EXIM, 0);
WriteW6692B(wch, W_B_CMDR, W_B_CMDR_RRST | W_B_CMDR_RACT |
W_B_CMDR_XRST);
test_and_set_bit(FLG_HDLC, &wch->bch.Flags);
break;
default:
pr_info("%s: protocol %x not known\n", card->name, pr);
return -ENOPROTOOPT;
}
wch->bch.state = pr;
return 0;
}
static void
send_next(struct w6692_ch *wch)
{
if (wch->bch.tx_skb && wch->bch.tx_idx < wch->bch.tx_skb->len) {
W6692_fill_Bfifo(wch);
} else {
dev_kfree_skb(wch->bch.tx_skb);
if (get_next_bframe(&wch->bch)) {
W6692_fill_Bfifo(wch);
test_and_clear_bit(FLG_TX_EMPTY, &wch->bch.Flags);
} else if (test_bit(FLG_TX_EMPTY, &wch->bch.Flags)) {
W6692_fill_Bfifo(wch);
}
}
}
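/* B-channel EXIR events: frame end, receive fill/overflow, transmit done */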
static void
W6692B_interrupt(struct w6692_hw *card, int ch)
{
struct w6692_ch *wch = &card->bc[ch];
int count;
u8 stat, star = 0;
stat = ReadW6692B(wch, W_B_EXIR);
pr_debug("%s: B%d EXIR %02x\n", card->name, wch->bch.nr, stat);
if (stat & W_B_EXI_RME) {
star = ReadW6692B(wch, W_B_STAR);
if (star & (W_B_STAR_RDOV | W_B_STAR_CRCE | W_B_STAR_RMB)) {
if ((star & W_B_STAR_RDOV) &&
test_bit(FLG_ACTIVE, &wch->bch.Flags)) {
pr_debug("%s: B%d RDOV proto=%x\n", card->name,
wch->bch.nr, wch->bch.state);
#ifdef ERROR_STATISTIC
wch->bch.err_rdo++;
#endif
}
if (test_bit(FLG_HDLC, &wch->bch.Flags)) {
if (star & W_B_STAR_CRCE) {
pr_debug("%s: B%d CRC error\n",
card->name, wch->bch.nr);
#ifdef ERROR_STATISTIC
wch->bch.err_crc++;
#endif
}
if (star & W_B_STAR_RMB) {
pr_debug("%s: B%d message abort\n",
card->name, wch->bch.nr);
#ifdef ERROR_STATISTIC
wch->bch.err_inv++;
#endif
}
}
WriteW6692B(wch, W_B_CMDR, W_B_CMDR_RACK |
W_B_CMDR_RRST | W_B_CMDR_RACT);
if (wch->bch.rx_skb)
skb_trim(wch->bch.rx_skb, 0);
} else {
count = ReadW6692B(wch, W_B_RBCL) &
(W_B_FIFO_THRESH - 1);
if (count == 0)
count = W_B_FIFO_THRESH;
W6692_empty_Bfifo(wch, count);
recv_Bchannel(&wch->bch, 0, false);
}
}
if (stat & W_B_EXI_RMR) {
if (!(stat & W_B_EXI_RME))
star = ReadW6692B(wch, W_B_STAR);
if (star & W_B_STAR_RDOV) {
pr_debug("%s: B%d RDOV proto=%x\n", card->name,
wch->bch.nr, wch->bch.state);
#ifdef ERROR_STATISTIC
wch->bch.err_rdo++;
#endif
WriteW6692B(wch, W_B_CMDR, W_B_CMDR_RACK |
W_B_CMDR_RRST | W_B_CMDR_RACT);
} else {
W6692_empty_Bfifo(wch, W_B_FIFO_THRESH);
if (test_bit(FLG_TRANSPARENT, &wch->bch.Flags))
recv_Bchannel(&wch->bch, 0, false);
}
}
if (stat & W_B_EXI_RDOV) {
/* only if it is not handled yet */
if (!(star & W_B_STAR_RDOV)) {
pr_debug("%s: B%d RDOV IRQ proto=%x\n", card->name,
wch->bch.nr, wch->bch.state);
#ifdef ERROR_STATISTIC
wch->bch.err_rdo++;
#endif
WriteW6692B(wch, W_B_CMDR, W_B_CMDR_RACK |
W_B_CMDR_RRST | W_B_CMDR_RACT);
}
}
if (stat & W_B_EXI_XFR) {
if (!(stat & (W_B_EXI_RME | W_B_EXI_RMR))) {
star = ReadW6692B(wch, W_B_STAR);
pr_debug("%s: B%d star %02x\n", card->name,
wch->bch.nr, star);
}
if (star & W_B_STAR_XDOW) {
pr_warn("%s: B%d XDOW proto=%x\n", card->name,
wch->bch.nr, wch->bch.state);
#ifdef ERROR_STATISTIC
wch->bch.err_xdu++;
#endif
WriteW6692B(wch, W_B_CMDR, W_B_CMDR_XRST |
W_B_CMDR_RACT);
/* resend */
if (wch->bch.tx_skb) {
if (!test_bit(FLG_TRANSPARENT, &wch->bch.Flags))
wch->bch.tx_idx = 0;
}
}
send_next(wch);
if (star & W_B_STAR_XDOW)
return; /* handle XDOW only once */
}
if (stat & W_B_EXI_XDUN) {
pr_warn("%s: B%d XDUN proto=%x\n", card->name,
wch->bch.nr, wch->bch.state);
#ifdef ERROR_STATISTIC
wch->bch.err_xdu++;
#endif
/* resend - no XRST needed */
if (wch->bch.tx_skb) {
if (!test_bit(FLG_TRANSPARENT, &wch->bch.Flags))
wch->bch.tx_idx = 0;
} else if (test_bit(FLG_FILLEMPTY, &wch->bch.Flags)) {
test_and_set_bit(FLG_TX_EMPTY, &wch->bch.Flags);
}
send_next(wch);
}
}
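/* shared interrupt handler, dispatches B-/D-channel and status events */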
static irqreturn_t
w6692_irq(int intno, void *dev_id)
{
struct w6692_hw *card = dev_id;
u8 ista;
spin_lock(&card->lock);
ista = ReadW6692(card, W_ISTA);
if ((ista | card->imask) == card->imask) {
		/* possibly a shared IRQ request */
spin_unlock(&card->lock);
return IRQ_NONE;
}
card->irqcnt++;
pr_debug("%s: ista %02x\n", card->name, ista);
ista &= ~card->imask;
if (ista & W_INT_B1_EXI)
W6692B_interrupt(card, 0);
if (ista & W_INT_B2_EXI)
W6692B_interrupt(card, 1);
if (ista & W_INT_D_RME)
handle_rxD(card);
if (ista & W_INT_D_RMR)
W6692_empty_Dfifo(card, W_D_FIFO_THRESH);
if (ista & W_INT_D_XFR)
handle_txD(card);
if (ista & W_INT_D_EXI)
handle_statusD(card);
if (ista & (W_INT_XINT0 | W_INT_XINT1)) /* XINT0/1 - never */
pr_debug("%s: W6692 spurious XINT!\n", card->name);
/* End IRQ Handler */
spin_unlock(&card->lock);
return IRQ_HANDLED;
}
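/* D-channel busy timer: flag L1 busy or discard the frame and reset TX */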
static void
dbusy_timer_handler(struct timer_list *t)
{
struct dchannel *dch = from_timer(dch, t, timer);
struct w6692_hw *card = dch->hw;
int rbch, star;
u_long flags;
if (test_bit(FLG_BUSY_TIMER, &dch->Flags)) {
spin_lock_irqsave(&card->lock, flags);
rbch = ReadW6692(card, W_D_RBCH);
star = ReadW6692(card, W_D_STAR);
pr_debug("%s: D-Channel Busy RBCH %02x STAR %02x\n",
card->name, rbch, star);
if (star & W_D_STAR_XBZ) /* D-Channel Busy */
test_and_set_bit(FLG_L1_BUSY, &dch->Flags);
else {
/* discard frame; reset transceiver */
test_and_clear_bit(FLG_BUSY_TIMER, &dch->Flags);
if (dch->tx_idx)
dch->tx_idx = 0;
else
pr_info("%s: W6692 D-Channel Busy no tx_idx\n",
card->name);
/* Transmitter reset */
WriteW6692(card, W_D_CMDR, W_D_CMDR_XRST);
}
spin_unlock_irqrestore(&card->lock, flags);
}
}
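/* basic chip setup: B-channel reset, D-channel mode, IRQ masks, peripherals */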
static void initW6692(struct w6692_hw *card)
{
u8 val;
timer_setup(&card->dch.timer, dbusy_timer_handler, 0);
w6692_mode(&card->bc[0], ISDN_P_NONE);
w6692_mode(&card->bc[1], ISDN_P_NONE);
WriteW6692(card, W_D_CTL, 0x00);
disable_hwirq(card);
WriteW6692(card, W_D_SAM, 0xff);
WriteW6692(card, W_D_TAM, 0xff);
WriteW6692(card, W_D_MODE, W_D_MODE_RACT);
card->state = W_L1CMD_RST;
ph_command(card, W_L1CMD_RST);
ph_command(card, W_L1CMD_ECK);
/* enable all IRQ but extern */
card->imask = 0x18;
WriteW6692(card, W_D_EXIM, 0x00);
WriteW6692B(&card->bc[0], W_B_EXIM, 0);
WriteW6692B(&card->bc[1], W_B_EXIM, 0);
/* Reset D-chan receiver and transmitter */
WriteW6692(card, W_D_CMDR, W_D_CMDR_RRST | W_D_CMDR_XRST);
/* Reset B-chan receiver and transmitter */
WriteW6692B(&card->bc[0], W_B_CMDR, W_B_CMDR_RRST | W_B_CMDR_XRST);
WriteW6692B(&card->bc[1], W_B_CMDR, W_B_CMDR_RRST | W_B_CMDR_XRST);
/* enable peripheral */
if (card->subtype == W6692_USR) {
		/* It seems that USR implemented some power control features.
		 * Pin 79 is connected to the oscillator circuit, so we
		 * have to handle it here.
		 */
card->pctl = 0x80;
card->xdata = 0;
WriteW6692(card, W_PCTL, card->pctl);
WriteW6692(card, W_XDATA, card->xdata);
} else {
card->pctl = W_PCTL_OE5 | W_PCTL_OE4 | W_PCTL_OE2 |
W_PCTL_OE1 | W_PCTL_OE0;
card->xaddr = 0x00;/* all sw off */
if (card->fmask & pots)
card->xdata |= 0x06; /* POWER UP/ LED OFF / ALAW */
if (card->fmask & led)
card->xdata |= 0x04; /* LED OFF */
if ((card->fmask & pots) || (card->fmask & led)) {
WriteW6692(card, W_PCTL, card->pctl);
WriteW6692(card, W_XADDR, card->xaddr);
WriteW6692(card, W_XDATA, card->xdata);
val = ReadW6692(card, W_XADDR);
if (debug & DEBUG_HW)
pr_notice("%s: W_XADDR=%02x\n",
card->name, val);
}
}
}
static void
reset_w6692(struct w6692_hw *card)
{
WriteW6692(card, W_D_CTL, W_D_CTL_SRST);
mdelay(10);
WriteW6692(card, W_D_CTL, 0);
}
static int
init_card(struct w6692_hw *card)
{
int cnt = 3;
u_long flags;
spin_lock_irqsave(&card->lock, flags);
disable_hwirq(card);
spin_unlock_irqrestore(&card->lock, flags);
if (request_irq(card->irq, w6692_irq, IRQF_SHARED, card->name, card)) {
pr_info("%s: couldn't get interrupt %d\n", card->name,
card->irq);
return -EIO;
}
while (cnt--) {
spin_lock_irqsave(&card->lock, flags);
initW6692(card);
enable_hwirq(card);
spin_unlock_irqrestore(&card->lock, flags);
/* Timeout 10ms */
msleep_interruptible(10);
if (debug & DEBUG_HW)
pr_notice("%s: IRQ %d count %d\n", card->name,
card->irq, card->irqcnt);
if (!card->irqcnt) {
pr_info("%s: IRQ(%d) getting no IRQs during init %d\n",
card->name, card->irq, 3 - cnt);
reset_w6692(card);
} else
return 0;
}
free_irq(card->irq, card);
return -EIO;
}
static int
w6692_l2l1B(struct mISDNchannel *ch, struct sk_buff *skb)
{
struct bchannel *bch = container_of(ch, struct bchannel, ch);
struct w6692_ch *bc = container_of(bch, struct w6692_ch, bch);
struct w6692_hw *card = bch->hw;
int ret = -EINVAL;
struct mISDNhead *hh = mISDN_HEAD_P(skb);
unsigned long flags;
switch (hh->prim) {
case PH_DATA_REQ:
spin_lock_irqsave(&card->lock, flags);
ret = bchannel_senddata(bch, skb);
if (ret > 0) { /* direct TX */
ret = 0;
W6692_fill_Bfifo(bc);
}
spin_unlock_irqrestore(&card->lock, flags);
return ret;
case PH_ACTIVATE_REQ:
spin_lock_irqsave(&card->lock, flags);
if (!test_and_set_bit(FLG_ACTIVE, &bch->Flags))
ret = w6692_mode(bc, ch->protocol);
else
ret = 0;
spin_unlock_irqrestore(&card->lock, flags);
if (!ret)
_queue_data(ch, PH_ACTIVATE_IND, MISDN_ID_ANY, 0,
NULL, GFP_KERNEL);
break;
case PH_DEACTIVATE_REQ:
spin_lock_irqsave(&card->lock, flags);
mISDN_clear_bchannel(bch);
w6692_mode(bc, ISDN_P_NONE);
spin_unlock_irqrestore(&card->lock, flags);
_queue_data(ch, PH_DEACTIVATE_IND, MISDN_ID_ANY, 0,
NULL, GFP_KERNEL);
ret = 0;
break;
default:
pr_info("%s: %s unknown prim(%x,%x)\n",
card->name, __func__, hh->prim, hh->id);
ret = -EINVAL;
}
if (!ret)
dev_kfree_skb(skb);
return ret;
}
static int
channel_bctrl(struct bchannel *bch, struct mISDN_ctrl_req *cq)
{
return mISDN_ctrl_bchannel(bch, cq);
}
static int
open_bchannel(struct w6692_hw *card, struct channel_req *rq)
{
struct bchannel *bch;
if (rq->adr.channel == 0 || rq->adr.channel > 2)
return -EINVAL;
if (rq->protocol == ISDN_P_NONE)
return -EINVAL;
bch = &card->bc[rq->adr.channel - 1].bch;
if (test_and_set_bit(FLG_OPEN, &bch->Flags))
return -EBUSY; /* b-channel can be only open once */
bch->ch.protocol = rq->protocol;
rq->ch = &bch->ch;
return 0;
}
static int
channel_ctrl(struct w6692_hw *card, struct mISDN_ctrl_req *cq)
{
int ret = 0;
switch (cq->op) {
case MISDN_CTRL_GETOP:
cq->op = MISDN_CTRL_L1_TIMER3;
break;
case MISDN_CTRL_L1_TIMER3:
ret = l1_event(card->dch.l1, HW_TIMER3_VALUE | (cq->p1 & 0xff));
break;
default:
pr_info("%s: unknown CTRL OP %x\n", card->name, cq->op);
ret = -EINVAL;
break;
}
return ret;
}
static int
w6692_bctrl(struct mISDNchannel *ch, u32 cmd, void *arg)
{
struct bchannel *bch = container_of(ch, struct bchannel, ch);
struct w6692_ch *bc = container_of(bch, struct w6692_ch, bch);
struct w6692_hw *card = bch->hw;
int ret = -EINVAL;
u_long flags;
pr_debug("%s: %s cmd:%x %p\n", card->name, __func__, cmd, arg);
switch (cmd) {
case CLOSE_CHANNEL:
test_and_clear_bit(FLG_OPEN, &bch->Flags);
cancel_work_sync(&bch->workq);
spin_lock_irqsave(&card->lock, flags);
mISDN_clear_bchannel(bch);
w6692_mode(bc, ISDN_P_NONE);
spin_unlock_irqrestore(&card->lock, flags);
ch->protocol = ISDN_P_NONE;
ch->peer = NULL;
module_put(THIS_MODULE);
ret = 0;
break;
case CONTROL_CHANNEL:
ret = channel_bctrl(bch, arg);
break;
default:
pr_info("%s: %s unknown prim(%x)\n",
card->name, __func__, cmd);
}
return ret;
}
static int
w6692_l2l1D(struct mISDNchannel *ch, struct sk_buff *skb)
{
struct mISDNdevice *dev = container_of(ch, struct mISDNdevice, D);
struct dchannel *dch = container_of(dev, struct dchannel, dev);
struct w6692_hw *card = container_of(dch, struct w6692_hw, dch);
int ret = -EINVAL;
struct mISDNhead *hh = mISDN_HEAD_P(skb);
u32 id;
u_long flags;
switch (hh->prim) {
case PH_DATA_REQ:
spin_lock_irqsave(&card->lock, flags);
ret = dchannel_senddata(dch, skb);
if (ret > 0) { /* direct TX */
id = hh->id; /* skb can be freed */
W6692_fill_Dfifo(card);
ret = 0;
spin_unlock_irqrestore(&card->lock, flags);
queue_ch_frame(ch, PH_DATA_CNF, id, NULL);
} else
spin_unlock_irqrestore(&card->lock, flags);
return ret;
case PH_ACTIVATE_REQ:
ret = l1_event(dch->l1, hh->prim);
break;
case PH_DEACTIVATE_REQ:
test_and_clear_bit(FLG_L2_ACTIVATED, &dch->Flags);
ret = l1_event(dch->l1, hh->prim);
break;
}
if (!ret)
dev_kfree_skb(skb);
return ret;
}
static int
w6692_l1callback(struct dchannel *dch, u32 cmd)
{
struct w6692_hw *card = container_of(dch, struct w6692_hw, dch);
u_long flags;
pr_debug("%s: cmd(%x) state(%02x)\n", card->name, cmd, card->state);
switch (cmd) {
case INFO3_P8:
spin_lock_irqsave(&card->lock, flags);
ph_command(card, W_L1CMD_AR8);
spin_unlock_irqrestore(&card->lock, flags);
break;
case INFO3_P10:
spin_lock_irqsave(&card->lock, flags);
ph_command(card, W_L1CMD_AR10);
spin_unlock_irqrestore(&card->lock, flags);
break;
case HW_RESET_REQ:
spin_lock_irqsave(&card->lock, flags);
if (card->state != W_L1IND_DRD)
ph_command(card, W_L1CMD_RST);
ph_command(card, W_L1CMD_ECK);
spin_unlock_irqrestore(&card->lock, flags);
break;
case HW_DEACT_REQ:
skb_queue_purge(&dch->squeue);
if (dch->tx_skb) {
dev_kfree_skb(dch->tx_skb);
dch->tx_skb = NULL;
}
dch->tx_idx = 0;
if (dch->rx_skb) {
dev_kfree_skb(dch->rx_skb);
dch->rx_skb = NULL;
}
test_and_clear_bit(FLG_TX_BUSY, &dch->Flags);
if (test_and_clear_bit(FLG_BUSY_TIMER, &dch->Flags))
del_timer(&dch->timer);
break;
case HW_POWERUP_REQ:
spin_lock_irqsave(&card->lock, flags);
ph_command(card, W_L1CMD_ECK);
spin_unlock_irqrestore(&card->lock, flags);
break;
case PH_ACTIVATE_IND:
test_and_set_bit(FLG_ACTIVE, &dch->Flags);
_queue_data(&dch->dev.D, cmd, MISDN_ID_ANY, 0, NULL,
GFP_ATOMIC);
break;
case PH_DEACTIVATE_IND:
test_and_clear_bit(FLG_ACTIVE, &dch->Flags);
_queue_data(&dch->dev.D, cmd, MISDN_ID_ANY, 0, NULL,
GFP_ATOMIC);
break;
default:
pr_debug("%s: %s unknown command %x\n", card->name,
__func__, cmd);
return -1;
}
return 0;
}
static int
open_dchannel(struct w6692_hw *card, struct channel_req *rq, void *caller)
{
pr_debug("%s: %s dev(%d) open from %p\n", card->name, __func__,
card->dch.dev.id, caller);
if (rq->protocol != ISDN_P_TE_S0)
return -EINVAL;
if (rq->adr.channel == 1)
/* E-Channel not supported */
return -EINVAL;
rq->ch = &card->dch.dev.D;
rq->ch->protocol = rq->protocol;
if (card->dch.state == 7)
_queue_data(rq->ch, PH_ACTIVATE_IND, MISDN_ID_ANY,
0, NULL, GFP_KERNEL);
return 0;
}
static int
w6692_dctrl(struct mISDNchannel *ch, u32 cmd, void *arg)
{
struct mISDNdevice *dev = container_of(ch, struct mISDNdevice, D);
struct dchannel *dch = container_of(dev, struct dchannel, dev);
struct w6692_hw *card = container_of(dch, struct w6692_hw, dch);
struct channel_req *rq;
int err = 0;
pr_debug("%s: DCTRL: %x %p\n", card->name, cmd, arg);
switch (cmd) {
case OPEN_CHANNEL:
rq = arg;
if (rq->protocol == ISDN_P_TE_S0)
err = open_dchannel(card, rq, __builtin_return_address(0));
else
err = open_bchannel(card, rq);
if (err)
break;
if (!try_module_get(THIS_MODULE))
pr_info("%s: cannot get module\n", card->name);
break;
case CLOSE_CHANNEL:
pr_debug("%s: dev(%d) close from %p\n", card->name,
dch->dev.id, __builtin_return_address(0));
module_put(THIS_MODULE);
break;
case CONTROL_CHANNEL:
err = channel_ctrl(card, arg);
break;
default:
pr_debug("%s: unknown DCTRL command %x\n", card->name, cmd);
return -EINVAL;
}
return err;
}
static int
setup_w6692(struct w6692_hw *card)
{
u32 val;
if (!request_region(card->addr, 256, card->name)) {
pr_info("%s: config port %x-%x already in use\n", card->name,
card->addr, card->addr + 255);
return -EIO;
}
W6692Version(card);
card->bc[0].addr = card->addr;
card->bc[1].addr = card->addr + 0x40;
val = ReadW6692(card, W_ISTA);
if (debug & DEBUG_HW)
pr_notice("%s ISTA=%02x\n", card->name, val);
val = ReadW6692(card, W_IMASK);
if (debug & DEBUG_HW)
pr_notice("%s IMASK=%02x\n", card->name, val);
val = ReadW6692(card, W_D_EXIR);
if (debug & DEBUG_HW)
pr_notice("%s D_EXIR=%02x\n", card->name, val);
val = ReadW6692(card, W_D_EXIM);
if (debug & DEBUG_HW)
pr_notice("%s D_EXIM=%02x\n", card->name, val);
val = ReadW6692(card, W_D_RSTA);
if (debug & DEBUG_HW)
pr_notice("%s D_RSTA=%02x\n", card->name, val);
return 0;
}
static void
release_card(struct w6692_hw *card)
{
u_long flags;
spin_lock_irqsave(&card->lock, flags);
disable_hwirq(card);
w6692_mode(&card->bc[0], ISDN_P_NONE);
w6692_mode(&card->bc[1], ISDN_P_NONE);
if ((card->fmask & led) || card->subtype == W6692_USR) {
card->xdata |= 0x04; /* LED OFF */
WriteW6692(card, W_XDATA, card->xdata);
}
spin_unlock_irqrestore(&card->lock, flags);
free_irq(card->irq, card);
l1_event(card->dch.l1, CLOSE_CHANNEL);
mISDN_unregister_device(&card->dch.dev);
release_region(card->addr, 256);
mISDN_freebchannel(&card->bc[1].bch);
mISDN_freebchannel(&card->bc[0].bch);
mISDN_freedchannel(&card->dch);
write_lock_irqsave(&card_lock, flags);
list_del(&card->list);
write_unlock_irqrestore(&card_lock, flags);
pci_disable_device(card->pdev);
pci_set_drvdata(card->pdev, NULL);
kfree(card);
}
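/* set up one card: channels, mISDN registration, hardware init, layer 1 */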
static int
setup_instance(struct w6692_hw *card)
{
int i, err;
u_long flags;
snprintf(card->name, MISDN_MAX_IDLEN - 1, "w6692.%d", w6692_cnt + 1);
write_lock_irqsave(&card_lock, flags);
list_add_tail(&card->list, &Cards);
write_unlock_irqrestore(&card_lock, flags);
card->fmask = (1 << w6692_cnt);
_set_debug(card);
spin_lock_init(&card->lock);
mISDN_initdchannel(&card->dch, MAX_DFRAME_LEN_L1, W6692_ph_bh);
card->dch.dev.Dprotocols = (1 << ISDN_P_TE_S0);
card->dch.dev.D.send = w6692_l2l1D;
card->dch.dev.D.ctrl = w6692_dctrl;
card->dch.dev.Bprotocols = (1 << (ISDN_P_B_RAW & ISDN_P_B_MASK)) |
(1 << (ISDN_P_B_HDLC & ISDN_P_B_MASK));
card->dch.hw = card;
card->dch.dev.nrbchan = 2;
for (i = 0; i < 2; i++) {
mISDN_initbchannel(&card->bc[i].bch, MAX_DATA_MEM,
W_B_FIFO_THRESH);
card->bc[i].bch.hw = card;
card->bc[i].bch.nr = i + 1;
card->bc[i].bch.ch.nr = i + 1;
card->bc[i].bch.ch.send = w6692_l2l1B;
card->bc[i].bch.ch.ctrl = w6692_bctrl;
set_channelmap(i + 1, card->dch.dev.channelmap);
list_add(&card->bc[i].bch.ch.list, &card->dch.dev.bchannels);
}
err = setup_w6692(card);
if (err)
goto error_setup;
err = mISDN_register_device(&card->dch.dev, &card->pdev->dev,
card->name);
if (err)
goto error_reg;
err = init_card(card);
if (err)
goto error_init;
err = create_l1(&card->dch, w6692_l1callback);
if (!err) {
w6692_cnt++;
pr_notice("W6692 %d cards installed\n", w6692_cnt);
return 0;
}
free_irq(card->irq, card);
error_init:
mISDN_unregister_device(&card->dch.dev);
error_reg:
release_region(card->addr, 256);
error_setup:
mISDN_freebchannel(&card->bc[1].bch);
mISDN_freebchannel(&card->bc[0].bch);
mISDN_freedchannel(&card->dch);
write_lock_irqsave(&card_lock, flags);
list_del(&card->list);
write_unlock_irqrestore(&card_lock, flags);
kfree(card);
return err;
}
static int
w6692_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
int err = -ENOMEM;
struct w6692_hw *card;
struct w6692map *m = (struct w6692map *)ent->driver_data;
card = kzalloc(sizeof(struct w6692_hw), GFP_KERNEL);
if (!card) {
pr_info("No kmem for w6692 card\n");
return err;
}
card->pdev = pdev;
card->subtype = m->subtype;
err = pci_enable_device(pdev);
if (err) {
kfree(card);
return err;
}
printk(KERN_INFO "mISDN_w6692: found adapter %s at %s\n",
m->name, pci_name(pdev));
card->addr = pci_resource_start(pdev, 1);
card->irq = pdev->irq;
pci_set_drvdata(pdev, card);
err = setup_instance(card);
if (err)
pci_set_drvdata(pdev, NULL);
return err;
}
static void
w6692_remove_pci(struct pci_dev *pdev)
{
struct w6692_hw *card = pci_get_drvdata(pdev);
if (card)
release_card(card);
else
if (debug)
pr_notice("%s: drvdata already removed\n", __func__);
}
static const struct pci_device_id w6692_ids[] = {
{ PCI_VENDOR_ID_DYNALINK, PCI_DEVICE_ID_DYNALINK_IS64PH,
PCI_ANY_ID, PCI_ANY_ID, 0, 0, (ulong)&w6692_map[0]},
{ PCI_VENDOR_ID_WINBOND2, PCI_DEVICE_ID_WINBOND2_6692,
PCI_VENDOR_ID_USR, PCI_DEVICE_ID_USR_6692, 0, 0,
(ulong)&w6692_map[2]},
{ PCI_VENDOR_ID_WINBOND2, PCI_DEVICE_ID_WINBOND2_6692,
PCI_ANY_ID, PCI_ANY_ID, 0, 0, (ulong)&w6692_map[1]},
{ }
};
MODULE_DEVICE_TABLE(pci, w6692_ids);
static struct pci_driver w6692_driver = {
.name = "w6692",
.probe = w6692_probe,
.remove = w6692_remove_pci,
.id_table = w6692_ids,
};
static int __init w6692_init(void)
{
int err;
pr_notice("Winbond W6692 PCI driver Rev. %s\n", W6692_REV);
err = pci_register_driver(&w6692_driver);
return err;
}
static void __exit w6692_cleanup(void)
{
pci_unregister_driver(&w6692_driver);
}
module_init(w6692_init);
module_exit(w6692_cleanup);
| linux-master | drivers/isdn/hardware/mISDN/w6692.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/* hfcsusb.c
* mISDN driver for Colognechip HFC-S USB chip
*
* Copyright 2001 by Peter Sprenger ([email protected])
* Copyright 2008 by Martin Bachem ([email protected])
*
* module params
* debug=<n>, default=0, with n=0xHHHHGGGG
* H - l1 driver flags described in hfcsusb.h
* G - common mISDN debug flags described at mISDNhw.h
*
* poll=<n>, default 128
* n : burst size of PH_DATA_IND at transparent rx data
*
* Revision: 0.3.3 (socket), 2008-11-05
*/
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/usb.h>
#include <linux/mISDNhw.h>
#include <linux/slab.h>
#include "hfcsusb.h"
static unsigned int debug;
static int poll = DEFAULT_TRANSP_BURST_SZ;
static LIST_HEAD(HFClist);
static DEFINE_RWLOCK(HFClock);
MODULE_AUTHOR("Martin Bachem");
MODULE_LICENSE("GPL");
module_param(debug, uint, S_IRUGO | S_IWUSR);
module_param(poll, int, 0);
static int hfcsusb_cnt;
/* some function prototypes */
static void hfcsusb_ph_command(struct hfcsusb *hw, u_char command);
static void release_hw(struct hfcsusb *hw);
static void reset_hfcsusb(struct hfcsusb *hw);
static void setPortMode(struct hfcsusb *hw);
static void hfcsusb_start_endpoint(struct hfcsusb *hw, int channel);
static void hfcsusb_stop_endpoint(struct hfcsusb *hw, int channel);
static int hfcsusb_setup_bch(struct bchannel *bch, int protocol);
static void deactivate_bchannel(struct bchannel *bch);
static int hfcsusb_ph_info(struct hfcsusb *hw);
/* start next background transfer for control channel */
static void
ctrl_start_transfer(struct hfcsusb *hw)
{
if (debug & DBG_HFC_CALL_TRACE)
printk(KERN_DEBUG "%s: %s\n", hw->name, __func__);
if (hw->ctrl_cnt) {
hw->ctrl_urb->pipe = hw->ctrl_out_pipe;
hw->ctrl_urb->setup_packet = (u_char *)&hw->ctrl_write;
hw->ctrl_urb->transfer_buffer = NULL;
hw->ctrl_urb->transfer_buffer_length = 0;
hw->ctrl_write.wIndex =
cpu_to_le16(hw->ctrl_buff[hw->ctrl_out_idx].hfcs_reg);
hw->ctrl_write.wValue =
cpu_to_le16(hw->ctrl_buff[hw->ctrl_out_idx].reg_val);
usb_submit_urb(hw->ctrl_urb, GFP_ATOMIC);
}
}
/*
* queue a control transfer request to write HFC-S USB
 * chip register using the CTRL request queue
*/
static int write_reg(struct hfcsusb *hw, __u8 reg, __u8 val)
{
struct ctrl_buf *buf;
if (debug & DBG_HFC_CALL_TRACE)
printk(KERN_DEBUG "%s: %s reg(0x%02x) val(0x%02x)\n",
hw->name, __func__, reg, val);
spin_lock(&hw->ctrl_lock);
if (hw->ctrl_cnt >= HFC_CTRL_BUFSIZE) {
spin_unlock(&hw->ctrl_lock);
return 1;
}
buf = &hw->ctrl_buff[hw->ctrl_in_idx];
buf->hfcs_reg = reg;
buf->reg_val = val;
if (++hw->ctrl_in_idx >= HFC_CTRL_BUFSIZE)
hw->ctrl_in_idx = 0;
if (++hw->ctrl_cnt == 1)
ctrl_start_transfer(hw);
spin_unlock(&hw->ctrl_lock);
return 0;
}
/* control completion routine handling background control cmds */
static void
ctrl_complete(struct urb *urb)
{
struct hfcsusb *hw = (struct hfcsusb *) urb->context;
if (debug & DBG_HFC_CALL_TRACE)
printk(KERN_DEBUG "%s: %s\n", hw->name, __func__);
urb->dev = hw->dev;
if (hw->ctrl_cnt) {
hw->ctrl_cnt--; /* decrement actual count */
if (++hw->ctrl_out_idx >= HFC_CTRL_BUFSIZE)
hw->ctrl_out_idx = 0; /* pointer wrap */
ctrl_start_transfer(hw); /* start next transfer */
}
}
/* handle LED bits */
static void
set_led_bit(struct hfcsusb *hw, signed short led_bits, int set_on)
{
if (set_on) {
if (led_bits < 0)
hw->led_state &= ~abs(led_bits);
else
hw->led_state |= led_bits;
} else {
if (led_bits < 0)
hw->led_state |= abs(led_bits);
else
hw->led_state &= ~led_bits;
}
}
/* handle LED requests */
static void
handle_led(struct hfcsusb *hw, int event)
{
struct hfcsusb_vdata *driver_info = (struct hfcsusb_vdata *)
hfcsusb_idtab[hw->vend_idx].driver_info;
__u8 tmpled;
if (driver_info->led_scheme == LED_OFF)
return;
tmpled = hw->led_state;
switch (event) {
case LED_POWER_ON:
set_led_bit(hw, driver_info->led_bits[0], 1);
set_led_bit(hw, driver_info->led_bits[1], 0);
set_led_bit(hw, driver_info->led_bits[2], 0);
set_led_bit(hw, driver_info->led_bits[3], 0);
break;
case LED_POWER_OFF:
set_led_bit(hw, driver_info->led_bits[0], 0);
set_led_bit(hw, driver_info->led_bits[1], 0);
set_led_bit(hw, driver_info->led_bits[2], 0);
set_led_bit(hw, driver_info->led_bits[3], 0);
break;
case LED_S0_ON:
set_led_bit(hw, driver_info->led_bits[1], 1);
break;
case LED_S0_OFF:
set_led_bit(hw, driver_info->led_bits[1], 0);
break;
case LED_B1_ON:
set_led_bit(hw, driver_info->led_bits[2], 1);
break;
case LED_B1_OFF:
set_led_bit(hw, driver_info->led_bits[2], 0);
break;
case LED_B2_ON:
set_led_bit(hw, driver_info->led_bits[3], 1);
break;
case LED_B2_OFF:
set_led_bit(hw, driver_info->led_bits[3], 0);
break;
}
if (hw->led_state != tmpled) {
if (debug & DBG_HFC_CALL_TRACE)
printk(KERN_DEBUG "%s: %s reg(0x%02x) val(x%02x)\n",
hw->name, __func__,
HFCUSB_P_DATA, hw->led_state);
write_reg(hw, HFCUSB_P_DATA, hw->led_state);
}
}
/*
* Layer2 -> Layer 1 Bchannel data
*/
static int
hfcusb_l2l1B(struct mISDNchannel *ch, struct sk_buff *skb)
{
struct bchannel *bch = container_of(ch, struct bchannel, ch);
struct hfcsusb *hw = bch->hw;
int ret = -EINVAL;
struct mISDNhead *hh = mISDN_HEAD_P(skb);
u_long flags;
if (debug & DBG_HFC_CALL_TRACE)
printk(KERN_DEBUG "%s: %s\n", hw->name, __func__);
switch (hh->prim) {
case PH_DATA_REQ:
spin_lock_irqsave(&hw->lock, flags);
ret = bchannel_senddata(bch, skb);
spin_unlock_irqrestore(&hw->lock, flags);
if (debug & DBG_HFC_CALL_TRACE)
printk(KERN_DEBUG "%s: %s PH_DATA_REQ ret(%i)\n",
hw->name, __func__, ret);
if (ret > 0)
ret = 0;
return ret;
case PH_ACTIVATE_REQ:
if (!test_and_set_bit(FLG_ACTIVE, &bch->Flags)) {
hfcsusb_start_endpoint(hw, bch->nr - 1);
ret = hfcsusb_setup_bch(bch, ch->protocol);
} else
ret = 0;
if (!ret)
_queue_data(ch, PH_ACTIVATE_IND, MISDN_ID_ANY,
0, NULL, GFP_KERNEL);
break;
case PH_DEACTIVATE_REQ:
deactivate_bchannel(bch);
_queue_data(ch, PH_DEACTIVATE_IND, MISDN_ID_ANY,
0, NULL, GFP_KERNEL);
ret = 0;
break;
}
if (!ret)
dev_kfree_skb(skb);
return ret;
}
/*
* send full D/B channel status information
* as MPH_INFORMATION_IND
*/
static int
hfcsusb_ph_info(struct hfcsusb *hw)
{
struct ph_info *phi;
struct dchannel *dch = &hw->dch;
int i;
phi = kzalloc(struct_size(phi, bch, dch->dev.nrbchan), GFP_ATOMIC);
if (!phi)
return -ENOMEM;
phi->dch.ch.protocol = hw->protocol;
phi->dch.ch.Flags = dch->Flags;
phi->dch.state = dch->state;
phi->dch.num_bch = dch->dev.nrbchan;
for (i = 0; i < dch->dev.nrbchan; i++) {
phi->bch[i].protocol = hw->bch[i].ch.protocol;
phi->bch[i].Flags = hw->bch[i].Flags;
}
_queue_data(&dch->dev.D, MPH_INFORMATION_IND, MISDN_ID_ANY,
struct_size(phi, bch, dch->dev.nrbchan), phi, GFP_ATOMIC);
kfree(phi);
return 0;
}
/*
* Layer2 -> Layer 1 Dchannel data
*/
static int
hfcusb_l2l1D(struct mISDNchannel *ch, struct sk_buff *skb)
{
struct mISDNdevice *dev = container_of(ch, struct mISDNdevice, D);
struct dchannel *dch = container_of(dev, struct dchannel, dev);
struct mISDNhead *hh = mISDN_HEAD_P(skb);
struct hfcsusb *hw = dch->hw;
int ret = -EINVAL;
u_long flags;
switch (hh->prim) {
case PH_DATA_REQ:
if (debug & DBG_HFC_CALL_TRACE)
printk(KERN_DEBUG "%s: %s: PH_DATA_REQ\n",
hw->name, __func__);
spin_lock_irqsave(&hw->lock, flags);
ret = dchannel_senddata(dch, skb);
spin_unlock_irqrestore(&hw->lock, flags);
if (ret > 0) {
ret = 0;
queue_ch_frame(ch, PH_DATA_CNF, hh->id, NULL);
}
break;
case PH_ACTIVATE_REQ:
if (debug & DBG_HFC_CALL_TRACE)
printk(KERN_DEBUG "%s: %s: PH_ACTIVATE_REQ %s\n",
hw->name, __func__,
(hw->protocol == ISDN_P_NT_S0) ? "NT" : "TE");
if (hw->protocol == ISDN_P_NT_S0) {
ret = 0;
if (test_bit(FLG_ACTIVE, &dch->Flags)) {
_queue_data(&dch->dev.D,
PH_ACTIVATE_IND, MISDN_ID_ANY, 0,
NULL, GFP_ATOMIC);
} else {
hfcsusb_ph_command(hw,
HFC_L1_ACTIVATE_NT);
test_and_set_bit(FLG_L2_ACTIVATED,
&dch->Flags);
}
} else {
hfcsusb_ph_command(hw, HFC_L1_ACTIVATE_TE);
ret = l1_event(dch->l1, hh->prim);
}
break;
case PH_DEACTIVATE_REQ:
if (debug & DBG_HFC_CALL_TRACE)
printk(KERN_DEBUG "%s: %s: PH_DEACTIVATE_REQ\n",
hw->name, __func__);
test_and_clear_bit(FLG_L2_ACTIVATED, &dch->Flags);
if (hw->protocol == ISDN_P_NT_S0) {
struct sk_buff_head free_queue;
__skb_queue_head_init(&free_queue);
hfcsusb_ph_command(hw, HFC_L1_DEACTIVATE_NT);
spin_lock_irqsave(&hw->lock, flags);
skb_queue_splice_init(&dch->squeue, &free_queue);
if (dch->tx_skb) {
__skb_queue_tail(&free_queue, dch->tx_skb);
dch->tx_skb = NULL;
}
dch->tx_idx = 0;
if (dch->rx_skb) {
__skb_queue_tail(&free_queue, dch->rx_skb);
dch->rx_skb = NULL;
}
test_and_clear_bit(FLG_TX_BUSY, &dch->Flags);
spin_unlock_irqrestore(&hw->lock, flags);
__skb_queue_purge(&free_queue);
#ifdef FIXME
if (test_and_clear_bit(FLG_L1_BUSY, &dch->Flags))
dchannel_sched_event(&hc->dch, D_CLEARBUSY);
#endif
ret = 0;
} else
ret = l1_event(dch->l1, hh->prim);
break;
case MPH_INFORMATION_REQ:
ret = hfcsusb_ph_info(hw);
break;
}
return ret;
}
/*
* Layer 1 callback function
*/
static int
hfc_l1callback(struct dchannel *dch, u_int cmd)
{
struct hfcsusb *hw = dch->hw;
if (debug & DBG_HFC_CALL_TRACE)
printk(KERN_DEBUG "%s: %s cmd 0x%x\n",
hw->name, __func__, cmd);
switch (cmd) {
case INFO3_P8:
case INFO3_P10:
case HW_RESET_REQ:
case HW_POWERUP_REQ:
break;
case HW_DEACT_REQ:
skb_queue_purge(&dch->squeue);
if (dch->tx_skb) {
dev_kfree_skb(dch->tx_skb);
dch->tx_skb = NULL;
}
dch->tx_idx = 0;
if (dch->rx_skb) {
dev_kfree_skb(dch->rx_skb);
dch->rx_skb = NULL;
}
test_and_clear_bit(FLG_TX_BUSY, &dch->Flags);
break;
case PH_ACTIVATE_IND:
test_and_set_bit(FLG_ACTIVE, &dch->Flags);
_queue_data(&dch->dev.D, cmd, MISDN_ID_ANY, 0, NULL,
GFP_ATOMIC);
break;
case PH_DEACTIVATE_IND:
test_and_clear_bit(FLG_ACTIVE, &dch->Flags);
_queue_data(&dch->dev.D, cmd, MISDN_ID_ANY, 0, NULL,
GFP_ATOMIC);
break;
default:
if (dch->debug & DEBUG_HW)
printk(KERN_DEBUG "%s: %s: unknown cmd %x\n",
hw->name, __func__, cmd);
return -1;
}
return hfcsusb_ph_info(hw);
}
static int
open_dchannel(struct hfcsusb *hw, struct mISDNchannel *ch,
struct channel_req *rq)
{
int err = 0;
if (debug & DEBUG_HW_OPEN)
printk(KERN_DEBUG "%s: %s: dev(%d) open addr(%i) from %p\n",
hw->name, __func__, hw->dch.dev.id, rq->adr.channel,
__builtin_return_address(0));
if (rq->protocol == ISDN_P_NONE)
return -EINVAL;
test_and_clear_bit(FLG_ACTIVE, &hw->dch.Flags);
test_and_clear_bit(FLG_ACTIVE, &hw->ech.Flags);
hfcsusb_start_endpoint(hw, HFC_CHAN_D);
/* E-Channel logging */
if (rq->adr.channel == 1) {
if (hw->fifos[HFCUSB_PCM_RX].pipe) {
hfcsusb_start_endpoint(hw, HFC_CHAN_E);
set_bit(FLG_ACTIVE, &hw->ech.Flags);
_queue_data(&hw->ech.dev.D, PH_ACTIVATE_IND,
MISDN_ID_ANY, 0, NULL, GFP_ATOMIC);
} else
return -EINVAL;
}
if (!hw->initdone) {
hw->protocol = rq->protocol;
if (rq->protocol == ISDN_P_TE_S0) {
err = create_l1(&hw->dch, hfc_l1callback);
if (err)
return err;
}
setPortMode(hw);
ch->protocol = rq->protocol;
hw->initdone = 1;
} else {
if (rq->protocol != ch->protocol)
return -EPROTONOSUPPORT;
}
if (((ch->protocol == ISDN_P_NT_S0) && (hw->dch.state == 3)) ||
((ch->protocol == ISDN_P_TE_S0) && (hw->dch.state == 7)))
_queue_data(ch, PH_ACTIVATE_IND, MISDN_ID_ANY,
0, NULL, GFP_KERNEL);
rq->ch = ch;
if (!try_module_get(THIS_MODULE))
printk(KERN_WARNING "%s: %s: cannot get module\n",
hw->name, __func__);
return 0;
}
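/* open a B-channel (B1 or B2); each B-channel can only be opened once */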
static int
open_bchannel(struct hfcsusb *hw, struct channel_req *rq)
{
struct bchannel *bch;
if (rq->adr.channel == 0 || rq->adr.channel > 2)
return -EINVAL;
if (rq->protocol == ISDN_P_NONE)
return -EINVAL;
if (debug & DBG_HFC_CALL_TRACE)
printk(KERN_DEBUG "%s: %s B%i\n",
hw->name, __func__, rq->adr.channel);
bch = &hw->bch[rq->adr.channel - 1];
if (test_and_set_bit(FLG_OPEN, &bch->Flags))
return -EBUSY; /* a B-channel can only be opened once */
bch->ch.protocol = rq->protocol;
rq->ch = &bch->ch;
if (!try_module_get(THIS_MODULE))
printk(KERN_WARNING "%s: %s:cannot get module\n",
hw->name, __func__);
return 0;
}
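/* handle CONTROL_CHANNEL requests for the D-channel */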
static int
channel_ctrl(struct hfcsusb *hw, struct mISDN_ctrl_req *cq)
{
int ret = 0;
if (debug & DBG_HFC_CALL_TRACE)
printk(KERN_DEBUG "%s: %s op(0x%x) channel(0x%x)\n",
hw->name, __func__, (cq->op), (cq->channel));
switch (cq->op) {
case MISDN_CTRL_GETOP:
cq->op = MISDN_CTRL_LOOP | MISDN_CTRL_CONNECT |
MISDN_CTRL_DISCONNECT;
break;
default:
printk(KERN_WARNING "%s: %s: unknown Op %x\n",
hw->name, __func__, cq->op);
ret = -EINVAL;
break;
}
return ret;
}
/*
* device control function
*/
static int
hfc_dctrl(struct mISDNchannel *ch, u_int cmd, void *arg)
{
struct mISDNdevice *dev = container_of(ch, struct mISDNdevice, D);
struct dchannel *dch = container_of(dev, struct dchannel, dev);
struct hfcsusb *hw = dch->hw;
struct channel_req *rq;
int err = 0;
if (dch->debug & DEBUG_HW)
printk(KERN_DEBUG "%s: %s: cmd:%x %p\n",
hw->name, __func__, cmd, arg);
switch (cmd) {
case OPEN_CHANNEL:
rq = arg;
if ((rq->protocol == ISDN_P_TE_S0) ||
(rq->protocol == ISDN_P_NT_S0))
err = open_dchannel(hw, ch, rq);
else
err = open_bchannel(hw, rq);
if (!err)
hw->open++;
break;
case CLOSE_CHANNEL:
hw->open--;
if (debug & DEBUG_HW_OPEN)
printk(KERN_DEBUG
"%s: %s: dev(%d) close from %p (open %d)\n",
hw->name, __func__, hw->dch.dev.id,
__builtin_return_address(0), hw->open);
if (!hw->open) {
hfcsusb_stop_endpoint(hw, HFC_CHAN_D);
if (hw->fifos[HFCUSB_PCM_RX].pipe)
hfcsusb_stop_endpoint(hw, HFC_CHAN_E);
handle_led(hw, LED_POWER_ON);
}
module_put(THIS_MODULE);
break;
case CONTROL_CHANNEL:
err = channel_ctrl(hw, arg);
break;
default:
if (dch->debug & DEBUG_HW)
printk(KERN_DEBUG "%s: %s: unknown command %x\n",
hw->name, __func__, cmd);
return -EINVAL;
}
return err;
}
/*
* S0 TE state change event handler
*/
static void
ph_state_te(struct dchannel *dch)
{
struct hfcsusb *hw = dch->hw;
if (debug & DEBUG_HW) {
if (dch->state <= HFC_MAX_TE_LAYER1_STATE)
printk(KERN_DEBUG "%s: %s: %s\n", hw->name, __func__,
HFC_TE_LAYER1_STATES[dch->state]);
else
printk(KERN_DEBUG "%s: %s: TE F%d\n",
hw->name, __func__, dch->state);
}
switch (dch->state) {
case 0:
l1_event(dch->l1, HW_RESET_IND);
break;
case 3:
l1_event(dch->l1, HW_DEACT_IND);
break;
case 5:
case 8:
l1_event(dch->l1, ANYSIGNAL);
break;
case 6:
l1_event(dch->l1, INFO2);
break;
case 7:
l1_event(dch->l1, INFO4_P8);
break;
}
if (dch->state == 7)
handle_led(hw, LED_S0_ON);
else
handle_led(hw, LED_S0_OFF);
}
/*
* S0 NT state change event handler
*/
static void
ph_state_nt(struct dchannel *dch)
{
struct hfcsusb *hw = dch->hw;
if (debug & DEBUG_HW) {
if (dch->state <= HFC_MAX_NT_LAYER1_STATE)
printk(KERN_DEBUG "%s: %s: %s\n",
hw->name, __func__,
HFC_NT_LAYER1_STATES[dch->state]);
else
printk(KERN_INFO DRIVER_NAME "%s: %s: NT G%d\n",
hw->name, __func__, dch->state);
}
switch (dch->state) {
case (1):
test_and_clear_bit(FLG_ACTIVE, &dch->Flags);
test_and_clear_bit(FLG_L2_ACTIVATED, &dch->Flags);
hw->nt_timer = 0;
hw->timers &= ~NT_ACTIVATION_TIMER;
handle_led(hw, LED_S0_OFF);
break;
case (2):
if (hw->nt_timer < 0) {
hw->nt_timer = 0;
hw->timers &= ~NT_ACTIVATION_TIMER;
hfcsusb_ph_command(dch->hw, HFC_L1_DEACTIVATE_NT);
} else {
hw->timers |= NT_ACTIVATION_TIMER;
hw->nt_timer = NT_T1_COUNT;
/* allow G2 -> G3 transition */
write_reg(hw, HFCUSB_STATES, 2 | HFCUSB_NT_G2_G3);
}
break;
case (3):
hw->nt_timer = 0;
hw->timers &= ~NT_ACTIVATION_TIMER;
test_and_set_bit(FLG_ACTIVE, &dch->Flags);
_queue_data(&dch->dev.D, PH_ACTIVATE_IND,
MISDN_ID_ANY, 0, NULL, GFP_ATOMIC);
handle_led(hw, LED_S0_ON);
break;
case (4):
hw->nt_timer = 0;
hw->timers &= ~NT_ACTIVATION_TIMER;
break;
default:
break;
}
hfcsusb_ph_info(hw);
}
static void
ph_state(struct dchannel *dch)
{
struct hfcsusb *hw = dch->hw;
if (hw->protocol == ISDN_P_NT_S0)
ph_state_nt(dch);
else if (hw->protocol == ISDN_P_TE_S0)
ph_state_te(dch);
}
/*
* disable/enable BChannel for desired protocol
*/
static int
hfcsusb_setup_bch(struct bchannel *bch, int protocol)
{
struct hfcsusb *hw = bch->hw;
__u8 conhdlc, sctrl, sctrl_r;
if (debug & DEBUG_HW)
printk(KERN_DEBUG "%s: %s: protocol %x-->%x B%d\n",
hw->name, __func__, bch->state, protocol,
bch->nr);
/* setup val for CON_HDLC */
conhdlc = 0;
if (protocol > ISDN_P_NONE)
conhdlc = 8; /* enable FIFO */
switch (protocol) {
case (-1): /* used for init */
bch->state = -1;
fallthrough;
case (ISDN_P_NONE):
if (bch->state == ISDN_P_NONE)
return 0; /* already in idle state */
bch->state = ISDN_P_NONE;
clear_bit(FLG_HDLC, &bch->Flags);
clear_bit(FLG_TRANSPARENT, &bch->Flags);
break;
case (ISDN_P_B_RAW):
conhdlc |= 2;
bch->state = protocol;
set_bit(FLG_TRANSPARENT, &bch->Flags);
break;
case (ISDN_P_B_HDLC):
bch->state = protocol;
set_bit(FLG_HDLC, &bch->Flags);
break;
default:
if (debug & DEBUG_HW)
printk(KERN_DEBUG "%s: %s: prot not known %x\n",
hw->name, __func__, protocol);
return -ENOPROTOOPT;
}
if (protocol >= ISDN_P_NONE) {
write_reg(hw, HFCUSB_FIFO, (bch->nr == 1) ? 0 : 2);
write_reg(hw, HFCUSB_CON_HDLC, conhdlc);
write_reg(hw, HFCUSB_INC_RES_F, 2);
write_reg(hw, HFCUSB_FIFO, (bch->nr == 1) ? 1 : 3);
write_reg(hw, HFCUSB_CON_HDLC, conhdlc);
write_reg(hw, HFCUSB_INC_RES_F, 2);
sctrl = 0x40 + ((hw->protocol == ISDN_P_TE_S0) ? 0x00 : 0x04);
sctrl_r = 0x0;
if (test_bit(FLG_ACTIVE, &hw->bch[0].Flags)) {
sctrl |= 1;
sctrl_r |= 1;
}
if (test_bit(FLG_ACTIVE, &hw->bch[1].Flags)) {
sctrl |= 2;
sctrl_r |= 2;
}
write_reg(hw, HFCUSB_SCTRL, sctrl);
write_reg(hw, HFCUSB_SCTRL_R, sctrl_r);
if (protocol > ISDN_P_NONE)
handle_led(hw, (bch->nr == 1) ? LED_B1_ON : LED_B2_ON);
else
handle_led(hw, (bch->nr == 1) ? LED_B1_OFF :
LED_B2_OFF);
}
return hfcsusb_ph_info(hw);
}
static void
hfcsusb_ph_command(struct hfcsusb *hw, u_char command)
{
if (debug & DEBUG_HW)
printk(KERN_DEBUG "%s: %s: %x\n",
hw->name, __func__, command);
switch (command) {
case HFC_L1_ACTIVATE_TE:
/* force sending INFO1 */
write_reg(hw, HFCUSB_STATES, 0x14);
/* start l1 activation */
write_reg(hw, HFCUSB_STATES, 0x04);
break;
case HFC_L1_FORCE_DEACTIVATE_TE:
write_reg(hw, HFCUSB_STATES, 0x10);
write_reg(hw, HFCUSB_STATES, 0x03);
break;
case HFC_L1_ACTIVATE_NT:
if (hw->dch.state == 3)
_queue_data(&hw->dch.dev.D, PH_ACTIVATE_IND,
MISDN_ID_ANY, 0, NULL, GFP_ATOMIC);
else
write_reg(hw, HFCUSB_STATES, HFCUSB_ACTIVATE |
HFCUSB_DO_ACTION | HFCUSB_NT_G2_G3);
break;
case HFC_L1_DEACTIVATE_NT:
write_reg(hw, HFCUSB_STATES,
HFCUSB_DO_ACTION);
break;
}
}
/*
* Layer 1 B-channel hardware access
*/
static int
channel_bctrl(struct bchannel *bch, struct mISDN_ctrl_req *cq)
{
return mISDN_ctrl_bchannel(bch, cq);
}
/* collect data from incoming interrupt or isochronous USB data */
static void
hfcsusb_rx_frame(struct usb_fifo *fifo, __u8 *data, unsigned int len,
int finish)
{
struct hfcsusb *hw = fifo->hw;
struct sk_buff *rx_skb = NULL;
int maxlen = 0;
int fifon = fifo->fifonum;
int i;
int hdlc = 0;
unsigned long flags;
if (debug & DBG_HFC_CALL_TRACE)
printk(KERN_DEBUG "%s: %s: fifo(%i) len(%i) "
"dch(%p) bch(%p) ech(%p)\n",
hw->name, __func__, fifon, len,
fifo->dch, fifo->bch, fifo->ech);
if (!len)
return;
if ((!!fifo->dch + !!fifo->bch + !!fifo->ech) != 1) {
printk(KERN_DEBUG "%s: %s: undefined channel\n",
hw->name, __func__);
return;
}
spin_lock_irqsave(&hw->lock, flags);
if (fifo->dch) {
rx_skb = fifo->dch->rx_skb;
maxlen = fifo->dch->maxlen;
hdlc = 1;
}
if (fifo->bch) {
if (test_bit(FLG_RX_OFF, &fifo->bch->Flags)) {
fifo->bch->dropcnt += len;
spin_unlock_irqrestore(&hw->lock, flags);
return;
}
maxlen = bchannel_get_rxbuf(fifo->bch, len);
rx_skb = fifo->bch->rx_skb;
if (maxlen < 0) {
if (rx_skb)
skb_trim(rx_skb, 0);
pr_warn("%s.B%d: No bufferspace for %d bytes\n",
hw->name, fifo->bch->nr, len);
spin_unlock_irqrestore(&hw->lock, flags);
return;
}
maxlen = fifo->bch->maxlen;
hdlc = test_bit(FLG_HDLC, &fifo->bch->Flags);
}
if (fifo->ech) {
rx_skb = fifo->ech->rx_skb;
maxlen = fifo->ech->maxlen;
hdlc = 1;
}
if (fifo->dch || fifo->ech) {
if (!rx_skb) {
rx_skb = mI_alloc_skb(maxlen, GFP_ATOMIC);
if (rx_skb) {
if (fifo->dch)
fifo->dch->rx_skb = rx_skb;
if (fifo->ech)
fifo->ech->rx_skb = rx_skb;
skb_trim(rx_skb, 0);
} else {
printk(KERN_DEBUG "%s: %s: No mem for rx_skb\n",
hw->name, __func__);
spin_unlock_irqrestore(&hw->lock, flags);
return;
}
}
/* D/E-Channel SKB range check */
if ((rx_skb->len + len) >= MAX_DFRAME_LEN_L1) {
printk(KERN_DEBUG "%s: %s: sbk mem exceeded "
"for fifo(%d) HFCUSB_D_RX\n",
hw->name, __func__, fifon);
skb_trim(rx_skb, 0);
spin_unlock_irqrestore(&hw->lock, flags);
return;
}
}
skb_put_data(rx_skb, data, len);
if (hdlc) {
/* we have a complete hdlc packet */
if (finish) {
if ((rx_skb->len > 3) &&
(!(rx_skb->data[rx_skb->len - 1]))) {
if (debug & DBG_HFC_FIFO_VERBOSE) {
printk(KERN_DEBUG "%s: %s: fifon(%i)"
" new RX len(%i): ",
hw->name, __func__, fifon,
rx_skb->len);
i = 0;
while (i < rx_skb->len)
printk("%02x ",
rx_skb->data[i++]);
printk("\n");
}
/* remove CRC & status */
skb_trim(rx_skb, rx_skb->len - 3);
if (fifo->dch)
recv_Dchannel(fifo->dch);
if (fifo->bch)
recv_Bchannel(fifo->bch, MISDN_ID_ANY,
0);
if (fifo->ech)
recv_Echannel(fifo->ech,
&hw->dch);
} else {
if (debug & DBG_HFC_FIFO_VERBOSE) {
printk(KERN_DEBUG
"%s: CRC or minlen ERROR fifon(%i) "
"RX len(%i): ",
hw->name, fifon, rx_skb->len);
i = 0;
while (i < rx_skb->len)
printk("%02x ",
rx_skb->data[i++]);
printk("\n");
}
skb_trim(rx_skb, 0);
}
}
} else {
/* deliver transparent data to layer2 */
recv_Bchannel(fifo->bch, MISDN_ID_ANY, false);
}
spin_unlock_irqrestore(&hw->lock, flags);
}
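/* set up an isochronous URB and initialize its frame descriptors */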
static void
fill_isoc_urb(struct urb *urb, struct usb_device *dev, unsigned int pipe,
void *buf, int num_packets, int packet_size, int interval,
usb_complete_t complete, void *context)
{
int k;
usb_fill_bulk_urb(urb, dev, pipe, buf, packet_size * num_packets,
complete, context);
urb->number_of_packets = num_packets;
urb->transfer_flags = URB_ISO_ASAP;
urb->actual_length = 0;
urb->interval = interval;
for (k = 0; k < num_packets; k++) {
urb->iso_frame_desc[k].offset = packet_size * k;
urb->iso_frame_desc[k].length = packet_size;
urb->iso_frame_desc[k].actual_length = 0;
}
}
/* receive completion routine for all ISO rx fifos */
static void
rx_iso_complete(struct urb *urb)
{
struct iso_urb *context_iso_urb = (struct iso_urb *) urb->context;
struct usb_fifo *fifo = context_iso_urb->owner_fifo;
struct hfcsusb *hw = fifo->hw;
int k, len, errcode, offset, num_isoc_packets, fifon, maxlen,
status, iso_status, i;
__u8 *buf;
static __u8 eof[8];
__u8 s0_state;
unsigned long flags;
fifon = fifo->fifonum;
status = urb->status;
spin_lock_irqsave(&hw->lock, flags);
if (fifo->stop_gracefull) {
fifo->stop_gracefull = 0;
fifo->active = 0;
spin_unlock_irqrestore(&hw->lock, flags);
return;
}
spin_unlock_irqrestore(&hw->lock, flags);
/*
* ISO transfer only partially completed,
* look at individual frame status for details
*/
if (status == -EXDEV) {
if (debug & DEBUG_HW)
printk(KERN_DEBUG "%s: %s: with -EXDEV "
"urb->status %d, fifonum %d\n",
hw->name, __func__, status, fifon);
/* clear status, so go on with ISO transfers */
status = 0;
}
s0_state = 0;
if (fifo->active && !status) {
num_isoc_packets = iso_packets[fifon];
maxlen = fifo->usb_packet_maxlen;
for (k = 0; k < num_isoc_packets; ++k) {
len = urb->iso_frame_desc[k].actual_length;
offset = urb->iso_frame_desc[k].offset;
buf = context_iso_urb->buffer + offset;
iso_status = urb->iso_frame_desc[k].status;
if (iso_status && (debug & DBG_HFC_FIFO_VERBOSE)) {
printk(KERN_DEBUG "%s: %s: "
"ISO packet %i, status: %i\n",
hw->name, __func__, k, iso_status);
}
/* USB data log for every D ISO in */
if ((fifon == HFCUSB_D_RX) &&
(debug & DBG_HFC_USB_VERBOSE)) {
printk(KERN_DEBUG
"%s: %s: %d (%d/%d) len(%d) ",
hw->name, __func__, urb->start_frame,
k, num_isoc_packets - 1,
len);
for (i = 0; i < len; i++)
printk("%x ", buf[i]);
printk("\n");
}
if (!iso_status) {
if (fifo->last_urblen != maxlen) {
/*
* save fifo fill-level threshold bits
* to use them later in TX ISO URB
* completions
*/
hw->threshold_mask = buf[1];
if (fifon == HFCUSB_D_RX)
s0_state = (buf[0] >> 4);
eof[fifon] = buf[0] & 1;
if (len > 2)
hfcsusb_rx_frame(fifo, buf + 2,
len - 2, (len < maxlen)
? eof[fifon] : 0);
} else
hfcsusb_rx_frame(fifo, buf, len,
(len < maxlen) ?
eof[fifon] : 0);
fifo->last_urblen = len;
}
}
/* signal S0 layer1 state change */
if ((s0_state) && (hw->initdone) &&
(s0_state != hw->dch.state)) {
hw->dch.state = s0_state;
schedule_event(&hw->dch, FLG_PHCHANGE);
}
fill_isoc_urb(urb, fifo->hw->dev, fifo->pipe,
context_iso_urb->buffer, num_isoc_packets,
fifo->usb_packet_maxlen, fifo->intervall,
(usb_complete_t)rx_iso_complete, urb->context);
errcode = usb_submit_urb(urb, GFP_ATOMIC);
if (errcode < 0) {
if (debug & DEBUG_HW)
printk(KERN_DEBUG "%s: %s: error submitting "
"ISO URB: %d\n",
hw->name, __func__, errcode);
}
} else {
if (status && (debug & DBG_HFC_URB_INFO))
printk(KERN_DEBUG "%s: %s: rx_iso_complete : "
"urb->status %d, fifonum %d\n",
hw->name, __func__, status, fifon);
}
}
/* receive completion routine for all interrupt rx fifos */
static void
rx_int_complete(struct urb *urb)
{
int len, status, i;
__u8 *buf, maxlen, fifon;
struct usb_fifo *fifo = (struct usb_fifo *) urb->context;
struct hfcsusb *hw = fifo->hw;
static __u8 eof[8];
unsigned long flags;
spin_lock_irqsave(&hw->lock, flags);
if (fifo->stop_gracefull) {
fifo->stop_gracefull = 0;
fifo->active = 0;
spin_unlock_irqrestore(&hw->lock, flags);
return;
}
spin_unlock_irqrestore(&hw->lock, flags);
fifon = fifo->fifonum;
if ((!fifo->active) || (urb->status)) {
if (debug & DBG_HFC_URB_ERROR)
printk(KERN_DEBUG
"%s: %s: RX-Fifo %i is going down (%i)\n",
hw->name, __func__, fifon, urb->status);
fifo->urb->interval = 0; /* cancel automatic rescheduling */
return;
}
len = urb->actual_length;
buf = fifo->buffer;
maxlen = fifo->usb_packet_maxlen;
/* USB data log for every D INT in */
if ((fifon == HFCUSB_D_RX) && (debug & DBG_HFC_USB_VERBOSE)) {
printk(KERN_DEBUG "%s: %s: D RX INT len(%d) ",
hw->name, __func__, len);
for (i = 0; i < len; i++)
printk("%02x ", buf[i]);
printk("\n");
}
if (fifo->last_urblen != fifo->usb_packet_maxlen) {
/* the threshold mask is in the 2nd status byte */
hw->threshold_mask = buf[1];
/* signal S0 layer1 state change */
if (hw->initdone && ((buf[0] >> 4) != hw->dch.state)) {
hw->dch.state = (buf[0] >> 4);
schedule_event(&hw->dch, FLG_PHCHANGE);
}
eof[fifon] = buf[0] & 1;
/* if we have more than the 2 status bytes -> collect data */
if (len > 2)
hfcsusb_rx_frame(fifo, buf + 2,
urb->actual_length - 2,
(len < maxlen) ? eof[fifon] : 0);
} else {
hfcsusb_rx_frame(fifo, buf, urb->actual_length,
(len < maxlen) ? eof[fifon] : 0);
}
fifo->last_urblen = urb->actual_length;
status = usb_submit_urb(urb, GFP_ATOMIC);
if (status) {
if (debug & DEBUG_HW)
printk(KERN_DEBUG "%s: %s: error resubmitting USB\n",
hw->name, __func__);
}
}
/* transmit completion routine for all ISO tx fifos */
static void
tx_iso_complete(struct urb *urb)
{
struct iso_urb *context_iso_urb = (struct iso_urb *) urb->context;
struct usb_fifo *fifo = context_iso_urb->owner_fifo;
struct hfcsusb *hw = fifo->hw;
struct sk_buff *tx_skb;
int k, tx_offset, num_isoc_packets, sink, remain, current_len,
errcode, hdlc, i;
int *tx_idx;
int frame_complete, fifon, status, fillempty = 0;
__u8 threshbit, *p;
unsigned long flags;
spin_lock_irqsave(&hw->lock, flags);
if (fifo->stop_gracefull) {
fifo->stop_gracefull = 0;
fifo->active = 0;
spin_unlock_irqrestore(&hw->lock, flags);
return;
}
if (fifo->dch) {
tx_skb = fifo->dch->tx_skb;
tx_idx = &fifo->dch->tx_idx;
hdlc = 1;
} else if (fifo->bch) {
tx_skb = fifo->bch->tx_skb;
tx_idx = &fifo->bch->tx_idx;
hdlc = test_bit(FLG_HDLC, &fifo->bch->Flags);
if (!tx_skb && !hdlc &&
test_bit(FLG_FILLEMPTY, &fifo->bch->Flags))
fillempty = 1;
} else {
printk(KERN_DEBUG "%s: %s: neither BCH nor DCH\n",
hw->name, __func__);
spin_unlock_irqrestore(&hw->lock, flags);
return;
}
fifon = fifo->fifonum;
status = urb->status;
tx_offset = 0;
/*
* ISO transfer only partially completed,
* look at individual frame status for details
*/
if (status == -EXDEV) {
if (debug & DBG_HFC_URB_ERROR)
printk(KERN_DEBUG "%s: %s: "
"-EXDEV (%i) fifon (%d)\n",
hw->name, __func__, status, fifon);
/* clear status, so go on with ISO transfers */
status = 0;
}
if (fifo->active && !status) {
/* is FifoFull-threshold set for our channel? */
threshbit = (hw->threshold_mask & (1 << fifon));
num_isoc_packets = iso_packets[fifon];
/* predict dataflow to avoid fifo overflow */
if (fifon >= HFCUSB_D_TX)
sink = (threshbit) ? SINK_DMIN : SINK_DMAX;
else
sink = (threshbit) ? SINK_MIN : SINK_MAX;
fill_isoc_urb(urb, fifo->hw->dev, fifo->pipe,
context_iso_urb->buffer, num_isoc_packets,
fifo->usb_packet_maxlen, fifo->intervall,
(usb_complete_t)tx_iso_complete, urb->context);
memset(context_iso_urb->buffer, 0,
sizeof(context_iso_urb->buffer));
frame_complete = 0;
for (k = 0; k < num_isoc_packets; ++k) {
/* analyze tx success of previous ISO packets */
if (debug & DBG_HFC_URB_ERROR) {
errcode = urb->iso_frame_desc[k].status;
if (errcode) {
printk(KERN_DEBUG "%s: %s: "
"ISO packet %i, status: %i\n",
hw->name, __func__, k, errcode);
}
}
/* Generate next ISO Packets */
if (tx_skb)
remain = tx_skb->len - *tx_idx;
else if (fillempty)
remain = 15; /* > not complete */
else
remain = 0;
if (remain > 0) {
fifo->bit_line -= sink;
current_len = (0 - fifo->bit_line) / 8;
if (current_len > 14)
current_len = 14;
if (current_len < 0)
current_len = 0;
if (remain < current_len)
current_len = remain;
/* how many bits do we put on the line? */
fifo->bit_line += current_len * 8;
context_iso_urb->buffer[tx_offset] = 0;
if (current_len == remain) {
if (hdlc) {
/* signal frame completion */
context_iso_urb->
buffer[tx_offset] = 1;
/* add 2 byte flags and 16bit
* CRC at end of ISDN frame */
fifo->bit_line += 32;
}
frame_complete = 1;
}
/* copy tx data to iso-urb buffer */
p = context_iso_urb->buffer + tx_offset + 1;
if (fillempty) {
memset(p, fifo->bch->fill[0],
current_len);
} else {
memcpy(p, (tx_skb->data + *tx_idx),
current_len);
*tx_idx += current_len;
}
urb->iso_frame_desc[k].offset = tx_offset;
urb->iso_frame_desc[k].length = current_len + 1;
/* USB data log for every D ISO out */
if ((fifon == HFCUSB_D_RX) && !fillempty &&
(debug & DBG_HFC_USB_VERBOSE)) {
printk(KERN_DEBUG
"%s: %s (%d/%d) offs(%d) len(%d) ",
hw->name, __func__,
k, num_isoc_packets - 1,
urb->iso_frame_desc[k].offset,
urb->iso_frame_desc[k].length);
for (i = urb->iso_frame_desc[k].offset;
i < (urb->iso_frame_desc[k].offset
+ urb->iso_frame_desc[k].length);
i++)
printk("%x ",
context_iso_urb->buffer[i]);
printk(" skb->len(%i) tx-idx(%d)\n",
tx_skb->len, *tx_idx);
}
tx_offset += (current_len + 1);
} else {
urb->iso_frame_desc[k].offset = tx_offset++;
urb->iso_frame_desc[k].length = 1;
/* we lower data margin every msec */
fifo->bit_line -= sink;
if (fifo->bit_line < BITLINE_INF)
fifo->bit_line = BITLINE_INF;
}
if (frame_complete) {
frame_complete = 0;
if (debug & DBG_HFC_FIFO_VERBOSE) {
printk(KERN_DEBUG "%s: %s: "
"fifon(%i) new TX len(%i): ",
hw->name, __func__,
fifon, tx_skb->len);
i = 0;
while (i < tx_skb->len)
printk("%02x ",
tx_skb->data[i++]);
printk("\n");
}
dev_consume_skb_irq(tx_skb);
tx_skb = NULL;
if (fifo->dch && get_next_dframe(fifo->dch))
tx_skb = fifo->dch->tx_skb;
else if (fifo->bch &&
get_next_bframe(fifo->bch))
tx_skb = fifo->bch->tx_skb;
}
}
errcode = usb_submit_urb(urb, GFP_ATOMIC);
if (errcode < 0) {
if (debug & DEBUG_HW)
printk(KERN_DEBUG
"%s: %s: error submitting ISO URB: %d \n",
hw->name, __func__, errcode);
}
/*
* abuse DChannel tx iso completion to trigger NT mode state
* changes tx_iso_complete is assumed to be called every
* fifo->intervall (ms)
*/
if ((fifon == HFCUSB_D_TX) && (hw->protocol == ISDN_P_NT_S0)
&& (hw->timers & NT_ACTIVATION_TIMER)) {
if ((--hw->nt_timer) < 0)
schedule_event(&hw->dch, FLG_PHCHANGE);
}
} else {
if (status && (debug & DBG_HFC_URB_ERROR))
printk(KERN_DEBUG "%s: %s: urb->status %s (%i)"
"fifonum=%d\n",
hw->name, __func__,
symbolic(urb_errlist, status), status, fifon);
}
spin_unlock_irqrestore(&hw->lock, flags);
}
/*
* allocates URBs and starts the isoc transfer with two pending URBs
* to avoid gaps in the transfer chain
*/
static int
start_isoc_chain(struct usb_fifo *fifo, int num_packets_per_urb,
usb_complete_t complete, int packet_size)
{
struct hfcsusb *hw = fifo->hw;
int i, k, errcode;
if (debug)
printk(KERN_DEBUG "%s: %s: fifo %i\n",
hw->name, __func__, fifo->fifonum);
/* allocate Memory for Iso out Urbs */
for (i = 0; i < 2; i++) {
if (!(fifo->iso[i].urb)) {
fifo->iso[i].urb =
usb_alloc_urb(num_packets_per_urb, GFP_KERNEL);
if (!(fifo->iso[i].urb)) {
printk(KERN_DEBUG
"%s: %s: alloc urb for fifo %i failed",
hw->name, __func__, fifo->fifonum);
continue;
}
fifo->iso[i].owner_fifo = (struct usb_fifo *) fifo;
fifo->iso[i].indx = i;
/* Init the first iso */
if (ISO_BUFFER_SIZE >=
(fifo->usb_packet_maxlen *
num_packets_per_urb)) {
fill_isoc_urb(fifo->iso[i].urb,
fifo->hw->dev, fifo->pipe,
fifo->iso[i].buffer,
num_packets_per_urb,
fifo->usb_packet_maxlen,
fifo->intervall, complete,
&fifo->iso[i]);
memset(fifo->iso[i].buffer, 0,
sizeof(fifo->iso[i].buffer));
for (k = 0; k < num_packets_per_urb; k++) {
fifo->iso[i].urb->
iso_frame_desc[k].offset =
k * packet_size;
fifo->iso[i].urb->
iso_frame_desc[k].length =
packet_size;
}
} else {
printk(KERN_DEBUG
"%s: %s: ISO Buffer size to small!\n",
hw->name, __func__);
}
}
fifo->bit_line = BITLINE_INF;
errcode = usb_submit_urb(fifo->iso[i].urb, GFP_KERNEL);
fifo->active = (errcode >= 0) ? 1 : 0;
fifo->stop_gracefull = 0;
if (errcode < 0) {
printk(KERN_DEBUG "%s: %s: %s URB nr:%d\n",
hw->name, __func__,
symbolic(urb_errlist, errcode), i);
}
}
return fifo->active;
}
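/* flag both iso URBs of a fifo to stop and wait for their completions */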
static void
stop_iso_gracefull(struct usb_fifo *fifo)
{
struct hfcsusb *hw = fifo->hw;
int i, timeout;
u_long flags;
for (i = 0; i < 2; i++) {
spin_lock_irqsave(&hw->lock, flags);
if (debug)
printk(KERN_DEBUG "%s: %s for fifo %i.%i\n",
hw->name, __func__, fifo->fifonum, i);
fifo->stop_gracefull = 1;
spin_unlock_irqrestore(&hw->lock, flags);
}
for (i = 0; i < 2; i++) {
timeout = 3;
while (fifo->stop_gracefull && timeout--)
schedule_timeout_interruptible((HZ / 1000) * 16);
if (debug && fifo->stop_gracefull)
printk(KERN_DEBUG "%s: ERROR %s for fifo %i.%i\n",
hw->name, __func__, fifo->fifonum, i);
}
}
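/* flag the interrupt URB of a fifo to stop and wait for its completion */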
static void
stop_int_gracefull(struct usb_fifo *fifo)
{
struct hfcsusb *hw = fifo->hw;
int timeout;
u_long flags;
spin_lock_irqsave(&hw->lock, flags);
if (debug)
printk(KERN_DEBUG "%s: %s for fifo %i\n",
hw->name, __func__, fifo->fifonum);
fifo->stop_gracefull = 1;
spin_unlock_irqrestore(&hw->lock, flags);
timeout = 3;
while (fifo->stop_gracefull && timeout--)
schedule_timeout_interruptible((HZ / 1000) * 3);
if (debug && fifo->stop_gracefull)
printk(KERN_DEBUG "%s: ERROR %s for fifo %i\n",
hw->name, __func__, fifo->fifonum);
}
/* start the interrupt transfer for the given fifo */
static void
start_int_fifo(struct usb_fifo *fifo)
{
struct hfcsusb *hw = fifo->hw;
int errcode;
if (debug)
printk(KERN_DEBUG "%s: %s: INT IN fifo:%d\n",
hw->name, __func__, fifo->fifonum);
if (!fifo->urb) {
fifo->urb = usb_alloc_urb(0, GFP_KERNEL);
if (!fifo->urb)
return;
}
usb_fill_int_urb(fifo->urb, fifo->hw->dev, fifo->pipe,
fifo->buffer, fifo->usb_packet_maxlen,
(usb_complete_t)rx_int_complete, fifo, fifo->intervall);
fifo->active = 1;
fifo->stop_gracefull = 0;
errcode = usb_submit_urb(fifo->urb, GFP_KERNEL);
if (errcode) {
printk(KERN_DEBUG "%s: %s: submit URB: status:%i\n",
hw->name, __func__, errcode);
fifo->active = 0;
}
}
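/* configure the S/T interface for TE or NT mode */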
static void
setPortMode(struct hfcsusb *hw)
{
if (debug & DEBUG_HW)
printk(KERN_DEBUG "%s: %s %s\n", hw->name, __func__,
(hw->protocol == ISDN_P_TE_S0) ? "TE" : "NT");
if (hw->protocol == ISDN_P_TE_S0) {
write_reg(hw, HFCUSB_SCTRL, 0x40);
write_reg(hw, HFCUSB_SCTRL_E, 0x00);
write_reg(hw, HFCUSB_CLKDEL, CLKDEL_TE);
write_reg(hw, HFCUSB_STATES, 3 | 0x10);
write_reg(hw, HFCUSB_STATES, 3);
} else {
write_reg(hw, HFCUSB_SCTRL, 0x44);
write_reg(hw, HFCUSB_SCTRL_E, 0x09);
write_reg(hw, HFCUSB_CLKDEL, CLKDEL_NT);
write_reg(hw, HFCUSB_STATES, 1 | 0x10);
write_reg(hw, HFCUSB_STATES, 1);
}
}
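/* reset the chip and set up FIFO, threshold and PCM/GCI defaults */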
static void
reset_hfcsusb(struct hfcsusb *hw)
{
struct usb_fifo *fifo;
int i;
if (debug & DEBUG_HW)
printk(KERN_DEBUG "%s: %s\n", hw->name, __func__);
/* do Chip reset */
write_reg(hw, HFCUSB_CIRM, 8);
/* aux = output, reset off */
write_reg(hw, HFCUSB_CIRM, 0x10);
/* set USB_SIZE to match the wMaxPacketSize for INT or BULK transfers */
write_reg(hw, HFCUSB_USB_SIZE, (hw->packet_size / 8) |
((hw->packet_size / 8) << 4));
/* set USB_SIZE_I to match the wMaxPacketSize for ISO transfers */
write_reg(hw, HFCUSB_USB_SIZE_I, hw->iso_packet_size);
/* enable PCM/GCI master mode */
write_reg(hw, HFCUSB_MST_MODE1, 0); /* set default values */
write_reg(hw, HFCUSB_MST_MODE0, 1); /* enable master mode */
/* init the fifos */
write_reg(hw, HFCUSB_F_THRES,
(HFCUSB_TX_THRESHOLD / 8) | ((HFCUSB_RX_THRESHOLD / 8) << 4));
fifo = hw->fifos;
for (i = 0; i < HFCUSB_NUM_FIFOS; i++) {
write_reg(hw, HFCUSB_FIFO, i); /* select the desired fifo */
fifo[i].max_size =
(i <= HFCUSB_B2_RX) ? MAX_BCH_SIZE : MAX_DFRAME_LEN;
fifo[i].last_urblen = 0;
/* set 2 bit for D- & E-channel */
write_reg(hw, HFCUSB_HDLC_PAR, ((i <= HFCUSB_B2_RX) ? 0 : 2));
/* enable all fifos */
if (i == HFCUSB_D_TX)
write_reg(hw, HFCUSB_CON_HDLC,
(hw->protocol == ISDN_P_NT_S0) ? 0x08 : 0x09);
else
write_reg(hw, HFCUSB_CON_HDLC, 0x08);
write_reg(hw, HFCUSB_INC_RES_F, 2); /* reset the fifo */
}
write_reg(hw, HFCUSB_SCTRL_R, 0); /* disable both B receivers */
handle_led(hw, LED_POWER_ON);
}
/* start USB data pipes depending on the device's endpoint configuration */
static void
hfcsusb_start_endpoint(struct hfcsusb *hw, int channel)
{
/* quick check if endpoint already running */
if ((channel == HFC_CHAN_D) && (hw->fifos[HFCUSB_D_RX].active))
return;
if ((channel == HFC_CHAN_B1) && (hw->fifos[HFCUSB_B1_RX].active))
return;
if ((channel == HFC_CHAN_B2) && (hw->fifos[HFCUSB_B2_RX].active))
return;
if ((channel == HFC_CHAN_E) && (hw->fifos[HFCUSB_PCM_RX].active))
return;
/* start rx endpoints using USB INT IN method */
if (hw->cfg_used == CNF_3INT3ISO || hw->cfg_used == CNF_4INT3ISO)
start_int_fifo(hw->fifos + channel * 2 + 1);
/* start rx endpoints using USB ISO IN method */
if (hw->cfg_used == CNF_3ISO3ISO || hw->cfg_used == CNF_4ISO3ISO) {
switch (channel) {
case HFC_CHAN_D:
start_isoc_chain(hw->fifos + HFCUSB_D_RX,
ISOC_PACKETS_D,
(usb_complete_t)rx_iso_complete,
16);
break;
case HFC_CHAN_E:
start_isoc_chain(hw->fifos + HFCUSB_PCM_RX,
ISOC_PACKETS_D,
(usb_complete_t)rx_iso_complete,
16);
break;
case HFC_CHAN_B1:
start_isoc_chain(hw->fifos + HFCUSB_B1_RX,
ISOC_PACKETS_B,
(usb_complete_t)rx_iso_complete,
16);
break;
case HFC_CHAN_B2:
start_isoc_chain(hw->fifos + HFCUSB_B2_RX,
ISOC_PACKETS_B,
(usb_complete_t)rx_iso_complete,
16);
break;
}
}
/* start tx endpoints using USB ISO OUT method */
switch (channel) {
case HFC_CHAN_D:
start_isoc_chain(hw->fifos + HFCUSB_D_TX,
ISOC_PACKETS_B,
(usb_complete_t)tx_iso_complete, 1);
break;
case HFC_CHAN_B1:
start_isoc_chain(hw->fifos + HFCUSB_B1_TX,
ISOC_PACKETS_D,
(usb_complete_t)tx_iso_complete, 1);
break;
case HFC_CHAN_B2:
start_isoc_chain(hw->fifos + HFCUSB_B2_TX,
ISOC_PACKETS_B,
(usb_complete_t)tx_iso_complete, 1);
break;
}
}
/* stop USB data pipes depending on the device's endpoint configuration */
static void
hfcsusb_stop_endpoint(struct hfcsusb *hw, int channel)
{
/* quick check if endpoint currently running */
if ((channel == HFC_CHAN_D) && (!hw->fifos[HFCUSB_D_RX].active))
return;
if ((channel == HFC_CHAN_B1) && (!hw->fifos[HFCUSB_B1_RX].active))
return;
if ((channel == HFC_CHAN_B2) && (!hw->fifos[HFCUSB_B2_RX].active))
return;
if ((channel == HFC_CHAN_E) && (!hw->fifos[HFCUSB_PCM_RX].active))
return;
/* rx endpoints using USB INT IN method */
if (hw->cfg_used == CNF_3INT3ISO || hw->cfg_used == CNF_4INT3ISO)
stop_int_gracefull(hw->fifos + channel * 2 + 1);
/* rx endpoints using USB ISO IN method */
if (hw->cfg_used == CNF_3ISO3ISO || hw->cfg_used == CNF_4ISO3ISO)
stop_iso_gracefull(hw->fifos + channel * 2 + 1);
/* tx endpoints using USB ISO OUT method */
if (channel != HFC_CHAN_E)
stop_iso_gracefull(hw->fifos + channel * 2);
}
/* Hardware Initialization */
static int
setup_hfcsusb(struct hfcsusb *hw)
{
void *dmabuf = kmalloc(sizeof(u_char), GFP_KERNEL);
u_char b;
int ret;
if (debug & DBG_HFC_CALL_TRACE)
printk(KERN_DEBUG "%s: %s\n", hw->name, __func__);
if (!dmabuf)
return -ENOMEM;
ret = read_reg_atomic(hw, HFCUSB_CHIP_ID, dmabuf);
memcpy(&b, dmabuf, sizeof(u_char));
kfree(dmabuf);
/* check the chip id */
if (ret != 1) {
printk(KERN_DEBUG "%s: %s: cannot read chip id\n",
hw->name, __func__);
return 1;
}
if (b != HFCUSB_CHIPID) {
printk(KERN_DEBUG "%s: %s: Invalid chip id 0x%02x\n",
hw->name, __func__, b);
return 1;
}
/* first set the needed config, interface and alternate */
(void) usb_set_interface(hw->dev, hw->if_used, hw->alt_used);
hw->led_state = 0;
/* init the background machinery for control requests */
hw->ctrl_read.bRequestType = 0xc0;
hw->ctrl_read.bRequest = 1;
hw->ctrl_read.wLength = cpu_to_le16(1);
hw->ctrl_write.bRequestType = 0x40;
hw->ctrl_write.bRequest = 0;
hw->ctrl_write.wLength = 0;
usb_fill_control_urb(hw->ctrl_urb, hw->dev, hw->ctrl_out_pipe,
(u_char *)&hw->ctrl_write, NULL, 0,
(usb_complete_t)ctrl_complete, hw);
reset_hfcsusb(hw);
return 0;
}
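/* stop all endpoints and release all resources of a device instance */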
static void
release_hw(struct hfcsusb *hw)
{
if (debug & DBG_HFC_CALL_TRACE)
printk(KERN_DEBUG "%s: %s\n", hw->name, __func__);
/*
* stop all endpoints gracefully
* TODO: mISDN_core should generate CLOSE_CHANNEL
* signals after calling mISDN_unregister_device()
*/
hfcsusb_stop_endpoint(hw, HFC_CHAN_D);
hfcsusb_stop_endpoint(hw, HFC_CHAN_B1);
hfcsusb_stop_endpoint(hw, HFC_CHAN_B2);
if (hw->fifos[HFCUSB_PCM_RX].pipe)
hfcsusb_stop_endpoint(hw, HFC_CHAN_E);
if (hw->protocol == ISDN_P_TE_S0)
l1_event(hw->dch.l1, CLOSE_CHANNEL);
mISDN_unregister_device(&hw->dch.dev);
mISDN_freebchannel(&hw->bch[1]);
mISDN_freebchannel(&hw->bch[0]);
mISDN_freedchannel(&hw->dch);
if (hw->ctrl_urb) {
usb_kill_urb(hw->ctrl_urb);
usb_free_urb(hw->ctrl_urb);
hw->ctrl_urb = NULL;
}
if (hw->intf)
usb_set_intfdata(hw->intf, NULL);
list_del(&hw->list);
kfree(hw);
hw = NULL;
}
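/* clear a B-channel and stop its USB endpoints */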
static void
deactivate_bchannel(struct bchannel *bch)
{
struct hfcsusb *hw = bch->hw;
u_long flags;
if (bch->debug & DEBUG_HW)
printk(KERN_DEBUG "%s: %s: bch->nr(%i)\n",
hw->name, __func__, bch->nr);
spin_lock_irqsave(&hw->lock, flags);
mISDN_clear_bchannel(bch);
spin_unlock_irqrestore(&hw->lock, flags);
hfcsusb_setup_bch(bch, ISDN_P_NONE);
hfcsusb_stop_endpoint(hw, bch->nr - 1);
}
/*
* Layer 1 B-channel hardware access
*/
static int
hfc_bctrl(struct mISDNchannel *ch, u_int cmd, void *arg)
{
struct bchannel *bch = container_of(ch, struct bchannel, ch);
int ret = -EINVAL;
if (bch->debug & DEBUG_HW)
printk(KERN_DEBUG "%s: cmd:%x %p\n", __func__, cmd, arg);
switch (cmd) {
case HW_TESTRX_RAW:
case HW_TESTRX_HDLC:
case HW_TESTRX_OFF:
ret = -EINVAL;
break;
case CLOSE_CHANNEL:
test_and_clear_bit(FLG_OPEN, &bch->Flags);
deactivate_bchannel(bch);
ch->protocol = ISDN_P_NONE;
ch->peer = NULL;
module_put(THIS_MODULE);
ret = 0;
break;
case CONTROL_CHANNEL:
ret = channel_bctrl(bch, arg);
break;
default:
printk(KERN_WARNING "%s: unknown prim(%x)\n",
__func__, cmd);
}
return ret;
}
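/* initialize the mISDN channel structures and register the device */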
static int
setup_instance(struct hfcsusb *hw, struct device *parent)
{
u_long flags;
int err, i;
if (debug & DBG_HFC_CALL_TRACE)
printk(KERN_DEBUG "%s: %s\n", hw->name, __func__);
spin_lock_init(&hw->ctrl_lock);
spin_lock_init(&hw->lock);
mISDN_initdchannel(&hw->dch, MAX_DFRAME_LEN_L1, ph_state);
hw->dch.debug = debug & 0xFFFF;
hw->dch.hw = hw;
hw->dch.dev.Dprotocols = (1 << ISDN_P_TE_S0) | (1 << ISDN_P_NT_S0);
hw->dch.dev.D.send = hfcusb_l2l1D;
hw->dch.dev.D.ctrl = hfc_dctrl;
/* enable E-Channel logging */
if (hw->fifos[HFCUSB_PCM_RX].pipe)
mISDN_initdchannel(&hw->ech, MAX_DFRAME_LEN_L1, NULL);
hw->dch.dev.Bprotocols = (1 << (ISDN_P_B_RAW & ISDN_P_B_MASK)) |
(1 << (ISDN_P_B_HDLC & ISDN_P_B_MASK));
hw->dch.dev.nrbchan = 2;
for (i = 0; i < 2; i++) {
hw->bch[i].nr = i + 1;
set_channelmap(i + 1, hw->dch.dev.channelmap);
hw->bch[i].debug = debug;
mISDN_initbchannel(&hw->bch[i], MAX_DATA_MEM, poll >> 1);
hw->bch[i].hw = hw;
hw->bch[i].ch.send = hfcusb_l2l1B;
hw->bch[i].ch.ctrl = hfc_bctrl;
hw->bch[i].ch.nr = i + 1;
list_add(&hw->bch[i].ch.list, &hw->dch.dev.bchannels);
}
hw->fifos[HFCUSB_B1_TX].bch = &hw->bch[0];
hw->fifos[HFCUSB_B1_RX].bch = &hw->bch[0];
hw->fifos[HFCUSB_B2_TX].bch = &hw->bch[1];
hw->fifos[HFCUSB_B2_RX].bch = &hw->bch[1];
hw->fifos[HFCUSB_D_TX].dch = &hw->dch;
hw->fifos[HFCUSB_D_RX].dch = &hw->dch;
hw->fifos[HFCUSB_PCM_RX].ech = &hw->ech;
hw->fifos[HFCUSB_PCM_TX].ech = &hw->ech;
err = setup_hfcsusb(hw);
if (err)
goto out;
snprintf(hw->name, MISDN_MAX_IDLEN - 1, "%s.%d", DRIVER_NAME,
hfcsusb_cnt + 1);
printk(KERN_INFO "%s: registered as '%s'\n",
DRIVER_NAME, hw->name);
err = mISDN_register_device(&hw->dch.dev, parent, hw->name);
if (err)
goto out;
hfcsusb_cnt++;
write_lock_irqsave(&HFClock, flags);
list_add_tail(&hw->list, &HFClist);
write_unlock_irqrestore(&HFClock, flags);
return 0;
out:
mISDN_freebchannel(&hw->bch[1]);
mISDN_freebchannel(&hw->bch[0]);
mISDN_freedchannel(&hw->dch);
kfree(hw);
return err;
}
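/*
 * probe: match vendor/product id, select the best matching alternate
 * setting and set up the endpoint fifos
 */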
static int
hfcsusb_probe(struct usb_interface *intf, const struct usb_device_id *id)
{
struct hfcsusb *hw;
struct usb_device *dev = interface_to_usbdev(intf);
struct usb_host_interface *iface = intf->cur_altsetting;
struct usb_host_interface *iface_used = NULL;
struct usb_host_endpoint *ep;
struct hfcsusb_vdata *driver_info;
int ifnum = iface->desc.bInterfaceNumber, i, idx, alt_idx,
probe_alt_setting, vend_idx, cfg_used, *vcf, attr, cfg_found,
ep_addr, cmptbl[16], small_match, iso_packet_size, packet_size,
alt_used = 0;
vend_idx = 0xffff;
for (i = 0; hfcsusb_idtab[i].idVendor; i++) {
if ((le16_to_cpu(dev->descriptor.idVendor)
== hfcsusb_idtab[i].idVendor) &&
(le16_to_cpu(dev->descriptor.idProduct)
== hfcsusb_idtab[i].idProduct)) {
vend_idx = i;
continue;
}
}
printk(KERN_DEBUG
"%s: interface(%d) actalt(%d) minor(%d) vend_idx(%d)\n",
__func__, ifnum, iface->desc.bAlternateSetting,
intf->minor, vend_idx);
if (vend_idx == 0xffff) {
printk(KERN_WARNING
"%s: no valid vendor found in USB descriptor\n",
__func__);
return -EIO;
}
/* if vendor and product IDs are OK, start probing alternate settings */
alt_idx = 0;
small_match = -1;
/* default settings */
iso_packet_size = 16;
packet_size = 64;
while (alt_idx < intf->num_altsetting) {
iface = intf->altsetting + alt_idx;
probe_alt_setting = iface->desc.bAlternateSetting;
cfg_used = 0;
while (validconf[cfg_used][0]) {
cfg_found = 1;
vcf = validconf[cfg_used];
ep = iface->endpoint;
memcpy(cmptbl, vcf, 16 * sizeof(int));
/* check for all endpoints in this alternate setting */
for (i = 0; i < iface->desc.bNumEndpoints; i++) {
ep_addr = ep->desc.bEndpointAddress;
/* get endpoint base */
idx = ((ep_addr & 0x7f) - 1) * 2;
if (idx > 15)
return -EIO;
if (ep_addr & 0x80)
idx++;
attr = ep->desc.bmAttributes;
if (cmptbl[idx] != EP_NOP) {
if (cmptbl[idx] == EP_NUL)
cfg_found = 0;
if (attr == USB_ENDPOINT_XFER_INT
&& cmptbl[idx] == EP_INT)
cmptbl[idx] = EP_NUL;
if (attr == USB_ENDPOINT_XFER_BULK
&& cmptbl[idx] == EP_BLK)
cmptbl[idx] = EP_NUL;
if (attr == USB_ENDPOINT_XFER_ISOC
&& cmptbl[idx] == EP_ISO)
cmptbl[idx] = EP_NUL;
if (attr == USB_ENDPOINT_XFER_INT &&
ep->desc.bInterval < vcf[17]) {
cfg_found = 0;
}
}
ep++;
}
for (i = 0; i < 16; i++)
if (cmptbl[i] != EP_NOP && cmptbl[i] != EP_NUL)
cfg_found = 0;
if (cfg_found) {
if (small_match < cfg_used) {
small_match = cfg_used;
alt_used = probe_alt_setting;
iface_used = iface;
}
}
cfg_used++;
}
alt_idx++;
} /* (alt_idx < intf->num_altsetting) */
/* no valid USB TA endpoint configuration found */
if (small_match == -1)
return -EIO;
iface = iface_used;
hw = kzalloc(sizeof(struct hfcsusb), GFP_KERNEL);
if (!hw)
return -ENOMEM; /* got no mem */
snprintf(hw->name, MISDN_MAX_IDLEN - 1, "%s", DRIVER_NAME);
ep = iface->endpoint;
vcf = validconf[small_match];
for (i = 0; i < iface->desc.bNumEndpoints; i++) {
struct usb_fifo *f;
ep_addr = ep->desc.bEndpointAddress;
/* get endpoint base */
idx = ((ep_addr & 0x7f) - 1) * 2;
if (ep_addr & 0x80)
idx++;
f = &hw->fifos[idx & 7];
/* init Endpoints */
if (vcf[idx] == EP_NOP || vcf[idx] == EP_NUL) {
ep++;
continue;
}
switch (ep->desc.bmAttributes) {
case USB_ENDPOINT_XFER_INT:
f->pipe = usb_rcvintpipe(dev,
ep->desc.bEndpointAddress);
f->usb_transfer_mode = USB_INT;
packet_size = le16_to_cpu(ep->desc.wMaxPacketSize);
break;
case USB_ENDPOINT_XFER_BULK:
if (ep_addr & 0x80)
f->pipe = usb_rcvbulkpipe(dev,
ep->desc.bEndpointAddress);
else
f->pipe = usb_sndbulkpipe(dev,
ep->desc.bEndpointAddress);
f->usb_transfer_mode = USB_BULK;
packet_size = le16_to_cpu(ep->desc.wMaxPacketSize);
break;
case USB_ENDPOINT_XFER_ISOC:
if (ep_addr & 0x80)
f->pipe = usb_rcvisocpipe(dev,
ep->desc.bEndpointAddress);
else
f->pipe = usb_sndisocpipe(dev,
ep->desc.bEndpointAddress);
f->usb_transfer_mode = USB_ISOC;
iso_packet_size = le16_to_cpu(ep->desc.wMaxPacketSize);
break;
default:
f->pipe = 0;
}
if (f->pipe) {
f->fifonum = idx & 7;
f->hw = hw;
f->usb_packet_maxlen =
le16_to_cpu(ep->desc.wMaxPacketSize);
f->intervall = ep->desc.bInterval;
}
ep++;
}
hw->dev = dev; /* save device */
hw->if_used = ifnum; /* save used interface */
hw->alt_used = alt_used; /* and alternate config */
hw->ctrl_paksize = dev->descriptor.bMaxPacketSize0; /* control size */
hw->cfg_used = vcf[16]; /* store used config */
hw->vend_idx = vend_idx; /* store found vendor */
hw->packet_size = packet_size;
hw->iso_packet_size = iso_packet_size;
/* create the control pipes needed for register access */
hw->ctrl_in_pipe = usb_rcvctrlpipe(hw->dev, 0);
hw->ctrl_out_pipe = usb_sndctrlpipe(hw->dev, 0);
driver_info = (struct hfcsusb_vdata *)
hfcsusb_idtab[vend_idx].driver_info;
hw->ctrl_urb = usb_alloc_urb(0, GFP_KERNEL);
if (!hw->ctrl_urb) {
pr_warn("%s: No memory for control urb\n",
driver_info->vend_name);
kfree(hw);
return -ENOMEM;
}
pr_info("%s: %s: detected \"%s\" (%s, if=%d alt=%d)\n",
hw->name, __func__, driver_info->vend_name,
conf_str[small_match], ifnum, alt_used);
if (setup_instance(hw, dev->dev.parent))
return -EIO;
hw->intf = intf;
usb_set_intfdata(hw->intf, hw);
return 0;
}
/* function called when an active device is removed */
static void
hfcsusb_disconnect(struct usb_interface *intf)
{
struct hfcsusb *hw = usb_get_intfdata(intf);
struct hfcsusb *next;
int cnt = 0;
printk(KERN_INFO "%s: device disconnected\n", hw->name);
handle_led(hw, LED_POWER_OFF);
release_hw(hw);
list_for_each_entry_safe(hw, next, &HFClist, list)
cnt++;
if (!cnt)
hfcsusb_cnt = 0;
usb_set_intfdata(intf, NULL);
}
static struct usb_driver hfcsusb_drv = {
.name = DRIVER_NAME,
.id_table = hfcsusb_idtab,
.probe = hfcsusb_probe,
.disconnect = hfcsusb_disconnect,
.disable_hub_initiated_lpm = 1,
};
module_usb_driver(hfcsusb_drv);
| linux-master | drivers/isdn/hardware/mISDN/hfcsusb.c |
/*
* Kernel CAPI 2.0 Module - /proc/capi handling
*
* Copyright 1999 by Carsten Paeth <[email protected]>
* Copyright 2002 by Kai Germaschewski <[email protected]>
*
* This software may be used and distributed according to the terms
* of the GNU General Public License, incorporated herein by reference.
*
*/
#include "kcapi.h"
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/init.h>
#include <linux/export.h>
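/* map a controller state to a human readable string */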
static char *state2str(unsigned short state)
{
switch (state) {
case CAPI_CTR_DETECTED: return "detected";
case CAPI_CTR_LOADING: return "loading";
case CAPI_CTR_RUNNING: return "running";
default: return "???";
}
}
// /proc/capi
// ===========================================================================
// /proc/capi/controller:
// cnr driver cardstate name driverinfo
// /proc/capi/contrstats:
// cnr nrecvctlpkt nrecvdatapkt nsentctlpkt nsentdatapkt
// ---------------------------------------------------------------------------
static void *controller_start(struct seq_file *seq, loff_t *pos)
__acquires(capi_controller_lock)
{
mutex_lock(&capi_controller_lock);
if (*pos < CAPI_MAXCONTR)
return &capi_controller[*pos];
return NULL;
}
static void *controller_next(struct seq_file *seq, void *v, loff_t *pos)
{
++*pos;
if (*pos < CAPI_MAXCONTR)
return &capi_controller[*pos];
return NULL;
}
static void controller_stop(struct seq_file *seq, void *v)
__releases(capi_controller_lock)
{
mutex_unlock(&capi_controller_lock);
}
static int controller_show(struct seq_file *seq, void *v)
{
struct capi_ctr *ctr = *(struct capi_ctr **) v;
if (!ctr)
return 0;
seq_printf(seq, "%d %-10s %-8s %-16s %s\n",
ctr->cnr, ctr->driver_name,
state2str(ctr->state),
ctr->name,
ctr->procinfo ? ctr->procinfo(ctr) : "");
return 0;
}
static int contrstats_show(struct seq_file *seq, void *v)
{
struct capi_ctr *ctr = *(struct capi_ctr **) v;
if (!ctr)
return 0;
seq_printf(seq, "%d %lu %lu %lu %lu\n",
ctr->cnr,
ctr->nrecvctlpkt,
ctr->nrecvdatapkt,
ctr->nsentctlpkt,
ctr->nsentdatapkt);
return 0;
}
static const struct seq_operations seq_controller_ops = {
.start = controller_start,
.next = controller_next,
.stop = controller_stop,
.show = controller_show,
};
static const struct seq_operations seq_contrstats_ops = {
.start = controller_start,
.next = controller_next,
.stop = controller_stop,
.show = contrstats_show,
};
// /proc/capi/applications:
// applid l3cnt dblkcnt dblklen #ncci recvqueuelen
// /proc/capi/applstats:
// applid nrecvctlpkt nrecvdatapkt nsentctlpkt nsentdatapkt
// ---------------------------------------------------------------------------
static void *applications_start(struct seq_file *seq, loff_t *pos)
__acquires(capi_controller_lock)
{
mutex_lock(&capi_controller_lock);
if (*pos < CAPI_MAXAPPL)
return &capi_applications[*pos];
return NULL;
}
static void *
applications_next(struct seq_file *seq, void *v, loff_t *pos)
{
++*pos;
if (*pos < CAPI_MAXAPPL)
return &capi_applications[*pos];
return NULL;
}
static void applications_stop(struct seq_file *seq, void *v)
__releases(capi_controller_lock)
{
mutex_unlock(&capi_controller_lock);
}
static int
applications_show(struct seq_file *seq, void *v)
{
struct capi20_appl *ap = *(struct capi20_appl **) v;
if (!ap)
return 0;
seq_printf(seq, "%u %d %d %d\n",
ap->applid,
ap->rparam.level3cnt,
ap->rparam.datablkcnt,
ap->rparam.datablklen);
return 0;
}
static int
applstats_show(struct seq_file *seq, void *v)
{
struct capi20_appl *ap = *(struct capi20_appl **) v;
if (!ap)
return 0;
seq_printf(seq, "%u %lu %lu %lu %lu\n",
ap->applid,
ap->nrecvctlpkt,
ap->nrecvdatapkt,
ap->nsentctlpkt,
ap->nsentdatapkt);
return 0;
}
static const struct seq_operations seq_applications_ops = {
.start = applications_start,
.next = applications_next,
.stop = applications_stop,
.show = applications_show,
};
static const struct seq_operations seq_applstats_ops = {
.start = applications_start,
.next = applications_next,
.stop = applications_stop,
.show = applstats_show,
};
// ---------------------------------------------------------------------------
/* /proc/capi/driver is always empty */
static ssize_t empty_read(struct file *file, char __user *buf,
size_t size, loff_t *off)
{
return 0;
}
static const struct proc_ops empty_proc_ops = {
.proc_read = empty_read,
.proc_lseek = default_llseek,
};
// ---------------------------------------------------------------------------
void __init
kcapi_proc_init(void)
{
proc_mkdir("capi", NULL);
proc_mkdir("capi/controllers", NULL);
proc_create_seq("capi/controller", 0, NULL, &seq_controller_ops);
proc_create_seq("capi/contrstats", 0, NULL, &seq_contrstats_ops);
proc_create_seq("capi/applications", 0, NULL, &seq_applications_ops);
proc_create_seq("capi/applstats", 0, NULL, &seq_applstats_ops);
proc_create("capi/driver", 0, NULL, &empty_proc_ops);
}
void
kcapi_proc_exit(void)
{
remove_proc_entry("capi/driver", NULL);
remove_proc_entry("capi/controller", NULL);
remove_proc_entry("capi/contrstats", NULL);
remove_proc_entry("capi/applications", NULL);
remove_proc_entry("capi/applstats", NULL);
remove_proc_entry("capi/controllers", NULL);
remove_proc_entry("capi", NULL);
}
| linux-master | drivers/isdn/capi/kcapi_proc.c |
/* $Id: capi.c,v 1.1.2.7 2004/04/28 09:48:59 armin Exp $
*
* CAPI 2.0 Interface for Linux
*
* Copyright 1996 by Carsten Paeth <[email protected]>
*
* This software may be used and distributed according to the terms
* of the GNU General Public License, incorporated herein by reference.
*
*/
#include <linux/compiler.h>
#include <linux/module.h>
#include <linux/ethtool.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/major.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/fcntl.h>
#include <linux/fs.h>
#include <linux/signal.h>
#include <linux/mutex.h>
#include <linux/mm.h>
#include <linux/timer.h>
#include <linux/wait.h>
#include <linux/tty.h>
#include <linux/netdevice.h>
#include <linux/ppp_defs.h>
#include <linux/ppp-ioctl.h>
#include <linux/skbuff.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/poll.h>
#include <linux/capi.h>
#include <linux/kernelcapi.h>
#include <linux/init.h>
#include <linux/device.h>
#include <linux/moduleparam.h>
#include <linux/isdn/capiutil.h>
#include <linux/isdn/capicmd.h>
#include "kcapi.h"
MODULE_DESCRIPTION("CAPI4Linux: kernel CAPI layer and /dev/capi20 interface");
MODULE_AUTHOR("Carsten Paeth");
MODULE_LICENSE("GPL");
/* -------- driver information -------------------------------------- */
static DEFINE_MUTEX(capi_mutex);
static struct class *capi_class;
static int capi_major = 68; /* allocated */
module_param_named(major, capi_major, uint, 0);
#ifdef CONFIG_ISDN_CAPI_MIDDLEWARE
#define CAPINC_NR_PORTS 32
#define CAPINC_MAX_PORTS 256
static int capi_ttyminors = CAPINC_NR_PORTS;
module_param_named(ttyminors, capi_ttyminors, uint, 0);
#endif /* CONFIG_ISDN_CAPI_MIDDLEWARE */
/* -------- defines ------------------------------------------------- */
#define CAPINC_MAX_RECVQUEUE 10
#define CAPINC_MAX_SENDQUEUE 10
#define CAPI_MAX_BLKSIZE 2048
/* -------- data structures ----------------------------------------- */
struct capidev;
struct capincci;
struct capiminor;
struct ackqueue_entry {
struct list_head list;
u16 datahandle;
};
struct capiminor {
unsigned int minor;
struct capi20_appl *ap;
u32 ncci;
atomic_t datahandle;
atomic_t msgid;
struct tty_port port;
int ttyinstop;
int ttyoutstop;
struct sk_buff_head inqueue;
struct sk_buff_head outqueue;
int outbytes;
struct sk_buff *outskb;
spinlock_t outlock;
/* transmit path */
struct list_head ackqueue;
int nack;
spinlock_t ackqlock;
};
struct capincci {
struct list_head list;
u32 ncci;
struct capidev *cdev;
#ifdef CONFIG_ISDN_CAPI_MIDDLEWARE
struct capiminor *minorp;
#endif /* CONFIG_ISDN_CAPI_MIDDLEWARE */
};
struct capidev {
struct list_head list;
struct capi20_appl ap;
u16 errcode;
unsigned userflags;
struct sk_buff_head recvqueue;
wait_queue_head_t recvwait;
struct list_head nccis;
struct mutex lock;
};
/* -------- global variables ---------------------------------------- */
static DEFINE_MUTEX(capidev_list_lock);
static LIST_HEAD(capidev_list);
#ifdef CONFIG_ISDN_CAPI_MIDDLEWARE
static DEFINE_SPINLOCK(capiminors_lock);
static struct capiminor **capiminors;
static struct tty_driver *capinc_tty_driver;
/* -------- datahandles --------------------------------------------- */
static int capiminor_add_ack(struct capiminor *mp, u16 datahandle)
{
struct ackqueue_entry *n;
n = kmalloc(sizeof(*n), GFP_ATOMIC);
if (unlikely(!n)) {
printk(KERN_ERR "capi: alloc datahandle failed\n");
return -1;
}
n->datahandle = datahandle;
INIT_LIST_HEAD(&n->list);
spin_lock_bh(&mp->ackqlock);
list_add_tail(&n->list, &mp->ackqueue);
mp->nack++;
spin_unlock_bh(&mp->ackqlock);
return 0;
}
static int capiminor_del_ack(struct capiminor *mp, u16 datahandle)
{
struct ackqueue_entry *p, *tmp;
spin_lock_bh(&mp->ackqlock);
list_for_each_entry_safe(p, tmp, &mp->ackqueue, list) {
if (p->datahandle == datahandle) {
list_del(&p->list);
mp->nack--;
spin_unlock_bh(&mp->ackqlock);
kfree(p);
return 0;
}
}
spin_unlock_bh(&mp->ackqlock);
return -1;
}
static void capiminor_del_all_ack(struct capiminor *mp)
{
struct ackqueue_entry *p, *tmp;
list_for_each_entry_safe(p, tmp, &mp->ackqueue, list) {
list_del(&p->list);
kfree(p);
mp->nack--;
}
}
/* -------- struct capiminor ---------------------------------------- */
static void capiminor_destroy(struct tty_port *port)
{
struct capiminor *mp = container_of(port, struct capiminor, port);
kfree_skb(mp->outskb);
skb_queue_purge(&mp->inqueue);
skb_queue_purge(&mp->outqueue);
capiminor_del_all_ack(mp);
kfree(mp);
}
static const struct tty_port_operations capiminor_port_ops = {
.destruct = capiminor_destroy,
};
static struct capiminor *capiminor_alloc(struct capi20_appl *ap, u32 ncci)
{
struct capiminor *mp;
struct device *dev;
unsigned int minor;
mp = kzalloc(sizeof(*mp), GFP_KERNEL);
if (!mp) {
printk(KERN_ERR "capi: can't alloc capiminor\n");
return NULL;
}
mp->ap = ap;
mp->ncci = ncci;
INIT_LIST_HEAD(&mp->ackqueue);
spin_lock_init(&mp->ackqlock);
skb_queue_head_init(&mp->inqueue);
skb_queue_head_init(&mp->outqueue);
spin_lock_init(&mp->outlock);
tty_port_init(&mp->port);
mp->port.ops = &capiminor_port_ops;
/* Allocate the lowest unused minor number. */
spin_lock(&capiminors_lock);
for (minor = 0; minor < capi_ttyminors; minor++)
if (!capiminors[minor]) {
capiminors[minor] = mp;
break;
}
spin_unlock(&capiminors_lock);
if (minor == capi_ttyminors) {
printk(KERN_NOTICE "capi: out of minors\n");
goto err_out1;
}
mp->minor = minor;
dev = tty_port_register_device(&mp->port, capinc_tty_driver, minor,
NULL);
if (IS_ERR(dev))
goto err_out2;
return mp;
err_out2:
spin_lock(&capiminors_lock);
capiminors[minor] = NULL;
spin_unlock(&capiminors_lock);
err_out1:
tty_port_put(&mp->port);
return NULL;
}
static struct capiminor *capiminor_get(unsigned int minor)
{
struct capiminor *mp;
spin_lock(&capiminors_lock);
mp = capiminors[minor];
if (mp)
tty_port_get(&mp->port);
spin_unlock(&capiminors_lock);
return mp;
}
static inline void capiminor_put(struct capiminor *mp)
{
tty_port_put(&mp->port);
}
static void capiminor_free(struct capiminor *mp)
{
tty_unregister_device(capinc_tty_driver, mp->minor);
spin_lock(&capiminors_lock);
capiminors[mp->minor] = NULL;
spin_unlock(&capiminors_lock);
capiminor_put(mp);
}
/* -------- struct capincci ----------------------------------------- */
static void capincci_alloc_minor(struct capidev *cdev, struct capincci *np)
{
if (cdev->userflags & CAPIFLAG_HIGHJACKING)
np->minorp = capiminor_alloc(&cdev->ap, np->ncci);
}
static void capincci_free_minor(struct capincci *np)
{
struct capiminor *mp = np->minorp;
struct tty_struct *tty;
if (mp) {
tty = tty_port_tty_get(&mp->port);
if (tty) {
tty_vhangup(tty);
tty_kref_put(tty);
}
capiminor_free(mp);
}
}
static inline unsigned int capincci_minor_opencount(struct capincci *np)
{
struct capiminor *mp = np->minorp;
unsigned int count = 0;
struct tty_struct *tty;
if (mp) {
tty = tty_port_tty_get(&mp->port);
if (tty) {
count = tty->count;
tty_kref_put(tty);
}
}
return count;
}
#else /* !CONFIG_ISDN_CAPI_MIDDLEWARE */
static inline void
capincci_alloc_minor(struct capidev *cdev, struct capincci *np) { }
static inline void capincci_free_minor(struct capincci *np) { }
#endif /* !CONFIG_ISDN_CAPI_MIDDLEWARE */
static struct capincci *capincci_alloc(struct capidev *cdev, u32 ncci)
{
struct capincci *np;
np = kzalloc(sizeof(*np), GFP_KERNEL);
if (!np)
return NULL;
np->ncci = ncci;
np->cdev = cdev;
capincci_alloc_minor(cdev, np);
list_add_tail(&np->list, &cdev->nccis);
return np;
}
static void capincci_free(struct capidev *cdev, u32 ncci)
{
struct capincci *np, *tmp;
list_for_each_entry_safe(np, tmp, &cdev->nccis, list)
if (ncci == 0xffffffff || np->ncci == ncci) {
capincci_free_minor(np);
list_del(&np->list);
kfree(np);
}
}
#ifdef CONFIG_ISDN_CAPI_MIDDLEWARE
static struct capincci *capincci_find(struct capidev *cdev, u32 ncci)
{
struct capincci *np;
list_for_each_entry(np, &cdev->nccis, list)
if (np->ncci == ncci)
return np;
return NULL;
}
/* -------- handle data queue --------------------------------------- */
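/* build a DATA_B3_RESP message acknowledging the given DATA_B3_IND skb */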
static struct sk_buff *
gen_data_b3_resp_for(struct capiminor *mp, struct sk_buff *skb)
{
struct sk_buff *nskb;
nskb = alloc_skb(CAPI_DATA_B3_RESP_LEN, GFP_KERNEL);
if (nskb) {
u16 datahandle = CAPIMSG_U16(skb->data, CAPIMSG_BASELEN + 4 + 4 + 2);
unsigned char *s = skb_put(nskb, CAPI_DATA_B3_RESP_LEN);
capimsg_setu16(s, 0, CAPI_DATA_B3_RESP_LEN);
capimsg_setu16(s, 2, mp->ap->applid);
capimsg_setu8 (s, 4, CAPI_DATA_B3);
capimsg_setu8 (s, 5, CAPI_RESP);
capimsg_setu16(s, 6, atomic_inc_return(&mp->msgid));
capimsg_setu32(s, 8, mp->ncci);
capimsg_setu16(s, 12, datahandle);
}
return nskb;
}
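/* hand one received DATA_B3 payload to the tty line discipline */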
static int handle_recv_skb(struct capiminor *mp, struct sk_buff *skb)
{
unsigned int datalen = skb->len - CAPIMSG_LEN(skb->data);
struct tty_struct *tty;
struct sk_buff *nskb;
u16 errcode, datahandle;
struct tty_ldisc *ld;
int ret = -1;
tty = tty_port_tty_get(&mp->port);
if (!tty) {
pr_debug("capi: currently no receiver\n");
return -1;
}
ld = tty_ldisc_ref(tty);
if (!ld) {
/* fatal error, do not requeue */
ret = 0;
kfree_skb(skb);
goto deref_tty;
}
if (ld->ops->receive_buf == NULL) {
pr_debug("capi: ldisc has no receive_buf function\n");
/* fatal error, do not requeue */
goto free_skb;
}
if (mp->ttyinstop) {
pr_debug("capi: recv tty throttled\n");
goto deref_ldisc;
}
if (tty->receive_room < datalen) {
pr_debug("capi: no room in tty\n");
goto deref_ldisc;
}
nskb = gen_data_b3_resp_for(mp, skb);
if (!nskb) {
printk(KERN_ERR "capi: gen_data_b3_resp failed\n");
goto deref_ldisc;
}
datahandle = CAPIMSG_U16(skb->data, CAPIMSG_BASELEN + 4);
errcode = capi20_put_message(mp->ap, nskb);
if (errcode == CAPI_NOERROR) {
skb_pull(skb, CAPIMSG_LEN(skb->data));
pr_debug("capi: DATA_B3_RESP %u len=%d => ldisc\n",
datahandle, skb->len);
ld->ops->receive_buf(tty, skb->data, NULL, skb->len);
} else {
printk(KERN_ERR "capi: send DATA_B3_RESP failed=%x\n",
errcode);
kfree_skb(nskb);
if (errcode == CAPI_SENDQUEUEFULL)
goto deref_ldisc;
}
free_skb:
ret = 0;
kfree_skb(skb);
deref_ldisc:
tty_ldisc_deref(ld);
deref_tty:
tty_kref_put(tty);
return ret;
}
static void handle_minor_recv(struct capiminor *mp)
{
struct sk_buff *skb;
while ((skb = skb_dequeue(&mp->inqueue)) != NULL)
if (handle_recv_skb(mp, skb) < 0) {
skb_queue_head(&mp->inqueue, skb);
return;
}
}
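/* send queued tty data as DATA_B3_REQ messages, tracking datahandle acks */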
static void handle_minor_send(struct capiminor *mp)
{
struct tty_struct *tty;
struct sk_buff *skb;
u16 len;
u16 errcode;
u16 datahandle;
tty = tty_port_tty_get(&mp->port);
if (!tty)
return;
if (mp->ttyoutstop) {
pr_debug("capi: send: tty stopped\n");
tty_kref_put(tty);
return;
}
while (1) {
spin_lock_bh(&mp->outlock);
skb = __skb_dequeue(&mp->outqueue);
if (!skb) {
spin_unlock_bh(&mp->outlock);
break;
}
len = (u16)skb->len;
mp->outbytes -= len;
spin_unlock_bh(&mp->outlock);
datahandle = atomic_inc_return(&mp->datahandle);
skb_push(skb, CAPI_DATA_B3_REQ_LEN);
memset(skb->data, 0, CAPI_DATA_B3_REQ_LEN);
capimsg_setu16(skb->data, 0, CAPI_DATA_B3_REQ_LEN);
capimsg_setu16(skb->data, 2, mp->ap->applid);
capimsg_setu8 (skb->data, 4, CAPI_DATA_B3);
capimsg_setu8 (skb->data, 5, CAPI_REQ);
capimsg_setu16(skb->data, 6, atomic_inc_return(&mp->msgid));
capimsg_setu32(skb->data, 8, mp->ncci); /* NCCI */
capimsg_setu32(skb->data, 12, (u32)(long)skb->data);/* Data32 */
capimsg_setu16(skb->data, 16, len); /* Data length */
capimsg_setu16(skb->data, 18, datahandle);
capimsg_setu16(skb->data, 20, 0); /* Flags */
if (capiminor_add_ack(mp, datahandle) < 0) {
skb_pull(skb, CAPI_DATA_B3_REQ_LEN);
spin_lock_bh(&mp->outlock);
__skb_queue_head(&mp->outqueue, skb);
mp->outbytes += len;
spin_unlock_bh(&mp->outlock);
break;
}
errcode = capi20_put_message(mp->ap, skb);
if (errcode == CAPI_NOERROR) {
pr_debug("capi: DATA_B3_REQ %u len=%u\n",
datahandle, len);
continue;
}
capiminor_del_ack(mp, datahandle);
if (errcode == CAPI_SENDQUEUEFULL) {
skb_pull(skb, CAPI_DATA_B3_REQ_LEN);
spin_lock_bh(&mp->outlock);
__skb_queue_head(&mp->outqueue, skb);
mp->outbytes += len;
spin_unlock_bh(&mp->outlock);
break;
}
/* ups, drop packet */
printk(KERN_ERR "capi: put_message = %x\n", errcode);
kfree_skb(skb);
}
tty_kref_put(tty);
}
#endif /* CONFIG_ISDN_CAPI_MIDDLEWARE */
/* -------- function called by lower level -------------------------- */
static void capi_recv_message(struct capi20_appl *ap, struct sk_buff *skb)
{
struct capidev *cdev = ap->private;
#ifdef CONFIG_ISDN_CAPI_MIDDLEWARE
struct capiminor *mp;
u16 datahandle;
struct capincci *np;
#endif /* CONFIG_ISDN_CAPI_MIDDLEWARE */
mutex_lock(&cdev->lock);
if (CAPIMSG_CMD(skb->data) == CAPI_CONNECT_B3_CONF) {
u16 info = CAPIMSG_U16(skb->data, 12); // Info field
if ((info & 0xff00) == 0)
capincci_alloc(cdev, CAPIMSG_NCCI(skb->data));
}
if (CAPIMSG_CMD(skb->data) == CAPI_CONNECT_B3_IND)
capincci_alloc(cdev, CAPIMSG_NCCI(skb->data));
if (CAPIMSG_COMMAND(skb->data) != CAPI_DATA_B3) {
skb_queue_tail(&cdev->recvqueue, skb);
wake_up_interruptible(&cdev->recvwait);
goto unlock_out;
}
#ifndef CONFIG_ISDN_CAPI_MIDDLEWARE
skb_queue_tail(&cdev->recvqueue, skb);
wake_up_interruptible(&cdev->recvwait);
#else /* CONFIG_ISDN_CAPI_MIDDLEWARE */
np = capincci_find(cdev, CAPIMSG_CONTROL(skb->data));
if (!np) {
printk(KERN_ERR "BUG: capi_signal: ncci not found\n");
skb_queue_tail(&cdev->recvqueue, skb);
wake_up_interruptible(&cdev->recvwait);
goto unlock_out;
}
mp = np->minorp;
if (!mp) {
skb_queue_tail(&cdev->recvqueue, skb);
wake_up_interruptible(&cdev->recvwait);
goto unlock_out;
}
if (CAPIMSG_SUBCOMMAND(skb->data) == CAPI_IND) {
datahandle = CAPIMSG_U16(skb->data, CAPIMSG_BASELEN + 4 + 4 + 2);
pr_debug("capi_signal: DATA_B3_IND %u len=%d\n",
datahandle, skb->len-CAPIMSG_LEN(skb->data));
skb_queue_tail(&mp->inqueue, skb);
handle_minor_recv(mp);
} else if (CAPIMSG_SUBCOMMAND(skb->data) == CAPI_CONF) {
datahandle = CAPIMSG_U16(skb->data, CAPIMSG_BASELEN + 4);
pr_debug("capi_signal: DATA_B3_CONF %u 0x%x\n",
datahandle,
CAPIMSG_U16(skb->data, CAPIMSG_BASELEN + 4 + 2));
kfree_skb(skb);
capiminor_del_ack(mp, datahandle);
tty_port_tty_wakeup(&mp->port);
handle_minor_send(mp);
} else {
/* ups, let capi application handle it :-) */
skb_queue_tail(&cdev->recvqueue, skb);
wake_up_interruptible(&cdev->recvwait);
}
#endif /* CONFIG_ISDN_CAPI_MIDDLEWARE */
unlock_out:
mutex_unlock(&cdev->lock);
}
/* -------- file_operations for capidev ----------------------------- */
static ssize_t
capi_read(struct file *file, char __user *buf, size_t count, loff_t *ppos)
{
struct capidev *cdev = file->private_data;
struct sk_buff *skb;
size_t copied;
int err;
if (!cdev->ap.applid)
return -ENODEV;
skb = skb_dequeue(&cdev->recvqueue);
if (!skb) {
if (file->f_flags & O_NONBLOCK)
return -EAGAIN;
err = wait_event_interruptible(cdev->recvwait,
(skb = skb_dequeue(&cdev->recvqueue)));
if (err)
return err;
}
if (skb->len > count) {
skb_queue_head(&cdev->recvqueue, skb);
return -EMSGSIZE;
}
if (copy_to_user(buf, skb->data, skb->len)) {
skb_queue_head(&cdev->recvqueue, skb);
return -EFAULT;
}
copied = skb->len;
kfree_skb(skb);
return copied;
}
static ssize_t
capi_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos)
{
struct capidev *cdev = file->private_data;
struct sk_buff *skb;
u16 mlen;
if (!cdev->ap.applid)
return -ENODEV;
if (count < CAPIMSG_BASELEN)
return -EINVAL;
skb = alloc_skb(count, GFP_USER);
if (!skb)
return -ENOMEM;
if (copy_from_user(skb_put(skb, count), buf, count)) {
kfree_skb(skb);
return -EFAULT;
}
mlen = CAPIMSG_LEN(skb->data);
if (CAPIMSG_CMD(skb->data) == CAPI_DATA_B3_REQ) {
if (count < CAPI_DATA_B3_REQ_LEN ||
(size_t)(mlen + CAPIMSG_DATALEN(skb->data)) != count) {
kfree_skb(skb);
return -EINVAL;
}
} else {
if (mlen != count) {
kfree_skb(skb);
return -EINVAL;
}
}
CAPIMSG_SETAPPID(skb->data, cdev->ap.applid);
if (CAPIMSG_CMD(skb->data) == CAPI_DISCONNECT_B3_RESP) {
if (count < CAPI_DISCONNECT_B3_RESP_LEN) {
kfree_skb(skb);
return -EINVAL;
}
mutex_lock(&cdev->lock);
capincci_free(cdev, CAPIMSG_NCCI(skb->data));
mutex_unlock(&cdev->lock);
}
cdev->errcode = capi20_put_message(&cdev->ap, skb);
if (cdev->errcode) {
kfree_skb(skb);
return -EIO;
}
return count;
}
static __poll_t
capi_poll(struct file *file, poll_table *wait)
{
struct capidev *cdev = file->private_data;
__poll_t mask = 0;
if (!cdev->ap.applid)
return EPOLLERR;
poll_wait(file, &(cdev->recvwait), wait);
mask = EPOLLOUT | EPOLLWRNORM;
if (!skb_queue_empty_lockless(&cdev->recvqueue))
mask |= EPOLLIN | EPOLLRDNORM;
return mask;
}
static int
capi_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
struct capidev *cdev = file->private_data;
capi_ioctl_struct data;
int retval = -EINVAL;
void __user *argp = (void __user *)arg;
switch (cmd) {
case CAPI_REGISTER:
mutex_lock(&cdev->lock);
if (cdev->ap.applid) {
retval = -EEXIST;
goto register_out;
}
if (copy_from_user(&cdev->ap.rparam, argp,
sizeof(struct capi_register_params))) {
retval = -EFAULT;
goto register_out;
}
cdev->ap.private = cdev;
cdev->ap.recv_message = capi_recv_message;
cdev->errcode = capi20_register(&cdev->ap);
retval = (int)cdev->ap.applid;
if (cdev->errcode) {
cdev->ap.applid = 0;
retval = -EIO;
}
register_out:
mutex_unlock(&cdev->lock);
return retval;
case CAPI_GET_VERSION:
if (copy_from_user(&data.contr, argp,
sizeof(data.contr)))
return -EFAULT;
cdev->errcode = capi20_get_version(data.contr, &data.version);
if (cdev->errcode)
return -EIO;
if (copy_to_user(argp, &data.version,
sizeof(data.version)))
return -EFAULT;
return 0;
case CAPI_GET_SERIAL:
if (copy_from_user(&data.contr, argp,
sizeof(data.contr)))
return -EFAULT;
cdev->errcode = capi20_get_serial(data.contr, data.serial);
if (cdev->errcode)
return -EIO;
if (copy_to_user(argp, data.serial,
sizeof(data.serial)))
return -EFAULT;
return 0;
case CAPI_GET_PROFILE:
if (copy_from_user(&data.contr, argp,
sizeof(data.contr)))
return -EFAULT;
if (data.contr == 0) {
cdev->errcode = capi20_get_profile(data.contr, &data.profile);
if (cdev->errcode)
return -EIO;
retval = copy_to_user(argp,
&data.profile.ncontroller,
sizeof(data.profile.ncontroller));
} else {
cdev->errcode = capi20_get_profile(data.contr, &data.profile);
if (cdev->errcode)
return -EIO;
retval = copy_to_user(argp, &data.profile,
sizeof(data.profile));
}
if (retval)
return -EFAULT;
return 0;
case CAPI_GET_MANUFACTURER:
if (copy_from_user(&data.contr, argp,
sizeof(data.contr)))
return -EFAULT;
cdev->errcode = capi20_get_manufacturer(data.contr, data.manufacturer);
if (cdev->errcode)
return -EIO;
if (copy_to_user(argp, data.manufacturer,
sizeof(data.manufacturer)))
return -EFAULT;
return 0;
case CAPI_GET_ERRCODE:
data.errcode = cdev->errcode;
cdev->errcode = CAPI_NOERROR;
if (arg) {
if (copy_to_user(argp, &data.errcode,
sizeof(data.errcode)))
return -EFAULT;
}
return data.errcode;
case CAPI_INSTALLED:
if (capi20_isinstalled() == CAPI_NOERROR)
return 0;
return -ENXIO;
case CAPI_MANUFACTURER_CMD: {
struct capi_manufacturer_cmd mcmd;
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
if (copy_from_user(&mcmd, argp, sizeof(mcmd)))
return -EFAULT;
return capi20_manufacturer(mcmd.cmd, mcmd.data);
}
case CAPI_SET_FLAGS:
case CAPI_CLR_FLAGS: {
unsigned userflags;
if (copy_from_user(&userflags, argp, sizeof(userflags)))
return -EFAULT;
mutex_lock(&cdev->lock);
if (cmd == CAPI_SET_FLAGS)
cdev->userflags |= userflags;
else
cdev->userflags &= ~userflags;
mutex_unlock(&cdev->lock);
return 0;
}
case CAPI_GET_FLAGS:
if (copy_to_user(argp, &cdev->userflags,
sizeof(cdev->userflags)))
return -EFAULT;
return 0;
#ifndef CONFIG_ISDN_CAPI_MIDDLEWARE
case CAPI_NCCI_OPENCOUNT:
return 0;
#else /* CONFIG_ISDN_CAPI_MIDDLEWARE */
case CAPI_NCCI_OPENCOUNT: {
struct capincci *nccip;
unsigned ncci;
int count = 0;
if (copy_from_user(&ncci, argp, sizeof(ncci)))
return -EFAULT;
mutex_lock(&cdev->lock);
nccip = capincci_find(cdev, (u32)ncci);
if (nccip)
count = capincci_minor_opencount(nccip);
mutex_unlock(&cdev->lock);
return count;
}
case CAPI_NCCI_GETUNIT: {
struct capincci *nccip;
struct capiminor *mp;
unsigned ncci;
int unit = -ESRCH;
if (copy_from_user(&ncci, argp, sizeof(ncci)))
return -EFAULT;
mutex_lock(&cdev->lock);
nccip = capincci_find(cdev, (u32)ncci);
if (nccip) {
mp = nccip->minorp;
if (mp)
unit = mp->minor;
}
mutex_unlock(&cdev->lock);
return unit;
}
#endif /* CONFIG_ISDN_CAPI_MIDDLEWARE */
default:
return -EINVAL;
}
}
static long
capi_unlocked_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
int ret;
mutex_lock(&capi_mutex);
ret = capi_ioctl(file, cmd, arg);
mutex_unlock(&capi_mutex);
return ret;
}
#ifdef CONFIG_COMPAT
static long
capi_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
int ret;
if (cmd == CAPI_MANUFACTURER_CMD) {
struct {
compat_ulong_t cmd;
compat_uptr_t data;
} mcmd32;
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
if (copy_from_user(&mcmd32, compat_ptr(arg), sizeof(mcmd32)))
return -EFAULT;
mutex_lock(&capi_mutex);
ret = capi20_manufacturer(mcmd32.cmd, compat_ptr(mcmd32.data));
mutex_unlock(&capi_mutex);
return ret;
}
return capi_unlocked_ioctl(file, cmd, (unsigned long)compat_ptr(arg));
}
#endif
static int capi_open(struct inode *inode, struct file *file)
{
struct capidev *cdev;
cdev = kzalloc(sizeof(*cdev), GFP_KERNEL);
if (!cdev)
return -ENOMEM;
mutex_init(&cdev->lock);
skb_queue_head_init(&cdev->recvqueue);
init_waitqueue_head(&cdev->recvwait);
INIT_LIST_HEAD(&cdev->nccis);
file->private_data = cdev;
mutex_lock(&capidev_list_lock);
list_add_tail(&cdev->list, &capidev_list);
mutex_unlock(&capidev_list_lock);
return stream_open(inode, file);
}
static int capi_release(struct inode *inode, struct file *file)
{
struct capidev *cdev = file->private_data;
mutex_lock(&capidev_list_lock);
list_del(&cdev->list);
mutex_unlock(&capidev_list_lock);
if (cdev->ap.applid)
capi20_release(&cdev->ap);
skb_queue_purge(&cdev->recvqueue);
capincci_free(cdev, 0xffffffff);
kfree(cdev);
return 0;
}
static const struct file_operations capi_fops =
{
.owner = THIS_MODULE,
.llseek = no_llseek,
.read = capi_read,
.write = capi_write,
.poll = capi_poll,
.unlocked_ioctl = capi_unlocked_ioctl,
#ifdef CONFIG_COMPAT
.compat_ioctl = capi_compat_ioctl,
#endif
.open = capi_open,
.release = capi_release,
};
#ifdef CONFIG_ISDN_CAPI_MIDDLEWARE
/* -------- tty_operations for capincci ----------------------------- */
static int
capinc_tty_install(struct tty_driver *driver, struct tty_struct *tty)
{
struct capiminor *mp = capiminor_get(tty->index);
int ret = tty_standard_install(driver, tty);
if (ret == 0)
tty->driver_data = mp;
else
capiminor_put(mp);
return ret;
}
static void capinc_tty_cleanup(struct tty_struct *tty)
{
struct capiminor *mp = tty->driver_data;
tty->driver_data = NULL;
capiminor_put(mp);
}
static int capinc_tty_open(struct tty_struct *tty, struct file *filp)
{
struct capiminor *mp = tty->driver_data;
int err;
err = tty_port_open(&mp->port, tty, filp);
if (err)
return err;
handle_minor_recv(mp);
return 0;
}
static void capinc_tty_close(struct tty_struct *tty, struct file *filp)
{
struct capiminor *mp = tty->driver_data;
tty_port_close(&mp->port, tty, filp);
}
static ssize_t capinc_tty_write(struct tty_struct *tty, const u8 *buf,
size_t count)
{
struct capiminor *mp = tty->driver_data;
struct sk_buff *skb;
pr_debug("capinc_tty_write(count=%zu)\n", count);
spin_lock_bh(&mp->outlock);
skb = mp->outskb;
if (skb) {
mp->outskb = NULL;
__skb_queue_tail(&mp->outqueue, skb);
mp->outbytes += skb->len;
}
skb = alloc_skb(CAPI_DATA_B3_REQ_LEN + count, GFP_ATOMIC);
if (!skb) {
printk(KERN_ERR "capinc_tty_write: alloc_skb failed\n");
spin_unlock_bh(&mp->outlock);
return -ENOMEM;
}
skb_reserve(skb, CAPI_DATA_B3_REQ_LEN);
skb_put_data(skb, buf, count);
__skb_queue_tail(&mp->outqueue, skb);
mp->outbytes += skb->len;
spin_unlock_bh(&mp->outlock);
handle_minor_send(mp);
return count;
}
static int capinc_tty_put_char(struct tty_struct *tty, u8 ch)
{
struct capiminor *mp = tty->driver_data;
bool invoke_send = false;
struct sk_buff *skb;
int ret = 1;
pr_debug("capinc_put_char(%u)\n", ch);
spin_lock_bh(&mp->outlock);
skb = mp->outskb;
if (skb) {
if (skb_tailroom(skb) > 0) {
skb_put_u8(skb, ch);
goto unlock_out;
}
mp->outskb = NULL;
__skb_queue_tail(&mp->outqueue, skb);
mp->outbytes += skb->len;
invoke_send = true;
}
skb = alloc_skb(CAPI_DATA_B3_REQ_LEN + CAPI_MAX_BLKSIZE, GFP_ATOMIC);
if (skb) {
skb_reserve(skb, CAPI_DATA_B3_REQ_LEN);
skb_put_u8(skb, ch);
mp->outskb = skb;
} else {
printk(KERN_ERR "capinc_put_char: char %u lost\n", ch);
ret = 0;
}
unlock_out:
spin_unlock_bh(&mp->outlock);
if (invoke_send)
handle_minor_send(mp);
return ret;
}
static void capinc_tty_flush_chars(struct tty_struct *tty)
{
struct capiminor *mp = tty->driver_data;
struct sk_buff *skb;
spin_lock_bh(&mp->outlock);
skb = mp->outskb;
if (skb) {
mp->outskb = NULL;
__skb_queue_tail(&mp->outqueue, skb);
mp->outbytes += skb->len;
spin_unlock_bh(&mp->outlock);
handle_minor_send(mp);
} else
spin_unlock_bh(&mp->outlock);
handle_minor_recv(mp);
}
static unsigned int capinc_tty_write_room(struct tty_struct *tty)
{
struct capiminor *mp = tty->driver_data;
unsigned int room;
room = CAPINC_MAX_SENDQUEUE-skb_queue_len(&mp->outqueue);
room *= CAPI_MAX_BLKSIZE;
pr_debug("capinc_tty_write_room = %u\n", room);
return room;
}
static unsigned int capinc_tty_chars_in_buffer(struct tty_struct *tty)
{
struct capiminor *mp = tty->driver_data;
pr_debug("capinc_tty_chars_in_buffer = %d nack=%d sq=%d rq=%d\n",
mp->outbytes, mp->nack,
skb_queue_len(&mp->outqueue),
skb_queue_len(&mp->inqueue));
return mp->outbytes;
}
static void capinc_tty_throttle(struct tty_struct *tty)
{
struct capiminor *mp = tty->driver_data;
mp->ttyinstop = 1;
}
static void capinc_tty_unthrottle(struct tty_struct *tty)
{
struct capiminor *mp = tty->driver_data;
mp->ttyinstop = 0;
handle_minor_recv(mp);
}
static void capinc_tty_stop(struct tty_struct *tty)
{
struct capiminor *mp = tty->driver_data;
mp->ttyoutstop = 1;
}
static void capinc_tty_start(struct tty_struct *tty)
{
struct capiminor *mp = tty->driver_data;
mp->ttyoutstop = 0;
handle_minor_send(mp);
}
static void capinc_tty_hangup(struct tty_struct *tty)
{
struct capiminor *mp = tty->driver_data;
tty_port_hangup(&mp->port);
}
static void capinc_tty_send_xchar(struct tty_struct *tty, char ch)
{
pr_debug("capinc_tty_send_xchar(%d)\n", ch);
}
static const struct tty_operations capinc_ops = {
.open = capinc_tty_open,
.close = capinc_tty_close,
.write = capinc_tty_write,
.put_char = capinc_tty_put_char,
.flush_chars = capinc_tty_flush_chars,
.write_room = capinc_tty_write_room,
.chars_in_buffer = capinc_tty_chars_in_buffer,
.throttle = capinc_tty_throttle,
.unthrottle = capinc_tty_unthrottle,
.stop = capinc_tty_stop,
.start = capinc_tty_start,
.hangup = capinc_tty_hangup,
.send_xchar = capinc_tty_send_xchar,
.install = capinc_tty_install,
.cleanup = capinc_tty_cleanup,
};
static int __init capinc_tty_init(void)
{
struct tty_driver *drv;
int err;
if (capi_ttyminors > CAPINC_MAX_PORTS)
capi_ttyminors = CAPINC_MAX_PORTS;
if (capi_ttyminors <= 0)
capi_ttyminors = CAPINC_NR_PORTS;
capiminors = kcalloc(capi_ttyminors, sizeof(struct capiminor *),
GFP_KERNEL);
if (!capiminors)
return -ENOMEM;
drv = tty_alloc_driver(capi_ttyminors, TTY_DRIVER_REAL_RAW |
TTY_DRIVER_RESET_TERMIOS | TTY_DRIVER_DYNAMIC_DEV);
if (IS_ERR(drv)) {
kfree(capiminors);
return PTR_ERR(drv);
}
drv->driver_name = "capi_nc";
drv->name = "capi!";
drv->major = 0;
drv->minor_start = 0;
drv->type = TTY_DRIVER_TYPE_SERIAL;
drv->subtype = SERIAL_TYPE_NORMAL;
drv->init_termios = tty_std_termios;
drv->init_termios.c_iflag = ICRNL;
drv->init_termios.c_oflag = OPOST | ONLCR;
drv->init_termios.c_cflag = B9600 | CS8 | CREAD | HUPCL | CLOCAL;
drv->init_termios.c_lflag = 0;
tty_set_operations(drv, &capinc_ops);
err = tty_register_driver(drv);
if (err) {
tty_driver_kref_put(drv);
kfree(capiminors);
printk(KERN_ERR "Couldn't register capi_nc driver\n");
return err;
}
capinc_tty_driver = drv;
return 0;
}
static void __exit capinc_tty_exit(void)
{
tty_unregister_driver(capinc_tty_driver);
tty_driver_kref_put(capinc_tty_driver);
kfree(capiminors);
}
#else /* !CONFIG_ISDN_CAPI_MIDDLEWARE */
static inline int capinc_tty_init(void)
{
return 0;
}
static inline void capinc_tty_exit(void) { }
#endif /* !CONFIG_ISDN_CAPI_MIDDLEWARE */
/* -------- /proc functions ----------------------------------------- */
/*
* /proc/capi/capi20:
* minor applid nrecvctlpkt nrecvdatapkt nsendctlpkt nsenddatapkt
*/
static int __maybe_unused capi20_proc_show(struct seq_file *m, void *v)
{
struct capidev *cdev;
struct list_head *l;
mutex_lock(&capidev_list_lock);
list_for_each(l, &capidev_list) {
cdev = list_entry(l, struct capidev, list);
seq_printf(m, "0 %d %lu %lu %lu %lu\n",
cdev->ap.applid,
cdev->ap.nrecvctlpkt,
cdev->ap.nrecvdatapkt,
cdev->ap.nsentctlpkt,
cdev->ap.nsentdatapkt);
}
mutex_unlock(&capidev_list_lock);
return 0;
}
/*
* /proc/capi/capi20ncci:
* applid ncci
*/
static int __maybe_unused capi20ncci_proc_show(struct seq_file *m, void *v)
{
struct capidev *cdev;
struct capincci *np;
mutex_lock(&capidev_list_lock);
list_for_each_entry(cdev, &capidev_list, list) {
mutex_lock(&cdev->lock);
list_for_each_entry(np, &cdev->nccis, list)
seq_printf(m, "%d 0x%x\n", cdev->ap.applid, np->ncci);
mutex_unlock(&cdev->lock);
}
mutex_unlock(&capidev_list_lock);
return 0;
}
static void __init proc_init(void)
{
proc_create_single("capi/capi20", 0, NULL, capi20_proc_show);
proc_create_single("capi/capi20ncci", 0, NULL, capi20ncci_proc_show);
}
static void __exit proc_exit(void)
{
remove_proc_entry("capi/capi20", NULL);
remove_proc_entry("capi/capi20ncci", NULL);
}
/* -------- init function and module interface ---------------------- */
static int __init capi_init(void)
{
const char *compileinfo;
int major_ret;
int ret;
ret = kcapi_init();
if (ret)
return ret;
major_ret = register_chrdev(capi_major, "capi20", &capi_fops);
if (major_ret < 0) {
printk(KERN_ERR "capi20: unable to get major %d\n", capi_major);
kcapi_exit();
return major_ret;
}
capi_class = class_create("capi");
if (IS_ERR(capi_class)) {
unregister_chrdev(capi_major, "capi20");
kcapi_exit();
return PTR_ERR(capi_class);
}
device_create(capi_class, NULL, MKDEV(capi_major, 0), NULL, "capi20");
if (capinc_tty_init() < 0) {
device_destroy(capi_class, MKDEV(capi_major, 0));
class_destroy(capi_class);
unregister_chrdev(capi_major, "capi20");
kcapi_exit();
return -ENOMEM;
}
proc_init();
#ifdef CONFIG_ISDN_CAPI_MIDDLEWARE
compileinfo = " (middleware)";
#else
compileinfo = " (no middleware)";
#endif
printk(KERN_NOTICE "CAPI 2.0 started up with major %d%s\n",
capi_major, compileinfo);
return 0;
}
static void __exit capi_exit(void)
{
proc_exit();
device_destroy(capi_class, MKDEV(capi_major, 0));
class_destroy(capi_class);
unregister_chrdev(capi_major, "capi20");
capinc_tty_exit();
kcapi_exit();
}
module_init(capi_init);
module_exit(capi_exit);
| linux-master | drivers/isdn/capi/capi.c |
/* $Id: kcapi.c,v 1.1.2.8 2004/03/26 19:57:20 armin Exp $
*
* Kernel CAPI 2.0 Module
*
* Copyright 1999 by Carsten Paeth <[email protected]>
* Copyright 2002 by Kai Germaschewski <[email protected]>
*
* This software may be used and distributed according to the terms
* of the GNU General Public License, incorporated herein by reference.
*
*/
#include "kcapi.h"
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/proc_fs.h>
#include <linux/sched/signal.h>
#include <linux/seq_file.h>
#include <linux/skbuff.h>
#include <linux/workqueue.h>
#include <linux/capi.h>
#include <linux/kernelcapi.h>
#include <linux/init.h>
#include <linux/moduleparam.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/isdn/capicmd.h>
#include <linux/isdn/capiutil.h>
#include <linux/mutex.h>
#include <linux/rcupdate.h>
static int showcapimsgs;
static struct workqueue_struct *kcapi_wq;
module_param(showcapimsgs, uint, 0);
/* ------------------------------------------------------------- */
struct capictr_event {
struct work_struct work;
unsigned int type;
u32 controller;
};
/* ------------------------------------------------------------- */
static const struct capi_version driver_version = {2, 0, 1, 1 << 4};
static char driver_serial[CAPI_SERIAL_LEN] = "0004711";
static char capi_manufakturer[64] = "AVM Berlin";
#define NCCI2CTRL(ncci) (((ncci) >> 24) & 0x7f)
struct capi_ctr *capi_controller[CAPI_MAXCONTR];
DEFINE_MUTEX(capi_controller_lock);
struct capi20_appl *capi_applications[CAPI_MAXAPPL];
static int ncontrollers;
/* -------- controller ref counting -------------------------------------- */
static inline struct capi_ctr *
capi_ctr_get(struct capi_ctr *ctr)
{
if (!try_module_get(ctr->owner))
return NULL;
return ctr;
}
static inline void
capi_ctr_put(struct capi_ctr *ctr)
{
module_put(ctr->owner);
}
/* ------------------------------------------------------------- */
static inline struct capi_ctr *get_capi_ctr_by_nr(u16 contr)
{
if (contr < 1 || contr - 1 >= CAPI_MAXCONTR)
return NULL;
return capi_controller[contr - 1];
}
static inline struct capi20_appl *__get_capi_appl_by_nr(u16 applid)
{
lockdep_assert_held(&capi_controller_lock);
if (applid < 1 || applid - 1 >= CAPI_MAXAPPL)
return NULL;
return capi_applications[applid - 1];
}
static inline struct capi20_appl *get_capi_appl_by_nr(u16 applid)
{
if (applid < 1 || applid - 1 >= CAPI_MAXAPPL)
return NULL;
return rcu_dereference(capi_applications[applid - 1]);
}
/* -------- util functions ------------------------------------ */
static inline int capi_cmd_valid(u8 cmd)
{
switch (cmd) {
case CAPI_ALERT:
case CAPI_CONNECT:
case CAPI_CONNECT_ACTIVE:
case CAPI_CONNECT_B3_ACTIVE:
case CAPI_CONNECT_B3:
case CAPI_CONNECT_B3_T90_ACTIVE:
case CAPI_DATA_B3:
case CAPI_DISCONNECT_B3:
case CAPI_DISCONNECT:
case CAPI_FACILITY:
case CAPI_INFO:
case CAPI_LISTEN:
case CAPI_MANUFACTURER:
case CAPI_RESET_B3:
case CAPI_SELECT_B_PROTOCOL:
return 1;
}
return 0;
}
static inline int capi_subcmd_valid(u8 subcmd)
{
switch (subcmd) {
case CAPI_REQ:
case CAPI_CONF:
case CAPI_IND:
case CAPI_RESP:
return 1;
}
return 0;
}
/* ------------------------------------------------------------ */
static void
register_appl(struct capi_ctr *ctr, u16 applid, capi_register_params *rparam)
{
ctr = capi_ctr_get(ctr);
if (ctr)
ctr->register_appl(ctr, applid, rparam);
else
printk(KERN_WARNING "%s: cannot get controller resources\n",
__func__);
}
static void release_appl(struct capi_ctr *ctr, u16 applid)
{
DBG("applid %#x", applid);
ctr->release_appl(ctr, applid);
capi_ctr_put(ctr);
}
static void notify_up(u32 contr)
{
struct capi20_appl *ap;
struct capi_ctr *ctr;
u16 applid;
mutex_lock(&capi_controller_lock);
if (showcapimsgs & 1)
printk(KERN_DEBUG "kcapi: notify up contr %d\n", contr);
ctr = get_capi_ctr_by_nr(contr);
if (ctr) {
if (ctr->state == CAPI_CTR_RUNNING)
goto unlock_out;
ctr->state = CAPI_CTR_RUNNING;
for (applid = 1; applid <= CAPI_MAXAPPL; applid++) {
ap = __get_capi_appl_by_nr(applid);
if (ap)
register_appl(ctr, applid, &ap->rparam);
}
} else
printk(KERN_WARNING "%s: invalid contr %d\n", __func__, contr);
unlock_out:
mutex_unlock(&capi_controller_lock);
}
static void ctr_down(struct capi_ctr *ctr, int new_state)
{
struct capi20_appl *ap;
u16 applid;
if (ctr->state == CAPI_CTR_DETECTED || ctr->state == CAPI_CTR_DETACHED)
return;
ctr->state = new_state;
memset(ctr->manu, 0, sizeof(ctr->manu));
memset(&ctr->version, 0, sizeof(ctr->version));
memset(&ctr->profile, 0, sizeof(ctr->profile));
memset(ctr->serial, 0, sizeof(ctr->serial));
for (applid = 1; applid <= CAPI_MAXAPPL; applid++) {
ap = __get_capi_appl_by_nr(applid);
if (ap)
capi_ctr_put(ctr);
}
}
static void notify_down(u32 contr)
{
struct capi_ctr *ctr;
mutex_lock(&capi_controller_lock);
if (showcapimsgs & 1)
printk(KERN_DEBUG "kcapi: notify down contr %d\n", contr);
ctr = get_capi_ctr_by_nr(contr);
if (ctr)
ctr_down(ctr, CAPI_CTR_DETECTED);
else
printk(KERN_WARNING "%s: invalid contr %d\n", __func__, contr);
mutex_unlock(&capi_controller_lock);
}
static void do_notify_work(struct work_struct *work)
{
struct capictr_event *event =
container_of(work, struct capictr_event, work);
switch (event->type) {
case CAPICTR_UP:
notify_up(event->controller);
break;
case CAPICTR_DOWN:
notify_down(event->controller);
break;
}
kfree(event);
}
static int notify_push(unsigned int event_type, u32 controller)
{
struct capictr_event *event = kmalloc(sizeof(*event), GFP_ATOMIC);
if (!event)
return -ENOMEM;
INIT_WORK(&event->work, do_notify_work);
event->type = event_type;
event->controller = controller;
queue_work(kcapi_wq, &event->work);
return 0;
}
/* -------- Receiver ------------------------------------------ */
static void recv_handler(struct work_struct *work)
{
struct sk_buff *skb;
struct capi20_appl *ap =
container_of(work, struct capi20_appl, recv_work);
if ((!ap) || (ap->release_in_progress))
return;
mutex_lock(&ap->recv_mtx);
while ((skb = skb_dequeue(&ap->recv_queue))) {
if (CAPIMSG_CMD(skb->data) == CAPI_DATA_B3_IND)
ap->nrecvdatapkt++;
else
ap->nrecvctlpkt++;
ap->recv_message(ap, skb);
}
mutex_unlock(&ap->recv_mtx);
}
/**
* capi_ctr_handle_message() - handle incoming CAPI message
* @ctr: controller descriptor structure.
* @appl: application ID.
* @skb: message.
*
* Called by hardware driver to pass a CAPI message to the application.
*/
void capi_ctr_handle_message(struct capi_ctr *ctr, u16 appl,
struct sk_buff *skb)
{
struct capi20_appl *ap;
int showctl = 0;
u8 cmd, subcmd;
_cdebbuf *cdb;
if (ctr->state != CAPI_CTR_RUNNING) {
cdb = capi_message2str(skb->data);
if (cdb) {
printk(KERN_INFO "kcapi: controller [%03d] not active, got: %s",
ctr->cnr, cdb->buf);
cdebbuf_free(cdb);
} else
printk(KERN_INFO "kcapi: controller [%03d] not active, cannot trace\n",
ctr->cnr);
goto error;
}
cmd = CAPIMSG_COMMAND(skb->data);
subcmd = CAPIMSG_SUBCOMMAND(skb->data);
if (cmd == CAPI_DATA_B3 && subcmd == CAPI_IND) {
ctr->nrecvdatapkt++;
if (ctr->traceflag > 2)
showctl |= 2;
} else {
ctr->nrecvctlpkt++;
if (ctr->traceflag)
showctl |= 2;
}
showctl |= (ctr->traceflag & 1);
if (showctl & 2) {
if (showctl & 1) {
printk(KERN_DEBUG "kcapi: got [%03d] id#%d %s len=%u\n",
ctr->cnr, CAPIMSG_APPID(skb->data),
capi_cmd2str(cmd, subcmd),
CAPIMSG_LEN(skb->data));
} else {
cdb = capi_message2str(skb->data);
if (cdb) {
printk(KERN_DEBUG "kcapi: got [%03d] %s\n",
ctr->cnr, cdb->buf);
cdebbuf_free(cdb);
} else
printk(KERN_DEBUG "kcapi: got [%03d] id#%d %s len=%u, cannot trace\n",
ctr->cnr, CAPIMSG_APPID(skb->data),
capi_cmd2str(cmd, subcmd),
CAPIMSG_LEN(skb->data));
}
}
rcu_read_lock();
ap = get_capi_appl_by_nr(CAPIMSG_APPID(skb->data));
if (!ap) {
rcu_read_unlock();
cdb = capi_message2str(skb->data);
if (cdb) {
printk(KERN_ERR "kcapi: handle_message: applid %d state released (%s)\n",
CAPIMSG_APPID(skb->data), cdb->buf);
cdebbuf_free(cdb);
} else
printk(KERN_ERR "kcapi: handle_message: applid %d state released (%s) cannot trace\n",
CAPIMSG_APPID(skb->data),
capi_cmd2str(cmd, subcmd));
goto error;
}
skb_queue_tail(&ap->recv_queue, skb);
queue_work(kcapi_wq, &ap->recv_work);
rcu_read_unlock();
return;
error:
kfree_skb(skb);
}
EXPORT_SYMBOL(capi_ctr_handle_message);
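/*
 * Illustrative sketch (editorial addition, not part of the original file):
 * how a hardware driver's receive path might hand an incoming CAPI message
 * to the subsystem via capi_ctr_handle_message(). The function name and the
 * buf/len parameters are invented for illustration; a real driver gets them
 * from its hardware-specific receive handler.
 */
static void __maybe_unused example_hw_rx(struct capi_ctr *ctr,
					 const u8 *buf, size_t len)
{
	struct sk_buff *skb = alloc_skb(len, GFP_ATOMIC);

	if (!skb)
		return;
	skb_put_data(skb, buf, len);
	/* the application id is taken from the CAPI message header */
	capi_ctr_handle_message(ctr, CAPIMSG_APPID(skb->data), skb);
}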
/**
* capi_ctr_ready() - signal CAPI controller ready
* @ctr: controller descriptor structure.
*
* Called by hardware driver to signal that the controller is up and running.
*/
void capi_ctr_ready(struct capi_ctr *ctr)
{
printk(KERN_NOTICE "kcapi: controller [%03d] \"%s\" ready.\n",
ctr->cnr, ctr->name);
notify_push(CAPICTR_UP, ctr->cnr);
}
EXPORT_SYMBOL(capi_ctr_ready);
/**
* capi_ctr_down() - signal CAPI controller not ready
* @ctr: controller descriptor structure.
*
* Called by hardware driver to signal that the controller is down and
* unavailable for use.
*/
void capi_ctr_down(struct capi_ctr *ctr)
{
printk(KERN_NOTICE "kcapi: controller [%03d] down.\n", ctr->cnr);
notify_push(CAPICTR_DOWN, ctr->cnr);
}
EXPORT_SYMBOL(capi_ctr_down);
/* ------------------------------------------------------------- */
/**
* attach_capi_ctr() - register CAPI controller
* @ctr: controller descriptor structure.
*
* Called by hardware driver to register a controller with the CAPI subsystem.
* Return value: 0 on success, error code < 0 on error
*/
int attach_capi_ctr(struct capi_ctr *ctr)
{
int i;
mutex_lock(&capi_controller_lock);
for (i = 0; i < CAPI_MAXCONTR; i++) {
if (!capi_controller[i])
break;
}
if (i == CAPI_MAXCONTR) {
mutex_unlock(&capi_controller_lock);
printk(KERN_ERR "kcapi: out of controller slots\n");
return -EBUSY;
}
capi_controller[i] = ctr;
ctr->nrecvctlpkt = 0;
ctr->nrecvdatapkt = 0;
ctr->nsentctlpkt = 0;
ctr->nsentdatapkt = 0;
ctr->cnr = i + 1;
ctr->state = CAPI_CTR_DETECTED;
ctr->blocked = 0;
ctr->traceflag = showcapimsgs;
sprintf(ctr->procfn, "capi/controllers/%d", ctr->cnr);
ctr->procent = proc_create_single_data(ctr->procfn, 0, NULL,
ctr->proc_show, ctr);
ncontrollers++;
mutex_unlock(&capi_controller_lock);
printk(KERN_NOTICE "kcapi: controller [%03d]: %s attached\n",
ctr->cnr, ctr->name);
return 0;
}
EXPORT_SYMBOL(attach_capi_ctr);
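/*
 * Illustrative sketch (editorial addition, not part of the original file):
 * the minimum a hardware driver typically fills in before calling
 * attach_capi_ctr(). All example_* callbacks are hypothetical no-op stubs;
 * optional fields such as proc_show are omitted here, and real drivers
 * forward these operations to their hardware.
 */
static void __maybe_unused example_register_appl(struct capi_ctr *ctr,
						 u16 applid,
						 capi_register_params *rp)
{
	/* tell the hardware about the new application */
}

static void __maybe_unused example_release_appl(struct capi_ctr *ctr,
						u16 applid)
{
	/* tell the hardware the application is gone */
}

static u16 __maybe_unused example_send_message(struct capi_ctr *ctr,
					       struct sk_buff *skb)
{
	/* a real driver queues the skb towards the hardware */
	kfree_skb(skb);
	return CAPI_NOERROR;
}

static int __maybe_unused example_attach(struct capi_ctr *ctr)
{
	ctr->owner = THIS_MODULE;
	ctr->driver_name = "example";
	strscpy(ctr->name, "example-ctr", sizeof(ctr->name));
	ctr->register_appl = example_register_appl;
	ctr->release_appl = example_release_appl;
	ctr->send_message = example_send_message;

	return attach_capi_ctr(ctr);
}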
/**
* detach_capi_ctr() - unregister CAPI controller
* @ctr: controller descriptor structure.
*
* Called by hardware driver to remove the registration of a controller
* with the CAPI subsystem.
* Return value: 0 on success, error code < 0 on error
*/
int detach_capi_ctr(struct capi_ctr *ctr)
{
int err = 0;
mutex_lock(&capi_controller_lock);
ctr_down(ctr, CAPI_CTR_DETACHED);
if (ctr->cnr < 1 || ctr->cnr - 1 >= CAPI_MAXCONTR) {
err = -EINVAL;
goto unlock_out;
}
if (capi_controller[ctr->cnr - 1] != ctr) {
err = -EINVAL;
goto unlock_out;
}
capi_controller[ctr->cnr - 1] = NULL;
ncontrollers--;
if (ctr->procent)
remove_proc_entry(ctr->procfn, NULL);
printk(KERN_NOTICE "kcapi: controller [%03d]: %s unregistered\n",
ctr->cnr, ctr->name);
unlock_out:
mutex_unlock(&capi_controller_lock);
return err;
}
EXPORT_SYMBOL(detach_capi_ctr);
/* ------------------------------------------------------------- */
/* -------- CAPI2.0 Interface ---------------------------------- */
/* ------------------------------------------------------------- */
/**
* capi20_isinstalled() - CAPI 2.0 operation CAPI_INSTALLED
*
* Return value: CAPI result code (CAPI_NOERROR if at least one ISDN controller
* is ready for use, CAPI_REGNOTINSTALLED otherwise)
*/
u16 capi20_isinstalled(void)
{
u16 ret = CAPI_REGNOTINSTALLED;
int i;
mutex_lock(&capi_controller_lock);
for (i = 0; i < CAPI_MAXCONTR; i++)
if (capi_controller[i] &&
capi_controller[i]->state == CAPI_CTR_RUNNING) {
ret = CAPI_NOERROR;
break;
}
mutex_unlock(&capi_controller_lock);
return ret;
}
/**
* capi20_register() - CAPI 2.0 operation CAPI_REGISTER
* @ap: CAPI application descriptor structure.
*
* Register an application's presence with CAPI.
* A unique application ID is assigned and stored in @ap->applid.
* After this function returns successfully, the message receive
* callback function @ap->recv_message() may be called at any time
* until capi20_release() has been called for the same @ap.
* Return value: CAPI result code
*/
u16 capi20_register(struct capi20_appl *ap)
{
int i;
u16 applid;
DBG("");
if (ap->rparam.datablklen < 128)
return CAPI_LOGBLKSIZETOSMALL;
ap->nrecvctlpkt = 0;
ap->nrecvdatapkt = 0;
ap->nsentctlpkt = 0;
ap->nsentdatapkt = 0;
mutex_init(&ap->recv_mtx);
skb_queue_head_init(&ap->recv_queue);
INIT_WORK(&ap->recv_work, recv_handler);
ap->release_in_progress = 0;
mutex_lock(&capi_controller_lock);
for (applid = 1; applid <= CAPI_MAXAPPL; applid++) {
if (capi_applications[applid - 1] == NULL)
break;
}
if (applid > CAPI_MAXAPPL) {
mutex_unlock(&capi_controller_lock);
return CAPI_TOOMANYAPPLS;
}
ap->applid = applid;
capi_applications[applid - 1] = ap;
for (i = 0; i < CAPI_MAXCONTR; i++) {
if (!capi_controller[i] ||
capi_controller[i]->state != CAPI_CTR_RUNNING)
continue;
register_appl(capi_controller[i], applid, &ap->rparam);
}
mutex_unlock(&capi_controller_lock);
if (showcapimsgs & 1) {
printk(KERN_DEBUG "kcapi: appl %d up\n", applid);
}
return CAPI_NOERROR;
}
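/*
 * Illustrative sketch (editorial addition, not part of the original file):
 * a minimal in-kernel registration against the interface documented above.
 * The callback and the register parameter values are arbitrary examples;
 * the only documented hard requirement is datablklen >= 128.
 */
static void __maybe_unused example_recv_message(struct capi20_appl *ap,
						struct sk_buff *skb)
{
	/* a real application would parse the CAPI message here */
	kfree_skb(skb);
}

static int __maybe_unused example_capi_register(struct capi20_appl *ap)
{
	memset(ap, 0, sizeof(*ap));
	ap->rparam.level3cnt = 2;	/* example: two logical connections */
	ap->rparam.datablkcnt = 8;	/* example: eight data blocks */
	ap->rparam.datablklen = 2048;	/* must be at least 128 */
	ap->recv_message = example_recv_message;

	return capi20_register(ap) == CAPI_NOERROR ? 0 : -EIO;
}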
/**
* capi20_release() - CAPI 2.0 operation CAPI_RELEASE
* @ap: CAPI application descriptor structure.
*
* Terminate an application's registration with CAPI.
* After this function returns successfully, the message receive
* callback function @ap->recv_message() will no longer be called.
* Return value: CAPI result code
*/
u16 capi20_release(struct capi20_appl *ap)
{
int i;
DBG("applid %#x", ap->applid);
mutex_lock(&capi_controller_lock);
ap->release_in_progress = 1;
capi_applications[ap->applid - 1] = NULL;
synchronize_rcu();
for (i = 0; i < CAPI_MAXCONTR; i++) {
if (!capi_controller[i] ||
capi_controller[i]->state != CAPI_CTR_RUNNING)
continue;
release_appl(capi_controller[i], ap->applid);
}
mutex_unlock(&capi_controller_lock);
flush_workqueue(kcapi_wq);
skb_queue_purge(&ap->recv_queue);
if (showcapimsgs & 1) {
printk(KERN_DEBUG "kcapi: appl %d down\n", ap->applid);
}
return CAPI_NOERROR;
}
/**
* capi20_put_message() - CAPI 2.0 operation CAPI_PUT_MESSAGE
* @ap: CAPI application descriptor structure.
* @skb: CAPI message.
*
* Transfer a single message to CAPI.
* Return value: CAPI result code
*/
u16 capi20_put_message(struct capi20_appl *ap, struct sk_buff *skb)
{
struct capi_ctr *ctr;
int showctl = 0;
u8 cmd, subcmd;
DBG("applid %#x", ap->applid);
if (ncontrollers == 0)
return CAPI_REGNOTINSTALLED;
if ((ap->applid == 0) || ap->release_in_progress)
return CAPI_ILLAPPNR;
if (skb->len < 12
|| !capi_cmd_valid(CAPIMSG_COMMAND(skb->data))
|| !capi_subcmd_valid(CAPIMSG_SUBCOMMAND(skb->data)))
return CAPI_ILLCMDORSUBCMDORMSGTOSMALL;
/*
* The controller reference is protected by the existence of the
* application passed to us. We assume that the caller properly
* synchronizes this service with capi20_release.
*/
ctr = get_capi_ctr_by_nr(CAPIMSG_CONTROLLER(skb->data));
if (!ctr || ctr->state != CAPI_CTR_RUNNING)
return CAPI_REGNOTINSTALLED;
if (ctr->blocked)
return CAPI_SENDQUEUEFULL;
cmd = CAPIMSG_COMMAND(skb->data);
subcmd = CAPIMSG_SUBCOMMAND(skb->data);
if (cmd == CAPI_DATA_B3 && subcmd == CAPI_REQ) {
ctr->nsentdatapkt++;
ap->nsentdatapkt++;
if (ctr->traceflag > 2)
showctl |= 2;
} else {
ctr->nsentctlpkt++;
ap->nsentctlpkt++;
if (ctr->traceflag)
showctl |= 2;
}
showctl |= (ctr->traceflag & 1);
if (showctl & 2) {
if (showctl & 1) {
printk(KERN_DEBUG "kcapi: put [%03d] id#%d %s len=%u\n",
CAPIMSG_CONTROLLER(skb->data),
CAPIMSG_APPID(skb->data),
capi_cmd2str(cmd, subcmd),
CAPIMSG_LEN(skb->data));
} else {
_cdebbuf *cdb = capi_message2str(skb->data);
if (cdb) {
printk(KERN_DEBUG "kcapi: put [%03d] %s\n",
CAPIMSG_CONTROLLER(skb->data),
cdb->buf);
cdebbuf_free(cdb);
} else
printk(KERN_DEBUG "kcapi: put [%03d] id#%d %s len=%u cannot trace\n",
CAPIMSG_CONTROLLER(skb->data),
CAPIMSG_APPID(skb->data),
capi_cmd2str(cmd, subcmd),
CAPIMSG_LEN(skb->data));
}
}
return ctr->send_message(ctr, skb);
}
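/*
 * Illustrative sketch (editorial addition, not part of the original file):
 * building a minimal LISTEN_REQ by hand and submitting it through
 * capi20_put_message(). The message number (1) and CIP mask (0x1fff03ff)
 * are arbitrary example values; real applications maintain their own
 * message numbering.
 */
static int __maybe_unused example_send_listen_req(struct capi20_appl *ap,
						  u32 contr)
{
	const unsigned int len = 8 + 4 + 4 + 4 + 4 + 1 + 1;
	struct sk_buff *skb = alloc_skb(len, GFP_KERNEL);
	u8 *p;

	if (!skb)
		return -ENOMEM;
	p = skb_put(skb, len);
	memset(p, 0, len);
	capimsg_setu16(p, 0, len);		/* total message length */
	capimsg_setu16(p, 2, ap->applid);	/* ApplID */
	capimsg_setu8 (p, 4, CAPI_LISTEN);	/* command */
	capimsg_setu8 (p, 5, CAPI_REQ);		/* subcommand */
	capimsg_setu16(p, 6, 1);		/* message number (example) */
	capimsg_setu32(p, 8, contr);		/* controller address */
	capimsg_setu32(p, 12, 0);		/* InfoMask */
	capimsg_setu32(p, 16, 0x1fff03ff);	/* CIP mask (example) */
	capimsg_setu32(p, 20, 0);		/* CIP mask 2 */
	/* bytes 24/25: empty CallingPartyNumber/-Subaddress structs */

	if (capi20_put_message(ap, skb) != CAPI_NOERROR) {
		kfree_skb(skb);
		return -EIO;
	}
	return 0;
}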
/**
* capi20_get_manufacturer() - CAPI 2.0 operation CAPI_GET_MANUFACTURER
* @contr: controller number.
* @buf: result buffer (64 bytes).
*
* Retrieve information about the manufacturer of the specified ISDN controller
* or (for @contr == 0) the driver itself.
* Return value: CAPI result code
*/
u16 capi20_get_manufacturer(u32 contr, u8 buf[CAPI_MANUFACTURER_LEN])
{
struct capi_ctr *ctr;
u16 ret;
if (contr == 0) {
strncpy(buf, capi_manufakturer, CAPI_MANUFACTURER_LEN);
return CAPI_NOERROR;
}
mutex_lock(&capi_controller_lock);
ctr = get_capi_ctr_by_nr(contr);
if (ctr && ctr->state == CAPI_CTR_RUNNING) {
strncpy(buf, ctr->manu, CAPI_MANUFACTURER_LEN);
ret = CAPI_NOERROR;
} else
ret = CAPI_REGNOTINSTALLED;
mutex_unlock(&capi_controller_lock);
return ret;
}
/**
* capi20_get_version() - CAPI 2.0 operation CAPI_GET_VERSION
* @contr: controller number.
* @verp: result structure.
*
* Retrieve version information for the specified ISDN controller
* or (for @contr == 0) the driver itself.
* Return value: CAPI result code
*/
u16 capi20_get_version(u32 contr, struct capi_version *verp)
{
struct capi_ctr *ctr;
u16 ret;
if (contr == 0) {
*verp = driver_version;
return CAPI_NOERROR;
}
mutex_lock(&capi_controller_lock);
ctr = get_capi_ctr_by_nr(contr);
if (ctr && ctr->state == CAPI_CTR_RUNNING) {
memcpy(verp, &ctr->version, sizeof(capi_version));
ret = CAPI_NOERROR;
} else
ret = CAPI_REGNOTINSTALLED;
mutex_unlock(&capi_controller_lock);
return ret;
}
/**
* capi20_get_serial() - CAPI 2.0 operation CAPI_GET_SERIAL_NUMBER
* @contr: controller number.
* @serial: result buffer (8 bytes).
*
* Retrieve the serial number of the specified ISDN controller
* or (for @contr == 0) the driver itself.
* Return value: CAPI result code
*/
u16 capi20_get_serial(u32 contr, u8 serial[CAPI_SERIAL_LEN])
{
struct capi_ctr *ctr;
u16 ret;
if (contr == 0) {
strscpy(serial, driver_serial, CAPI_SERIAL_LEN);
return CAPI_NOERROR;
}
mutex_lock(&capi_controller_lock);
ctr = get_capi_ctr_by_nr(contr);
if (ctr && ctr->state == CAPI_CTR_RUNNING) {
strscpy(serial, ctr->serial, CAPI_SERIAL_LEN);
ret = CAPI_NOERROR;
} else
ret = CAPI_REGNOTINSTALLED;
mutex_unlock(&capi_controller_lock);
return ret;
}
/**
* capi20_get_profile() - CAPI 2.0 operation CAPI_GET_PROFILE
* @contr: controller number.
* @profp: result structure.
*
* Retrieve capability information for the specified ISDN controller
* or (for @contr == 0) the number of installed controllers.
* Return value: CAPI result code
*/
u16 capi20_get_profile(u32 contr, struct capi_profile *profp)
{
struct capi_ctr *ctr;
u16 ret;
if (contr == 0) {
profp->ncontroller = ncontrollers;
return CAPI_NOERROR;
}
mutex_lock(&capi_controller_lock);
ctr = get_capi_ctr_by_nr(contr);
if (ctr && ctr->state == CAPI_CTR_RUNNING) {
memcpy(profp, &ctr->profile, sizeof(struct capi_profile));
ret = CAPI_NOERROR;
} else
ret = CAPI_REGNOTINSTALLED;
mutex_unlock(&capi_controller_lock);
return ret;
}
/**
* capi20_manufacturer() - CAPI 2.0 operation CAPI_MANUFACTURER
* @cmd: command.
* @data: parameter.
*
* Perform manufacturer specific command.
* Return value: CAPI result code
*/
int capi20_manufacturer(unsigned long cmd, void __user *data)
{
struct capi_ctr *ctr;
int retval;
switch (cmd) {
case KCAPI_CMD_TRACE:
{
kcapi_flagdef fdef;
if (copy_from_user(&fdef, data, sizeof(kcapi_flagdef)))
return -EFAULT;
mutex_lock(&capi_controller_lock);
ctr = get_capi_ctr_by_nr(fdef.contr);
if (ctr) {
ctr->traceflag = fdef.flag;
printk(KERN_INFO "kcapi: contr [%03d] set trace=%d\n",
ctr->cnr, ctr->traceflag);
retval = 0;
} else
retval = -ESRCH;
mutex_unlock(&capi_controller_lock);
return retval;
}
default:
printk(KERN_ERR "kcapi: manufacturer command %lu unknown.\n",
cmd);
break;
}
return -EINVAL;
}
/* ------------------------------------------------------------- */
/* -------- Init & Cleanup ------------------------------------- */
/* ------------------------------------------------------------- */
/*
* init / exit functions
*/
int __init kcapi_init(void)
{
int err;
kcapi_wq = alloc_workqueue("kcapi", 0, 0);
if (!kcapi_wq)
return -ENOMEM;
err = cdebug_init();
if (err) {
destroy_workqueue(kcapi_wq);
return err;
}
kcapi_proc_init();
return 0;
}
void kcapi_exit(void)
{
kcapi_proc_exit();
cdebug_exit();
destroy_workqueue(kcapi_wq);
}
| linux-master | drivers/isdn/capi/kcapi.c |
/* $Id: capiutil.c,v 1.13.6.4 2001/09/23 22:24:33 kai Exp $
*
* CAPI 2.0 convert capi message to capi message struct
*
* From CAPI 2.0 Development Kit AVM 1995 (msg.c)
* Rewritten for Linux 1996 by Carsten Paeth <[email protected]>
*
* This software may be used and distributed according to the terms
* of the GNU General Public License, incorporated herein by reference.
*
*/
#include <linux/module.h>
#include <linux/string.h>
#include <linux/ctype.h>
#include <linux/stddef.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/isdn/capiutil.h>
#include <linux/slab.h>
#include "kcapi.h"
/* from CAPI2.0 DDK AVM Berlin GmbH */
typedef struct {
int typ;
size_t off;
} _cdef;
#define _CBYTE 1
#define _CWORD 2
#define _CDWORD 3
#define _CSTRUCT 4
#define _CMSTRUCT 5
#define _CEND 6
static _cdef cdef[] =
{
/*00 */
{_CEND},
/*01 */
{_CEND},
/*02 */
{_CEND},
/*03 */
{_CDWORD, offsetof(_cmsg, adr.adrController)},
/*04 */
{_CMSTRUCT, offsetof(_cmsg, AdditionalInfo)},
/*05 */
{_CSTRUCT, offsetof(_cmsg, B1configuration)},
/*06 */
{_CWORD, offsetof(_cmsg, B1protocol)},
/*07 */
{_CSTRUCT, offsetof(_cmsg, B2configuration)},
/*08 */
{_CWORD, offsetof(_cmsg, B2protocol)},
/*09 */
{_CSTRUCT, offsetof(_cmsg, B3configuration)},
/*0a */
{_CWORD, offsetof(_cmsg, B3protocol)},
/*0b */
{_CSTRUCT, offsetof(_cmsg, BC)},
/*0c */
{_CSTRUCT, offsetof(_cmsg, BChannelinformation)},
/*0d */
{_CMSTRUCT, offsetof(_cmsg, BProtocol)},
/*0e */
{_CSTRUCT, offsetof(_cmsg, CalledPartyNumber)},
/*0f */
{_CSTRUCT, offsetof(_cmsg, CalledPartySubaddress)},
/*10 */
{_CSTRUCT, offsetof(_cmsg, CallingPartyNumber)},
/*11 */
{_CSTRUCT, offsetof(_cmsg, CallingPartySubaddress)},
/*12 */
{_CDWORD, offsetof(_cmsg, CIPmask)},
/*13 */
{_CDWORD, offsetof(_cmsg, CIPmask2)},
/*14 */
{_CWORD, offsetof(_cmsg, CIPValue)},
/*15 */
{_CDWORD, offsetof(_cmsg, Class)},
/*16 */
{_CSTRUCT, offsetof(_cmsg, ConnectedNumber)},
/*17 */
{_CSTRUCT, offsetof(_cmsg, ConnectedSubaddress)},
/*18 */
{_CDWORD, offsetof(_cmsg, Data)},
/*19 */
{_CWORD, offsetof(_cmsg, DataHandle)},
/*1a */
{_CWORD, offsetof(_cmsg, DataLength)},
/*1b */
{_CSTRUCT, offsetof(_cmsg, FacilityConfirmationParameter)},
/*1c */
{_CSTRUCT, offsetof(_cmsg, Facilitydataarray)},
/*1d */
{_CSTRUCT, offsetof(_cmsg, FacilityIndicationParameter)},
/*1e */
{_CSTRUCT, offsetof(_cmsg, FacilityRequestParameter)},
/*1f */
{_CWORD, offsetof(_cmsg, FacilitySelector)},
/*20 */
{_CWORD, offsetof(_cmsg, Flags)},
/*21 */
{_CDWORD, offsetof(_cmsg, Function)},
/*22 */
{_CSTRUCT, offsetof(_cmsg, HLC)},
/*23 */
{_CWORD, offsetof(_cmsg, Info)},
/*24 */
{_CSTRUCT, offsetof(_cmsg, InfoElement)},
/*25 */
{_CDWORD, offsetof(_cmsg, InfoMask)},
/*26 */
{_CWORD, offsetof(_cmsg, InfoNumber)},
/*27 */
{_CSTRUCT, offsetof(_cmsg, Keypadfacility)},
/*28 */
{_CSTRUCT, offsetof(_cmsg, LLC)},
/*29 */
{_CSTRUCT, offsetof(_cmsg, ManuData)},
/*2a */
{_CDWORD, offsetof(_cmsg, ManuID)},
/*2b */
{_CSTRUCT, offsetof(_cmsg, NCPI)},
/*2c */
{_CWORD, offsetof(_cmsg, Reason)},
/*2d */
{_CWORD, offsetof(_cmsg, Reason_B3)},
/*2e */
{_CWORD, offsetof(_cmsg, Reject)},
/*2f */
{_CSTRUCT, offsetof(_cmsg, Useruserdata)}
};
static unsigned char *cpars[] =
{
/* ALERT_REQ */ [0x01] = "\x03\x04\x0c\x27\x2f\x1c\x01\x01",
/* CONNECT_REQ */ [0x02] = "\x03\x14\x0e\x10\x0f\x11\x0d\x06\x08\x0a\x05\x07\x09\x01\x0b\x28\x22\x04\x0c\x27\x2f\x1c\x01\x01",
/* DISCONNECT_REQ */ [0x04] = "\x03\x04\x0c\x27\x2f\x1c\x01\x01",
/* LISTEN_REQ */ [0x05] = "\x03\x25\x12\x13\x10\x11\x01",
/* INFO_REQ */ [0x08] = "\x03\x0e\x04\x0c\x27\x2f\x1c\x01\x01",
/* FACILITY_REQ */ [0x09] = "\x03\x1f\x1e\x01",
/* SELECT_B_PROTOCOL_REQ */ [0x0a] = "\x03\x0d\x06\x08\x0a\x05\x07\x09\x01\x01",
/* CONNECT_B3_REQ */ [0x0b] = "\x03\x2b\x01",
/* DISCONNECT_B3_REQ */ [0x0d] = "\x03\x2b\x01",
/* DATA_B3_REQ */ [0x0f] = "\x03\x18\x1a\x19\x20\x01",
/* RESET_B3_REQ */ [0x10] = "\x03\x2b\x01",
/* ALERT_CONF */ [0x13] = "\x03\x23\x01",
/* CONNECT_CONF */ [0x14] = "\x03\x23\x01",
/* DISCONNECT_CONF */ [0x16] = "\x03\x23\x01",
/* LISTEN_CONF */ [0x17] = "\x03\x23\x01",
/* MANUFACTURER_REQ */ [0x18] = "\x03\x2a\x15\x21\x29\x01",
/* INFO_CONF */ [0x1a] = "\x03\x23\x01",
/* FACILITY_CONF */ [0x1b] = "\x03\x23\x1f\x1b\x01",
/* SELECT_B_PROTOCOL_CONF */ [0x1c] = "\x03\x23\x01",
/* CONNECT_B3_CONF */ [0x1d] = "\x03\x23\x01",
/* DISCONNECT_B3_CONF */ [0x1f] = "\x03\x23\x01",
/* DATA_B3_CONF */ [0x21] = "\x03\x19\x23\x01",
/* RESET_B3_CONF */ [0x22] = "\x03\x23\x01",
/* CONNECT_IND */ [0x26] = "\x03\x14\x0e\x10\x0f\x11\x0b\x28\x22\x04\x0c\x27\x2f\x1c\x01\x01",
/* CONNECT_ACTIVE_IND */ [0x27] = "\x03\x16\x17\x28\x01",
/* DISCONNECT_IND */ [0x28] = "\x03\x2c\x01",
/* MANUFACTURER_CONF */ [0x2a] = "\x03\x2a\x15\x21\x29\x01",
/* INFO_IND */ [0x2c] = "\x03\x26\x24\x01",
/* FACILITY_IND */ [0x2d] = "\x03\x1f\x1d\x01",
/* CONNECT_B3_IND */ [0x2f] = "\x03\x2b\x01",
/* CONNECT_B3_ACTIVE_IND */ [0x30] = "\x03\x2b\x01",
/* DISCONNECT_B3_IND */ [0x31] = "\x03\x2d\x2b\x01",
/* DATA_B3_IND */ [0x33] = "\x03\x18\x1a\x19\x20\x01",
/* RESET_B3_IND */ [0x34] = "\x03\x2b\x01",
/* CONNECT_B3_T90_ACTIVE_IND */ [0x35] = "\x03\x2b\x01",
/* CONNECT_RESP */ [0x38] = "\x03\x2e\x0d\x06\x08\x0a\x05\x07\x09\x01\x16\x17\x28\x04\x0c\x27\x2f\x1c\x01\x01",
/* CONNECT_ACTIVE_RESP */ [0x39] = "\x03\x01",
/* DISCONNECT_RESP */ [0x3a] = "\x03\x01",
/* MANUFACTURER_IND */ [0x3c] = "\x03\x2a\x15\x21\x29\x01",
/* INFO_RESP */ [0x3e] = "\x03\x01",
/* FACILITY_RESP */ [0x3f] = "\x03\x1f\x01",
/* CONNECT_B3_RESP */ [0x41] = "\x03\x2e\x2b\x01",
/* CONNECT_B3_ACTIVE_RESP */ [0x42] = "\x03\x01",
/* DISCONNECT_B3_RESP */ [0x43] = "\x03\x01",
/* DATA_B3_RESP */ [0x45] = "\x03\x19\x01",
/* RESET_B3_RESP */ [0x46] = "\x03\x01",
/* CONNECT_B3_T90_ACTIVE_RESP */ [0x47] = "\x03\x01",
/* MANUFACTURER_RESP */ [0x4e] = "\x03\x2a\x15\x21\x29\x01",
};
/*-------------------------------------------------------*/
#define byteTLcpy(x, y) *(u8 *)(x) = *(u8 *)(y);
#define wordTLcpy(x, y) *(u16 *)(x) = *(u16 *)(y);
#define dwordTLcpy(x, y) memcpy(x, y, 4);
#define structTLcpy(x, y, l) memcpy(x, y, l)
#define structTLcpyovl(x, y, l) memmove(x, y, l)
#define byteTRcpy(x, y) *(u8 *)(y) = *(u8 *)(x);
#define wordTRcpy(x, y) *(u16 *)(y) = *(u16 *)(x);
#define dwordTRcpy(x, y) memcpy(y, x, 4);
#define structTRcpy(x, y, l) memcpy(y, x, l)
#define structTRcpyovl(x, y, l) memmove(y, x, l)
/*-------------------------------------------------------*/
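/*
 * Added explanatory comment (editorial): command_2_index() maps a CAPI
 * command/subcommand pair to an index into cpars[]/mnames[] above. Commands
 * with bit 7 set (the *_B3 and MANUFACTURER groups) are folded to
 * 0x09 + (low nibble), SELECT_B_PROTOCOL (0x41) to 0x0a, anything out of
 * range to 0, and the subcommand (REQ/CONF/IND/RESP) then offsets the
 * result in strides of 0x12.
 */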
static unsigned command_2_index(u8 c, u8 sc)
{
if (c & 0x80)
c = 0x9 + (c & 0x0f);
else if (c == 0x41)
c = 0x9 + 0x1;
if (c > 0x18)
c = 0x00;
return (sc & 3) * (0x9 + 0x9) + c;
}
/**
* capi_cmd2par() - find parameter string for CAPI 2.0 command/subcommand
* @cmd: command number
* @subcmd: subcommand number
*
* Return value: static string, NULL if command/subcommand unknown
*/
static unsigned char *capi_cmd2par(u8 cmd, u8 subcmd)
{
return cpars[command_2_index(cmd, subcmd)];
}
/*-------------------------------------------------------*/
#define TYP (cdef[cmsg->par[cmsg->p]].typ)
#define OFF (((u8 *)cmsg) + cdef[cmsg->par[cmsg->p]].off)
static void jumpcstruct(_cmsg *cmsg)
{
unsigned layer;
for (cmsg->p++, layer = 1; layer;) {
/* $$$$$ assert (cmsg->p); */
cmsg->p++;
switch (TYP) {
case _CMSTRUCT:
layer++;
break;
case _CEND:
layer--;
break;
}
}
}
/*-------------------------------------------------------*/
static char *mnames[] =
{
[0x01] = "ALERT_REQ",
[0x02] = "CONNECT_REQ",
[0x04] = "DISCONNECT_REQ",
[0x05] = "LISTEN_REQ",
[0x08] = "INFO_REQ",
[0x09] = "FACILITY_REQ",
[0x0a] = "SELECT_B_PROTOCOL_REQ",
[0x0b] = "CONNECT_B3_REQ",
[0x0d] = "DISCONNECT_B3_REQ",
[0x0f] = "DATA_B3_REQ",
[0x10] = "RESET_B3_REQ",
[0x13] = "ALERT_CONF",
[0x14] = "CONNECT_CONF",
[0x16] = "DISCONNECT_CONF",
[0x17] = "LISTEN_CONF",
[0x18] = "MANUFACTURER_REQ",
[0x1a] = "INFO_CONF",
[0x1b] = "FACILITY_CONF",
[0x1c] = "SELECT_B_PROTOCOL_CONF",
[0x1d] = "CONNECT_B3_CONF",
[0x1f] = "DISCONNECT_B3_CONF",
[0x21] = "DATA_B3_CONF",
[0x22] = "RESET_B3_CONF",
[0x26] = "CONNECT_IND",
[0x27] = "CONNECT_ACTIVE_IND",
[0x28] = "DISCONNECT_IND",
[0x2a] = "MANUFACTURER_CONF",
[0x2c] = "INFO_IND",
[0x2d] = "FACILITY_IND",
[0x2f] = "CONNECT_B3_IND",
[0x30] = "CONNECT_B3_ACTIVE_IND",
[0x31] = "DISCONNECT_B3_IND",
[0x33] = "DATA_B3_IND",
[0x34] = "RESET_B3_IND",
[0x35] = "CONNECT_B3_T90_ACTIVE_IND",
[0x38] = "CONNECT_RESP",
[0x39] = "CONNECT_ACTIVE_RESP",
[0x3a] = "DISCONNECT_RESP",
[0x3c] = "MANUFACTURER_IND",
[0x3e] = "INFO_RESP",
[0x3f] = "FACILITY_RESP",
[0x41] = "CONNECT_B3_RESP",
[0x42] = "CONNECT_B3_ACTIVE_RESP",
[0x43] = "DISCONNECT_B3_RESP",
[0x45] = "DATA_B3_RESP",
[0x46] = "RESET_B3_RESP",
[0x47] = "CONNECT_B3_T90_ACTIVE_RESP",
[0x4e] = "MANUFACTURER_RESP"
};
/**
* capi_cmd2str() - convert CAPI 2.0 command/subcommand number to name
* @cmd: command number
* @subcmd: subcommand number
*
* Return value: static string
*/
char *capi_cmd2str(u8 cmd, u8 subcmd)
{
char *result;
result = mnames[command_2_index(cmd, subcmd)];
if (result == NULL)
result = "INVALID_COMMAND";
return result;
}
/*-------------------------------------------------------*/
#ifdef CONFIG_CAPI_TRACE
/*-------------------------------------------------------*/
static char *pnames[] =
{
/*00 */ NULL,
/*01 */ NULL,
/*02 */ NULL,
/*03 */ "Controller/PLCI/NCCI",
/*04 */ "AdditionalInfo",
/*05 */ "B1configuration",
/*06 */ "B1protocol",
/*07 */ "B2configuration",
/*08 */ "B2protocol",
/*09 */ "B3configuration",
/*0a */ "B3protocol",
/*0b */ "BC",
/*0c */ "BChannelinformation",
/*0d */ "BProtocol",
/*0e */ "CalledPartyNumber",
/*0f */ "CalledPartySubaddress",
/*10 */ "CallingPartyNumber",
/*11 */ "CallingPartySubaddress",
/*12 */ "CIPmask",
/*13 */ "CIPmask2",
/*14 */ "CIPValue",
/*15 */ "Class",
/*16 */ "ConnectedNumber",
/*17 */ "ConnectedSubaddress",
/*18 */ "Data32",
/*19 */ "DataHandle",
/*1a */ "DataLength",
/*1b */ "FacilityConfirmationParameter",
/*1c */ "Facilitydataarray",
/*1d */ "FacilityIndicationParameter",
/*1e */ "FacilityRequestParameter",
/*1f */ "FacilitySelector",
/*20 */ "Flags",
/*21 */ "Function",
/*22 */ "HLC",
/*23 */ "Info",
/*24 */ "InfoElement",
/*25 */ "InfoMask",
/*26 */ "InfoNumber",
/*27 */ "Keypadfacility",
/*28 */ "LLC",
/*29 */ "ManuData",
/*2a */ "ManuID",
/*2b */ "NCPI",
/*2c */ "Reason",
/*2d */ "Reason_B3",
/*2e */ "Reject",
/*2f */ "Useruserdata"
};
#include <linux/stdarg.h>
/*-------------------------------------------------------*/
static _cdebbuf *bufprint(_cdebbuf *cdb, char *fmt, ...)
{
va_list f;
size_t n, r;
if (!cdb)
return NULL;
va_start(f, fmt);
r = cdb->size - cdb->pos;
n = vsnprintf(cdb->p, r, fmt, f);
va_end(f);
if (n >= r) {
/* truncated, need bigger buffer */
size_t ns = 2 * cdb->size;
u_char *nb;
while ((ns - cdb->pos) <= n)
ns *= 2;
nb = kmalloc(ns, GFP_ATOMIC);
if (!nb) {
cdebbuf_free(cdb);
return NULL;
}
memcpy(nb, cdb->buf, cdb->pos);
kfree(cdb->buf);
nb[cdb->pos] = 0;
cdb->buf = nb;
cdb->p = cdb->buf + cdb->pos;
cdb->size = ns;
va_start(f, fmt);
r = cdb->size - cdb->pos;
n = vsnprintf(cdb->p, r, fmt, f);
va_end(f);
}
cdb->p += n;
cdb->pos += n;
return cdb;
}
static _cdebbuf *printstructlen(_cdebbuf *cdb, u8 *m, unsigned len)
{
unsigned hex = 0;
if (!cdb)
return NULL;
for (; len; len--, m++)
if (isalnum(*m) || *m == ' ') {
if (hex)
cdb = bufprint(cdb, ">");
cdb = bufprint(cdb, "%c", *m);
hex = 0;
} else {
if (!hex)
cdb = bufprint(cdb, "<%02x", *m);
else
cdb = bufprint(cdb, " %02x", *m);
hex = 1;
}
if (hex)
cdb = bufprint(cdb, ">");
return cdb;
}
static _cdebbuf *printstruct(_cdebbuf *cdb, u8 *m)
{
unsigned len;
if (m[0] != 0xff) {
len = m[0];
m += 1;
} else {
len = ((u16 *) (m + 1))[0];
m += 3;
}
cdb = printstructlen(cdb, m, len);
return cdb;
}
/*-------------------------------------------------------*/
#define NAME (pnames[cmsg->par[cmsg->p]])
static _cdebbuf *protocol_message_2_pars(_cdebbuf *cdb, _cmsg *cmsg, int level)
{
if (!cmsg->par)
return NULL; /* invalid command/subcommand */
for (; TYP != _CEND; cmsg->p++) {
int slen = 29 + 3 - level;
int i;
if (!cdb)
return NULL;
cdb = bufprint(cdb, " ");
for (i = 0; i < level - 1; i++)
cdb = bufprint(cdb, " ");
switch (TYP) {
case _CBYTE:
cdb = bufprint(cdb, "%-*s = 0x%x\n", slen, NAME, *(u8 *) (cmsg->m + cmsg->l));
cmsg->l++;
break;
case _CWORD:
cdb = bufprint(cdb, "%-*s = 0x%x\n", slen, NAME, *(u16 *) (cmsg->m + cmsg->l));
cmsg->l += 2;
break;
case _CDWORD:
cdb = bufprint(cdb, "%-*s = 0x%lx\n", slen, NAME, *(u32 *) (cmsg->m + cmsg->l));
cmsg->l += 4;
break;
case _CSTRUCT:
cdb = bufprint(cdb, "%-*s = ", slen, NAME);
if (cmsg->m[cmsg->l] == '\0')
cdb = bufprint(cdb, "default");
else
cdb = printstruct(cdb, cmsg->m + cmsg->l);
cdb = bufprint(cdb, "\n");
if (cmsg->m[cmsg->l] != 0xff)
cmsg->l += 1 + cmsg->m[cmsg->l];
else
cmsg->l += 3 + *(u16 *) (cmsg->m + cmsg->l + 1);
break;
case _CMSTRUCT:
/*----- meta structure 0 -----*/
if (cmsg->m[cmsg->l] == '\0') {
cdb = bufprint(cdb, "%-*s = default\n", slen, NAME);
cmsg->l++;
jumpcstruct(cmsg);
} else {
char *name = NAME;
unsigned _l = cmsg->l;
cdb = bufprint(cdb, "%-*s\n", slen, name);
cmsg->l = (cmsg->m + _l)[0] == 255 ? cmsg->l + 3 : cmsg->l + 1;
cmsg->p++;
cdb = protocol_message_2_pars(cdb, cmsg, level + 1);
}
break;
}
}
return cdb;
}
/*-------------------------------------------------------*/
static _cdebbuf *g_debbuf;
static u_long g_debbuf_lock;
static _cmsg *g_cmsg;
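/*
 * Added explanatory comment (editorial): cdebbuf_alloc() normally hands out
 * the single statically allocated g_debbuf, guarded by bit 1 of
 * g_debbuf_lock; only when that buffer is already in use does it fall back
 * to an atomic allocation, which cdebbuf_free() releases again.
 */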
static _cdebbuf *cdebbuf_alloc(void)
{
_cdebbuf *cdb;
if (likely(!test_and_set_bit(1, &g_debbuf_lock))) {
cdb = g_debbuf;
goto init;
} else
cdb = kmalloc(sizeof(_cdebbuf), GFP_ATOMIC);
if (!cdb)
return NULL;
cdb->buf = kmalloc(CDEBUG_SIZE, GFP_ATOMIC);
if (!cdb->buf) {
kfree(cdb);
return NULL;
}
cdb->size = CDEBUG_SIZE;
init:
cdb->buf[0] = 0;
cdb->p = cdb->buf;
cdb->pos = 0;
return cdb;
}
/**
* cdebbuf_free() - free CAPI debug buffer
* @cdb: buffer to free
*/
void cdebbuf_free(_cdebbuf *cdb)
{
if (likely(cdb == g_debbuf)) {
test_and_clear_bit(1, &g_debbuf_lock);
return;
}
if (likely(cdb))
kfree(cdb->buf);
kfree(cdb);
}
/**
* capi_message2str() - format CAPI 2.0 message for printing
* @msg: CAPI 2.0 message
*
* Allocates a CAPI debug buffer and fills it with a printable representation
* of the CAPI 2.0 message in @msg.
* Return value: allocated debug buffer, NULL on error
* The returned buffer should be freed by a call to cdebbuf_free() after use.
*/
_cdebbuf *capi_message2str(u8 *msg)
{
_cdebbuf *cdb;
_cmsg *cmsg;
cdb = cdebbuf_alloc();
if (unlikely(!cdb))
return NULL;
if (likely(cdb == g_debbuf))
cmsg = g_cmsg;
else
cmsg = kmalloc(sizeof(_cmsg), GFP_ATOMIC);
if (unlikely(!cmsg)) {
cdebbuf_free(cdb);
return NULL;
}
cmsg->m = msg;
cmsg->l = 8;
cmsg->p = 0;
byteTRcpy(cmsg->m + 4, &cmsg->Command);
byteTRcpy(cmsg->m + 5, &cmsg->Subcommand);
cmsg->par = capi_cmd2par(cmsg->Command, cmsg->Subcommand);
cdb = bufprint(cdb, "%-26s ID=%03d #0x%04x LEN=%04d\n",
capi_cmd2str(cmsg->Command, cmsg->Subcommand),
((unsigned short *) msg)[1],
((unsigned short *) msg)[3],
((unsigned short *) msg)[0]);
cdb = protocol_message_2_pars(cdb, cmsg, 1);
if (unlikely(cmsg != g_cmsg))
kfree(cmsg);
return cdb;
}
int __init cdebug_init(void)
{
g_cmsg = kmalloc(sizeof(_cmsg), GFP_KERNEL);
if (!g_cmsg)
return -ENOMEM;
g_debbuf = kmalloc(sizeof(_cdebbuf), GFP_KERNEL);
if (!g_debbuf) {
kfree(g_cmsg);
return -ENOMEM;
}
g_debbuf->buf = kmalloc(CDEBUG_GSIZE, GFP_KERNEL);
if (!g_debbuf->buf) {
kfree(g_cmsg);
kfree(g_debbuf);
return -ENOMEM;
}
g_debbuf->size = CDEBUG_GSIZE;
g_debbuf->buf[0] = 0;
g_debbuf->p = g_debbuf->buf;
g_debbuf->pos = 0;
return 0;
}
void cdebug_exit(void)
{
if (g_debbuf)
kfree(g_debbuf->buf);
kfree(g_debbuf);
kfree(g_cmsg);
}
#else /* !CONFIG_CAPI_TRACE */
static _cdebbuf g_debbuf = {"CONFIG_CAPI_TRACE not enabled", NULL, 0, 0};
_cdebbuf *capi_message2str(u8 *msg)
{
return &g_debbuf;
}
_cdebbuf *capi_cmsg2str(_cmsg *cmsg)
{
return &g_debbuf;
}
void cdebbuf_free(_cdebbuf *cdb)
{
}
int __init cdebug_init(void)
{
return 0;
}
void cdebug_exit(void)
{
}
#endif
| linux-master | drivers/isdn/capi/capiutil.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* l1oip.c low level driver for tunneling layer 1 over IP
*
* NOTE: It is not compatible with TDMoIP or "ISDN over IP".
*
* Author Andreas Eversberg ([email protected])
*/
/* module parameters:
* type:
Value 1 = BRI
Value 2 = PRI
Value 3 = BRI (multi channel frame, not supported yet)
Value 4 = PRI (multi channel frame, not supported yet)
A multi channel frame reduces overhead to a single frame for all
b-channels, but increases delay.
(NOTE: Multi channel frames are not implemented yet.)
* codec:
Value 0 = transparent (default)
Value 1 = transfer ALAW
Value 2 = transfer ULAW
Value 3 = transfer generic 4 bit compression.
* ulaw:
0 = we use a-Law (default)
1 = we use u-Law
* limit:
limitation of B-channels to control bandwidth (1...126)
BRI: 1 or 2
PRI: 1-30, 31-126 (126, because the D-channel is not counted here)
Also, limited resources are used for the stack, resulting in fewer channels.
It is possible to have more than 30 channels in PRI mode; this must
be supported by the application.
* ip:
byte representation of remote ip address (127.0.0.1 -> 127,0,0,1)
If not given, or given as four zeros, no remote address is set.
For multiple interfaces, concatenate the IP addresses. (127,0,0,1,127,0,0,1)
* port:
port number (local interface)
If not given or 0, port 931 is used for the first instance, 932 for the next...
For multiple interfaces, different ports must be given.
* remoteport:
port number (remote interface)
If not given or 0, remote port equals local port
For multiple interfaces on equal sites, different ports must be given.
* ondemand:
0 = fixed (always transmit packets, even when the remote side has timed out)
1 = on demand (only transmit packets when the remote side is detected)
the default is 0
NOTE: ID must also be set for on demand.
* id:
optional value to identify frames. This value must be equal on both
peers and should be random. If omitted or 0, no ID is transmitted.
* debug:
NOTE: only one debug value must be given for all cards
enable debugging (see l1oip.h for debug options)
Special mISDN controls:
op = MISDN_CTRL_SETPEER*
p1 = bytes 0-3 : remote IP address in network order (left element first)
p2 = bytes 1-2 : remote port in network order (high byte first)
optional:
p2 = bytes 3-4 : local port in network order (high byte first)
op = MISDN_CTRL_UNSETPEER*
* Use l1oipctrl for comfortable setting or removing ip address.
(Layer 1 Over IP CTRL)
L1oIP-Protocol
--------------
Frame Header:
7 6 5 4 3 2 1 0
+---------------+
|Ver|T|I|Coding |
+---------------+
| ID byte 3 * |
+---------------+
| ID byte 2 * |
+---------------+
| ID byte 1 * |
+---------------+
| ID byte 0 * |
+---------------+
|M| Channel |
+---------------+
| Length * |
+---------------+
| Time Base MSB |
+---------------+
| Time Base LSB |
+---------------+
| Data.... |
...
| |
+---------------+
|M| Channel |
+---------------+
| Length * |
+---------------+
| Time Base MSB |
+---------------+
| Time Base LSB |
+---------------+
| Data.... |
...
* Only included in some cases.
- Ver = Version
 If the version does not match, the frame must be ignored.
- T = Type of interface
Must be 0 for S0 or 1 for E1.
- I = Id present
If bit is set, four ID bytes are included in frame.
- ID = Connection ID
 An additional ID to prevent denial-of-service attacks. It also prevents
 hijacking of connections with dynamic IP. The ID should be random and must not be 0.
- Coding = Type of codec
 Must be 0 for no transcoding; this also applies to D-channel and other HDLC
 frames. 1 and 2 are reserved for explicit use of the a-law or u-law codec.
 3 is used for the generic table compressor.
- M = More channels to come. If this flag is 1, the following byte contains
 the length of the channel data. After the data block, the next channel is
 defined. The flag of the last channel block (or of the only channel, if just
 one is transmitted) must be 0, and no length byte is given.
- Channel = Channel number
0 reserved
1-3 channel data for S0 (3 is D-channel)
1-31 channel data for E1 (16 is D-channel)
32-127 channel data for extended E1 (16 is D-channel)
- The length is used if the M-flag is 1. It is used to find the next channel
 inside the frame.
 NOTE: A value of 0 equals 256 bytes of data.
 -> For larger data blocks, a single frame must be used.
 -> For larger streams, a single frame or multiple blocks with the same
 channel ID must be used.
- Time Base = Timestamp of first sample in frame
The "Time Base" is used to rearange packets and to detect packet loss.
The 16 bits are sent in network order (MSB first) and count 1/8000 th of a
second. This causes a wrap around each 8,192 seconds. There is no requirement
for the initial "Time Base", but 0 should be used for the first packet.
In case of HDLC data, this timestamp counts the packet or byte number.
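 Worked example (editor's illustration, not part of the original text; the
 ID value is made up): a single-channel frame on an S0 interface with
 ID 0x11223344, coding 0 (transparent), D-channel 3 and time base 0 is
 encoded as the byte sequence
   10 11 22 33 44 03 00 00 <data...>
 First byte: version 0 (bits 7-6), T=0 for S0 (bit 5), I=1 because an ID
 follows (bit 4), coding 0 (bits 3-0). The M-flag of the channel byte is 0,
 so no length byte is present and the data runs to the end of the packet.
 This matches how l1oip_socket_send() assembles the header below.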
Two Timers:
 After initialisation, a timer of 15 seconds is started. Whenever a packet is
 transmitted, the timer is reset to 15 seconds again. If the timer expires, an
 empty packet is transmitted. This keeps the connection alive.
 When a valid packet is received, a timer of 65 seconds is started. The
 interface becomes ACTIVE. If the timer expires, the interface becomes INACTIVE.
Dynamic IP handling:
 To allow dynamic IP, the ID must be non-zero. In this case, any packet with the
 correct port number and ID will be accepted. If the remote side changes its IP,
 the new IP is used for all transmitted packets until it changes again.
On Demand:
 If the ondemand parameter is given, the remote IP is set to 0 on timeout.
 This will stop keepalive traffic to the remote. If the remote is online again,
 traffic will continue to the remote address. This is useful for road warriors.
 This feature only works with an ID set, otherwise it is highly insecure.
Socket and Thread
-----------------
 The complete socket opening and closing is done by a thread.
 Once the thread has opened a socket, the hc->socket descriptor is set. Whenever
 a packet is to be sent via the socket, hc->socket must be checked for NULL.
 To prevent changes of the socket descriptor, hc->socket_lock must be used.
 To change the socket, calling l1oip_socket_open() again will safely kill the
 socket thread and create a new one.
*/
#define L1OIP_VERSION 0 /* 0...3 */
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/mISDNif.h>
#include <linux/mISDNhw.h>
#include <linux/mISDNdsp.h>
#include <linux/init.h>
#include <linux/in.h>
#include <linux/inet.h>
#include <linux/workqueue.h>
#include <linux/kthread.h>
#include <linux/slab.h>
#include <linux/sched/signal.h>
#include <net/sock.h>
#include "core.h"
#include "l1oip.h"
static const char *l1oip_revision = "2.00";
static int l1oip_cnt;
static DEFINE_SPINLOCK(l1oip_lock);
static LIST_HEAD(l1oip_ilist);
#define MAX_CARDS 16
static u_int type[MAX_CARDS];
static u_int codec[MAX_CARDS];
static u_int ip[MAX_CARDS * 4];
static u_int port[MAX_CARDS];
static u_int remoteport[MAX_CARDS];
static u_int ondemand[MAX_CARDS];
static u_int limit[MAX_CARDS];
static u_int id[MAX_CARDS];
static int debug;
static int ulaw;
MODULE_AUTHOR("Andreas Eversberg");
MODULE_LICENSE("GPL");
module_param_array(type, uint, NULL, S_IRUGO | S_IWUSR);
module_param_array(codec, uint, NULL, S_IRUGO | S_IWUSR);
module_param_array(ip, uint, NULL, S_IRUGO | S_IWUSR);
module_param_array(port, uint, NULL, S_IRUGO | S_IWUSR);
module_param_array(remoteport, uint, NULL, S_IRUGO | S_IWUSR);
module_param_array(ondemand, uint, NULL, S_IRUGO | S_IWUSR);
module_param_array(limit, uint, NULL, S_IRUGO | S_IWUSR);
module_param_array(id, uint, NULL, S_IRUGO | S_IWUSR);
module_param(ulaw, uint, S_IRUGO | S_IWUSR);
module_param(debug, uint, S_IRUGO | S_IWUSR);
/*
* send a frame via socket, if open and restart timer
*/
static int
l1oip_socket_send(struct l1oip *hc, u8 localcodec, u8 channel, u32 chanmask,
u16 timebase, u8 *buf, int len)
{
u8 *p;
u8 frame[MAX_DFRAME_LEN_L1 + 32];
struct socket *socket = NULL;
if (debug & DEBUG_L1OIP_MSG)
printk(KERN_DEBUG "%s: sending data to socket (len = %d)\n",
__func__, len);
p = frame;
/* restart timer */
if (time_before(hc->keep_tl.expires, jiffies + 5 * HZ) && !hc->shutdown)
mod_timer(&hc->keep_tl, jiffies + L1OIP_KEEPALIVE * HZ);
else
hc->keep_tl.expires = jiffies + L1OIP_KEEPALIVE * HZ;
if (debug & DEBUG_L1OIP_MSG)
printk(KERN_DEBUG "%s: resetting timer\n", __func__);
/* drop if we have no remote ip or port */
if (!hc->sin_remote.sin_addr.s_addr || !hc->sin_remote.sin_port) {
if (debug & DEBUG_L1OIP_MSG)
printk(KERN_DEBUG "%s: dropping frame, because remote "
"IP is not set.\n", __func__);
return len;
}
/* assemble frame */
*p++ = (L1OIP_VERSION << 6) /* version and coding */
| (hc->pri ? 0x20 : 0x00) /* type */
| (hc->id ? 0x10 : 0x00) /* id */
| localcodec;
if (hc->id) {
*p++ = hc->id >> 24; /* id */
*p++ = hc->id >> 16;
*p++ = hc->id >> 8;
*p++ = hc->id;
}
*p++ = 0x00 + channel; /* m-flag, channel */
*p++ = timebase >> 8; /* time base */
*p++ = timebase;
if (buf && len) { /* add data to frame */
if (localcodec == 1 && ulaw)
l1oip_ulaw_to_alaw(buf, len, p);
else if (localcodec == 2 && !ulaw)
l1oip_alaw_to_ulaw(buf, len, p);
else if (localcodec == 3)
len = l1oip_law_to_4bit(buf, len, p,
&hc->chan[channel].codecstate);
else
memcpy(p, buf, len);
}
len += p - frame;
/* check for socket in safe condition */
spin_lock(&hc->socket_lock);
if (!hc->socket) {
spin_unlock(&hc->socket_lock);
return 0;
}
/* seize socket */
socket = hc->socket;
hc->socket = NULL;
spin_unlock(&hc->socket_lock);
/* send packet */
if (debug & DEBUG_L1OIP_MSG)
printk(KERN_DEBUG "%s: sending packet to socket (len "
"= %d)\n", __func__, len);
hc->sendiov.iov_base = frame;
hc->sendiov.iov_len = len;
len = kernel_sendmsg(socket, &hc->sendmsg, &hc->sendiov, 1, len);
/* give socket back */
hc->socket = socket; /* no locking required */
return len;
}
/*
* receive channel data from socket
*/
static void
l1oip_socket_recv(struct l1oip *hc, u8 remotecodec, u8 channel, u16 timebase,
u8 *buf, int len)
{
struct sk_buff *nskb;
struct bchannel *bch;
struct dchannel *dch;
u8 *p;
u32 rx_counter;
if (len == 0) {
if (debug & DEBUG_L1OIP_MSG)
printk(KERN_DEBUG "%s: received empty keepalive data, "
"ignoring\n", __func__);
return;
}
if (debug & DEBUG_L1OIP_MSG)
printk(KERN_DEBUG "%s: received data, sending to mISDN (%d)\n",
__func__, len);
if (channel < 1 || channel > 127) {
printk(KERN_WARNING "%s: packet error - channel %d out of "
"range\n", __func__, channel);
return;
}
dch = hc->chan[channel].dch;
bch = hc->chan[channel].bch;
if (!dch && !bch) {
printk(KERN_WARNING "%s: packet error - channel %d not in "
"stack\n", __func__, channel);
return;
}
/* prepare message */
nskb = mI_alloc_skb((remotecodec == 3) ? (len << 1) : len, GFP_ATOMIC);
if (!nskb) {
printk(KERN_ERR "%s: No mem for skb.\n", __func__);
return;
}
p = skb_put(nskb, (remotecodec == 3) ? (len << 1) : len);
if (remotecodec == 1 && ulaw)
l1oip_alaw_to_ulaw(buf, len, p);
else if (remotecodec == 2 && !ulaw)
l1oip_ulaw_to_alaw(buf, len, p);
else if (remotecodec == 3)
len = l1oip_4bit_to_law(buf, len, p);
else
memcpy(p, buf, len);
/* send message up */
if (dch && len >= 2) {
dch->rx_skb = nskb;
recv_Dchannel(dch);
}
if (bch) {
/* expand 16 bit sequence number to 32 bit sequence number */
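		/*
		 * Worked example (editor's illustration): with a stored
		 * rx_counter of 0x0001fff0 and a received timebase of 0x0005,
		 * the signed 16-bit difference is positive (time moved
		 * forward), and since 0x0005 < 0xfff0 the upper half is
		 * incremented, giving rx_counter = 0x00020005.
		 */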
rx_counter = hc->chan[channel].rx_counter;
if (((s16)(timebase - rx_counter)) >= 0) {
/* time has changed forward */
if (timebase >= (rx_counter & 0xffff))
rx_counter =
(rx_counter & 0xffff0000) | timebase;
else
rx_counter = ((rx_counter & 0xffff0000) + 0x10000)
| timebase;
} else {
/* time has changed backwards */
if (timebase < (rx_counter & 0xffff))
rx_counter =
(rx_counter & 0xffff0000) | timebase;
else
rx_counter = ((rx_counter & 0xffff0000) - 0x10000)
| timebase;
}
hc->chan[channel].rx_counter = rx_counter;
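		/*
		 * Editor's note (descriptive): with REORDER_DEBUG enabled,
		 * every second frame is swapped with the previously held one
		 * (skb and counter alike), so frames are deliberately passed
		 * up out of order to exercise reordering handling further up
		 * the stack.
		 */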
#ifdef REORDER_DEBUG
if (hc->chan[channel].disorder_flag) {
swap(hc->chan[channel].disorder_skb, nskb);
swap(hc->chan[channel].disorder_cnt, rx_counter);
}
hc->chan[channel].disorder_flag ^= 1;
if (nskb)
#endif
queue_ch_frame(&bch->ch, PH_DATA_IND, rx_counter, nskb);
}
}
/*
* parse frame and extract channel data
*/
static void
l1oip_socket_parse(struct l1oip *hc, struct sockaddr_in *sin, u8 *buf, int len)
{
u32 packet_id;
u8 channel;
u8 remotecodec;
u16 timebase;
int m, mlen;
int len_start = len; /* initial frame length */
struct dchannel *dch = hc->chan[hc->d_idx].dch;
if (debug & DEBUG_L1OIP_MSG)
printk(KERN_DEBUG "%s: received frame, parsing... (%d)\n",
__func__, len);
/* check length */
if (len < 1 + 1 + 2) {
printk(KERN_WARNING "%s: packet error - length %d below "
"4 bytes\n", __func__, len);
return;
}
/* check version */
if (((*buf) >> 6) != L1OIP_VERSION) {
printk(KERN_WARNING "%s: packet error - unknown version %d\n",
__func__, buf[0]>>6);
return;
}
/* check type */
if (((*buf) & 0x20) && !hc->pri) {
printk(KERN_WARNING "%s: packet error - received E1 packet "
"on S0 interface\n", __func__);
return;
}
if (!((*buf) & 0x20) && hc->pri) {
printk(KERN_WARNING "%s: packet error - received S0 packet "
"on E1 interface\n", __func__);
return;
}
/* get id flag */
packet_id = (*buf >> 4) & 1;
/* check coding */
remotecodec = (*buf) & 0x0f;
if (remotecodec > 3) {
printk(KERN_WARNING "%s: packet error - remotecodec %d "
"unsupported\n", __func__, remotecodec);
return;
}
buf++;
len--;
/* check packet_id */
if (packet_id) {
if (!hc->id) {
printk(KERN_WARNING "%s: packet error - packet has id "
"0x%x, but we have not\n", __func__, packet_id);
return;
}
if (len < 4) {
printk(KERN_WARNING "%s: packet error - packet too "
"short for ID value\n", __func__);
return;
}
packet_id = (*buf++) << 24;
packet_id += (*buf++) << 16;
packet_id += (*buf++) << 8;
packet_id += (*buf++);
len -= 4;
if (packet_id != hc->id) {
printk(KERN_WARNING "%s: packet error - ID mismatch, "
"got 0x%x, we 0x%x\n",
__func__, packet_id, hc->id);
return;
}
} else {
if (hc->id) {
printk(KERN_WARNING "%s: packet error - packet has no "
"ID, but we have\n", __func__);
return;
}
}
multiframe:
if (len < 1) {
printk(KERN_WARNING "%s: packet error - packet too short, "
"channel expected at position %d.\n",
__func__, len-len_start + 1);
return;
}
/* get channel and multiframe flag */
channel = *buf & 0x7f;
m = *buf >> 7;
buf++;
len--;
/* check length on multiframe */
if (m) {
if (len < 1) {
printk(KERN_WARNING "%s: packet error - packet too "
"short, length expected at position %d.\n",
__func__, len_start - len - 1);
return;
}
mlen = *buf++;
len--;
if (mlen == 0)
mlen = 256;
if (len < mlen + 3) {
printk(KERN_WARNING "%s: packet error - length %d at "
"position %d exceeds total length %d.\n",
__func__, mlen, len_start-len - 1, len_start);
return;
}
if (len == mlen + 3) {
printk(KERN_WARNING "%s: packet error - length %d at "
"position %d will not allow additional "
"packet.\n",
__func__, mlen, len_start-len + 1);
return;
}
} else
mlen = len - 2; /* single frame, subtract timebase */
if (len < 2) {
printk(KERN_WARNING "%s: packet error - packet too short, time "
"base expected at position %d.\n",
__func__, len-len_start + 1);
return;
}
/* get time base */
timebase = (*buf++) << 8;
timebase |= (*buf++);
len -= 2;
/* if inactive, we send up a PH_ACTIVATE and activate */
if (!test_bit(FLG_ACTIVE, &dch->Flags)) {
if (debug & (DEBUG_L1OIP_MSG | DEBUG_L1OIP_SOCKET))
printk(KERN_DEBUG "%s: interface become active due to "
"received packet\n", __func__);
test_and_set_bit(FLG_ACTIVE, &dch->Flags);
_queue_data(&dch->dev.D, PH_ACTIVATE_IND, MISDN_ID_ANY, 0,
NULL, GFP_ATOMIC);
}
/* distribute packet */
l1oip_socket_recv(hc, remotecodec, channel, timebase, buf, mlen);
buf += mlen;
len -= mlen;
/* multiframe */
if (m)
goto multiframe;
/* restart timer */
if ((time_before(hc->timeout_tl.expires, jiffies + 5 * HZ) ||
!hc->timeout_on) &&
!hc->shutdown) {
hc->timeout_on = 1;
mod_timer(&hc->timeout_tl, jiffies + L1OIP_TIMEOUT * HZ);
} else /* only adjust timer */
hc->timeout_tl.expires = jiffies + L1OIP_TIMEOUT * HZ;
/* if ip or source port changes */
if ((hc->sin_remote.sin_addr.s_addr != sin->sin_addr.s_addr)
|| (hc->sin_remote.sin_port != sin->sin_port)) {
if (debug & DEBUG_L1OIP_SOCKET)
printk(KERN_DEBUG "%s: remote address changes from "
"0x%08x to 0x%08x (port %d to %d)\n", __func__,
ntohl(hc->sin_remote.sin_addr.s_addr),
ntohl(sin->sin_addr.s_addr),
ntohs(hc->sin_remote.sin_port),
ntohs(sin->sin_port));
hc->sin_remote.sin_addr.s_addr = sin->sin_addr.s_addr;
hc->sin_remote.sin_port = sin->sin_port;
}
}
/*
* socket stuff
*/
static int
l1oip_socket_thread(void *data)
{
struct l1oip *hc = (struct l1oip *)data;
int ret = 0;
struct sockaddr_in sin_rx;
struct kvec iov;
struct msghdr msg = {.msg_name = &sin_rx,
.msg_namelen = sizeof(sin_rx)};
unsigned char *recvbuf;
size_t recvbuf_size = 1500;
int recvlen;
struct socket *socket = NULL;
DECLARE_COMPLETION_ONSTACK(wait);
/* allocate buffer memory */
recvbuf = kmalloc(recvbuf_size, GFP_KERNEL);
if (!recvbuf) {
printk(KERN_ERR "%s: Failed to alloc recvbuf.\n", __func__);
ret = -ENOMEM;
goto fail;
}
iov.iov_base = recvbuf;
iov.iov_len = recvbuf_size;
/* make daemon */
allow_signal(SIGTERM);
/* create socket */
if (sock_create(PF_INET, SOCK_DGRAM, IPPROTO_UDP, &socket)) {
printk(KERN_ERR "%s: Failed to create socket.\n", __func__);
ret = -EIO;
goto fail;
}
/* set incoming address */
hc->sin_local.sin_family = AF_INET;
hc->sin_local.sin_addr.s_addr = INADDR_ANY;
hc->sin_local.sin_port = htons((unsigned short)hc->localport);
/* set outgoing address */
hc->sin_remote.sin_family = AF_INET;
hc->sin_remote.sin_addr.s_addr = htonl(hc->remoteip);
hc->sin_remote.sin_port = htons((unsigned short)hc->remoteport);
/* bind to incoming port */
if (socket->ops->bind(socket, (struct sockaddr *)&hc->sin_local,
sizeof(hc->sin_local))) {
printk(KERN_ERR "%s: Failed to bind socket to port %d.\n",
__func__, hc->localport);
ret = -EINVAL;
goto fail;
}
/* check sk */
if (socket->sk == NULL) {
printk(KERN_ERR "%s: socket->sk == NULL\n", __func__);
ret = -EIO;
goto fail;
}
/* build send message */
hc->sendmsg.msg_name = &hc->sin_remote;
hc->sendmsg.msg_namelen = sizeof(hc->sin_remote);
hc->sendmsg.msg_control = NULL;
hc->sendmsg.msg_controllen = 0;
/* give away socket */
spin_lock(&hc->socket_lock);
hc->socket = socket;
spin_unlock(&hc->socket_lock);
/* read loop */
if (debug & DEBUG_L1OIP_SOCKET)
printk(KERN_DEBUG "%s: socket created and open\n",
__func__);
while (!signal_pending(current)) {
iov_iter_kvec(&msg.msg_iter, ITER_DEST, &iov, 1, recvbuf_size);
recvlen = sock_recvmsg(socket, &msg, 0);
if (recvlen > 0) {
l1oip_socket_parse(hc, &sin_rx, recvbuf, recvlen);
} else {
if (debug & DEBUG_L1OIP_SOCKET)
printk(KERN_WARNING
"%s: broken pipe on socket\n", __func__);
}
}
/* get socket back, check first if in use, maybe by send function */
spin_lock(&hc->socket_lock);
/* if hc->socket is NULL, it is in use until it is given back */
while (!hc->socket) {
spin_unlock(&hc->socket_lock);
schedule_timeout(HZ / 10);
spin_lock(&hc->socket_lock);
}
hc->socket = NULL;
spin_unlock(&hc->socket_lock);
if (debug & DEBUG_L1OIP_SOCKET)
printk(KERN_DEBUG "%s: socket thread terminating\n",
__func__);
fail:
/* free recvbuf */
kfree(recvbuf);
/* close socket */
if (socket)
sock_release(socket);
/* if we got killed, signal completion */
complete(&hc->socket_complete);
hc->socket_thread = NULL; /* show termination of thread */
if (debug & DEBUG_L1OIP_SOCKET)
printk(KERN_DEBUG "%s: socket thread terminated\n",
__func__);
return ret;
}
static void
l1oip_socket_close(struct l1oip *hc)
{
struct dchannel *dch = hc->chan[hc->d_idx].dch;
/* kill thread */
if (hc->socket_thread) {
if (debug & DEBUG_L1OIP_SOCKET)
printk(KERN_DEBUG "%s: socket thread exists, "
"killing...\n", __func__);
send_sig(SIGTERM, hc->socket_thread, 0);
wait_for_completion(&hc->socket_complete);
}
/* if active, we send up a PH_DEACTIVATE and deactivate */
if (test_bit(FLG_ACTIVE, &dch->Flags)) {
if (debug & (DEBUG_L1OIP_MSG | DEBUG_L1OIP_SOCKET))
printk(KERN_DEBUG "%s: interface become deactivated "
"due to timeout\n", __func__);
test_and_clear_bit(FLG_ACTIVE, &dch->Flags);
_queue_data(&dch->dev.D, PH_DEACTIVATE_IND, MISDN_ID_ANY, 0,
NULL, GFP_ATOMIC);
}
}
static int
l1oip_socket_open(struct l1oip *hc)
{
/* in case of reopen, we need to close first */
l1oip_socket_close(hc);
init_completion(&hc->socket_complete);
/* create receive process */
hc->socket_thread = kthread_run(l1oip_socket_thread, hc, "l1oip_%s",
hc->name);
if (IS_ERR(hc->socket_thread)) {
int err = PTR_ERR(hc->socket_thread);
printk(KERN_ERR "%s: Failed (%d) to create socket process.\n",
__func__, err);
hc->socket_thread = NULL;
sock_release(hc->socket);
return err;
}
if (debug & DEBUG_L1OIP_SOCKET)
printk(KERN_DEBUG "%s: socket thread created\n", __func__);
return 0;
}
static void
l1oip_send_bh(struct work_struct *work)
{
struct l1oip *hc = container_of(work, struct l1oip, workq);
if (debug & (DEBUG_L1OIP_MSG | DEBUG_L1OIP_SOCKET))
printk(KERN_DEBUG "%s: keepalive timer expired, sending empty "
"frame on dchannel\n", __func__);
/* send an empty l1oip frame at D-channel */
l1oip_socket_send(hc, 0, hc->d_idx, 0, 0, NULL, 0);
}
/*
* timer stuff
*/
static void
l1oip_keepalive(struct timer_list *t)
{
struct l1oip *hc = from_timer(hc, t, keep_tl);
schedule_work(&hc->workq);
}
static void
l1oip_timeout(struct timer_list *t)
{
struct l1oip *hc = from_timer(hc, t,
timeout_tl);
struct dchannel *dch = hc->chan[hc->d_idx].dch;
if (debug & DEBUG_L1OIP_MSG)
printk(KERN_DEBUG "%s: timeout timer expired, turn layer one "
"down.\n", __func__);
hc->timeout_on = 0; /* state that timer must be initialized next time */
/* if timeout, we send up a PH_DEACTIVATE and deactivate */
if (test_bit(FLG_ACTIVE, &dch->Flags)) {
if (debug & (DEBUG_L1OIP_MSG | DEBUG_L1OIP_SOCKET))
printk(KERN_DEBUG "%s: interface become deactivated "
"due to timeout\n", __func__);
test_and_clear_bit(FLG_ACTIVE, &dch->Flags);
_queue_data(&dch->dev.D, PH_DEACTIVATE_IND, MISDN_ID_ANY, 0,
NULL, GFP_ATOMIC);
}
/* if we have ondemand set, we remove ip address */
if (hc->ondemand) {
if (debug & DEBUG_L1OIP_MSG)
printk(KERN_DEBUG "%s: on demand causes ip address to "
"be removed\n", __func__);
hc->sin_remote.sin_addr.s_addr = 0;
}
}
/*
* message handling
*/
static int
handle_dmsg(struct mISDNchannel *ch, struct sk_buff *skb)
{
struct mISDNdevice *dev = container_of(ch, struct mISDNdevice, D);
struct dchannel *dch = container_of(dev, struct dchannel, dev);
struct l1oip *hc = dch->hw;
struct mISDNhead *hh = mISDN_HEAD_P(skb);
int ret = -EINVAL;
int l, ll;
unsigned char *p;
switch (hh->prim) {
case PH_DATA_REQ:
if (skb->len < 1) {
printk(KERN_WARNING "%s: skb too small\n",
__func__);
break;
}
if (skb->len > MAX_DFRAME_LEN_L1 || skb->len > L1OIP_MAX_LEN) {
printk(KERN_WARNING "%s: skb too large\n",
__func__);
break;
}
/* send frame */
p = skb->data;
l = skb->len;
while (l) {
/*
* This is technically bounded by L1OIP_MAX_PERFRAME but
* MAX_DFRAME_LEN_L1 < L1OIP_MAX_PERFRAME
*/
ll = (l < MAX_DFRAME_LEN_L1) ? l : MAX_DFRAME_LEN_L1;
l1oip_socket_send(hc, 0, dch->slot, 0,
hc->chan[dch->slot].tx_counter++, p, ll);
p += ll;
l -= ll;
}
skb_trim(skb, 0);
queue_ch_frame(ch, PH_DATA_CNF, hh->id, skb);
return 0;
case PH_ACTIVATE_REQ:
if (debug & (DEBUG_L1OIP_MSG | DEBUG_L1OIP_SOCKET))
printk(KERN_DEBUG "%s: PH_ACTIVATE channel %d (1..%d)\n"
, __func__, dch->slot, hc->b_num + 1);
skb_trim(skb, 0);
if (test_bit(FLG_ACTIVE, &dch->Flags))
queue_ch_frame(ch, PH_ACTIVATE_IND, hh->id, skb);
else
queue_ch_frame(ch, PH_DEACTIVATE_IND, hh->id, skb);
return 0;
case PH_DEACTIVATE_REQ:
if (debug & (DEBUG_L1OIP_MSG | DEBUG_L1OIP_SOCKET))
printk(KERN_DEBUG "%s: PH_DEACTIVATE channel %d "
"(1..%d)\n", __func__, dch->slot,
hc->b_num + 1);
skb_trim(skb, 0);
if (test_bit(FLG_ACTIVE, &dch->Flags))
queue_ch_frame(ch, PH_ACTIVATE_IND, hh->id, skb);
else
queue_ch_frame(ch, PH_DEACTIVATE_IND, hh->id, skb);
return 0;
}
if (!ret)
dev_kfree_skb(skb);
return ret;
}
static int
channel_dctrl(struct dchannel *dch, struct mISDN_ctrl_req *cq)
{
int ret = 0;
struct l1oip *hc = dch->hw;
switch (cq->op) {
case MISDN_CTRL_GETOP:
cq->op = MISDN_CTRL_SETPEER | MISDN_CTRL_UNSETPEER
| MISDN_CTRL_GETPEER;
break;
case MISDN_CTRL_SETPEER:
hc->remoteip = (u32)cq->p1;
hc->remoteport = cq->p2 & 0xffff;
hc->localport = cq->p2 >> 16;
if (!hc->remoteport)
hc->remoteport = hc->localport;
if (debug & DEBUG_L1OIP_SOCKET)
printk(KERN_DEBUG "%s: got new ip address from user "
"space.\n", __func__);
l1oip_socket_open(hc);
break;
case MISDN_CTRL_UNSETPEER:
if (debug & DEBUG_L1OIP_SOCKET)
printk(KERN_DEBUG "%s: removing ip address.\n",
__func__);
hc->remoteip = 0;
l1oip_socket_open(hc);
break;
case MISDN_CTRL_GETPEER:
if (debug & DEBUG_L1OIP_SOCKET)
printk(KERN_DEBUG "%s: getting ip address.\n",
__func__);
cq->p1 = hc->remoteip;
cq->p2 = hc->remoteport | (hc->localport << 16);
break;
default:
printk(KERN_WARNING "%s: unknown Op %x\n",
__func__, cq->op);
ret = -EINVAL;
break;
}
return ret;
}
static int
open_dchannel(struct l1oip *hc, struct dchannel *dch, struct channel_req *rq)
{
if (debug & DEBUG_HW_OPEN)
printk(KERN_DEBUG "%s: dev(%d) open from %p\n", __func__,
dch->dev.id, __builtin_return_address(0));
if (rq->protocol == ISDN_P_NONE)
return -EINVAL;
if ((dch->dev.D.protocol != ISDN_P_NONE) &&
(dch->dev.D.protocol != rq->protocol)) {
if (debug & DEBUG_HW_OPEN)
printk(KERN_WARNING "%s: change protocol %x to %x\n",
__func__, dch->dev.D.protocol, rq->protocol);
}
if (dch->dev.D.protocol != rq->protocol)
dch->dev.D.protocol = rq->protocol;
if (test_bit(FLG_ACTIVE, &dch->Flags)) {
_queue_data(&dch->dev.D, PH_ACTIVATE_IND, MISDN_ID_ANY,
0, NULL, GFP_KERNEL);
}
rq->ch = &dch->dev.D;
if (!try_module_get(THIS_MODULE))
printk(KERN_WARNING "%s:cannot get module\n", __func__);
return 0;
}
static int
open_bchannel(struct l1oip *hc, struct dchannel *dch, struct channel_req *rq)
{
struct bchannel *bch;
int ch;
if (!test_channelmap(rq->adr.channel, dch->dev.channelmap))
return -EINVAL;
if (rq->protocol == ISDN_P_NONE)
return -EINVAL;
ch = rq->adr.channel; /* BRI: 1=B1 2=B2 PRI: 1..15,17.. */
bch = hc->chan[ch].bch;
if (!bch) {
printk(KERN_ERR "%s:internal error ch %d has no bch\n",
__func__, ch);
return -EINVAL;
}
if (test_and_set_bit(FLG_OPEN, &bch->Flags))
return -EBUSY; /* b-channel can be only open once */
bch->ch.protocol = rq->protocol;
rq->ch = &bch->ch;
if (!try_module_get(THIS_MODULE))
printk(KERN_WARNING "%s:cannot get module\n", __func__);
return 0;
}
static int
l1oip_dctrl(struct mISDNchannel *ch, u_int cmd, void *arg)
{
struct mISDNdevice *dev = container_of(ch, struct mISDNdevice, D);
struct dchannel *dch = container_of(dev, struct dchannel, dev);
struct l1oip *hc = dch->hw;
struct channel_req *rq;
int err = 0;
if (dch->debug & DEBUG_HW)
printk(KERN_DEBUG "%s: cmd:%x %p\n",
__func__, cmd, arg);
switch (cmd) {
case OPEN_CHANNEL:
rq = arg;
switch (rq->protocol) {
case ISDN_P_TE_S0:
case ISDN_P_NT_S0:
if (hc->pri) {
err = -EINVAL;
break;
}
err = open_dchannel(hc, dch, rq);
break;
case ISDN_P_TE_E1:
case ISDN_P_NT_E1:
if (!hc->pri) {
err = -EINVAL;
break;
}
err = open_dchannel(hc, dch, rq);
break;
default:
err = open_bchannel(hc, dch, rq);
}
break;
case CLOSE_CHANNEL:
if (debug & DEBUG_HW_OPEN)
printk(KERN_DEBUG "%s: dev(%d) close from %p\n",
__func__, dch->dev.id,
__builtin_return_address(0));
module_put(THIS_MODULE);
break;
case CONTROL_CHANNEL:
err = channel_dctrl(dch, arg);
break;
default:
if (dch->debug & DEBUG_HW)
printk(KERN_DEBUG "%s: unknown command %x\n",
__func__, cmd);
err = -EINVAL;
}
return err;
}
static int
handle_bmsg(struct mISDNchannel *ch, struct sk_buff *skb)
{
struct bchannel *bch = container_of(ch, struct bchannel, ch);
struct l1oip *hc = bch->hw;
int ret = -EINVAL;
struct mISDNhead *hh = mISDN_HEAD_P(skb);
int l, ll;
unsigned char *p;
switch (hh->prim) {
case PH_DATA_REQ:
if (skb->len <= 0) {
printk(KERN_WARNING "%s: skb too small\n",
__func__);
break;
}
if (skb->len > MAX_DFRAME_LEN_L1 || skb->len > L1OIP_MAX_LEN) {
printk(KERN_WARNING "%s: skb too large\n",
__func__);
break;
}
/* check for AIS / ulaw-silence */
l = skb->len;
if (!memchr_inv(skb->data, 0xff, l)) {
if (debug & DEBUG_L1OIP_MSG)
printk(KERN_DEBUG "%s: got AIS, not sending, "
"but counting\n", __func__);
hc->chan[bch->slot].tx_counter += l;
skb_trim(skb, 0);
queue_ch_frame(ch, PH_DATA_CNF, hh->id, skb);
return 0;
}
/* check for silence */
l = skb->len;
if (!memchr_inv(skb->data, 0x2a, l)) {
if (debug & DEBUG_L1OIP_MSG)
printk(KERN_DEBUG "%s: got silence, not sending"
", but counting\n", __func__);
hc->chan[bch->slot].tx_counter += l;
skb_trim(skb, 0);
queue_ch_frame(ch, PH_DATA_CNF, hh->id, skb);
return 0;
}
/* send frame */
p = skb->data;
l = skb->len;
while (l) {
/*
* This is technically bounded by L1OIP_MAX_PERFRAME but
* MAX_DFRAME_LEN_L1 < L1OIP_MAX_PERFRAME
*/
ll = (l < MAX_DFRAME_LEN_L1) ? l : MAX_DFRAME_LEN_L1;
l1oip_socket_send(hc, hc->codec, bch->slot, 0,
hc->chan[bch->slot].tx_counter, p, ll);
hc->chan[bch->slot].tx_counter += ll;
p += ll;
l -= ll;
}
skb_trim(skb, 0);
queue_ch_frame(ch, PH_DATA_CNF, hh->id, skb);
return 0;
case PH_ACTIVATE_REQ:
if (debug & (DEBUG_L1OIP_MSG | DEBUG_L1OIP_SOCKET))
printk(KERN_DEBUG "%s: PH_ACTIVATE channel %d (1..%d)\n"
, __func__, bch->slot, hc->b_num + 1);
hc->chan[bch->slot].codecstate = 0;
test_and_set_bit(FLG_ACTIVE, &bch->Flags);
skb_trim(skb, 0);
queue_ch_frame(ch, PH_ACTIVATE_IND, hh->id, skb);
return 0;
case PH_DEACTIVATE_REQ:
if (debug & (DEBUG_L1OIP_MSG | DEBUG_L1OIP_SOCKET))
printk(KERN_DEBUG "%s: PH_DEACTIVATE channel %d "
"(1..%d)\n", __func__, bch->slot,
hc->b_num + 1);
test_and_clear_bit(FLG_ACTIVE, &bch->Flags);
skb_trim(skb, 0);
queue_ch_frame(ch, PH_DEACTIVATE_IND, hh->id, skb);
return 0;
}
if (!ret)
dev_kfree_skb(skb);
return ret;
}
static int
channel_bctrl(struct bchannel *bch, struct mISDN_ctrl_req *cq)
{
int ret = 0;
struct dsp_features *features =
(struct dsp_features *)(*((u_long *)&cq->p1));
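	/*
	 * Editor's note (descriptive): cq->p1 carries a pointer value from
	 * the caller (the DSP layer, given the struct type); the double cast
	 * reinterprets the parameter storage as a u_long and converts it back
	 * to a struct dsp_features pointer so the feature flags can be filled
	 * in below.
	 */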
switch (cq->op) {
case MISDN_CTRL_GETOP:
cq->op = MISDN_CTRL_HW_FEATURES_OP;
break;
case MISDN_CTRL_HW_FEATURES: /* fill features structure */
if (debug & DEBUG_L1OIP_MSG)
printk(KERN_DEBUG "%s: HW_FEATURE request\n",
__func__);
/* create confirm */
features->unclocked = 1;
features->unordered = 1;
break;
default:
printk(KERN_WARNING "%s: unknown Op %x\n",
__func__, cq->op);
ret = -EINVAL;
break;
}
return ret;
}
static int
l1oip_bctrl(struct mISDNchannel *ch, u_int cmd, void *arg)
{
struct bchannel *bch = container_of(ch, struct bchannel, ch);
int err = -EINVAL;
if (bch->debug & DEBUG_HW)
printk(KERN_DEBUG "%s: cmd:%x %p\n",
__func__, cmd, arg);
switch (cmd) {
case CLOSE_CHANNEL:
test_and_clear_bit(FLG_OPEN, &bch->Flags);
test_and_clear_bit(FLG_ACTIVE, &bch->Flags);
ch->protocol = ISDN_P_NONE;
ch->peer = NULL;
module_put(THIS_MODULE);
err = 0;
break;
case CONTROL_CHANNEL:
err = channel_bctrl(bch, arg);
break;
default:
printk(KERN_WARNING "%s: unknown prim(%x)\n",
__func__, cmd);
}
return err;
}
/*
* cleanup module and stack
*/
static void
release_card(struct l1oip *hc)
{
int ch;
hc->shutdown = true;
timer_shutdown_sync(&hc->keep_tl);
timer_shutdown_sync(&hc->timeout_tl);
cancel_work_sync(&hc->workq);
if (hc->socket_thread)
l1oip_socket_close(hc);
if (hc->registered && hc->chan[hc->d_idx].dch)
mISDN_unregister_device(&hc->chan[hc->d_idx].dch->dev);
for (ch = 0; ch < 128; ch++) {
if (hc->chan[ch].dch) {
mISDN_freedchannel(hc->chan[ch].dch);
kfree(hc->chan[ch].dch);
}
if (hc->chan[ch].bch) {
mISDN_freebchannel(hc->chan[ch].bch);
kfree(hc->chan[ch].bch);
#ifdef REORDER_DEBUG
dev_kfree_skb(hc->chan[ch].disorder_skb);
#endif
}
}
spin_lock(&l1oip_lock);
list_del(&hc->list);
spin_unlock(&l1oip_lock);
kfree(hc);
}
static void
l1oip_cleanup(void)
{
struct l1oip *hc, *next;
list_for_each_entry_safe(hc, next, &l1oip_ilist, list)
release_card(hc);
l1oip_4bit_free();
}
/*
* module and stack init
*/
static int
init_card(struct l1oip *hc, int pri, int bundle)
{
struct dchannel *dch;
struct bchannel *bch;
int ret;
int i, ch;
spin_lock_init(&hc->socket_lock);
hc->idx = l1oip_cnt;
hc->pri = pri;
hc->d_idx = pri ? 16 : 3;
hc->b_num = pri ? 30 : 2;
hc->bundle = bundle;
if (hc->pri)
sprintf(hc->name, "l1oip-e1.%d", l1oip_cnt + 1);
else
sprintf(hc->name, "l1oip-s0.%d", l1oip_cnt + 1);
switch (codec[l1oip_cnt]) {
case 0: /* as is */
case 1: /* alaw */
case 2: /* ulaw */
case 3: /* 4bit */
break;
default:
printk(KERN_ERR "Codec(%d) not supported.\n",
codec[l1oip_cnt]);
return -EINVAL;
}
hc->codec = codec[l1oip_cnt];
if (debug & DEBUG_L1OIP_INIT)
printk(KERN_DEBUG "%s: using codec %d\n",
__func__, hc->codec);
if (id[l1oip_cnt] == 0) {
printk(KERN_WARNING "Warning: No 'id' value given or "
"0, this is highly unsecure. Please use 32 "
"bit random number 0x...\n");
}
hc->id = id[l1oip_cnt];
if (debug & DEBUG_L1OIP_INIT)
printk(KERN_DEBUG "%s: using id 0x%x\n", __func__, hc->id);
hc->ondemand = ondemand[l1oip_cnt];
if (hc->ondemand && !hc->id) {
printk(KERN_ERR "%s: ondemand option only allowed in "
"conjunction with non 0 ID\n", __func__);
return -EINVAL;
}
if (limit[l1oip_cnt])
hc->b_num = limit[l1oip_cnt];
if (!pri && hc->b_num > 2) {
printk(KERN_ERR "Maximum limit for BRI interface is 2 "
"channels.\n");
return -EINVAL;
}
if (pri && hc->b_num > 126) {
printk(KERN_ERR "Maximum limit for PRI interface is 126 "
"channels.\n");
return -EINVAL;
}
if (pri && hc->b_num > 30) {
printk(KERN_WARNING "Maximum limit for BRI interface is 30 "
"channels.\n");
printk(KERN_WARNING "Your selection of %d channels must be "
"supported by application.\n", hc->limit);
}
hc->remoteip = ip[l1oip_cnt << 2] << 24
| ip[(l1oip_cnt << 2) + 1] << 16
| ip[(l1oip_cnt << 2) + 2] << 8
| ip[(l1oip_cnt << 2) + 3];
hc->localport = port[l1oip_cnt]?:(L1OIP_DEFAULTPORT + l1oip_cnt);
if (remoteport[l1oip_cnt])
hc->remoteport = remoteport[l1oip_cnt];
else
hc->remoteport = hc->localport;
if (debug & DEBUG_L1OIP_INIT)
printk(KERN_DEBUG "%s: using local port %d remote ip "
"%d.%d.%d.%d port %d ondemand %d\n", __func__,
hc->localport, hc->remoteip >> 24,
(hc->remoteip >> 16) & 0xff,
(hc->remoteip >> 8) & 0xff, hc->remoteip & 0xff,
hc->remoteport, hc->ondemand);
dch = kzalloc(sizeof(struct dchannel), GFP_KERNEL);
if (!dch)
return -ENOMEM;
dch->debug = debug;
mISDN_initdchannel(dch, MAX_DFRAME_LEN_L1, NULL);
dch->hw = hc;
if (pri)
dch->dev.Dprotocols = (1 << ISDN_P_TE_E1) | (1 << ISDN_P_NT_E1);
else
dch->dev.Dprotocols = (1 << ISDN_P_TE_S0) | (1 << ISDN_P_NT_S0);
dch->dev.Bprotocols = (1 << (ISDN_P_B_RAW & ISDN_P_B_MASK)) |
(1 << (ISDN_P_B_HDLC & ISDN_P_B_MASK));
dch->dev.D.send = handle_dmsg;
dch->dev.D.ctrl = l1oip_dctrl;
dch->dev.nrbchan = hc->b_num;
dch->slot = hc->d_idx;
hc->chan[hc->d_idx].dch = dch;
i = 1;
for (ch = 0; ch < dch->dev.nrbchan; ch++) {
if (ch == 15)
i++;
bch = kzalloc(sizeof(struct bchannel), GFP_KERNEL);
if (!bch) {
printk(KERN_ERR "%s: no memory for bchannel\n",
__func__);
return -ENOMEM;
}
bch->nr = i + ch;
bch->slot = i + ch;
bch->debug = debug;
mISDN_initbchannel(bch, MAX_DATA_MEM, 0);
bch->hw = hc;
bch->ch.send = handle_bmsg;
bch->ch.ctrl = l1oip_bctrl;
bch->ch.nr = i + ch;
list_add(&bch->ch.list, &dch->dev.bchannels);
hc->chan[i + ch].bch = bch;
set_channelmap(bch->nr, dch->dev.channelmap);
}
/* TODO: create a parent device for this driver */
ret = mISDN_register_device(&dch->dev, NULL, hc->name);
if (ret)
return ret;
hc->registered = 1;
if (debug & DEBUG_L1OIP_INIT)
printk(KERN_DEBUG "%s: Setting up network card(%d)\n",
__func__, l1oip_cnt + 1);
ret = l1oip_socket_open(hc);
if (ret)
return ret;
timer_setup(&hc->keep_tl, l1oip_keepalive, 0);
hc->keep_tl.expires = jiffies + 2 * HZ; /* two seconds first time */
add_timer(&hc->keep_tl);
timer_setup(&hc->timeout_tl, l1oip_timeout, 0);
hc->timeout_on = 0; /* state that we have timer off */
return 0;
}
static int __init
l1oip_init(void)
{
int pri, bundle;
struct l1oip *hc;
int ret;
printk(KERN_INFO "mISDN: Layer-1-over-IP driver Rev. %s\n",
l1oip_revision);
if (l1oip_4bit_alloc(ulaw))
return -ENOMEM;
l1oip_cnt = 0;
while (l1oip_cnt < MAX_CARDS && type[l1oip_cnt]) {
switch (type[l1oip_cnt] & 0xff) {
case 1:
pri = 0;
bundle = 0;
break;
case 2:
pri = 1;
bundle = 0;
break;
case 3:
pri = 0;
bundle = 1;
break;
case 4:
pri = 1;
bundle = 1;
break;
default:
printk(KERN_ERR "Card type(%d) not supported.\n",
type[l1oip_cnt] & 0xff);
l1oip_cleanup();
return -EINVAL;
}
if (debug & DEBUG_L1OIP_INIT)
printk(KERN_DEBUG "%s: interface %d is %s with %s.\n",
__func__, l1oip_cnt, pri ? "PRI" : "BRI",
bundle ? "bundled IP packet for all B-channels" :
"separate IP packets for every B-channel");
hc = kzalloc(sizeof(struct l1oip), GFP_ATOMIC);
if (!hc) {
printk(KERN_ERR "No kmem for L1-over-IP driver.\n");
l1oip_cleanup();
return -ENOMEM;
}
INIT_WORK(&hc->workq, (void *)l1oip_send_bh);
spin_lock(&l1oip_lock);
list_add_tail(&hc->list, &l1oip_ilist);
spin_unlock(&l1oip_lock);
ret = init_card(hc, pri, bundle);
if (ret) {
l1oip_cleanup();
return ret;
}
l1oip_cnt++;
}
printk(KERN_INFO "%d virtual devices registered\n", l1oip_cnt);
return 0;
}
module_init(l1oip_init);
module_exit(l1oip_cleanup);
| linux-master | drivers/isdn/mISDN/l1oip_core.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
*
* Author Karsten Keil <[email protected]>
*
* Copyright 2008 by Karsten Keil <[email protected]>
*/
#include <linux/slab.h>
#include <linux/mISDNif.h>
#include <linux/kthread.h>
#include <linux/sched.h>
#include <linux/sched/cputime.h>
#include <linux/signal.h>
#include "core.h"
static u_int *debug;
static inline void
_queue_message(struct mISDNstack *st, struct sk_buff *skb)
{
struct mISDNhead *hh = mISDN_HEAD_P(skb);
if (*debug & DEBUG_QUEUE_FUNC)
printk(KERN_DEBUG "%s prim(%x) id(%x) %p\n",
__func__, hh->prim, hh->id, skb);
skb_queue_tail(&st->msgq, skb);
if (likely(!test_bit(mISDN_STACK_STOPPED, &st->status))) {
test_and_set_bit(mISDN_STACK_WORK, &st->status);
wake_up_interruptible(&st->workq);
}
}
static int
mISDN_queue_message(struct mISDNchannel *ch, struct sk_buff *skb)
{
_queue_message(ch->st, skb);
return 0;
}
static struct mISDNchannel *
get_channel4id(struct mISDNstack *st, u_int id)
{
struct mISDNchannel *ch;
mutex_lock(&st->lmutex);
list_for_each_entry(ch, &st->layer2, list) {
if (id == ch->nr)
goto unlock;
}
ch = NULL;
unlock:
mutex_unlock(&st->lmutex);
return ch;
}
static void
send_socklist(struct mISDN_sock_list *sl, struct sk_buff *skb)
{
struct sock *sk;
struct sk_buff *cskb = NULL;
read_lock(&sl->lock);
sk_for_each(sk, &sl->head) {
if (sk->sk_state != MISDN_BOUND)
continue;
if (!cskb)
cskb = skb_copy(skb, GFP_ATOMIC);
if (!cskb) {
printk(KERN_WARNING "%s no skb\n", __func__);
break;
}
if (!sock_queue_rcv_skb(sk, cskb))
cskb = NULL;
}
read_unlock(&sl->lock);
dev_kfree_skb(cskb);
}
static void
send_layer2(struct mISDNstack *st, struct sk_buff *skb)
{
struct sk_buff *cskb;
struct mISDNhead *hh = mISDN_HEAD_P(skb);
struct mISDNchannel *ch;
int ret;
if (!st)
return;
mutex_lock(&st->lmutex);
if ((hh->id & MISDN_ID_ADDR_MASK) == MISDN_ID_ANY) { /* L2 for all */
list_for_each_entry(ch, &st->layer2, list) {
if (list_is_last(&ch->list, &st->layer2)) {
cskb = skb;
skb = NULL;
} else {
cskb = skb_copy(skb, GFP_KERNEL);
}
if (cskb) {
ret = ch->send(ch, cskb);
if (ret) {
if (*debug & DEBUG_SEND_ERR)
printk(KERN_DEBUG
"%s ch%d prim(%x) addr(%x)"
" err %d\n",
__func__, ch->nr,
hh->prim, ch->addr, ret);
dev_kfree_skb(cskb);
}
} else {
printk(KERN_WARNING "%s ch%d addr %x no mem\n",
__func__, ch->nr, ch->addr);
goto out;
}
}
} else {
list_for_each_entry(ch, &st->layer2, list) {
if ((hh->id & MISDN_ID_ADDR_MASK) == ch->addr) {
ret = ch->send(ch, skb);
if (!ret)
skb = NULL;
goto out;
}
}
ret = st->dev->teimgr->ctrl(st->dev->teimgr, CHECK_DATA, skb);
if (!ret)
skb = NULL;
else if (*debug & DEBUG_SEND_ERR)
printk(KERN_DEBUG
"%s mgr prim(%x) err %d\n",
__func__, hh->prim, ret);
}
out:
mutex_unlock(&st->lmutex);
dev_kfree_skb(skb);
}
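/*
 * Editor's note (summary of the dispatch below, not part of the original
 * source): the low bits of the primitive (prim & MISDN_LAYERMASK) select the
 * destination: 0x1 is handed to the layer-1 channel, 0x2 is distributed over
 * the layer-2 list (both are also copied to any bound layer-1 sockets), and
 * 0x4 (as well as the unexpected 0x8) is routed to the channel whose number
 * matches the message id; anything else is reported as an undelivered
 * broadcast.
 */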
static inline int
send_msg_to_layer(struct mISDNstack *st, struct sk_buff *skb)
{
struct mISDNhead *hh = mISDN_HEAD_P(skb);
struct mISDNchannel *ch;
int lm;
lm = hh->prim & MISDN_LAYERMASK;
if (*debug & DEBUG_QUEUE_FUNC)
printk(KERN_DEBUG "%s prim(%x) id(%x) %p\n",
__func__, hh->prim, hh->id, skb);
if (lm == 0x1) {
if (!hlist_empty(&st->l1sock.head)) {
__net_timestamp(skb);
send_socklist(&st->l1sock, skb);
}
return st->layer1->send(st->layer1, skb);
} else if (lm == 0x2) {
if (!hlist_empty(&st->l1sock.head))
send_socklist(&st->l1sock, skb);
send_layer2(st, skb);
return 0;
} else if (lm == 0x4) {
ch = get_channel4id(st, hh->id);
if (ch)
return ch->send(ch, skb);
else
printk(KERN_WARNING
"%s: dev(%s) prim(%x) id(%x) no channel\n",
__func__, dev_name(&st->dev->dev), hh->prim,
hh->id);
} else if (lm == 0x8) {
WARN_ON(lm == 0x8);
ch = get_channel4id(st, hh->id);
if (ch)
return ch->send(ch, skb);
else
printk(KERN_WARNING
"%s: dev(%s) prim(%x) id(%x) no channel\n",
__func__, dev_name(&st->dev->dev), hh->prim,
hh->id);
} else {
/* broadcast not handled yet */
printk(KERN_WARNING "%s: dev(%s) prim %x not delivered\n",
__func__, dev_name(&st->dev->dev), hh->prim);
}
return -ESRCH;
}
static void
do_clear_stack(struct mISDNstack *st)
{
}
static int
mISDNStackd(void *data)
{
struct mISDNstack *st = data;
#ifdef MISDN_MSG_STATS
u64 utime, stime;
#endif
int err = 0;
	sigfillset(&current->blocked);
if (*debug & DEBUG_MSG_THREAD)
printk(KERN_DEBUG "mISDNStackd %s started\n",
dev_name(&st->dev->dev));
if (st->notify != NULL) {
complete(st->notify);
st->notify = NULL;
}
for (;;) {
struct sk_buff *skb;
if (unlikely(test_bit(mISDN_STACK_STOPPED, &st->status))) {
test_and_clear_bit(mISDN_STACK_WORK, &st->status);
test_and_clear_bit(mISDN_STACK_RUNNING, &st->status);
} else
test_and_set_bit(mISDN_STACK_RUNNING, &st->status);
while (test_bit(mISDN_STACK_WORK, &st->status)) {
skb = skb_dequeue(&st->msgq);
if (!skb) {
test_and_clear_bit(mISDN_STACK_WORK,
&st->status);
/* test if a race happens */
skb = skb_dequeue(&st->msgq);
if (!skb)
continue;
test_and_set_bit(mISDN_STACK_WORK,
&st->status);
}
#ifdef MISDN_MSG_STATS
st->msg_cnt++;
#endif
err = send_msg_to_layer(st, skb);
if (unlikely(err)) {
if (*debug & DEBUG_SEND_ERR)
printk(KERN_DEBUG
"%s: %s prim(%x) id(%x) "
"send call(%d)\n",
__func__, dev_name(&st->dev->dev),
mISDN_HEAD_PRIM(skb),
mISDN_HEAD_ID(skb), err);
dev_kfree_skb(skb);
continue;
}
if (unlikely(test_bit(mISDN_STACK_STOPPED,
&st->status))) {
test_and_clear_bit(mISDN_STACK_WORK,
&st->status);
test_and_clear_bit(mISDN_STACK_RUNNING,
&st->status);
break;
}
}
if (test_bit(mISDN_STACK_CLEARING, &st->status)) {
test_and_set_bit(mISDN_STACK_STOPPED, &st->status);
test_and_clear_bit(mISDN_STACK_RUNNING, &st->status);
do_clear_stack(st);
test_and_clear_bit(mISDN_STACK_CLEARING, &st->status);
test_and_set_bit(mISDN_STACK_RESTART, &st->status);
}
if (test_and_clear_bit(mISDN_STACK_RESTART, &st->status)) {
test_and_clear_bit(mISDN_STACK_STOPPED, &st->status);
test_and_set_bit(mISDN_STACK_RUNNING, &st->status);
if (!skb_queue_empty(&st->msgq))
test_and_set_bit(mISDN_STACK_WORK,
&st->status);
}
if (test_bit(mISDN_STACK_ABORT, &st->status))
break;
if (st->notify != NULL) {
complete(st->notify);
st->notify = NULL;
}
#ifdef MISDN_MSG_STATS
st->sleep_cnt++;
#endif
test_and_clear_bit(mISDN_STACK_ACTIVE, &st->status);
wait_event_interruptible(st->workq, (st->status &
mISDN_STACK_ACTION_MASK));
if (*debug & DEBUG_MSG_THREAD)
printk(KERN_DEBUG "%s: %s wake status %08lx\n",
__func__, dev_name(&st->dev->dev), st->status);
test_and_set_bit(mISDN_STACK_ACTIVE, &st->status);
test_and_clear_bit(mISDN_STACK_WAKEUP, &st->status);
if (test_bit(mISDN_STACK_STOPPED, &st->status)) {
test_and_clear_bit(mISDN_STACK_RUNNING, &st->status);
#ifdef MISDN_MSG_STATS
st->stopped_cnt++;
#endif
}
}
#ifdef MISDN_MSG_STATS
printk(KERN_DEBUG "mISDNStackd daemon for %s proceed %d "
"msg %d sleep %d stopped\n",
dev_name(&st->dev->dev), st->msg_cnt, st->sleep_cnt,
st->stopped_cnt);
task_cputime(st->thread, &utime, &stime);
printk(KERN_DEBUG
"mISDNStackd daemon for %s utime(%llu) stime(%llu)\n",
dev_name(&st->dev->dev), utime, stime);
printk(KERN_DEBUG
"mISDNStackd daemon for %s nvcsw(%ld) nivcsw(%ld)\n",
dev_name(&st->dev->dev), st->thread->nvcsw, st->thread->nivcsw);
printk(KERN_DEBUG "mISDNStackd daemon for %s killed now\n",
dev_name(&st->dev->dev));
#endif
test_and_set_bit(mISDN_STACK_KILLED, &st->status);
test_and_clear_bit(mISDN_STACK_RUNNING, &st->status);
test_and_clear_bit(mISDN_STACK_ACTIVE, &st->status);
test_and_clear_bit(mISDN_STACK_ABORT, &st->status);
skb_queue_purge(&st->msgq);
st->thread = NULL;
if (st->notify != NULL) {
complete(st->notify);
st->notify = NULL;
}
return 0;
}
static int
l1_receive(struct mISDNchannel *ch, struct sk_buff *skb)
{
if (!ch->st)
return -ENODEV;
__net_timestamp(skb);
_queue_message(ch->st, skb);
return 0;
}
void
set_channel_address(struct mISDNchannel *ch, u_int sapi, u_int tei)
{
ch->addr = sapi | (tei << 8);
}
void
__add_layer2(struct mISDNchannel *ch, struct mISDNstack *st)
{
list_add_tail(&ch->list, &st->layer2);
}
void
add_layer2(struct mISDNchannel *ch, struct mISDNstack *st)
{
mutex_lock(&st->lmutex);
__add_layer2(ch, st);
mutex_unlock(&st->lmutex);
}
static int
st_own_ctrl(struct mISDNchannel *ch, u_int cmd, void *arg)
{
if (!ch->st || !ch->st->layer1)
return -EINVAL;
return ch->st->layer1->ctrl(ch->st->layer1, cmd, arg);
}
int
create_stack(struct mISDNdevice *dev)
{
struct mISDNstack *newst;
int err;
DECLARE_COMPLETION_ONSTACK(done);
newst = kzalloc(sizeof(struct mISDNstack), GFP_KERNEL);
if (!newst) {
printk(KERN_ERR "kmalloc mISDN_stack failed\n");
return -ENOMEM;
}
newst->dev = dev;
INIT_LIST_HEAD(&newst->layer2);
INIT_HLIST_HEAD(&newst->l1sock.head);
rwlock_init(&newst->l1sock.lock);
init_waitqueue_head(&newst->workq);
skb_queue_head_init(&newst->msgq);
mutex_init(&newst->lmutex);
dev->D.st = newst;
err = create_teimanager(dev);
if (err) {
printk(KERN_ERR "kmalloc teimanager failed\n");
kfree(newst);
return err;
}
dev->teimgr->peer = &newst->own;
dev->teimgr->recv = mISDN_queue_message;
dev->teimgr->st = newst;
newst->layer1 = &dev->D;
dev->D.recv = l1_receive;
dev->D.peer = &newst->own;
newst->own.st = newst;
newst->own.ctrl = st_own_ctrl;
newst->own.send = mISDN_queue_message;
newst->own.recv = mISDN_queue_message;
if (*debug & DEBUG_CORE_FUNC)
printk(KERN_DEBUG "%s: st(%s)\n", __func__,
dev_name(&newst->dev->dev));
newst->notify = &done;
newst->thread = kthread_run(mISDNStackd, (void *)newst, "mISDN_%s",
dev_name(&newst->dev->dev));
if (IS_ERR(newst->thread)) {
err = PTR_ERR(newst->thread);
printk(KERN_ERR
"mISDN:cannot create kernel thread for %s (%d)\n",
dev_name(&newst->dev->dev), err);
delete_teimanager(dev->teimgr);
kfree(newst);
} else
wait_for_completion(&done);
return err;
}
int
connect_layer1(struct mISDNdevice *dev, struct mISDNchannel *ch,
u_int protocol, struct sockaddr_mISDN *adr)
{
struct mISDN_sock *msk = container_of(ch, struct mISDN_sock, ch);
struct channel_req rq;
int err;
if (*debug & DEBUG_CORE_FUNC)
printk(KERN_DEBUG "%s: %s proto(%x) adr(%d %d %d %d)\n",
__func__, dev_name(&dev->dev), protocol, adr->dev,
adr->channel, adr->sapi, adr->tei);
switch (protocol) {
case ISDN_P_NT_S0:
case ISDN_P_NT_E1:
case ISDN_P_TE_S0:
case ISDN_P_TE_E1:
ch->recv = mISDN_queue_message;
ch->peer = &dev->D.st->own;
ch->st = dev->D.st;
rq.protocol = protocol;
rq.adr.channel = adr->channel;
err = dev->D.ctrl(&dev->D, OPEN_CHANNEL, &rq);
printk(KERN_DEBUG "%s: ret %d (dev %d)\n", __func__, err,
dev->id);
if (err)
return err;
write_lock_bh(&dev->D.st->l1sock.lock);
sk_add_node(&msk->sk, &dev->D.st->l1sock.head);
write_unlock_bh(&dev->D.st->l1sock.lock);
break;
default:
return -ENOPROTOOPT;
}
return 0;
}
int
connect_Bstack(struct mISDNdevice *dev, struct mISDNchannel *ch,
u_int protocol, struct sockaddr_mISDN *adr)
{
struct channel_req rq, rq2;
int pmask, err;
struct Bprotocol *bp;
if (*debug & DEBUG_CORE_FUNC)
printk(KERN_DEBUG "%s: %s proto(%x) adr(%d %d %d %d)\n",
__func__, dev_name(&dev->dev), protocol,
adr->dev, adr->channel, adr->sapi,
adr->tei);
ch->st = dev->D.st;
pmask = 1 << (protocol & ISDN_P_B_MASK);
if (pmask & dev->Bprotocols) {
rq.protocol = protocol;
rq.adr = *adr;
err = dev->D.ctrl(&dev->D, OPEN_CHANNEL, &rq);
if (err)
return err;
ch->recv = rq.ch->send;
ch->peer = rq.ch;
rq.ch->recv = ch->send;
rq.ch->peer = ch;
rq.ch->st = dev->D.st;
} else {
bp = get_Bprotocol4mask(pmask);
if (!bp)
return -ENOPROTOOPT;
rq2.protocol = protocol;
rq2.adr = *adr;
rq2.ch = ch;
err = bp->create(&rq2);
if (err)
return err;
ch->recv = rq2.ch->send;
ch->peer = rq2.ch;
rq2.ch->st = dev->D.st;
rq.protocol = rq2.protocol;
rq.adr = *adr;
err = dev->D.ctrl(&dev->D, OPEN_CHANNEL, &rq);
if (err) {
rq2.ch->ctrl(rq2.ch, CLOSE_CHANNEL, NULL);
return err;
}
rq2.ch->recv = rq.ch->send;
rq2.ch->peer = rq.ch;
rq.ch->recv = rq2.ch->send;
rq.ch->peer = rq2.ch;
rq.ch->st = dev->D.st;
}
ch->protocol = protocol;
ch->nr = rq.ch->nr;
return 0;
}
int
create_l2entity(struct mISDNdevice *dev, struct mISDNchannel *ch,
u_int protocol, struct sockaddr_mISDN *adr)
{
struct channel_req rq;
int err;
if (*debug & DEBUG_CORE_FUNC)
printk(KERN_DEBUG "%s: %s proto(%x) adr(%d %d %d %d)\n",
__func__, dev_name(&dev->dev), protocol,
adr->dev, adr->channel, adr->sapi,
adr->tei);
rq.protocol = ISDN_P_TE_S0;
if (dev->Dprotocols & (1 << ISDN_P_TE_E1))
rq.protocol = ISDN_P_TE_E1;
switch (protocol) {
case ISDN_P_LAPD_NT:
rq.protocol = ISDN_P_NT_S0;
if (dev->Dprotocols & (1 << ISDN_P_NT_E1))
rq.protocol = ISDN_P_NT_E1;
fallthrough;
case ISDN_P_LAPD_TE:
ch->recv = mISDN_queue_message;
ch->peer = &dev->D.st->own;
ch->st = dev->D.st;
rq.adr.channel = 0;
err = dev->D.ctrl(&dev->D, OPEN_CHANNEL, &rq);
printk(KERN_DEBUG "%s: ret 1 %d\n", __func__, err);
if (err)
break;
rq.protocol = protocol;
rq.adr = *adr;
rq.ch = ch;
err = dev->teimgr->ctrl(dev->teimgr, OPEN_CHANNEL, &rq);
printk(KERN_DEBUG "%s: ret 2 %d\n", __func__, err);
if (!err) {
if ((protocol == ISDN_P_LAPD_NT) && !rq.ch)
break;
add_layer2(rq.ch, dev->D.st);
rq.ch->recv = mISDN_queue_message;
rq.ch->peer = &dev->D.st->own;
rq.ch->ctrl(rq.ch, OPEN_CHANNEL, NULL); /* can't fail */
}
break;
default:
err = -EPROTONOSUPPORT;
}
return err;
}
void
delete_channel(struct mISDNchannel *ch)
{
struct mISDN_sock *msk = container_of(ch, struct mISDN_sock, ch);
struct mISDNchannel *pch;
if (!ch->st) {
printk(KERN_WARNING "%s: no stack\n", __func__);
return;
}
if (*debug & DEBUG_CORE_FUNC)
printk(KERN_DEBUG "%s: st(%s) protocol(%x)\n", __func__,
dev_name(&ch->st->dev->dev), ch->protocol);
if (ch->protocol >= ISDN_P_B_START) {
if (ch->peer) {
ch->peer->ctrl(ch->peer, CLOSE_CHANNEL, NULL);
ch->peer = NULL;
}
return;
}
switch (ch->protocol) {
case ISDN_P_NT_S0:
case ISDN_P_TE_S0:
case ISDN_P_NT_E1:
case ISDN_P_TE_E1:
write_lock_bh(&ch->st->l1sock.lock);
sk_del_node_init(&msk->sk);
write_unlock_bh(&ch->st->l1sock.lock);
ch->st->dev->D.ctrl(&ch->st->dev->D, CLOSE_CHANNEL, NULL);
break;
case ISDN_P_LAPD_TE:
pch = get_channel4id(ch->st, ch->nr);
if (pch) {
mutex_lock(&ch->st->lmutex);
list_del(&pch->list);
mutex_unlock(&ch->st->lmutex);
pch->ctrl(pch, CLOSE_CHANNEL, NULL);
pch = ch->st->dev->teimgr;
pch->ctrl(pch, CLOSE_CHANNEL, NULL);
} else
printk(KERN_WARNING "%s: no l2 channel\n",
__func__);
break;
case ISDN_P_LAPD_NT:
pch = ch->st->dev->teimgr;
if (pch) {
pch->ctrl(pch, CLOSE_CHANNEL, NULL);
} else
printk(KERN_WARNING "%s: no l2 channel\n",
__func__);
break;
default:
break;
}
return;
}
void
delete_stack(struct mISDNdevice *dev)
{
struct mISDNstack *st = dev->D.st;
DECLARE_COMPLETION_ONSTACK(done);
if (*debug & DEBUG_CORE_FUNC)
printk(KERN_DEBUG "%s: st(%s)\n", __func__,
dev_name(&st->dev->dev));
if (dev->teimgr)
delete_teimanager(dev->teimgr);
if (st->thread) {
if (st->notify) {
printk(KERN_WARNING "%s: notifier in use\n",
__func__);
complete(st->notify);
}
st->notify = &done;
test_and_set_bit(mISDN_STACK_ABORT, &st->status);
test_and_set_bit(mISDN_STACK_WAKEUP, &st->status);
wake_up_interruptible(&st->workq);
wait_for_completion(&done);
}
if (!list_empty(&st->layer2))
printk(KERN_WARNING "%s: layer2 list not empty\n",
__func__);
if (!hlist_empty(&st->l1sock.head))
printk(KERN_WARNING "%s: layer1 list not empty\n",
__func__);
kfree(st);
}
void
mISDN_initstack(u_int *dp)
{
debug = dp;
}
| linux-master | drivers/isdn/mISDN/stack.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
*
* Author Karsten Keil <[email protected]>
*
* Copyright 2008 by Karsten Keil <[email protected]>
*/
#include <linux/mISDNif.h>
#include <linux/slab.h>
#include "core.h"
#include "fsm.h"
#include "layer2.h"
static u_int *debug;
static
struct Fsm l2fsm = {NULL, 0, 0, NULL, NULL};
static char *strL2State[] =
{
"ST_L2_1",
"ST_L2_2",
"ST_L2_3",
"ST_L2_4",
"ST_L2_5",
"ST_L2_6",
"ST_L2_7",
"ST_L2_8",
};
enum {
EV_L2_UI,
EV_L2_SABME,
EV_L2_DISC,
EV_L2_DM,
EV_L2_UA,
EV_L2_FRMR,
EV_L2_SUPER,
EV_L2_I,
EV_L2_DL_DATA,
EV_L2_ACK_PULL,
EV_L2_DL_UNITDATA,
EV_L2_DL_ESTABLISH_REQ,
EV_L2_DL_RELEASE_REQ,
EV_L2_MDL_ASSIGN,
EV_L2_MDL_REMOVE,
EV_L2_MDL_ERROR,
EV_L1_DEACTIVATE,
EV_L2_T200,
EV_L2_T203,
EV_L2_T200I,
EV_L2_T203I,
EV_L2_SET_OWN_BUSY,
EV_L2_CLEAR_OWN_BUSY,
EV_L2_FRAME_ERROR,
};
#define L2_EVENT_COUNT (EV_L2_FRAME_ERROR + 1)
static char *strL2Event[] =
{
"EV_L2_UI",
"EV_L2_SABME",
"EV_L2_DISC",
"EV_L2_DM",
"EV_L2_UA",
"EV_L2_FRMR",
"EV_L2_SUPER",
"EV_L2_I",
"EV_L2_DL_DATA",
"EV_L2_ACK_PULL",
"EV_L2_DL_UNITDATA",
"EV_L2_DL_ESTABLISH_REQ",
"EV_L2_DL_RELEASE_REQ",
"EV_L2_MDL_ASSIGN",
"EV_L2_MDL_REMOVE",
"EV_L2_MDL_ERROR",
"EV_L1_DEACTIVATE",
"EV_L2_T200",
"EV_L2_T203",
"EV_L2_T200I",
"EV_L2_T203I",
"EV_L2_SET_OWN_BUSY",
"EV_L2_CLEAR_OWN_BUSY",
"EV_L2_FRAME_ERROR",
};
static void
l2m_debug(struct FsmInst *fi, char *fmt, ...)
{
struct layer2 *l2 = fi->userdata;
struct va_format vaf;
va_list va;
if (!(*debug & DEBUG_L2_FSM))
return;
va_start(va, fmt);
vaf.fmt = fmt;
vaf.va = &va;
printk(KERN_DEBUG "%s l2 (sapi %d tei %d): %pV\n",
mISDNDevName4ch(&l2->ch), l2->sapi, l2->tei, &vaf);
va_end(va);
}
inline u_int
l2headersize(struct layer2 *l2, int ui)
{
return ((test_bit(FLG_MOD128, &l2->flag) && (!ui)) ? 2 : 1) +
(test_bit(FLG_LAPD, &l2->flag) ? 2 : 1);
}
inline u_int
l2addrsize(struct layer2 *l2)
{
return test_bit(FLG_LAPD, &l2->flag) ? 2 : 1;
}
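/*
 * Editor's note (descriptive, derived from l2_newid() below): the generated
 * id packs a per-instance sequence number into bits 16..30, the TEI into
 * bits 8..15 and the SAPI into bits 0..7; e.g. sequence 5 with tei 127 and
 * sapi 0 yields 0x00057f00. The sequence wraps from 0x7fff back to 1.
 */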
static u_int
l2_newid(struct layer2 *l2)
{
u_int id;
id = l2->next_id++;
if (id == 0x7fff)
l2->next_id = 1;
id <<= 16;
id |= l2->tei << 8;
id |= l2->sapi;
return id;
}
static void
l2up(struct layer2 *l2, u_int prim, struct sk_buff *skb)
{
int err;
if (!l2->up)
return;
mISDN_HEAD_PRIM(skb) = prim;
mISDN_HEAD_ID(skb) = (l2->ch.nr << 16) | l2->ch.addr;
err = l2->up->send(l2->up, skb);
if (err) {
printk(KERN_WARNING "%s: dev %s err=%d\n", __func__,
mISDNDevName4ch(&l2->ch), err);
dev_kfree_skb(skb);
}
}
static void
l2up_create(struct layer2 *l2, u_int prim, int len, void *arg)
{
struct sk_buff *skb;
struct mISDNhead *hh;
int err;
if (!l2->up)
return;
skb = mI_alloc_skb(len, GFP_ATOMIC);
if (!skb)
return;
hh = mISDN_HEAD_P(skb);
hh->prim = prim;
hh->id = (l2->ch.nr << 16) | l2->ch.addr;
if (len)
skb_put_data(skb, arg, len);
err = l2->up->send(l2->up, skb);
if (err) {
printk(KERN_WARNING "%s: dev %s err=%d\n", __func__,
mISDNDevName4ch(&l2->ch), err);
dev_kfree_skb(skb);
}
}
static int
l2down_skb(struct layer2 *l2, struct sk_buff *skb) {
int ret;
ret = l2->ch.recv(l2->ch.peer, skb);
if (ret && (*debug & DEBUG_L2_RECV))
printk(KERN_DEBUG "l2down_skb: dev %s ret(%d)\n",
mISDNDevName4ch(&l2->ch), ret);
return ret;
}
static int
l2down_raw(struct layer2 *l2, struct sk_buff *skb)
{
struct mISDNhead *hh = mISDN_HEAD_P(skb);
if (hh->prim == PH_DATA_REQ) {
if (test_and_set_bit(FLG_L1_NOTREADY, &l2->flag)) {
skb_queue_tail(&l2->down_queue, skb);
return 0;
}
l2->down_id = mISDN_HEAD_ID(skb);
}
return l2down_skb(l2, skb);
}
static int
l2down(struct layer2 *l2, u_int prim, u_int id, struct sk_buff *skb)
{
struct mISDNhead *hh = mISDN_HEAD_P(skb);
hh->prim = prim;
hh->id = id;
return l2down_raw(l2, skb);
}
static int
l2down_create(struct layer2 *l2, u_int prim, u_int id, int len, void *arg)
{
struct sk_buff *skb;
int err;
struct mISDNhead *hh;
skb = mI_alloc_skb(len, GFP_ATOMIC);
if (!skb)
return -ENOMEM;
hh = mISDN_HEAD_P(skb);
hh->prim = prim;
hh->id = id;
if (len)
skb_put_data(skb, arg, len);
err = l2down_raw(l2, skb);
if (err)
dev_kfree_skb(skb);
return err;
}
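/*
 * Editor's note (descriptive, derived from the code below): PH_DATA
 * confirmations drive a simple flow control. While FLG_L1_NOTREADY is set,
 * further PH_DATA_REQ frames are parked in down_queue by l2down_raw(); each
 * confirmation for the current down_id releases the next queued frame, and
 * once the queue drains the flag is cleared and EV_L2_ACK_PULL is signalled
 * to the layer-2 state machine.
 */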
static int
ph_data_confirm(struct layer2 *l2, struct mISDNhead *hh, struct sk_buff *skb) {
struct sk_buff *nskb = skb;
int ret = -EAGAIN;
if (test_bit(FLG_L1_NOTREADY, &l2->flag)) {
if (hh->id == l2->down_id) {
nskb = skb_dequeue(&l2->down_queue);
if (nskb) {
l2->down_id = mISDN_HEAD_ID(nskb);
if (l2down_skb(l2, nskb)) {
dev_kfree_skb(nskb);
l2->down_id = MISDN_ID_NONE;
}
} else
l2->down_id = MISDN_ID_NONE;
if (ret) {
dev_kfree_skb(skb);
ret = 0;
}
if (l2->down_id == MISDN_ID_NONE) {
test_and_clear_bit(FLG_L1_NOTREADY, &l2->flag);
mISDN_FsmEvent(&l2->l2m, EV_L2_ACK_PULL, NULL);
}
}
}
if (!test_and_set_bit(FLG_L1_NOTREADY, &l2->flag)) {
nskb = skb_dequeue(&l2->down_queue);
if (nskb) {
l2->down_id = mISDN_HEAD_ID(nskb);
if (l2down_skb(l2, nskb)) {
dev_kfree_skb(nskb);
l2->down_id = MISDN_ID_NONE;
test_and_clear_bit(FLG_L1_NOTREADY, &l2->flag);
}
} else
test_and_clear_bit(FLG_L1_NOTREADY, &l2->flag);
}
return ret;
}
static void
l2_timeout(struct FsmInst *fi, int event, void *arg)
{
struct layer2 *l2 = fi->userdata;
struct sk_buff *skb;
struct mISDNhead *hh;
skb = mI_alloc_skb(0, GFP_ATOMIC);
if (!skb) {
printk(KERN_WARNING "%s: L2(%d,%d) nr:%x timer %s no skb\n",
mISDNDevName4ch(&l2->ch), l2->sapi, l2->tei,
l2->ch.nr, event == EV_L2_T200 ? "T200" : "T203");
return;
}
hh = mISDN_HEAD_P(skb);
hh->prim = event == EV_L2_T200 ? DL_TIMER200_IND : DL_TIMER203_IND;
hh->id = l2->ch.nr;
if (*debug & DEBUG_TIMER)
printk(KERN_DEBUG "%s: L2(%d,%d) nr:%x timer %s expired\n",
mISDNDevName4ch(&l2->ch), l2->sapi, l2->tei,
l2->ch.nr, event == EV_L2_T200 ? "T200" : "T203");
if (l2->ch.st)
l2->ch.st->own.recv(&l2->ch.st->own, skb);
}
static int
l2mgr(struct layer2 *l2, u_int prim, void *arg) {
long c = (long)arg;
printk(KERN_WARNING "l2mgr: dev %s addr:%x prim %x %c\n",
mISDNDevName4ch(&l2->ch), l2->id, prim, (char)c);
if (test_bit(FLG_LAPD, &l2->flag) &&
!test_bit(FLG_FIXED_TEI, &l2->flag)) {
switch (c) {
case 'C':
case 'D':
case 'G':
case 'H':
l2_tei(l2, prim, (u_long)arg);
break;
}
}
return 0;
}
static void
set_peer_busy(struct layer2 *l2) {
test_and_set_bit(FLG_PEER_BUSY, &l2->flag);
if (skb_queue_len(&l2->i_queue) || skb_queue_len(&l2->ui_queue))
test_and_set_bit(FLG_L2BLOCK, &l2->flag);
}
static void
clear_peer_busy(struct layer2 *l2) {
if (test_and_clear_bit(FLG_PEER_BUSY, &l2->flag))
test_and_clear_bit(FLG_L2BLOCK, &l2->flag);
}
static void
InitWin(struct layer2 *l2)
{
int i;
for (i = 0; i < MAX_WINDOW; i++)
l2->windowar[i] = NULL;
}
static int
freewin(struct layer2 *l2)
{
int i, cnt = 0;
for (i = 0; i < MAX_WINDOW; i++) {
if (l2->windowar[i]) {
cnt++;
dev_kfree_skb(l2->windowar[i]);
l2->windowar[i] = NULL;
}
}
return cnt;
}
static void
ReleaseWin(struct layer2 *l2)
{
int cnt = freewin(l2);
if (cnt)
printk(KERN_WARNING
"isdnl2 freed %d skbuffs in release\n", cnt);
}
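/*
 * Editor's note (illustrative, values made up): cansend() allows transmission
 * while the number of outstanding I-frames, (vs - va) modulo 8 or 128, is
 * below the configured window and the peer has not signalled busy; e.g. with
 * vs = 5, va = 3 and window = 1 the difference is 2, so sending is blocked
 * until an acknowledgement advances va.
 */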
inline unsigned int
cansend(struct layer2 *l2)
{
unsigned int p1;
if (test_bit(FLG_MOD128, &l2->flag))
p1 = (l2->vs - l2->va) % 128;
else
p1 = (l2->vs - l2->va) % 8;
return (p1 < l2->window) && !test_bit(FLG_PEER_BUSY, &l2->flag);
}
inline void
clear_exception(struct layer2 *l2)
{
test_and_clear_bit(FLG_ACK_PEND, &l2->flag);
test_and_clear_bit(FLG_REJEXC, &l2->flag);
test_and_clear_bit(FLG_OWN_BUSY, &l2->flag);
clear_peer_busy(l2);
}
static int
sethdraddr(struct layer2 *l2, u_char *header, int rsp)
{
u_char *ptr = header;
int crbit = rsp;
if (test_bit(FLG_LAPD, &l2->flag)) {
if (test_bit(FLG_LAPD_NET, &l2->flag))
crbit = !crbit;
*ptr++ = (l2->sapi << 2) | (crbit ? 2 : 0);
*ptr++ = (l2->tei << 1) | 1;
return 2;
} else {
if (test_bit(FLG_ORIG, &l2->flag))
crbit = !crbit;
if (crbit)
*ptr++ = l2->addr.B;
else
*ptr++ = l2->addr.A;
return 1;
}
}
static inline void
enqueue_super(struct layer2 *l2, struct sk_buff *skb)
{
if (l2down(l2, PH_DATA_REQ, l2_newid(l2), skb))
dev_kfree_skb(skb);
}
static inline void
enqueue_ui(struct layer2 *l2, struct sk_buff *skb)
{
if (l2->tm)
l2_tei(l2, MDL_STATUS_UI_IND, 0);
if (l2down(l2, PH_DATA_REQ, l2_newid(l2), skb))
dev_kfree_skb(skb);
}
inline int
IsUI(u_char *data)
{
return (data[0] & 0xef) == UI;
}
inline int
IsUA(u_char *data)
{
return (data[0] & 0xef) == UA;
}
inline int
IsDM(u_char *data)
{
return (data[0] & 0xef) == DM;
}
inline int
IsDISC(u_char *data)
{
return (data[0] & 0xef) == DISC;
}
inline int
IsRR(u_char *data, struct layer2 *l2)
{
if (test_bit(FLG_MOD128, &l2->flag))
return data[0] == RR;
else
return (data[0] & 0xf) == 1;
}
inline int
IsSFrame(u_char *data, struct layer2 *l2)
{
register u_char d = *data;
if (!test_bit(FLG_MOD128, &l2->flag))
d &= 0xf;
return ((d & 0xf3) == 1) && ((d & 0x0c) != 0x0c);
}
inline int
IsSABME(u_char *data, struct layer2 *l2)
{
u_char d = data[0] & ~0x10;
return test_bit(FLG_MOD128, &l2->flag) ? d == SABME : d == SABM;
}
inline int
IsREJ(u_char *data, struct layer2 *l2)
{
return test_bit(FLG_MOD128, &l2->flag) ?
data[0] == REJ : (data[0] & 0xf) == REJ;
}
inline int
IsFRMR(u_char *data)
{
return (data[0] & 0xef) == FRMR;
}
inline int
IsRNR(u_char *data, struct layer2 *l2)
{
return test_bit(FLG_MOD128, &l2->flag) ?
data[0] == RNR : (data[0] & 0xf) == RNR;
}
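/*
 * Frame validation helpers: each returns 0 if the received frame is
 * acceptable, otherwise a single-character error code ('L' wrong
 * command/response bit, 'N' wrong or too short length, 'O' information
 * field too long) which is reported via the frame error path.
 */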
static int
iframe_error(struct layer2 *l2, struct sk_buff *skb)
{
u_int i;
int rsp = *skb->data & 0x2;
i = l2addrsize(l2) + (test_bit(FLG_MOD128, &l2->flag) ? 2 : 1);
if (test_bit(FLG_ORIG, &l2->flag))
rsp = !rsp;
if (rsp)
return 'L';
if (skb->len < i)
return 'N';
if ((skb->len - i) > l2->maxlen)
return 'O';
return 0;
}
static int
super_error(struct layer2 *l2, struct sk_buff *skb)
{
if (skb->len != l2addrsize(l2) +
(test_bit(FLG_MOD128, &l2->flag) ? 2 : 1))
return 'N';
return 0;
}
static int
unnum_error(struct layer2 *l2, struct sk_buff *skb, int wantrsp)
{
int rsp = (*skb->data & 0x2) >> 1;
if (test_bit(FLG_ORIG, &l2->flag))
rsp = !rsp;
if (rsp != wantrsp)
return 'L';
if (skb->len != l2addrsize(l2) + 1)
return 'N';
return 0;
}
static int
UI_error(struct layer2 *l2, struct sk_buff *skb)
{
int rsp = *skb->data & 0x2;
if (test_bit(FLG_ORIG, &l2->flag))
rsp = !rsp;
if (rsp)
return 'L';
if (skb->len > l2->maxlen + l2addrsize(l2) + 1)
return 'O';
return 0;
}
static int
FRMR_error(struct layer2 *l2, struct sk_buff *skb)
{
u_int headers = l2addrsize(l2) + 1;
u_char *datap = skb->data + headers;
int rsp = *skb->data & 0x2;
if (test_bit(FLG_ORIG, &l2->flag))
rsp = !rsp;
if (!rsp)
return 'L';
if (test_bit(FLG_MOD128, &l2->flag)) {
if (skb->len < headers + 5)
return 'N';
else if (*debug & DEBUG_L2)
l2m_debug(&l2->l2m,
"FRMR information %2x %2x %2x %2x %2x",
datap[0], datap[1], datap[2], datap[3], datap[4]);
} else {
if (skb->len < headers + 3)
return 'N';
else if (*debug & DEBUG_L2)
l2m_debug(&l2->l2m,
"FRMR information %2x %2x %2x",
datap[0], datap[1], datap[2]);
}
return 0;
}
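/*
 * An N(R) is legal if it acknowledges no more than the frames currently
 * outstanding, i.e. (nr - V(A)) <= (V(S) - V(A)) in modulo 8/128 arithmetic.
 */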
static unsigned int
legalnr(struct layer2 *l2, unsigned int nr)
{
if (test_bit(FLG_MOD128, &l2->flag))
return ((nr - l2->va) % 128) <= ((l2->vs - l2->va) % 128);
else
return ((nr - l2->va) % 8) <= ((l2->vs - l2->va) % 8);
}
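/*
 * Advance V(A) up to nr and drop the acknowledged I-frames kept in
 * windowar[]; they are collected on tmp_queue and freed after the loop.
 */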
static void
setva(struct layer2 *l2, unsigned int nr)
{
struct sk_buff *skb;
while (l2->va != nr) {
l2->va++;
if (test_bit(FLG_MOD128, &l2->flag))
l2->va %= 128;
else
l2->va %= 8;
if (l2->windowar[l2->sow]) {
skb_trim(l2->windowar[l2->sow], 0);
skb_queue_tail(&l2->tmp_queue, l2->windowar[l2->sow]);
l2->windowar[l2->sow] = NULL;
}
l2->sow = (l2->sow + 1) % l2->window;
}
skb = skb_dequeue(&l2->tmp_queue);
while (skb) {
dev_kfree_skb(skb);
skb = skb_dequeue(&l2->tmp_queue);
}
}
static void
send_uframe(struct layer2 *l2, struct sk_buff *skb, u_char cmd, u_char cr)
{
u_char tmp[MAX_L2HEADER_LEN];
int i;
i = sethdraddr(l2, tmp, cr);
tmp[i++] = cmd;
if (skb)
skb_trim(skb, 0);
else {
skb = mI_alloc_skb(i, GFP_ATOMIC);
if (!skb) {
printk(KERN_WARNING "%s: can't alloc skbuff in %s\n",
mISDNDevName4ch(&l2->ch), __func__);
return;
}
}
skb_put_data(skb, tmp, i);
enqueue_super(l2, skb);
}
inline u_char
get_PollFlag(struct layer2 *l2, struct sk_buff *skb)
{
return skb->data[l2addrsize(l2)] & 0x10;
}
inline u_char
get_PollFlagFree(struct layer2 *l2, struct sk_buff *skb)
{
u_char PF;
PF = get_PollFlag(l2, skb);
dev_kfree_skb(skb);
return PF;
}
inline void
start_t200(struct layer2 *l2, int i)
{
mISDN_FsmAddTimer(&l2->t200, l2->T200, EV_L2_T200, NULL, i);
test_and_set_bit(FLG_T200_RUN, &l2->flag);
}
inline void
restart_t200(struct layer2 *l2, int i)
{
mISDN_FsmRestartTimer(&l2->t200, l2->T200, EV_L2_T200, NULL, i);
test_and_set_bit(FLG_T200_RUN, &l2->flag);
}
inline void
stop_t200(struct layer2 *l2, int i)
{
if (test_and_clear_bit(FLG_T200_RUN, &l2->flag))
mISDN_FsmDelTimer(&l2->t200, i);
}
inline void
st5_dl_release_l2l3(struct layer2 *l2)
{
int pr;
if (test_and_clear_bit(FLG_PEND_REL, &l2->flag))
pr = DL_RELEASE_CNF;
else
pr = DL_RELEASE_IND;
l2up_create(l2, pr, 0, NULL);
}
inline void
lapb_dl_release_l2l3(struct layer2 *l2, int f)
{
if (test_bit(FLG_LAPB, &l2->flag))
l2down_create(l2, PH_DEACTIVATE_REQ, l2_newid(l2), 0, NULL);
l2up_create(l2, f, 0, NULL);
}
static void
establishlink(struct FsmInst *fi)
{
struct layer2 *l2 = fi->userdata;
u_char cmd;
clear_exception(l2);
l2->rc = 0;
cmd = (test_bit(FLG_MOD128, &l2->flag) ? SABME : SABM) | 0x10;
send_uframe(l2, NULL, cmd, CMD);
mISDN_FsmDelTimer(&l2->t203, 1);
restart_t200(l2, 1);
test_and_clear_bit(FLG_PEND_REL, &l2->flag);
freewin(l2);
mISDN_FsmChangeState(fi, ST_L2_5);
}
static void
l2_mdl_error_ua(struct FsmInst *fi, int event, void *arg)
{
struct sk_buff *skb = arg;
struct layer2 *l2 = fi->userdata;
if (get_PollFlagFree(l2, skb))
l2mgr(l2, MDL_ERROR_IND, (void *) 'C');
else
l2mgr(l2, MDL_ERROR_IND, (void *) 'D');
}
static void
l2_mdl_error_dm(struct FsmInst *fi, int event, void *arg)
{
struct sk_buff *skb = arg;
struct layer2 *l2 = fi->userdata;
if (get_PollFlagFree(l2, skb))
l2mgr(l2, MDL_ERROR_IND, (void *) 'B');
else {
l2mgr(l2, MDL_ERROR_IND, (void *) 'E');
establishlink(fi);
test_and_clear_bit(FLG_L3_INIT, &l2->flag);
}
}
static void
l2_st8_mdl_error_dm(struct FsmInst *fi, int event, void *arg)
{
struct sk_buff *skb = arg;
struct layer2 *l2 = fi->userdata;
if (get_PollFlagFree(l2, skb))
l2mgr(l2, MDL_ERROR_IND, (void *) 'B');
else
l2mgr(l2, MDL_ERROR_IND, (void *) 'E');
establishlink(fi);
test_and_clear_bit(FLG_L3_INIT, &l2->flag);
}
static void
l2_go_st3(struct FsmInst *fi, int event, void *arg)
{
dev_kfree_skb((struct sk_buff *)arg);
mISDN_FsmChangeState(fi, ST_L2_3);
}
static void
l2_mdl_assign(struct FsmInst *fi, int event, void *arg)
{
struct layer2 *l2 = fi->userdata;
mISDN_FsmChangeState(fi, ST_L2_3);
dev_kfree_skb((struct sk_buff *)arg);
l2_tei(l2, MDL_ASSIGN_IND, 0);
}
static void
l2_queue_ui_assign(struct FsmInst *fi, int event, void *arg)
{
struct layer2 *l2 = fi->userdata;
struct sk_buff *skb = arg;
skb_queue_tail(&l2->ui_queue, skb);
mISDN_FsmChangeState(fi, ST_L2_2);
l2_tei(l2, MDL_ASSIGN_IND, 0);
}
static void
l2_queue_ui(struct FsmInst *fi, int event, void *arg)
{
struct layer2 *l2 = fi->userdata;
struct sk_buff *skb = arg;
skb_queue_tail(&l2->ui_queue, skb);
}
static void
tx_ui(struct layer2 *l2)
{
struct sk_buff *skb;
u_char header[MAX_L2HEADER_LEN];
int i;
i = sethdraddr(l2, header, CMD);
if (test_bit(FLG_LAPD_NET, &l2->flag))
header[1] = 0xff; /* tei 127 */
header[i++] = UI;
while ((skb = skb_dequeue(&l2->ui_queue))) {
memcpy(skb_push(skb, i), header, i);
enqueue_ui(l2, skb);
}
}
static void
l2_send_ui(struct FsmInst *fi, int event, void *arg)
{
struct layer2 *l2 = fi->userdata;
struct sk_buff *skb = arg;
skb_queue_tail(&l2->ui_queue, skb);
tx_ui(l2);
}
static void
l2_got_ui(struct FsmInst *fi, int event, void *arg)
{
struct layer2 *l2 = fi->userdata;
struct sk_buff *skb = arg;
skb_pull(skb, l2headersize(l2, 1));
/*
* in states 1-3 for broadcast
*/
if (l2->tm)
l2_tei(l2, MDL_STATUS_UI_IND, 0);
l2up(l2, DL_UNITDATA_IND, skb);
}
static void
l2_establish(struct FsmInst *fi, int event, void *arg)
{
struct sk_buff *skb = arg;
struct layer2 *l2 = fi->userdata;
establishlink(fi);
test_and_set_bit(FLG_L3_INIT, &l2->flag);
dev_kfree_skb(skb);
}
static void
l2_discard_i_setl3(struct FsmInst *fi, int event, void *arg)
{
struct sk_buff *skb = arg;
struct layer2 *l2 = fi->userdata;
skb_queue_purge(&l2->i_queue);
test_and_set_bit(FLG_L3_INIT, &l2->flag);
test_and_clear_bit(FLG_PEND_REL, &l2->flag);
dev_kfree_skb(skb);
}
static void
l2_l3_reestablish(struct FsmInst *fi, int event, void *arg)
{
struct sk_buff *skb = arg;
struct layer2 *l2 = fi->userdata;
skb_queue_purge(&l2->i_queue);
establishlink(fi);
test_and_set_bit(FLG_L3_INIT, &l2->flag);
dev_kfree_skb(skb);
}
static void
l2_release(struct FsmInst *fi, int event, void *arg)
{
struct layer2 *l2 = fi->userdata;
struct sk_buff *skb = arg;
skb_trim(skb, 0);
l2up(l2, DL_RELEASE_CNF, skb);
}
static void
l2_pend_rel(struct FsmInst *fi, int event, void *arg)
{
struct sk_buff *skb = arg;
struct layer2 *l2 = fi->userdata;
test_and_set_bit(FLG_PEND_REL, &l2->flag);
dev_kfree_skb(skb);
}
static void
l2_disconnect(struct FsmInst *fi, int event, void *arg)
{
struct layer2 *l2 = fi->userdata;
struct sk_buff *skb = arg;
skb_queue_purge(&l2->i_queue);
freewin(l2);
mISDN_FsmChangeState(fi, ST_L2_6);
l2->rc = 0;
send_uframe(l2, NULL, DISC | 0x10, CMD);
mISDN_FsmDelTimer(&l2->t203, 1);
restart_t200(l2, 2);
dev_kfree_skb(skb);
}
static void
l2_start_multi(struct FsmInst *fi, int event, void *arg)
{
struct layer2 *l2 = fi->userdata;
struct sk_buff *skb = arg;
l2->vs = 0;
l2->va = 0;
l2->vr = 0;
l2->sow = 0;
clear_exception(l2);
send_uframe(l2, NULL, UA | get_PollFlag(l2, skb), RSP);
mISDN_FsmChangeState(fi, ST_L2_7);
mISDN_FsmAddTimer(&l2->t203, l2->T203, EV_L2_T203, NULL, 3);
skb_trim(skb, 0);
l2up(l2, DL_ESTABLISH_IND, skb);
if (l2->tm)
l2_tei(l2, MDL_STATUS_UP_IND, 0);
}
static void
l2_send_UA(struct FsmInst *fi, int event, void *arg)
{
struct layer2 *l2 = fi->userdata;
struct sk_buff *skb = arg;
send_uframe(l2, skb, UA | get_PollFlag(l2, skb), RSP);
}
static void
l2_send_DM(struct FsmInst *fi, int event, void *arg)
{
struct layer2 *l2 = fi->userdata;
struct sk_buff *skb = arg;
send_uframe(l2, skb, DM | get_PollFlag(l2, skb), RSP);
}
static void
l2_restart_multi(struct FsmInst *fi, int event, void *arg)
{
struct layer2 *l2 = fi->userdata;
struct sk_buff *skb = arg;
int est = 0;
send_uframe(l2, skb, UA | get_PollFlag(l2, skb), RSP);
l2mgr(l2, MDL_ERROR_IND, (void *) 'F');
if (l2->vs != l2->va) {
skb_queue_purge(&l2->i_queue);
est = 1;
}
clear_exception(l2);
l2->vs = 0;
l2->va = 0;
l2->vr = 0;
l2->sow = 0;
mISDN_FsmChangeState(fi, ST_L2_7);
stop_t200(l2, 3);
mISDN_FsmRestartTimer(&l2->t203, l2->T203, EV_L2_T203, NULL, 3);
if (est)
l2up_create(l2, DL_ESTABLISH_IND, 0, NULL);
/* mISDN_queue_data(&l2->inst, l2->inst.id | MSG_BROADCAST,
* MGR_SHORTSTATUS | INDICATION, SSTATUS_L2_ESTABLISHED,
* 0, NULL, 0);
*/
if (skb_queue_len(&l2->i_queue) && cansend(l2))
mISDN_FsmEvent(fi, EV_L2_ACK_PULL, NULL);
}
static void
l2_stop_multi(struct FsmInst *fi, int event, void *arg)
{
struct layer2 *l2 = fi->userdata;
struct sk_buff *skb = arg;
mISDN_FsmChangeState(fi, ST_L2_4);
mISDN_FsmDelTimer(&l2->t203, 3);
stop_t200(l2, 4);
send_uframe(l2, skb, UA | get_PollFlag(l2, skb), RSP);
skb_queue_purge(&l2->i_queue);
freewin(l2);
lapb_dl_release_l2l3(l2, DL_RELEASE_IND);
if (l2->tm)
l2_tei(l2, MDL_STATUS_DOWN_IND, 0);
}
static void
l2_connected(struct FsmInst *fi, int event, void *arg)
{
struct layer2 *l2 = fi->userdata;
struct sk_buff *skb = arg;
int pr = -1;
if (!get_PollFlag(l2, skb)) {
l2_mdl_error_ua(fi, event, arg);
return;
}
dev_kfree_skb(skb);
if (test_and_clear_bit(FLG_PEND_REL, &l2->flag))
l2_disconnect(fi, event, NULL);
if (test_and_clear_bit(FLG_L3_INIT, &l2->flag)) {
pr = DL_ESTABLISH_CNF;
} else if (l2->vs != l2->va) {
skb_queue_purge(&l2->i_queue);
pr = DL_ESTABLISH_IND;
}
stop_t200(l2, 5);
l2->vr = 0;
l2->vs = 0;
l2->va = 0;
l2->sow = 0;
mISDN_FsmChangeState(fi, ST_L2_7);
mISDN_FsmAddTimer(&l2->t203, l2->T203, EV_L2_T203, NULL, 4);
if (pr != -1)
l2up_create(l2, pr, 0, NULL);
if (skb_queue_len(&l2->i_queue) && cansend(l2))
mISDN_FsmEvent(fi, EV_L2_ACK_PULL, NULL);
if (l2->tm)
l2_tei(l2, MDL_STATUS_UP_IND, 0);
}
static void
l2_released(struct FsmInst *fi, int event, void *arg)
{
struct layer2 *l2 = fi->userdata;
struct sk_buff *skb = arg;
if (!get_PollFlag(l2, skb)) {
l2_mdl_error_ua(fi, event, arg);
return;
}
dev_kfree_skb(skb);
stop_t200(l2, 6);
lapb_dl_release_l2l3(l2, DL_RELEASE_CNF);
mISDN_FsmChangeState(fi, ST_L2_4);
if (l2->tm)
l2_tei(l2, MDL_STATUS_DOWN_IND, 0);
}
static void
l2_reestablish(struct FsmInst *fi, int event, void *arg)
{
struct layer2 *l2 = fi->userdata;
struct sk_buff *skb = arg;
if (!get_PollFlagFree(l2, skb)) {
establishlink(fi);
test_and_set_bit(FLG_L3_INIT, &l2->flag);
}
}
static void
l2_st5_dm_release(struct FsmInst *fi, int event, void *arg)
{
struct layer2 *l2 = fi->userdata;
struct sk_buff *skb = arg;
if (get_PollFlagFree(l2, skb)) {
stop_t200(l2, 7);
if (!test_bit(FLG_L3_INIT, &l2->flag))
skb_queue_purge(&l2->i_queue);
if (test_bit(FLG_LAPB, &l2->flag))
l2down_create(l2, PH_DEACTIVATE_REQ,
l2_newid(l2), 0, NULL);
st5_dl_release_l2l3(l2);
mISDN_FsmChangeState(fi, ST_L2_4);
if (l2->tm)
l2_tei(l2, MDL_STATUS_DOWN_IND, 0);
}
}
static void
l2_st6_dm_release(struct FsmInst *fi, int event, void *arg)
{
struct layer2 *l2 = fi->userdata;
struct sk_buff *skb = arg;
if (get_PollFlagFree(l2, skb)) {
stop_t200(l2, 8);
lapb_dl_release_l2l3(l2, DL_RELEASE_CNF);
mISDN_FsmChangeState(fi, ST_L2_4);
if (l2->tm)
l2_tei(l2, MDL_STATUS_DOWN_IND, 0);
}
}
static void
enquiry_cr(struct layer2 *l2, u_char typ, u_char cr, u_char pf)
{
struct sk_buff *skb;
u_char tmp[MAX_L2HEADER_LEN];
int i;
i = sethdraddr(l2, tmp, cr);
if (test_bit(FLG_MOD128, &l2->flag)) {
tmp[i++] = typ;
tmp[i++] = (l2->vr << 1) | (pf ? 1 : 0);
} else
tmp[i++] = (l2->vr << 5) | typ | (pf ? 0x10 : 0);
skb = mI_alloc_skb(i, GFP_ATOMIC);
if (!skb) {
printk(KERN_WARNING "%s: isdnl2 can't alloc sbbuff in %s\n",
mISDNDevName4ch(&l2->ch), __func__);
return;
}
skb_put_data(skb, tmp, i);
enqueue_super(l2, skb);
}
inline void
enquiry_response(struct layer2 *l2)
{
if (test_bit(FLG_OWN_BUSY, &l2->flag))
enquiry_cr(l2, RNR, RSP, 1);
else
enquiry_cr(l2, RR, RSP, 1);
test_and_clear_bit(FLG_ACK_PEND, &l2->flag);
}
inline void
transmit_enquiry(struct layer2 *l2)
{
if (test_bit(FLG_OWN_BUSY, &l2->flag))
enquiry_cr(l2, RNR, CMD, 1);
else
enquiry_cr(l2, RR, CMD, 1);
test_and_clear_bit(FLG_ACK_PEND, &l2->flag);
start_t200(l2, 9);
}
static void
nrerrorrecovery(struct FsmInst *fi)
{
struct layer2 *l2 = fi->userdata;
l2mgr(l2, MDL_ERROR_IND, (void *) 'J');
establishlink(fi);
test_and_clear_bit(FLG_L3_INIT, &l2->flag);
}
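/*
 * Step V(S) back to the acknowledged nr and put the unacknowledged frames
 * saved in windowar[] back at the head of the I-queue, then trigger
 * EV_L2_ACK_PULL so they are retransmitted.
 */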
static void
invoke_retransmission(struct layer2 *l2, unsigned int nr)
{
u_int p1;
if (l2->vs != nr) {
while (l2->vs != nr) {
(l2->vs)--;
if (test_bit(FLG_MOD128, &l2->flag)) {
l2->vs %= 128;
p1 = (l2->vs - l2->va) % 128;
} else {
l2->vs %= 8;
p1 = (l2->vs - l2->va) % 8;
}
p1 = (p1 + l2->sow) % l2->window;
if (l2->windowar[p1])
skb_queue_head(&l2->i_queue, l2->windowar[p1]);
else
printk(KERN_WARNING
"%s: windowar[%d] is NULL\n",
mISDNDevName4ch(&l2->ch), p1);
l2->windowar[p1] = NULL;
}
mISDN_FsmEvent(&l2->l2m, EV_L2_ACK_PULL, NULL);
}
}
static void
l2_st7_got_super(struct FsmInst *fi, int event, void *arg)
{
struct layer2 *l2 = fi->userdata;
struct sk_buff *skb = arg;
int PollFlag, rsp, typ = RR;
unsigned int nr;
rsp = *skb->data & 0x2;
if (test_bit(FLG_ORIG, &l2->flag))
rsp = !rsp;
skb_pull(skb, l2addrsize(l2));
if (IsRNR(skb->data, l2)) {
set_peer_busy(l2);
typ = RNR;
} else
clear_peer_busy(l2);
if (IsREJ(skb->data, l2))
typ = REJ;
if (test_bit(FLG_MOD128, &l2->flag)) {
PollFlag = (skb->data[1] & 0x1) == 0x1;
nr = skb->data[1] >> 1;
} else {
PollFlag = (skb->data[0] & 0x10);
nr = (skb->data[0] >> 5) & 0x7;
}
dev_kfree_skb(skb);
if (PollFlag) {
if (rsp)
l2mgr(l2, MDL_ERROR_IND, (void *) 'A');
else
enquiry_response(l2);
}
if (legalnr(l2, nr)) {
if (typ == REJ) {
setva(l2, nr);
invoke_retransmission(l2, nr);
stop_t200(l2, 10);
if (mISDN_FsmAddTimer(&l2->t203, l2->T203,
EV_L2_T203, NULL, 6))
l2m_debug(&l2->l2m, "Restart T203 ST7 REJ");
} else if ((nr == l2->vs) && (typ == RR)) {
setva(l2, nr);
stop_t200(l2, 11);
mISDN_FsmRestartTimer(&l2->t203, l2->T203,
EV_L2_T203, NULL, 7);
} else if ((l2->va != nr) || (typ == RNR)) {
setva(l2, nr);
if (typ != RR)
mISDN_FsmDelTimer(&l2->t203, 9);
restart_t200(l2, 12);
}
if (skb_queue_len(&l2->i_queue) && (typ == RR))
mISDN_FsmEvent(fi, EV_L2_ACK_PULL, NULL);
} else
nrerrorrecovery(fi);
}
static void
l2_feed_i_if_reest(struct FsmInst *fi, int event, void *arg)
{
struct layer2 *l2 = fi->userdata;
struct sk_buff *skb = arg;
if (!test_bit(FLG_L3_INIT, &l2->flag))
skb_queue_tail(&l2->i_queue, skb);
else
dev_kfree_skb(skb);
}
static void
l2_feed_i_pull(struct FsmInst *fi, int event, void *arg)
{
struct layer2 *l2 = fi->userdata;
struct sk_buff *skb = arg;
skb_queue_tail(&l2->i_queue, skb);
mISDN_FsmEvent(fi, EV_L2_ACK_PULL, NULL);
}
static void
l2_feed_iqueue(struct FsmInst *fi, int event, void *arg)
{
struct layer2 *l2 = fi->userdata;
struct sk_buff *skb = arg;
skb_queue_tail(&l2->i_queue, skb);
}
static void
l2_got_iframe(struct FsmInst *fi, int event, void *arg)
{
struct layer2 *l2 = fi->userdata;
struct sk_buff *skb = arg;
int PollFlag, i;
u_int ns, nr;
i = l2addrsize(l2);
if (test_bit(FLG_MOD128, &l2->flag)) {
PollFlag = ((skb->data[i + 1] & 0x1) == 0x1);
ns = skb->data[i] >> 1;
nr = (skb->data[i + 1] >> 1) & 0x7f;
} else {
PollFlag = (skb->data[i] & 0x10);
ns = (skb->data[i] >> 1) & 0x7;
nr = (skb->data[i] >> 5) & 0x7;
}
if (test_bit(FLG_OWN_BUSY, &l2->flag)) {
dev_kfree_skb(skb);
if (PollFlag)
enquiry_response(l2);
} else {
if (l2->vr == ns) {
l2->vr++;
if (test_bit(FLG_MOD128, &l2->flag))
l2->vr %= 128;
else
l2->vr %= 8;
test_and_clear_bit(FLG_REJEXC, &l2->flag);
if (PollFlag)
enquiry_response(l2);
else
test_and_set_bit(FLG_ACK_PEND, &l2->flag);
skb_pull(skb, l2headersize(l2, 0));
l2up(l2, DL_DATA_IND, skb);
} else {
/* n(s)!=v(r) */
dev_kfree_skb(skb);
if (test_and_set_bit(FLG_REJEXC, &l2->flag)) {
if (PollFlag)
enquiry_response(l2);
} else {
enquiry_cr(l2, REJ, RSP, PollFlag);
test_and_clear_bit(FLG_ACK_PEND, &l2->flag);
}
}
}
if (legalnr(l2, nr)) {
if (!test_bit(FLG_PEER_BUSY, &l2->flag) &&
(fi->state == ST_L2_7)) {
if (nr == l2->vs) {
stop_t200(l2, 13);
mISDN_FsmRestartTimer(&l2->t203, l2->T203,
EV_L2_T203, NULL, 7);
} else if (nr != l2->va)
restart_t200(l2, 14);
}
setva(l2, nr);
} else {
nrerrorrecovery(fi);
return;
}
if (skb_queue_len(&l2->i_queue) && (fi->state == ST_L2_7))
mISDN_FsmEvent(fi, EV_L2_ACK_PULL, NULL);
if (test_and_clear_bit(FLG_ACK_PEND, &l2->flag))
enquiry_cr(l2, RR, RSP, 0);
}
static void
l2_got_tei(struct FsmInst *fi, int event, void *arg)
{
struct layer2 *l2 = fi->userdata;
u_int info;
l2->tei = (signed char)(long)arg;
set_channel_address(&l2->ch, l2->sapi, l2->tei);
info = DL_INFO_L2_CONNECT;
l2up_create(l2, DL_INFORMATION_IND, sizeof(info), &info);
if (fi->state == ST_L2_3) {
establishlink(fi);
test_and_set_bit(FLG_L3_INIT, &l2->flag);
} else
mISDN_FsmChangeState(fi, ST_L2_4);
if (skb_queue_len(&l2->ui_queue))
tx_ui(l2);
}
static void
l2_st5_tout_200(struct FsmInst *fi, int event, void *arg)
{
struct layer2 *l2 = fi->userdata;
if (test_bit(FLG_LAPD, &l2->flag) &&
test_bit(FLG_DCHAN_BUSY, &l2->flag)) {
mISDN_FsmAddTimer(&l2->t200, l2->T200, EV_L2_T200, NULL, 9);
} else if (l2->rc == l2->N200) {
mISDN_FsmChangeState(fi, ST_L2_4);
test_and_clear_bit(FLG_T200_RUN, &l2->flag);
skb_queue_purge(&l2->i_queue);
l2mgr(l2, MDL_ERROR_IND, (void *) 'G');
if (test_bit(FLG_LAPB, &l2->flag))
l2down_create(l2, PH_DEACTIVATE_REQ,
l2_newid(l2), 0, NULL);
st5_dl_release_l2l3(l2);
if (l2->tm)
l2_tei(l2, MDL_STATUS_DOWN_IND, 0);
} else {
l2->rc++;
mISDN_FsmAddTimer(&l2->t200, l2->T200, EV_L2_T200, NULL, 9);
send_uframe(l2, NULL, (test_bit(FLG_MOD128, &l2->flag) ?
SABME : SABM) | 0x10, CMD);
}
}
static void
l2_st6_tout_200(struct FsmInst *fi, int event, void *arg)
{
struct layer2 *l2 = fi->userdata;
if (test_bit(FLG_LAPD, &l2->flag) &&
test_bit(FLG_DCHAN_BUSY, &l2->flag)) {
mISDN_FsmAddTimer(&l2->t200, l2->T200, EV_L2_T200, NULL, 9);
} else if (l2->rc == l2->N200) {
mISDN_FsmChangeState(fi, ST_L2_4);
test_and_clear_bit(FLG_T200_RUN, &l2->flag);
l2mgr(l2, MDL_ERROR_IND, (void *) 'H');
lapb_dl_release_l2l3(l2, DL_RELEASE_CNF);
if (l2->tm)
l2_tei(l2, MDL_STATUS_DOWN_IND, 0);
} else {
l2->rc++;
mISDN_FsmAddTimer(&l2->t200, l2->T200, EV_L2_T200,
NULL, 9);
send_uframe(l2, NULL, DISC | 0x10, CMD);
}
}
static void
l2_st7_tout_200(struct FsmInst *fi, int event, void *arg)
{
struct layer2 *l2 = fi->userdata;
if (test_bit(FLG_LAPD, &l2->flag) &&
test_bit(FLG_DCHAN_BUSY, &l2->flag)) {
mISDN_FsmAddTimer(&l2->t200, l2->T200, EV_L2_T200, NULL, 9);
return;
}
test_and_clear_bit(FLG_T200_RUN, &l2->flag);
l2->rc = 0;
mISDN_FsmChangeState(fi, ST_L2_8);
transmit_enquiry(l2);
l2->rc++;
}
static void
l2_st8_tout_200(struct FsmInst *fi, int event, void *arg)
{
struct layer2 *l2 = fi->userdata;
if (test_bit(FLG_LAPD, &l2->flag) &&
test_bit(FLG_DCHAN_BUSY, &l2->flag)) {
mISDN_FsmAddTimer(&l2->t200, l2->T200, EV_L2_T200, NULL, 9);
return;
}
test_and_clear_bit(FLG_T200_RUN, &l2->flag);
if (l2->rc == l2->N200) {
l2mgr(l2, MDL_ERROR_IND, (void *) 'I');
establishlink(fi);
test_and_clear_bit(FLG_L3_INIT, &l2->flag);
} else {
transmit_enquiry(l2);
l2->rc++;
}
}
static void
l2_st7_tout_203(struct FsmInst *fi, int event, void *arg)
{
struct layer2 *l2 = fi->userdata;
if (test_bit(FLG_LAPD, &l2->flag) &&
test_bit(FLG_DCHAN_BUSY, &l2->flag)) {
mISDN_FsmAddTimer(&l2->t203, l2->T203, EV_L2_T203, NULL, 9);
return;
}
mISDN_FsmChangeState(fi, ST_L2_8);
transmit_enquiry(l2);
l2->rc = 0;
}
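/*
 * Send the next I-frame if the window allows it: build the address and
 * control fields, keep the unmodified skb in windowar[] for a possible
 * retransmission, send a headroom-expanded copy down to layer 1 and start
 * T200 (stopping T203) if it is not already running.
 */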
static void
l2_pull_iqueue(struct FsmInst *fi, int event, void *arg)
{
struct layer2 *l2 = fi->userdata;
struct sk_buff *skb, *nskb;
u_char header[MAX_L2HEADER_LEN];
u_int i, p1;
if (!cansend(l2))
return;
skb = skb_dequeue(&l2->i_queue);
if (!skb)
return;
i = sethdraddr(l2, header, CMD);
if (test_bit(FLG_MOD128, &l2->flag)) {
header[i++] = l2->vs << 1;
header[i++] = l2->vr << 1;
} else
header[i++] = (l2->vr << 5) | (l2->vs << 1);
nskb = skb_realloc_headroom(skb, i);
if (!nskb) {
printk(KERN_WARNING "%s: no headroom(%d) copy for IFrame\n",
mISDNDevName4ch(&l2->ch), i);
skb_queue_head(&l2->i_queue, skb);
return;
}
if (test_bit(FLG_MOD128, &l2->flag)) {
p1 = (l2->vs - l2->va) % 128;
l2->vs = (l2->vs + 1) % 128;
} else {
p1 = (l2->vs - l2->va) % 8;
l2->vs = (l2->vs + 1) % 8;
}
p1 = (p1 + l2->sow) % l2->window;
if (l2->windowar[p1]) {
printk(KERN_WARNING "%s: l2 try overwrite ack queue entry %d\n",
mISDNDevName4ch(&l2->ch), p1);
dev_kfree_skb(l2->windowar[p1]);
}
l2->windowar[p1] = skb;
memcpy(skb_push(nskb, i), header, i);
l2down(l2, PH_DATA_REQ, l2_newid(l2), nskb);
test_and_clear_bit(FLG_ACK_PEND, &l2->flag);
if (!test_and_set_bit(FLG_T200_RUN, &l2->flag)) {
mISDN_FsmDelTimer(&l2->t203, 13);
mISDN_FsmAddTimer(&l2->t200, l2->T200, EV_L2_T200, NULL, 11);
}
}
static void
l2_st8_got_super(struct FsmInst *fi, int event, void *arg)
{
struct layer2 *l2 = fi->userdata;
struct sk_buff *skb = arg;
int PollFlag, rsp, rnr = 0;
unsigned int nr;
rsp = *skb->data & 0x2;
if (test_bit(FLG_ORIG, &l2->flag))
rsp = !rsp;
skb_pull(skb, l2addrsize(l2));
if (IsRNR(skb->data, l2)) {
set_peer_busy(l2);
rnr = 1;
} else
clear_peer_busy(l2);
if (test_bit(FLG_MOD128, &l2->flag)) {
PollFlag = (skb->data[1] & 0x1) == 0x1;
nr = skb->data[1] >> 1;
} else {
PollFlag = (skb->data[0] & 0x10);
nr = (skb->data[0] >> 5) & 0x7;
}
dev_kfree_skb(skb);
if (rsp && PollFlag) {
if (legalnr(l2, nr)) {
if (rnr) {
restart_t200(l2, 15);
} else {
stop_t200(l2, 16);
mISDN_FsmAddTimer(&l2->t203, l2->T203,
EV_L2_T203, NULL, 5);
setva(l2, nr);
}
invoke_retransmission(l2, nr);
mISDN_FsmChangeState(fi, ST_L2_7);
if (skb_queue_len(&l2->i_queue) && cansend(l2))
mISDN_FsmEvent(fi, EV_L2_ACK_PULL, NULL);
} else
nrerrorrecovery(fi);
} else {
if (!rsp && PollFlag)
enquiry_response(l2);
if (legalnr(l2, nr))
setva(l2, nr);
else
nrerrorrecovery(fi);
}
}
static void
l2_got_FRMR(struct FsmInst *fi, int event, void *arg)
{
struct layer2 *l2 = fi->userdata;
struct sk_buff *skb = arg;
skb_pull(skb, l2addrsize(l2) + 1);
if (!(skb->data[0] & 1) || ((skb->data[0] & 3) == 1) || /* I or S */
(IsUA(skb->data) && (fi->state == ST_L2_7))) {
l2mgr(l2, MDL_ERROR_IND, (void *) 'K');
establishlink(fi);
test_and_clear_bit(FLG_L3_INIT, &l2->flag);
}
dev_kfree_skb(skb);
}
static void
l2_st24_tei_remove(struct FsmInst *fi, int event, void *arg)
{
struct layer2 *l2 = fi->userdata;
skb_queue_purge(&l2->ui_queue);
l2->tei = GROUP_TEI;
mISDN_FsmChangeState(fi, ST_L2_1);
}
static void
l2_st3_tei_remove(struct FsmInst *fi, int event, void *arg)
{
struct layer2 *l2 = fi->userdata;
skb_queue_purge(&l2->ui_queue);
l2->tei = GROUP_TEI;
l2up_create(l2, DL_RELEASE_IND, 0, NULL);
mISDN_FsmChangeState(fi, ST_L2_1);
}
static void
l2_st5_tei_remove(struct FsmInst *fi, int event, void *arg)
{
struct layer2 *l2 = fi->userdata;
skb_queue_purge(&l2->i_queue);
skb_queue_purge(&l2->ui_queue);
freewin(l2);
l2->tei = GROUP_TEI;
stop_t200(l2, 17);
st5_dl_release_l2l3(l2);
mISDN_FsmChangeState(fi, ST_L2_1);
}
static void
l2_st6_tei_remove(struct FsmInst *fi, int event, void *arg)
{
struct layer2 *l2 = fi->userdata;
skb_queue_purge(&l2->ui_queue);
l2->tei = GROUP_TEI;
stop_t200(l2, 18);
l2up_create(l2, DL_RELEASE_IND, 0, NULL);
mISDN_FsmChangeState(fi, ST_L2_1);
}
static void
l2_tei_remove(struct FsmInst *fi, int event, void *arg)
{
struct layer2 *l2 = fi->userdata;
skb_queue_purge(&l2->i_queue);
skb_queue_purge(&l2->ui_queue);
freewin(l2);
l2->tei = GROUP_TEI;
stop_t200(l2, 17);
mISDN_FsmDelTimer(&l2->t203, 19);
l2up_create(l2, DL_RELEASE_IND, 0, NULL);
/* mISDN_queue_data(&l2->inst, l2->inst.id | MSG_BROADCAST,
* MGR_SHORTSTATUS_IND, SSTATUS_L2_RELEASED,
* 0, NULL, 0);
*/
mISDN_FsmChangeState(fi, ST_L2_1);
}
static void
l2_st14_persistent_da(struct FsmInst *fi, int event, void *arg)
{
struct layer2 *l2 = fi->userdata;
struct sk_buff *skb = arg;
skb_queue_purge(&l2->i_queue);
skb_queue_purge(&l2->ui_queue);
if (test_and_clear_bit(FLG_ESTAB_PEND, &l2->flag))
l2up(l2, DL_RELEASE_IND, skb);
else
dev_kfree_skb(skb);
}
static void
l2_st5_persistent_da(struct FsmInst *fi, int event, void *arg)
{
struct layer2 *l2 = fi->userdata;
struct sk_buff *skb = arg;
skb_queue_purge(&l2->i_queue);
skb_queue_purge(&l2->ui_queue);
freewin(l2);
stop_t200(l2, 19);
st5_dl_release_l2l3(l2);
mISDN_FsmChangeState(fi, ST_L2_4);
if (l2->tm)
l2_tei(l2, MDL_STATUS_DOWN_IND, 0);
dev_kfree_skb(skb);
}
static void
l2_st6_persistent_da(struct FsmInst *fi, int event, void *arg)
{
struct layer2 *l2 = fi->userdata;
struct sk_buff *skb = arg;
skb_queue_purge(&l2->ui_queue);
stop_t200(l2, 20);
l2up(l2, DL_RELEASE_CNF, skb);
mISDN_FsmChangeState(fi, ST_L2_4);
if (l2->tm)
l2_tei(l2, MDL_STATUS_DOWN_IND, 0);
}
static void
l2_persistent_da(struct FsmInst *fi, int event, void *arg)
{
struct layer2 *l2 = fi->userdata;
struct sk_buff *skb = arg;
skb_queue_purge(&l2->i_queue);
skb_queue_purge(&l2->ui_queue);
freewin(l2);
stop_t200(l2, 19);
mISDN_FsmDelTimer(&l2->t203, 19);
l2up(l2, DL_RELEASE_IND, skb);
mISDN_FsmChangeState(fi, ST_L2_4);
if (l2->tm)
l2_tei(l2, MDL_STATUS_DOWN_IND, 0);
}
static void
l2_set_own_busy(struct FsmInst *fi, int event, void *arg)
{
struct layer2 *l2 = fi->userdata;
struct sk_buff *skb = arg;
if (!test_and_set_bit(FLG_OWN_BUSY, &l2->flag)) {
enquiry_cr(l2, RNR, RSP, 0);
test_and_clear_bit(FLG_ACK_PEND, &l2->flag);
}
dev_kfree_skb(skb);
}
static void
l2_clear_own_busy(struct FsmInst *fi, int event, void *arg)
{
struct layer2 *l2 = fi->userdata;
struct sk_buff *skb = arg;
if (!test_and_clear_bit(FLG_OWN_BUSY, &l2->flag)) {
enquiry_cr(l2, RR, RSP, 0);
test_and_clear_bit(FLG_ACK_PEND, &l2->flag);
}
dev_kfree_skb(skb);
}
static void
l2_frame_error(struct FsmInst *fi, int event, void *arg)
{
struct layer2 *l2 = fi->userdata;
l2mgr(l2, MDL_ERROR_IND, arg);
}
static void
l2_frame_error_reest(struct FsmInst *fi, int event, void *arg)
{
struct layer2 *l2 = fi->userdata;
l2mgr(l2, MDL_ERROR_IND, arg);
establishlink(fi);
test_and_clear_bit(FLG_L3_INIT, &l2->flag);
}
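/*
 * State/event transition table of the layer-2 FSM (LAPD/LAPB): one handler
 * per (state, event) pair, registered with mISDN_FsmNew() in Isdnl2_Init().
 */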
static struct FsmNode L2FnList[] =
{
{ST_L2_1, EV_L2_DL_ESTABLISH_REQ, l2_mdl_assign},
{ST_L2_2, EV_L2_DL_ESTABLISH_REQ, l2_go_st3},
{ST_L2_4, EV_L2_DL_ESTABLISH_REQ, l2_establish},
{ST_L2_5, EV_L2_DL_ESTABLISH_REQ, l2_discard_i_setl3},
{ST_L2_7, EV_L2_DL_ESTABLISH_REQ, l2_l3_reestablish},
{ST_L2_8, EV_L2_DL_ESTABLISH_REQ, l2_l3_reestablish},
{ST_L2_4, EV_L2_DL_RELEASE_REQ, l2_release},
{ST_L2_5, EV_L2_DL_RELEASE_REQ, l2_pend_rel},
{ST_L2_7, EV_L2_DL_RELEASE_REQ, l2_disconnect},
{ST_L2_8, EV_L2_DL_RELEASE_REQ, l2_disconnect},
{ST_L2_5, EV_L2_DL_DATA, l2_feed_i_if_reest},
{ST_L2_7, EV_L2_DL_DATA, l2_feed_i_pull},
{ST_L2_8, EV_L2_DL_DATA, l2_feed_iqueue},
{ST_L2_1, EV_L2_DL_UNITDATA, l2_queue_ui_assign},
{ST_L2_2, EV_L2_DL_UNITDATA, l2_queue_ui},
{ST_L2_3, EV_L2_DL_UNITDATA, l2_queue_ui},
{ST_L2_4, EV_L2_DL_UNITDATA, l2_send_ui},
{ST_L2_5, EV_L2_DL_UNITDATA, l2_send_ui},
{ST_L2_6, EV_L2_DL_UNITDATA, l2_send_ui},
{ST_L2_7, EV_L2_DL_UNITDATA, l2_send_ui},
{ST_L2_8, EV_L2_DL_UNITDATA, l2_send_ui},
{ST_L2_1, EV_L2_MDL_ASSIGN, l2_got_tei},
{ST_L2_2, EV_L2_MDL_ASSIGN, l2_got_tei},
{ST_L2_3, EV_L2_MDL_ASSIGN, l2_got_tei},
{ST_L2_2, EV_L2_MDL_ERROR, l2_st24_tei_remove},
{ST_L2_3, EV_L2_MDL_ERROR, l2_st3_tei_remove},
{ST_L2_4, EV_L2_MDL_REMOVE, l2_st24_tei_remove},
{ST_L2_5, EV_L2_MDL_REMOVE, l2_st5_tei_remove},
{ST_L2_6, EV_L2_MDL_REMOVE, l2_st6_tei_remove},
{ST_L2_7, EV_L2_MDL_REMOVE, l2_tei_remove},
{ST_L2_8, EV_L2_MDL_REMOVE, l2_tei_remove},
{ST_L2_4, EV_L2_SABME, l2_start_multi},
{ST_L2_5, EV_L2_SABME, l2_send_UA},
{ST_L2_6, EV_L2_SABME, l2_send_DM},
{ST_L2_7, EV_L2_SABME, l2_restart_multi},
{ST_L2_8, EV_L2_SABME, l2_restart_multi},
{ST_L2_4, EV_L2_DISC, l2_send_DM},
{ST_L2_5, EV_L2_DISC, l2_send_DM},
{ST_L2_6, EV_L2_DISC, l2_send_UA},
{ST_L2_7, EV_L2_DISC, l2_stop_multi},
{ST_L2_8, EV_L2_DISC, l2_stop_multi},
{ST_L2_4, EV_L2_UA, l2_mdl_error_ua},
{ST_L2_5, EV_L2_UA, l2_connected},
{ST_L2_6, EV_L2_UA, l2_released},
{ST_L2_7, EV_L2_UA, l2_mdl_error_ua},
{ST_L2_8, EV_L2_UA, l2_mdl_error_ua},
{ST_L2_4, EV_L2_DM, l2_reestablish},
{ST_L2_5, EV_L2_DM, l2_st5_dm_release},
{ST_L2_6, EV_L2_DM, l2_st6_dm_release},
{ST_L2_7, EV_L2_DM, l2_mdl_error_dm},
{ST_L2_8, EV_L2_DM, l2_st8_mdl_error_dm},
{ST_L2_1, EV_L2_UI, l2_got_ui},
{ST_L2_2, EV_L2_UI, l2_got_ui},
{ST_L2_3, EV_L2_UI, l2_got_ui},
{ST_L2_4, EV_L2_UI, l2_got_ui},
{ST_L2_5, EV_L2_UI, l2_got_ui},
{ST_L2_6, EV_L2_UI, l2_got_ui},
{ST_L2_7, EV_L2_UI, l2_got_ui},
{ST_L2_8, EV_L2_UI, l2_got_ui},
{ST_L2_7, EV_L2_FRMR, l2_got_FRMR},
{ST_L2_8, EV_L2_FRMR, l2_got_FRMR},
{ST_L2_7, EV_L2_SUPER, l2_st7_got_super},
{ST_L2_8, EV_L2_SUPER, l2_st8_got_super},
{ST_L2_7, EV_L2_I, l2_got_iframe},
{ST_L2_8, EV_L2_I, l2_got_iframe},
{ST_L2_5, EV_L2_T200, l2_timeout},
{ST_L2_6, EV_L2_T200, l2_timeout},
{ST_L2_7, EV_L2_T200, l2_timeout},
{ST_L2_8, EV_L2_T200, l2_timeout},
{ST_L2_7, EV_L2_T203, l2_timeout},
{ST_L2_5, EV_L2_T200I, l2_st5_tout_200},
{ST_L2_6, EV_L2_T200I, l2_st6_tout_200},
{ST_L2_7, EV_L2_T200I, l2_st7_tout_200},
{ST_L2_8, EV_L2_T200I, l2_st8_tout_200},
{ST_L2_7, EV_L2_T203I, l2_st7_tout_203},
{ST_L2_7, EV_L2_ACK_PULL, l2_pull_iqueue},
{ST_L2_7, EV_L2_SET_OWN_BUSY, l2_set_own_busy},
{ST_L2_8, EV_L2_SET_OWN_BUSY, l2_set_own_busy},
{ST_L2_7, EV_L2_CLEAR_OWN_BUSY, l2_clear_own_busy},
{ST_L2_8, EV_L2_CLEAR_OWN_BUSY, l2_clear_own_busy},
{ST_L2_4, EV_L2_FRAME_ERROR, l2_frame_error},
{ST_L2_5, EV_L2_FRAME_ERROR, l2_frame_error},
{ST_L2_6, EV_L2_FRAME_ERROR, l2_frame_error},
{ST_L2_7, EV_L2_FRAME_ERROR, l2_frame_error_reest},
{ST_L2_8, EV_L2_FRAME_ERROR, l2_frame_error_reest},
{ST_L2_1, EV_L1_DEACTIVATE, l2_st14_persistent_da},
{ST_L2_2, EV_L1_DEACTIVATE, l2_st24_tei_remove},
{ST_L2_3, EV_L1_DEACTIVATE, l2_st3_tei_remove},
{ST_L2_4, EV_L1_DEACTIVATE, l2_st14_persistent_da},
{ST_L2_5, EV_L1_DEACTIVATE, l2_st5_persistent_da},
{ST_L2_6, EV_L1_DEACTIVATE, l2_st6_persistent_da},
{ST_L2_7, EV_L1_DEACTIVATE, l2_persistent_da},
{ST_L2_8, EV_L1_DEACTIVATE, l2_persistent_da},
};
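/*
 * Process a frame received from layer 1: for LAPD check the EA bits and the
 * SAPI/TEI address (dropping frames addressed to other links), classify the
 * control field (I, S, UI, SABME, UA, DISC, DM, FRMR), run the matching
 * *_error() check and feed the corresponding EV_L2_* event into the FSM.
 */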
static int
ph_data_indication(struct layer2 *l2, struct mISDNhead *hh, struct sk_buff *skb)
{
u_char *datap = skb->data;
int ret = -EINVAL;
int psapi, ptei;
u_int l;
int c = 0;
l = l2addrsize(l2);
if (skb->len <= l) {
mISDN_FsmEvent(&l2->l2m, EV_L2_FRAME_ERROR, (void *) 'N');
return ret;
}
if (test_bit(FLG_LAPD, &l2->flag)) { /* Maybe not needed */
psapi = *datap++;
ptei = *datap++;
if ((psapi & 1) || !(ptei & 1)) {
printk(KERN_WARNING
"%s l2 D-channel frame wrong EA0/EA1\n",
mISDNDevName4ch(&l2->ch));
return ret;
}
psapi >>= 2;
ptei >>= 1;
if (psapi != l2->sapi) {
/* not our business */
if (*debug & DEBUG_L2)
printk(KERN_DEBUG "%s: sapi %d/%d mismatch\n",
mISDNDevName4ch(&l2->ch), psapi,
l2->sapi);
dev_kfree_skb(skb);
return 0;
}
if ((ptei != l2->tei) && (ptei != GROUP_TEI)) {
/* not our business */
if (*debug & DEBUG_L2)
printk(KERN_DEBUG "%s: tei %d/%d mismatch\n",
mISDNDevName4ch(&l2->ch), ptei, l2->tei);
dev_kfree_skb(skb);
return 0;
}
} else
datap += l;
if (!(*datap & 1)) { /* I-Frame */
c = iframe_error(l2, skb);
if (!c)
ret = mISDN_FsmEvent(&l2->l2m, EV_L2_I, skb);
} else if (IsSFrame(datap, l2)) { /* S-Frame */
c = super_error(l2, skb);
if (!c)
ret = mISDN_FsmEvent(&l2->l2m, EV_L2_SUPER, skb);
} else if (IsUI(datap)) {
c = UI_error(l2, skb);
if (!c)
ret = mISDN_FsmEvent(&l2->l2m, EV_L2_UI, skb);
} else if (IsSABME(datap, l2)) {
c = unnum_error(l2, skb, CMD);
if (!c)
ret = mISDN_FsmEvent(&l2->l2m, EV_L2_SABME, skb);
} else if (IsUA(datap)) {
c = unnum_error(l2, skb, RSP);
if (!c)
ret = mISDN_FsmEvent(&l2->l2m, EV_L2_UA, skb);
} else if (IsDISC(datap)) {
c = unnum_error(l2, skb, CMD);
if (!c)
ret = mISDN_FsmEvent(&l2->l2m, EV_L2_DISC, skb);
} else if (IsDM(datap)) {
c = unnum_error(l2, skb, RSP);
if (!c)
ret = mISDN_FsmEvent(&l2->l2m, EV_L2_DM, skb);
} else if (IsFRMR(datap)) {
c = FRMR_error(l2, skb);
if (!c)
ret = mISDN_FsmEvent(&l2->l2m, EV_L2_FRMR, skb);
} else
c = 'L';
if (c) {
printk(KERN_WARNING "%s:l2 D-channel frame error %c\n",
mISDNDevName4ch(&l2->ch), c);
mISDN_FsmEvent(&l2->l2m, EV_L2_FRAME_ERROR, (void *)(long)c);
}
return ret;
}
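/*
 * Receive entry point of the layer-2 channel: PH_* indications/confirms
 * coming up from layer 1 and DL_* requests coming down from layer 3 are
 * translated into FSM events; skbs of unhandled or failed events are freed
 * here.
 */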
static int
l2_send(struct mISDNchannel *ch, struct sk_buff *skb)
{
struct layer2 *l2 = container_of(ch, struct layer2, ch);
struct mISDNhead *hh = mISDN_HEAD_P(skb);
int ret = -EINVAL;
if (*debug & DEBUG_L2_RECV)
printk(KERN_DEBUG "%s: %s prim(%x) id(%x) sapi(%d) tei(%d)\n",
__func__, mISDNDevName4ch(&l2->ch), hh->prim, hh->id,
l2->sapi, l2->tei);
if (hh->prim == DL_INTERN_MSG) {
struct mISDNhead *chh = hh + 1; /* saved copy */
*hh = *chh;
if (*debug & DEBUG_L2_RECV)
printk(KERN_DEBUG "%s: prim(%x) id(%x) internal msg\n",
mISDNDevName4ch(&l2->ch), hh->prim, hh->id);
}
switch (hh->prim) {
case PH_DATA_IND:
ret = ph_data_indication(l2, hh, skb);
break;
case PH_DATA_CNF:
ret = ph_data_confirm(l2, hh, skb);
break;
case PH_ACTIVATE_IND:
test_and_set_bit(FLG_L1_ACTIV, &l2->flag);
l2up_create(l2, MPH_ACTIVATE_IND, 0, NULL);
if (test_and_clear_bit(FLG_ESTAB_PEND, &l2->flag))
ret = mISDN_FsmEvent(&l2->l2m,
EV_L2_DL_ESTABLISH_REQ, skb);
break;
case PH_DEACTIVATE_IND:
test_and_clear_bit(FLG_L1_ACTIV, &l2->flag);
l2up_create(l2, MPH_DEACTIVATE_IND, 0, NULL);
ret = mISDN_FsmEvent(&l2->l2m, EV_L1_DEACTIVATE, skb);
break;
case MPH_INFORMATION_IND:
if (!l2->up)
break;
ret = l2->up->send(l2->up, skb);
break;
case DL_DATA_REQ:
ret = mISDN_FsmEvent(&l2->l2m, EV_L2_DL_DATA, skb);
break;
case DL_UNITDATA_REQ:
ret = mISDN_FsmEvent(&l2->l2m, EV_L2_DL_UNITDATA, skb);
break;
case DL_ESTABLISH_REQ:
if (test_bit(FLG_LAPB, &l2->flag))
test_and_set_bit(FLG_ORIG, &l2->flag);
if (test_bit(FLG_L1_ACTIV, &l2->flag)) {
if (test_bit(FLG_LAPD, &l2->flag) ||
test_bit(FLG_ORIG, &l2->flag))
ret = mISDN_FsmEvent(&l2->l2m,
EV_L2_DL_ESTABLISH_REQ, skb);
} else {
if (test_bit(FLG_LAPD, &l2->flag) ||
test_bit(FLG_ORIG, &l2->flag)) {
test_and_set_bit(FLG_ESTAB_PEND,
&l2->flag);
}
ret = l2down(l2, PH_ACTIVATE_REQ, l2_newid(l2),
skb);
}
break;
case DL_RELEASE_REQ:
if (test_bit(FLG_LAPB, &l2->flag))
l2down_create(l2, PH_DEACTIVATE_REQ,
l2_newid(l2), 0, NULL);
ret = mISDN_FsmEvent(&l2->l2m, EV_L2_DL_RELEASE_REQ,
skb);
break;
case DL_TIMER200_IND:
mISDN_FsmEvent(&l2->l2m, EV_L2_T200I, NULL);
break;
case DL_TIMER203_IND:
mISDN_FsmEvent(&l2->l2m, EV_L2_T203I, NULL);
break;
default:
if (*debug & DEBUG_L2)
l2m_debug(&l2->l2m, "l2 unknown pr %04x",
hh->prim);
}
if (ret) {
dev_kfree_skb(skb);
ret = 0;
}
return ret;
}
int
tei_l2(struct layer2 *l2, u_int cmd, u_long arg)
{
int ret = -EINVAL;
if (*debug & DEBUG_L2_TEI)
printk(KERN_DEBUG "%s: cmd(%x) in %s\n",
mISDNDevName4ch(&l2->ch), cmd, __func__);
switch (cmd) {
case (MDL_ASSIGN_REQ):
ret = mISDN_FsmEvent(&l2->l2m, EV_L2_MDL_ASSIGN, (void *)arg);
break;
case (MDL_REMOVE_REQ):
ret = mISDN_FsmEvent(&l2->l2m, EV_L2_MDL_REMOVE, NULL);
break;
case (MDL_ERROR_IND):
ret = mISDN_FsmEvent(&l2->l2m, EV_L2_MDL_ERROR, NULL);
break;
case (MDL_ERROR_RSP):
/* ETS 300-125 5.3.2.1 Test: TC13010 */
printk(KERN_NOTICE "%s: MDL_ERROR|REQ (tei_l2)\n",
mISDNDevName4ch(&l2->ch));
ret = mISDN_FsmEvent(&l2->l2m, EV_L2_MDL_ERROR, NULL);
break;
}
return ret;
}
static void
release_l2(struct layer2 *l2)
{
mISDN_FsmDelTimer(&l2->t200, 21);
mISDN_FsmDelTimer(&l2->t203, 16);
skb_queue_purge(&l2->i_queue);
skb_queue_purge(&l2->ui_queue);
skb_queue_purge(&l2->down_queue);
ReleaseWin(l2);
if (test_bit(FLG_LAPD, &l2->flag)) {
TEIrelease(l2);
if (l2->ch.st)
l2->ch.st->dev->D.ctrl(&l2->ch.st->dev->D,
CLOSE_CHANNEL, NULL);
}
kfree(l2);
}
static int
l2_ctrl(struct mISDNchannel *ch, u_int cmd, void *arg)
{
struct layer2 *l2 = container_of(ch, struct layer2, ch);
u_int info;
if (*debug & DEBUG_L2_CTRL)
printk(KERN_DEBUG "%s: %s cmd(%x)\n",
mISDNDevName4ch(ch), __func__, cmd);
switch (cmd) {
case OPEN_CHANNEL:
if (test_bit(FLG_LAPD, &l2->flag)) {
set_channel_address(&l2->ch, l2->sapi, l2->tei);
info = DL_INFO_L2_CONNECT;
l2up_create(l2, DL_INFORMATION_IND,
sizeof(info), &info);
}
break;
case CLOSE_CHANNEL:
if (l2->ch.peer)
l2->ch.peer->ctrl(l2->ch.peer, CLOSE_CHANNEL, NULL);
release_l2(l2);
break;
}
return 0;
}
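/*
 * Allocate and initialise a layer2 instance for LAPD (NT or TE mode) or
 * X.75-SLP: set timers, window size and address and open the D-channel for
 * LAPD.  The FSM starts in ST_L2_4 for LAPB, fixed TEI or NT mode, otherwise
 * in ST_L2_1 (no TEI assigned yet).
 */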
struct layer2 *
create_l2(struct mISDNchannel *ch, u_int protocol, u_long options, int tei,
int sapi)
{
struct layer2 *l2;
struct channel_req rq;
l2 = kzalloc(sizeof(struct layer2), GFP_KERNEL);
if (!l2) {
printk(KERN_ERR "kzalloc layer2 failed\n");
return NULL;
}
l2->next_id = 1;
l2->down_id = MISDN_ID_NONE;
l2->up = ch;
l2->ch.st = ch->st;
l2->ch.send = l2_send;
l2->ch.ctrl = l2_ctrl;
switch (protocol) {
case ISDN_P_LAPD_NT:
test_and_set_bit(FLG_LAPD, &l2->flag);
test_and_set_bit(FLG_LAPD_NET, &l2->flag);
test_and_set_bit(FLG_MOD128, &l2->flag);
l2->sapi = sapi;
l2->maxlen = MAX_DFRAME_LEN;
if (test_bit(OPTION_L2_PMX, &options))
l2->window = 7;
else
l2->window = 1;
if (test_bit(OPTION_L2_PTP, &options))
test_and_set_bit(FLG_PTP, &l2->flag);
if (test_bit(OPTION_L2_FIXEDTEI, &options))
test_and_set_bit(FLG_FIXED_TEI, &l2->flag);
l2->tei = tei;
l2->T200 = 1000;
l2->N200 = 3;
l2->T203 = 10000;
if (test_bit(OPTION_L2_PMX, &options))
rq.protocol = ISDN_P_NT_E1;
else
rq.protocol = ISDN_P_NT_S0;
rq.adr.channel = 0;
l2->ch.st->dev->D.ctrl(&l2->ch.st->dev->D, OPEN_CHANNEL, &rq);
break;
case ISDN_P_LAPD_TE:
test_and_set_bit(FLG_LAPD, &l2->flag);
test_and_set_bit(FLG_MOD128, &l2->flag);
test_and_set_bit(FLG_ORIG, &l2->flag);
l2->sapi = sapi;
l2->maxlen = MAX_DFRAME_LEN;
if (test_bit(OPTION_L2_PMX, &options))
l2->window = 7;
else
l2->window = 1;
if (test_bit(OPTION_L2_PTP, &options))
test_and_set_bit(FLG_PTP, &l2->flag);
if (test_bit(OPTION_L2_FIXEDTEI, &options))
test_and_set_bit(FLG_FIXED_TEI, &l2->flag);
l2->tei = tei;
l2->T200 = 1000;
l2->N200 = 3;
l2->T203 = 10000;
if (test_bit(OPTION_L2_PMX, &options))
rq.protocol = ISDN_P_TE_E1;
else
rq.protocol = ISDN_P_TE_S0;
rq.adr.channel = 0;
l2->ch.st->dev->D.ctrl(&l2->ch.st->dev->D, OPEN_CHANNEL, &rq);
break;
case ISDN_P_B_X75SLP:
test_and_set_bit(FLG_LAPB, &l2->flag);
l2->window = 7;
l2->maxlen = MAX_DATA_SIZE;
l2->T200 = 1000;
l2->N200 = 4;
l2->T203 = 5000;
l2->addr.A = 3;
l2->addr.B = 1;
break;
default:
printk(KERN_ERR "layer2 create failed prt %x\n",
protocol);
kfree(l2);
return NULL;
}
skb_queue_head_init(&l2->i_queue);
skb_queue_head_init(&l2->ui_queue);
skb_queue_head_init(&l2->down_queue);
skb_queue_head_init(&l2->tmp_queue);
InitWin(l2);
l2->l2m.fsm = &l2fsm;
if (test_bit(FLG_LAPB, &l2->flag) ||
test_bit(FLG_FIXED_TEI, &l2->flag) ||
test_bit(FLG_LAPD_NET, &l2->flag))
l2->l2m.state = ST_L2_4;
else
l2->l2m.state = ST_L2_1;
l2->l2m.debug = *debug;
l2->l2m.userdata = l2;
l2->l2m.userint = 0;
l2->l2m.printdebug = l2m_debug;
mISDN_FsmInitTimer(&l2->l2m, &l2->t200);
mISDN_FsmInitTimer(&l2->l2m, &l2->t203);
return l2;
}
static int
x75create(struct channel_req *crq)
{
struct layer2 *l2;
if (crq->protocol != ISDN_P_B_X75SLP)
return -EPROTONOSUPPORT;
l2 = create_l2(crq->ch, crq->protocol, 0, 0, 0);
if (!l2)
return -ENOMEM;
crq->ch = &l2->ch;
crq->protocol = ISDN_P_B_HDLC;
return 0;
}
static struct Bprotocol X75SLP = {
.Bprotocols = (1 << (ISDN_P_B_X75SLP & ISDN_P_B_MASK)),
.name = "X75SLP",
.create = x75create
};
int
Isdnl2_Init(u_int *deb)
{
int res;
debug = deb;
mISDN_register_Bprotocol(&X75SLP);
l2fsm.state_count = L2_STATE_COUNT;
l2fsm.event_count = L2_EVENT_COUNT;
l2fsm.strEvent = strL2Event;
l2fsm.strState = strL2State;
res = mISDN_FsmNew(&l2fsm, L2FnList, ARRAY_SIZE(L2FnList));
if (res)
goto error;
res = TEIInit(deb);
if (res)
goto error_fsm;
return 0;
error_fsm:
mISDN_FsmFree(&l2fsm);
error:
mISDN_unregister_Bprotocol(&X75SLP);
return res;
}
void
Isdnl2_cleanup(void)
{
mISDN_unregister_Bprotocol(&X75SLP);
TEIFree();
mISDN_FsmFree(&l2fsm);
}
| linux-master | drivers/isdn/mISDN/layer2.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* dsp_hwec.c:
* builtin mISDN dsp pipeline element for enabling the hw echocanceller
*
* Copyright (C) 2007, Nadi Sarrar
*
* Nadi Sarrar <[email protected]>
*/
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/mISDNdsp.h>
#include <linux/mISDNif.h>
#include "core.h"
#include "dsp.h"
#include "dsp_hwec.h"
static struct mISDN_dsp_element_arg args[] = {
{ "deftaps", "128", "Set the number of taps of cancellation." },
};
static struct mISDN_dsp_element dsp_hwec_p = {
.name = "hwec",
.new = NULL,
.free = NULL,
.process_tx = NULL,
.process_rx = NULL,
.num_args = ARRAY_SIZE(args),
.args = args,
};
struct mISDN_dsp_element *dsp_hwec = &dsp_hwec_p;
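/*
 * Enable the hardware echo canceller of the peer channel.  The argument
 * string is a comma-separated list of name=value pairs; only "deftaps"
 * (number of taps, default 128) is understood, so an illustrative call
 * would be dsp_hwec_enable(dsp, "deftaps=256").  The request is passed to
 * the card via MISDN_CTRL_HFC_ECHOCAN_ON.
 */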
void dsp_hwec_enable(struct dsp *dsp, const char *arg)
{
int deftaps = 128,
len;
struct mISDN_ctrl_req cq;
if (!dsp) {
printk(KERN_ERR "%s: failed to enable hwec: dsp is NULL\n",
__func__);
return;
}
if (!arg)
goto _do;
len = strlen(arg);
if (!len)
goto _do;
{
char *dup, *tok, *name, *val;
int tmp;
dup = kstrdup(arg, GFP_ATOMIC);
if (!dup)
return;
while ((tok = strsep(&dup, ","))) {
if (!strlen(tok))
continue;
name = strsep(&tok, "=");
val = tok;
if (!val)
continue;
if (!strcmp(name, "deftaps")) {
if (sscanf(val, "%d", &tmp) == 1)
deftaps = tmp;
}
}
kfree(dup);
}
_do:
printk(KERN_DEBUG "%s: enabling hwec with deftaps=%d\n",
__func__, deftaps);
memset(&cq, 0, sizeof(cq));
cq.op = MISDN_CTRL_HFC_ECHOCAN_ON;
cq.p1 = deftaps;
if (!dsp->ch.peer->ctrl(&dsp->ch, CONTROL_CHANNEL, &cq)) {
printk(KERN_DEBUG "%s: CONTROL_CHANNEL failed\n",
__func__);
return;
}
}
void dsp_hwec_disable(struct dsp *dsp)
{
struct mISDN_ctrl_req cq;
if (!dsp) {
printk(KERN_ERR "%s: failed to disable hwec: dsp is NULL\n",
__func__);
return;
}
printk(KERN_DEBUG "%s: disabling hwec\n", __func__);
memset(&cq, 0, sizeof(cq));
cq.op = MISDN_CTRL_HFC_ECHOCAN_OFF;
if (!dsp->ch.peer->ctrl(&dsp->ch, CONTROL_CHANNEL, &cq)) {
printk(KERN_DEBUG "%s: CONTROL_CHANNEL failed\n",
__func__);
return;
}
}
int dsp_hwec_init(void)
{
mISDN_dsp_element_register(dsp_hwec);
return 0;
}
void dsp_hwec_exit(void)
{
mISDN_dsp_element_unregister(dsp_hwec);
}
| linux-master | drivers/isdn/mISDN/dsp_hwec.c |
/*
* Author Andreas Eversberg ([email protected])
* Based on source code structure by
* Karsten Keil ([email protected])
*
* This file is (c) under GNU PUBLIC LICENSE
*
* Thanks to Karsten Keil (great drivers)
* Cologne Chip (great chips)
*
* This module does:
* Real-time tone generation
* DTMF detection
 * Real-time cross-connection and conferencing
 * Compensation of jitter due to system load and hardware faults.
* All features are done in kernel space and will be realized
* using hardware, if available and supported by chip set.
* Blowfish encryption/decryption
*/
/* STRUCTURE:
*
* The dsp module provides layer 2 for b-channels (64kbit). It provides
* transparent audio forwarding with special digital signal processing:
*
* - (1) generation of tones
* - (2) detection of dtmf tones
* - (3) crossconnecting and conferences (clocking)
* - (4) echo generation for delay test
* - (5) volume control
* - (6) disable receive data
* - (7) pipeline
* - (8) encryption/decryption
*
* Look:
* TX RX
* ------upper layer------
* | ^
* | |(6)
* v |
* +-----+-------------+-----+
* |(3)(4) |
* | CMX |
* | |
* | +-------------+
* | | ^
* | | |
* |+---------+| +----+----+
* ||(1) || |(2) |
* || || | |
* || Tones || | DTMF |
* || || | |
* || || | |
* |+----+----+| +----+----+
* +-----+-----+ ^
* | |
* v |
* +----+----+ +----+----+
* |(5) | |(5) |
* | | | |
* |TX Volume| |RX Volume|
* | | | |
* | | | |
* +----+----+ +----+----+
* | ^
* | |
* v |
* +----+-------------+----+
* |(7) |
* | |
* | Pipeline Processing |
* | |
* | |
* +----+-------------+----+
* | ^
* | |
* v |
* +----+----+ +----+----+
* |(8) | |(8) |
* | | | |
* | Encrypt | | Decrypt |
* | | | |
* | | | |
* +----+----+ +----+----+
* | ^
* | |
* v |
* ------card layer------
* TX RX
*
 * Above you can see the logical data flow. If the processing is done in
 * software, this is also the real data flow. If hardware is used, data may
 * not actually flow; instead, commands are sent to the card so that it
 * provides the data flow as shown.
*
* NOTE: The channel must be activated in order to make dsp work, even if
* no data flow to the upper layer is intended. Activation can be done
 * before or after adjusting the settings using PH_CONTROL requests.
*
* DTMF: Will be detected by hardware if possible. It is done before CMX
* processing.
*
* Tones: Will be generated via software if endless looped audio fifos are
* not supported by hardware. Tones will override all data from CMX.
* It is not required to join a conference to use tones at any time.
*
* CMX: Is transparent when not used. When it is used, it will do
* crossconnections and conferences via software if not possible through
* hardware. If hardware capability is available, hardware is used.
*
* Echo: Is generated by CMX and is used to check performance of hard and
* software CMX.
*
* The CMX has special functions for conferences with one, two and more
* members. It will allow different types of data flow. Receive and transmit
 * data to/from the upper layer may be switched on/off individually without losing
* features of CMX, Tones and DTMF.
*
* Echo Cancellation: Sometimes we like to cancel echo from the interface.
* Note that a VoIP call may not have echo caused by the IP phone. The echo
* is generated by the telephone line connected to it. Because the delay
 * is high, it becomes an audible echo. RESULT: echo cancellation is required
 * if both echo AND delay are applied to an interface.
 * Remember that the software CMX always introduces some delay.
*
* If all used features can be realized in hardware, and if transmit and/or
 * receive data is disabled, the card may not send/receive any data at all.
* Not receiving is useful if only announcements are played. Not sending is
* useful if an answering machine records audio. Not sending and receiving is
* useful during most states of the call. If supported by hardware, tones
* will be played without cpu load. Small PBXs and NT-Mode applications will
* not need expensive hardware when processing calls.
*
*
* LOCKING:
*
* When data is received from upper or lower layer (card), the complete dsp
 * module is locked by a global lock. This lock MUST disable irqs, because
 * it must also block the timer events of the DSP poll timer.
* When data is ready to be transmitted down, the data is queued and sent
* outside lock and timer event.
* PH_CONTROL must not change any settings, join or split conference members
* during process of data.
*
* HDLC:
*
 * It works much the same as transparent mode, except that HDLC data is
 * forwarded to all other conference members if no hardware bridging is
 * possible. Send data is written to the sendq; the sendq is transmitted once
 * a confirm is received.
 * A conference cannot be joined if one member is not HDLC.
*
*/
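/*
 * Illustrative sketch (not part of the original driver): how an upper layer
 * could build a PH_CONTROL request for this module.  The payload layout (a
 * leading int opcode, optionally followed by an int argument) is taken from
 * dsp_control_req() below; the rest of this snippet is an assumption.
 *
 *	int pkt[2] = { DTMF_TONE_START, 100 };
 *	struct sk_buff *cskb = _alloc_mISDN_skb(PH_CONTROL_REQ, MISDN_ID_ANY,
 *					sizeof(pkt), pkt, GFP_ATOMIC);
 *	if (cskb && ch->send(ch, cskb))
 *		dev_kfree_skb(cskb);
 */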
#include <linux/delay.h>
#include <linux/gfp.h>
#include <linux/mISDNif.h>
#include <linux/mISDNdsp.h>
#include <linux/module.h>
#include <linux/vmalloc.h>
#include "core.h"
#include "dsp.h"
static const char *mISDN_dsp_revision = "2.0";
static int debug;
static int options;
static int poll;
static int dtmfthreshold = 100;
MODULE_AUTHOR("Andreas Eversberg");
module_param(debug, uint, S_IRUGO | S_IWUSR);
module_param(options, uint, S_IRUGO | S_IWUSR);
module_param(poll, uint, S_IRUGO | S_IWUSR);
module_param(dtmfthreshold, uint, S_IRUGO | S_IWUSR);
MODULE_LICENSE("GPL");
/*int spinnest = 0;*/
DEFINE_SPINLOCK(dsp_lock); /* global dsp lock */
LIST_HEAD(dsp_ilist);
LIST_HEAD(conf_ilist);
int dsp_debug;
int dsp_options;
int dsp_poll, dsp_tics;
/* check if rx may be turned off or must be turned on */
static void
dsp_rx_off_member(struct dsp *dsp)
{
struct mISDN_ctrl_req cq;
int rx_off = 1;
memset(&cq, 0, sizeof(cq));
if (!dsp->features_rx_off)
return;
/* not disabled */
if (!dsp->rx_disabled)
rx_off = 0;
/* software dtmf */
else if (dsp->dtmf.software)
rx_off = 0;
/* echo in software */
else if (dsp->echo.software)
rx_off = 0;
/* bridge in software */
else if (dsp->conf && dsp->conf->software)
rx_off = 0;
	/* data is not required by user space and not required
	 * for dtmf detection, soft-echo or soft-bridging */
if (rx_off == dsp->rx_is_off)
return;
if (!dsp->ch.peer) {
if (dsp_debug & DEBUG_DSP_CORE)
printk(KERN_DEBUG "%s: no peer, no rx_off\n",
__func__);
return;
}
cq.op = MISDN_CTRL_RX_OFF;
cq.p1 = rx_off;
if (dsp->ch.peer->ctrl(dsp->ch.peer, CONTROL_CHANNEL, &cq)) {
printk(KERN_DEBUG "%s: 2nd CONTROL_CHANNEL failed\n",
__func__);
return;
}
dsp->rx_is_off = rx_off;
if (dsp_debug & DEBUG_DSP_CORE)
printk(KERN_DEBUG "%s: %s set rx_off = %d\n",
__func__, dsp->name, rx_off);
}
static void
dsp_rx_off(struct dsp *dsp)
{
struct dsp_conf_member *member;
if (dsp_options & DSP_OPT_NOHARDWARE)
return;
/* no conf */
if (!dsp->conf) {
dsp_rx_off_member(dsp);
return;
}
/* check all members in conf */
list_for_each_entry(member, &dsp->conf->mlist, list) {
dsp_rx_off_member(member->dsp);
}
}
/* enable "fill empty" feature */
static void
dsp_fill_empty(struct dsp *dsp)
{
struct mISDN_ctrl_req cq;
memset(&cq, 0, sizeof(cq));
if (!dsp->ch.peer) {
if (dsp_debug & DEBUG_DSP_CORE)
printk(KERN_DEBUG "%s: no peer, no fill_empty\n",
__func__);
return;
}
cq.op = MISDN_CTRL_FILL_EMPTY;
cq.p1 = 1;
cq.p2 = dsp_silence;
if (dsp->ch.peer->ctrl(dsp->ch.peer, CONTROL_CHANNEL, &cq)) {
printk(KERN_DEBUG "%s: CONTROL_CHANNEL failed\n",
__func__);
return;
}
if (dsp_debug & DEBUG_DSP_CORE)
printk(KERN_DEBUG "%s: %s set fill_empty = 1\n",
__func__, dsp->name);
}
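/*
 * Handle a PH_CONTROL request from the upper layer.  The first int of the
 * payload selects the operation (DTMF on/off, conference join/split, tone
 * start/stop, volume, echo, receive/transmit enable, delay/jitter handling,
 * pipeline configuration, blowfish key); optional arguments follow it.
 */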
static int
dsp_control_req(struct dsp *dsp, struct mISDNhead *hh, struct sk_buff *skb)
{
struct sk_buff *nskb;
int ret = 0;
int cont;
u8 *data;
int len;
if (skb->len < sizeof(int)) {
printk(KERN_ERR "%s: PH_CONTROL message too short\n", __func__);
return -EINVAL;
}
cont = *((int *)skb->data);
len = skb->len - sizeof(int);
data = skb->data + sizeof(int);
switch (cont) {
case DTMF_TONE_START: /* turn on DTMF */
if (dsp->hdlc) {
ret = -EINVAL;
break;
}
if (dsp_debug & DEBUG_DSP_CORE)
printk(KERN_DEBUG "%s: start dtmf\n", __func__);
if (len == sizeof(int)) {
if (dsp_debug & DEBUG_DSP_CORE)
printk(KERN_NOTICE "changing DTMF Threshold "
"to %d\n", *((int *)data));
dsp->dtmf.treshold = (*(int *)data) * 10000;
}
dsp->dtmf.enable = 1;
/* init goertzel */
dsp_dtmf_goertzel_init(dsp);
/* check dtmf hardware */
dsp_dtmf_hardware(dsp);
dsp_rx_off(dsp);
break;
case DTMF_TONE_STOP: /* turn off DTMF */
if (dsp_debug & DEBUG_DSP_CORE)
printk(KERN_DEBUG "%s: stop dtmf\n", __func__);
dsp->dtmf.enable = 0;
dsp->dtmf.hardware = 0;
dsp->dtmf.software = 0;
break;
case DSP_CONF_JOIN: /* join / update conference */
if (len < sizeof(int)) {
ret = -EINVAL;
break;
}
if (*((u32 *)data) == 0)
goto conf_split;
if (dsp_debug & DEBUG_DSP_CORE)
printk(KERN_DEBUG "%s: join conference %d\n",
__func__, *((u32 *)data));
ret = dsp_cmx_conf(dsp, *((u32 *)data));
/* dsp_cmx_hardware will also be called here */
dsp_rx_off(dsp);
if (dsp_debug & DEBUG_DSP_CMX)
dsp_cmx_debug(dsp);
break;
case DSP_CONF_SPLIT: /* remove from conference */
conf_split:
if (dsp_debug & DEBUG_DSP_CORE)
printk(KERN_DEBUG "%s: release conference\n", __func__);
ret = dsp_cmx_conf(dsp, 0);
/* dsp_cmx_hardware will also be called here */
if (dsp_debug & DEBUG_DSP_CMX)
dsp_cmx_debug(dsp);
dsp_rx_off(dsp);
break;
case DSP_TONE_PATT_ON: /* play tone */
if (dsp->hdlc) {
ret = -EINVAL;
break;
}
if (len < sizeof(int)) {
ret = -EINVAL;
break;
}
if (dsp_debug & DEBUG_DSP_CORE)
printk(KERN_DEBUG "%s: turn tone 0x%x on\n",
__func__, *((int *)skb->data));
ret = dsp_tone(dsp, *((int *)data));
if (!ret) {
dsp_cmx_hardware(dsp->conf, dsp);
dsp_rx_off(dsp);
}
if (!dsp->tone.tone)
goto tone_off;
break;
case DSP_TONE_PATT_OFF: /* stop tone */
if (dsp->hdlc) {
ret = -EINVAL;
break;
}
if (dsp_debug & DEBUG_DSP_CORE)
printk(KERN_DEBUG "%s: turn tone off\n", __func__);
dsp_tone(dsp, 0);
dsp_cmx_hardware(dsp->conf, dsp);
dsp_rx_off(dsp);
/* reset tx buffers (user space data) */
tone_off:
dsp->rx_W = 0;
dsp->rx_R = 0;
break;
case DSP_VOL_CHANGE_TX: /* change volume */
if (dsp->hdlc) {
ret = -EINVAL;
break;
}
if (len < sizeof(int)) {
ret = -EINVAL;
break;
}
dsp->tx_volume = *((int *)data);
if (dsp_debug & DEBUG_DSP_CORE)
printk(KERN_DEBUG "%s: change tx vol to %d\n",
__func__, dsp->tx_volume);
dsp_cmx_hardware(dsp->conf, dsp);
dsp_dtmf_hardware(dsp);
dsp_rx_off(dsp);
break;
case DSP_VOL_CHANGE_RX: /* change volume */
if (dsp->hdlc) {
ret = -EINVAL;
break;
}
if (len < sizeof(int)) {
ret = -EINVAL;
break;
}
dsp->rx_volume = *((int *)data);
if (dsp_debug & DEBUG_DSP_CORE)
printk(KERN_DEBUG "%s: change rx vol to %d\n",
			       __func__, dsp->rx_volume);
dsp_cmx_hardware(dsp->conf, dsp);
dsp_dtmf_hardware(dsp);
dsp_rx_off(dsp);
break;
case DSP_ECHO_ON: /* enable echo */
dsp->echo.software = 1; /* soft echo */
if (dsp_debug & DEBUG_DSP_CORE)
printk(KERN_DEBUG "%s: enable cmx-echo\n", __func__);
dsp_cmx_hardware(dsp->conf, dsp);
dsp_rx_off(dsp);
if (dsp_debug & DEBUG_DSP_CMX)
dsp_cmx_debug(dsp);
break;
case DSP_ECHO_OFF: /* disable echo */
dsp->echo.software = 0;
dsp->echo.hardware = 0;
if (dsp_debug & DEBUG_DSP_CORE)
printk(KERN_DEBUG "%s: disable cmx-echo\n", __func__);
dsp_cmx_hardware(dsp->conf, dsp);
dsp_rx_off(dsp);
if (dsp_debug & DEBUG_DSP_CMX)
dsp_cmx_debug(dsp);
break;
case DSP_RECEIVE_ON: /* enable receive to user space */
if (dsp_debug & DEBUG_DSP_CORE)
printk(KERN_DEBUG "%s: enable receive to user "
"space\n", __func__);
dsp->rx_disabled = 0;
dsp_rx_off(dsp);
break;
case DSP_RECEIVE_OFF: /* disable receive to user space */
if (dsp_debug & DEBUG_DSP_CORE)
printk(KERN_DEBUG "%s: disable receive to "
"user space\n", __func__);
dsp->rx_disabled = 1;
dsp_rx_off(dsp);
break;
case DSP_MIX_ON: /* enable mixing of tx data */
if (dsp->hdlc) {
ret = -EINVAL;
break;
}
if (dsp_debug & DEBUG_DSP_CORE)
printk(KERN_DEBUG "%s: enable mixing of "
"tx-data with conf members\n", __func__);
dsp->tx_mix = 1;
dsp_cmx_hardware(dsp->conf, dsp);
dsp_rx_off(dsp);
if (dsp_debug & DEBUG_DSP_CMX)
dsp_cmx_debug(dsp);
break;
case DSP_MIX_OFF: /* disable mixing of tx data */
if (dsp->hdlc) {
ret = -EINVAL;
break;
}
if (dsp_debug & DEBUG_DSP_CORE)
printk(KERN_DEBUG "%s: disable mixing of "
"tx-data with conf members\n", __func__);
dsp->tx_mix = 0;
dsp_cmx_hardware(dsp->conf, dsp);
dsp_rx_off(dsp);
if (dsp_debug & DEBUG_DSP_CMX)
dsp_cmx_debug(dsp);
break;
case DSP_TXDATA_ON: /* enable txdata */
dsp->tx_data = 1;
if (dsp_debug & DEBUG_DSP_CORE)
printk(KERN_DEBUG "%s: enable tx-data\n", __func__);
dsp_cmx_hardware(dsp->conf, dsp);
dsp_rx_off(dsp);
if (dsp_debug & DEBUG_DSP_CMX)
dsp_cmx_debug(dsp);
break;
case DSP_TXDATA_OFF: /* disable txdata */
dsp->tx_data = 0;
if (dsp_debug & DEBUG_DSP_CORE)
printk(KERN_DEBUG "%s: disable tx-data\n", __func__);
dsp_cmx_hardware(dsp->conf, dsp);
dsp_rx_off(dsp);
if (dsp_debug & DEBUG_DSP_CMX)
dsp_cmx_debug(dsp);
break;
case DSP_DELAY: /* use delay algorithm instead of dynamic
jitter algorithm */
if (dsp->hdlc) {
ret = -EINVAL;
break;
}
if (len < sizeof(int)) {
ret = -EINVAL;
break;
}
dsp->cmx_delay = (*((int *)data)) << 3;
/* milliseconds to samples */
if (dsp->cmx_delay >= (CMX_BUFF_HALF >> 1))
/* clip to half of maximum usable buffer
(half of half buffer) */
dsp->cmx_delay = (CMX_BUFF_HALF >> 1) - 1;
if (dsp_debug & DEBUG_DSP_CORE)
printk(KERN_DEBUG "%s: use delay algorithm to "
"compensate jitter (%d samples)\n",
__func__, dsp->cmx_delay);
break;
case DSP_JITTER: /* use dynamic jitter algorithm instead of
delay algorithm */
if (dsp->hdlc) {
ret = -EINVAL;
break;
}
dsp->cmx_delay = 0;
if (dsp_debug & DEBUG_DSP_CORE)
printk(KERN_DEBUG "%s: use jitter algorithm to "
"compensate jitter\n", __func__);
break;
case DSP_TX_DEJITTER: /* use dynamic jitter algorithm for tx-buffer */
if (dsp->hdlc) {
ret = -EINVAL;
break;
}
dsp->tx_dejitter = 1;
if (dsp_debug & DEBUG_DSP_CORE)
printk(KERN_DEBUG "%s: use dejitter on TX "
"buffer\n", __func__);
break;
case DSP_TX_DEJ_OFF: /* use tx-buffer without dejittering*/
if (dsp->hdlc) {
ret = -EINVAL;
break;
}
dsp->tx_dejitter = 0;
if (dsp_debug & DEBUG_DSP_CORE)
printk(KERN_DEBUG "%s: use TX buffer without "
"dejittering\n", __func__);
break;
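	/*
	 * DSP_PIPELINE_CFG carries a NUL-terminated element string that is
	 * handed to dsp_pipeline_build(); an unterminated string is rejected.
	 */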
case DSP_PIPELINE_CFG:
if (dsp->hdlc) {
ret = -EINVAL;
break;
}
if (len > 0 && ((char *)data)[len - 1]) {
printk(KERN_DEBUG "%s: pipeline config string "
"is not NULL terminated!\n", __func__);
ret = -EINVAL;
} else {
dsp->pipeline.inuse = 1;
dsp_cmx_hardware(dsp->conf, dsp);
ret = dsp_pipeline_build(&dsp->pipeline,
len > 0 ? data : NULL);
dsp_cmx_hardware(dsp->conf, dsp);
dsp_rx_off(dsp);
}
break;
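	/*
	 * Blowfish encryption: the key must be 4..56 bytes long, and the
	 * outcome of the key setup is reported back to user space as
	 * DSP_BF_ACCEPT or DSP_BF_REJECT via PH_CONTROL_IND.
	 */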
case DSP_BF_ENABLE_KEY: /* turn blowfish on */
if (dsp->hdlc) {
ret = -EINVAL;
break;
}
if (len < 4 || len > 56) {
ret = -EINVAL;
break;
}
if (dsp_debug & DEBUG_DSP_CORE)
printk(KERN_DEBUG "%s: turn blowfish on (key "
"not shown)\n", __func__);
ret = dsp_bf_init(dsp, (u8 *)data, len);
/* set new cont */
if (!ret)
cont = DSP_BF_ACCEPT;
else
cont = DSP_BF_REJECT;
/* send indication if it worked to set it */
nskb = _alloc_mISDN_skb(PH_CONTROL_IND, MISDN_ID_ANY,
sizeof(int), &cont, GFP_ATOMIC);
if (nskb) {
if (dsp->up) {
if (dsp->up->send(dsp->up, nskb))
dev_kfree_skb(nskb);
} else
dev_kfree_skb(nskb);
}
if (!ret) {
dsp_cmx_hardware(dsp->conf, dsp);
dsp_dtmf_hardware(dsp);
dsp_rx_off(dsp);
}
break;
case DSP_BF_DISABLE: /* turn blowfish off */
if (dsp->hdlc) {
ret = -EINVAL;
break;
}
if (dsp_debug & DEBUG_DSP_CORE)
printk(KERN_DEBUG "%s: turn blowfish off\n", __func__);
dsp_bf_cleanup(dsp);
dsp_cmx_hardware(dsp->conf, dsp);
dsp_dtmf_hardware(dsp);
dsp_rx_off(dsp);
break;
default:
if (dsp_debug & DEBUG_DSP_CORE)
printk(KERN_DEBUG "%s: ctrl req %x unhandled\n",
__func__, cont);
ret = -EINVAL;
}
return ret;
}
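/*
 * get_features
 * Query the peer channel (the B-channel driver below) for its control
 * operations and, unless DSP_OPT_NOHARDWARE is set, for its hardware DSP
 * features. The results (rx_off, fill_empty, hardware features) are
 * cached in the dsp instance.
 */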
static void
get_features(struct mISDNchannel *ch)
{
struct dsp *dsp = container_of(ch, struct dsp, ch);
struct mISDN_ctrl_req cq;
if (!ch->peer) {
if (dsp_debug & DEBUG_DSP_CORE)
printk(KERN_DEBUG "%s: no peer, no features\n",
__func__);
return;
}
memset(&cq, 0, sizeof(cq));
cq.op = MISDN_CTRL_GETOP;
if (ch->peer->ctrl(ch->peer, CONTROL_CHANNEL, &cq) < 0) {
printk(KERN_DEBUG "%s: CONTROL_CHANNEL failed\n",
__func__);
return;
}
if (cq.op & MISDN_CTRL_RX_OFF)
dsp->features_rx_off = 1;
if (cq.op & MISDN_CTRL_FILL_EMPTY)
dsp->features_fill_empty = 1;
if (dsp_options & DSP_OPT_NOHARDWARE)
return;
if ((cq.op & MISDN_CTRL_HW_FEATURES_OP)) {
cq.op = MISDN_CTRL_HW_FEATURES;
*((u_long *)&cq.p1) = (u_long)&dsp->features;
if (ch->peer->ctrl(ch->peer, CONTROL_CHANNEL, &cq)) {
printk(KERN_DEBUG "%s: 2nd CONTROL_CHANNEL failed\n",
__func__);
}
} else
if (dsp_debug & DEBUG_DSP_CORE)
printk(KERN_DEBUG "%s: features not supported for %s\n",
__func__, dsp->name);
}
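/*
 * dsp_function
 * Central message handler of the dsp channel. Data coming up from the
 * B-channel is decrypted, run through the pipeline, volume-adjusted,
 * DTMF-decoded and fed to the conference mixer before being passed to
 * user space. Requests from above (data, control, activation and
 * deactivation) are processed and forwarded down to the hardware.
 */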
static int
dsp_function(struct mISDNchannel *ch, struct sk_buff *skb)
{
struct dsp *dsp = container_of(ch, struct dsp, ch);
struct mISDNhead *hh;
int ret = 0;
u8 *digits = NULL;
u_long flags;
hh = mISDN_HEAD_P(skb);
switch (hh->prim) {
/* FROM DOWN */
case (PH_DATA_CNF):
dsp->data_pending = 0;
/* trigger next hdlc frame, if any */
if (dsp->hdlc) {
spin_lock_irqsave(&dsp_lock, flags);
if (dsp->b_active)
schedule_work(&dsp->workq);
spin_unlock_irqrestore(&dsp_lock, flags);
}
break;
case (PH_DATA_IND):
case (DL_DATA_IND):
if (skb->len < 1) {
ret = -EINVAL;
break;
}
if (dsp->rx_is_off) {
if (dsp_debug & DEBUG_DSP_CORE)
printk(KERN_DEBUG "%s: rx-data during rx_off"
" for %s\n",
__func__, dsp->name);
}
if (dsp->hdlc) {
/* hdlc */
spin_lock_irqsave(&dsp_lock, flags);
dsp_cmx_hdlc(dsp, skb);
spin_unlock_irqrestore(&dsp_lock, flags);
if (dsp->rx_disabled) {
/* if receive is not allowed */
break;
}
hh->prim = DL_DATA_IND;
if (dsp->up)
return dsp->up->send(dsp->up, skb);
break;
}
spin_lock_irqsave(&dsp_lock, flags);
/* decrypt if enabled */
if (dsp->bf_enable)
dsp_bf_decrypt(dsp, skb->data, skb->len);
/* pipeline */
if (dsp->pipeline.inuse)
dsp_pipeline_process_rx(&dsp->pipeline, skb->data,
skb->len, hh->id);
/* change volume if requested */
if (dsp->rx_volume)
dsp_change_volume(skb, dsp->rx_volume);
/* check if dtmf soft decoding is turned on */
if (dsp->dtmf.software) {
digits = dsp_dtmf_goertzel_decode(dsp, skb->data,
skb->len, (dsp_options & DSP_OPT_ULAW) ? 1 : 0);
}
/* we need to process receive data if software */
if (dsp->conf && dsp->conf->software) {
/* process data from card at cmx */
dsp_cmx_receive(dsp, skb);
}
spin_unlock_irqrestore(&dsp_lock, flags);
/* send dtmf result, if any */
if (digits) {
while (*digits) {
int k;
struct sk_buff *nskb;
if (dsp_debug & DEBUG_DSP_DTMF)
printk(KERN_DEBUG "%s: digit"
"(%c) to layer %s\n",
__func__, *digits, dsp->name);
k = *digits | DTMF_TONE_VAL;
nskb = _alloc_mISDN_skb(PH_CONTROL_IND,
MISDN_ID_ANY, sizeof(int), &k,
GFP_ATOMIC);
if (nskb) {
if (dsp->up) {
if (dsp->up->send(
dsp->up, nskb))
dev_kfree_skb(nskb);
} else
dev_kfree_skb(nskb);
}
digits++;
}
}
if (dsp->rx_disabled) {
/* if receive is not allowed */
break;
}
hh->prim = DL_DATA_IND;
if (dsp->up)
return dsp->up->send(dsp->up, skb);
break;
case (PH_CONTROL_IND):
if (dsp_debug & DEBUG_DSP_DTMFCOEFF)
printk(KERN_DEBUG "%s: PH_CONTROL INDICATION "
"received: %x (len %d) %s\n", __func__,
hh->id, skb->len, dsp->name);
switch (hh->id) {
case (DTMF_HFC_COEF): /* getting coefficients */
if (!dsp->dtmf.hardware) {
if (dsp_debug & DEBUG_DSP_DTMFCOEFF)
printk(KERN_DEBUG "%s: ignoring DTMF "
"coefficients from HFC\n",
__func__);
break;
}
digits = dsp_dtmf_goertzel_decode(dsp, skb->data,
skb->len, 2);
while (*digits) {
int k;
struct sk_buff *nskb;
if (dsp_debug & DEBUG_DSP_DTMF)
printk(KERN_DEBUG "%s: digit"
"(%c) to layer %s\n",
__func__, *digits, dsp->name);
k = *digits | DTMF_TONE_VAL;
nskb = _alloc_mISDN_skb(PH_CONTROL_IND,
MISDN_ID_ANY, sizeof(int), &k,
GFP_ATOMIC);
if (nskb) {
if (dsp->up) {
if (dsp->up->send(
dsp->up, nskb))
dev_kfree_skb(nskb);
} else
dev_kfree_skb(nskb);
}
digits++;
}
break;
case (HFC_VOL_CHANGE_TX): /* change volume */
if (skb->len != sizeof(int)) {
ret = -EINVAL;
break;
}
spin_lock_irqsave(&dsp_lock, flags);
dsp->tx_volume = *((int *)skb->data);
if (dsp_debug & DEBUG_DSP_CORE)
printk(KERN_DEBUG "%s: change tx volume to "
"%d\n", __func__, dsp->tx_volume);
dsp_cmx_hardware(dsp->conf, dsp);
dsp_dtmf_hardware(dsp);
dsp_rx_off(dsp);
spin_unlock_irqrestore(&dsp_lock, flags);
break;
default:
if (dsp_debug & DEBUG_DSP_CORE)
printk(KERN_DEBUG "%s: ctrl ind %x unhandled "
"%s\n", __func__, hh->id, dsp->name);
ret = -EINVAL;
}
break;
case (PH_ACTIVATE_IND):
case (PH_ACTIVATE_CNF):
if (dsp_debug & DEBUG_DSP_CORE)
printk(KERN_DEBUG "%s: b_channel is now active %s\n",
__func__, dsp->name);
/* bchannel now active */
spin_lock_irqsave(&dsp_lock, flags);
dsp->b_active = 1;
dsp->data_pending = 0;
dsp->rx_init = 1;
/* rx_W and rx_R will be adjusted on first frame */
dsp->rx_W = 0;
dsp->rx_R = 0;
memset(dsp->rx_buff, 0, sizeof(dsp->rx_buff));
dsp_cmx_hardware(dsp->conf, dsp);
dsp_dtmf_hardware(dsp);
dsp_rx_off(dsp);
spin_unlock_irqrestore(&dsp_lock, flags);
if (dsp_debug & DEBUG_DSP_CORE)
printk(KERN_DEBUG "%s: done with activation, sending "
"confirm to user space. %s\n", __func__,
dsp->name);
/* send activation to upper layer */
hh->prim = DL_ESTABLISH_CNF;
if (dsp->up)
return dsp->up->send(dsp->up, skb);
break;
case (PH_DEACTIVATE_IND):
case (PH_DEACTIVATE_CNF):
if (dsp_debug & DEBUG_DSP_CORE)
printk(KERN_DEBUG "%s: b_channel is now inactive %s\n",
__func__, dsp->name);
/* bchannel now inactive */
spin_lock_irqsave(&dsp_lock, flags);
dsp->b_active = 0;
dsp->data_pending = 0;
dsp_cmx_hardware(dsp->conf, dsp);
dsp_rx_off(dsp);
spin_unlock_irqrestore(&dsp_lock, flags);
hh->prim = DL_RELEASE_CNF;
if (dsp->up)
return dsp->up->send(dsp->up, skb);
break;
/* FROM UP */
case (DL_DATA_REQ):
case (PH_DATA_REQ):
if (skb->len < 1) {
ret = -EINVAL;
break;
}
if (dsp->hdlc) {
/* hdlc */
if (!dsp->b_active) {
ret = -EIO;
break;
}
hh->prim = PH_DATA_REQ;
spin_lock_irqsave(&dsp_lock, flags);
skb_queue_tail(&dsp->sendq, skb);
schedule_work(&dsp->workq);
spin_unlock_irqrestore(&dsp_lock, flags);
return 0;
}
/* send data to tx-buffer (if no tone is played) */
if (!dsp->tone.tone) {
spin_lock_irqsave(&dsp_lock, flags);
dsp_cmx_transmit(dsp, skb);
spin_unlock_irqrestore(&dsp_lock, flags);
}
break;
case (PH_CONTROL_REQ):
spin_lock_irqsave(&dsp_lock, flags);
ret = dsp_control_req(dsp, hh, skb);
spin_unlock_irqrestore(&dsp_lock, flags);
break;
case (DL_ESTABLISH_REQ):
case (PH_ACTIVATE_REQ):
if (dsp_debug & DEBUG_DSP_CORE)
printk(KERN_DEBUG "%s: activating b_channel %s\n",
__func__, dsp->name);
if (dsp->dtmf.hardware || dsp->dtmf.software)
dsp_dtmf_goertzel_init(dsp);
get_features(ch);
/* enable fill_empty feature */
if (dsp->features_fill_empty)
dsp_fill_empty(dsp);
/* send ph_activate */
hh->prim = PH_ACTIVATE_REQ;
if (ch->peer)
return ch->recv(ch->peer, skb);
break;
case (DL_RELEASE_REQ):
case (PH_DEACTIVATE_REQ):
if (dsp_debug & DEBUG_DSP_CORE)
printk(KERN_DEBUG "%s: releasing b_channel %s\n",
__func__, dsp->name);
spin_lock_irqsave(&dsp_lock, flags);
dsp->tone.tone = 0;
dsp->tone.hardware = 0;
dsp->tone.software = 0;
if (timer_pending(&dsp->tone.tl))
del_timer(&dsp->tone.tl);
if (dsp->conf)
dsp_cmx_conf(dsp, 0); /* dsp_cmx_hardware will also be
called here */
skb_queue_purge(&dsp->sendq);
spin_unlock_irqrestore(&dsp_lock, flags);
hh->prim = PH_DEACTIVATE_REQ;
if (ch->peer)
return ch->recv(ch->peer, skb);
break;
default:
if (dsp_debug & DEBUG_DSP_CORE)
printk(KERN_DEBUG "%s: msg %x unhandled %s\n",
__func__, hh->prim, dsp->name);
ret = -EINVAL;
}
if (!ret)
dev_kfree_skb(skb);
return ret;
}
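/*
 * dsp_ctrl
 * Channel control entry point. OPEN_CHANNEL needs no action here;
 * CLOSE_CHANNEL tears the instance down: the peer is closed, the
 * workqueue is cancelled, timer and send queue are purged, conference
 * and pipeline are released and the dsp structure is freed.
 */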
static int
dsp_ctrl(struct mISDNchannel *ch, u_int cmd, void *arg)
{
struct dsp *dsp = container_of(ch, struct dsp, ch);
u_long flags;
	if (dsp_debug & DEBUG_DSP_CTRL)
printk(KERN_DEBUG "%s:(%x)\n", __func__, cmd);
switch (cmd) {
case OPEN_CHANNEL:
break;
case CLOSE_CHANNEL:
if (dsp->ch.peer)
dsp->ch.peer->ctrl(dsp->ch.peer, CLOSE_CHANNEL, NULL);
		/* Before waiting for the workqueue to finish, take the
		 * lock and clear b_active, or we may race with a send
		 * path that is currently queueing work. */
spin_lock_irqsave(&dsp_lock, flags);
dsp->b_active = 0;
spin_unlock_irqrestore(&dsp_lock, flags);
/* MUST not be locked, because it waits until queue is done. */
cancel_work_sync(&dsp->workq);
spin_lock_irqsave(&dsp_lock, flags);
if (timer_pending(&dsp->tone.tl))
del_timer(&dsp->tone.tl);
skb_queue_purge(&dsp->sendq);
if (dsp_debug & DEBUG_DSP_CTRL)
printk(KERN_DEBUG "%s: releasing member %s\n",
__func__, dsp->name);
dsp->b_active = 0;
dsp_cmx_conf(dsp, 0); /* dsp_cmx_hardware will also be called
here */
dsp_pipeline_destroy(&dsp->pipeline);
if (dsp_debug & DEBUG_DSP_CTRL)
printk(KERN_DEBUG "%s: remove & destroy object %s\n",
__func__, dsp->name);
list_del(&dsp->list);
spin_unlock_irqrestore(&dsp_lock, flags);
if (dsp_debug & DEBUG_DSP_CTRL)
printk(KERN_DEBUG "%s: dsp instance released\n",
__func__);
vfree(dsp);
module_put(THIS_MODULE);
break;
}
return 0;
}
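/*
 * dsp_send_bh
 * Workqueue handler that flushes the send queue. Frames marked
 * DL_DATA_REQ are delivered to user space, all other frames go down to
 * the B-channel. While a frame is still unacknowledged (data_pending),
 * further transparent data is dropped instead of piling up.
 */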
static void
dsp_send_bh(struct work_struct *work)
{
struct dsp *dsp = container_of(work, struct dsp, workq);
struct sk_buff *skb;
struct mISDNhead *hh;
if (dsp->hdlc && dsp->data_pending)
return; /* wait until data has been acknowledged */
/* send queued data */
while ((skb = skb_dequeue(&dsp->sendq))) {
		/* in locked state, there may still be data in the queue */
if (dsp->data_pending) {
if (dsp_debug & DEBUG_DSP_CORE)
printk(KERN_DEBUG "%s: fifo full %s, this is "
"no bug!\n", __func__, dsp->name);
/* flush transparent data, if not acked */
dev_kfree_skb(skb);
continue;
}
hh = mISDN_HEAD_P(skb);
if (hh->prim == DL_DATA_REQ) {
/* send packet up */
if (dsp->up) {
if (dsp->up->send(dsp->up, skb))
dev_kfree_skb(skb);
} else
dev_kfree_skb(skb);
} else {
/* send packet down */
if (dsp->ch.peer) {
dsp->data_pending = 1;
if (dsp->ch.recv(dsp->ch.peer, skb)) {
dev_kfree_skb(skb);
dsp->data_pending = 0;
}
} else
dev_kfree_skb(skb);
}
}
}
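/*
 * dspcreate
 * Bprotocol create hook. Allocates and initializes a dsp instance for
 * ISDN_P_B_L2DSP (transparent audio) or ISDN_P_B_L2DSPHDLC (HDLC),
 * rewrites the requested protocol to the raw/HDLC B-channel protocol
 * expected by the layer below and adds the instance to dsp_ilist.
 */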
static int
dspcreate(struct channel_req *crq)
{
struct dsp *ndsp;
u_long flags;
if (crq->protocol != ISDN_P_B_L2DSP
&& crq->protocol != ISDN_P_B_L2DSPHDLC)
return -EPROTONOSUPPORT;
ndsp = vzalloc(sizeof(struct dsp));
if (!ndsp) {
		printk(KERN_ERR "%s: vzalloc of struct dsp failed\n", __func__);
return -ENOMEM;
}
if (dsp_debug & DEBUG_DSP_CTRL)
printk(KERN_DEBUG "%s: creating new dsp instance\n", __func__);
/* default enabled */
	INIT_WORK(&ndsp->workq, dsp_send_bh);
skb_queue_head_init(&ndsp->sendq);
ndsp->ch.send = dsp_function;
ndsp->ch.ctrl = dsp_ctrl;
ndsp->up = crq->ch;
crq->ch = &ndsp->ch;
if (crq->protocol == ISDN_P_B_L2DSP) {
crq->protocol = ISDN_P_B_RAW;
ndsp->hdlc = 0;
} else {
crq->protocol = ISDN_P_B_HDLC;
ndsp->hdlc = 1;
}
if (!try_module_get(THIS_MODULE))
printk(KERN_WARNING "%s:cannot get module\n",
__func__);
sprintf(ndsp->name, "DSP_C%x(0x%p)",
ndsp->up->st->dev->id + 1, ndsp);
/* set frame size to start */
	ndsp->features.hfc_id = -1; /* current HFC id */
	ndsp->features.pcm_id = -1; /* current PCM id */
	ndsp->pcm_slot_rx = -1; /* current PCM slot */
ndsp->pcm_slot_tx = -1;
ndsp->pcm_bank_rx = -1;
ndsp->pcm_bank_tx = -1;
ndsp->hfc_conf = -1; /* current conference number */
/* set tone timer */
timer_setup(&ndsp->tone.tl, dsp_tone_timeout, 0);
if (dtmfthreshold < 20 || dtmfthreshold > 500)
dtmfthreshold = 200;
ndsp->dtmf.treshold = dtmfthreshold * 10000;
/* init pipeline append to list */
spin_lock_irqsave(&dsp_lock, flags);
dsp_pipeline_init(&ndsp->pipeline);
list_add_tail(&ndsp->list, &dsp_ilist);
spin_unlock_irqrestore(&dsp_lock, flags);
return 0;
}
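/* B-protocol descriptor registered with the mISDN core in dsp_init() */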
static struct Bprotocol DSP = {
.Bprotocols = (1 << (ISDN_P_B_L2DSP & ISDN_P_B_MASK))
| (1 << (ISDN_P_B_L2DSPHDLC & ISDN_P_B_MASK)),
.name = "dsp",
.create = dspcreate
};
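/*
 * dsp_init
 * Module init: validate the 'poll' parameter (8..MAX_POLL samples and
 * an exact multiple of the kernel tick), build the audio conversion
 * tables, register the pipeline elements and the B-protocol, and arm
 * the conference mixer timer (dsp_cmx_send) dsp_tics jiffies from now.
 */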
static int __init dsp_init(void)
{
int err;
int tics;
printk(KERN_INFO "DSP module %s\n", mISDN_dsp_revision);
dsp_options = options;
dsp_debug = debug;
/* set packet size */
dsp_poll = poll;
if (dsp_poll) {
if (dsp_poll > MAX_POLL) {
printk(KERN_ERR "%s: Wrong poll value (%d), use %d "
"maximum.\n", __func__, poll, MAX_POLL);
err = -EINVAL;
return err;
}
if (dsp_poll < 8) {
printk(KERN_ERR "%s: Wrong poll value (%d), use 8 "
"minimum.\n", __func__, dsp_poll);
err = -EINVAL;
return err;
}
dsp_tics = poll * HZ / 8000;
if (dsp_tics * 8000 != poll * HZ) {
printk(KERN_INFO "mISDN_dsp: Cannot clock every %d "
"samples (0,125 ms). It is not a multiple of "
"%d HZ.\n", poll, HZ);
err = -EINVAL;
return err;
}
} else {
poll = 8;
while (poll <= MAX_POLL) {
tics = (poll * HZ) / 8000;
if (tics * 8000 == poll * HZ) {
dsp_tics = tics;
dsp_poll = poll;
if (poll >= 64)
break;
}
poll++;
}
}
if (dsp_poll == 0) {
printk(KERN_INFO "mISDN_dsp: There is no multiple of kernel "
"clock that equals exactly the duration of 8-256 "
"samples. (Choose kernel clock speed like 100, 250, "
"300, 1000)\n");
err = -EINVAL;
return err;
}
printk(KERN_INFO "mISDN_dsp: DSP clocks every %d samples. This equals "
"%d jiffies.\n", dsp_poll, dsp_tics);
/* init conversion tables */
dsp_audio_generate_law_tables();
dsp_silence = (dsp_options & DSP_OPT_ULAW) ? 0xff : 0x2a;
dsp_audio_law_to_s32 = (dsp_options & DSP_OPT_ULAW) ?
dsp_audio_ulaw_to_s32 : dsp_audio_alaw_to_s32;
dsp_audio_generate_s2law_table();
dsp_audio_generate_seven();
dsp_audio_generate_mix_table();
if (dsp_options & DSP_OPT_ULAW)
dsp_audio_generate_ulaw_samples();
dsp_audio_generate_volume_changes();
err = dsp_pipeline_module_init();
if (err) {
printk(KERN_ERR "mISDN_dsp: Can't initialize pipeline, "
"error(%d)\n", err);
return err;
}
err = mISDN_register_Bprotocol(&DSP);
if (err) {
printk(KERN_ERR "Can't register %s error(%d)\n", DSP.name, err);
return err;
}
/* set sample timer */
timer_setup(&dsp_spl_tl, dsp_cmx_send, 0);
dsp_spl_tl.expires = jiffies + dsp_tics;
dsp_spl_jiffies = dsp_spl_tl.expires;
add_timer(&dsp_spl_tl);
return 0;
}
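/*
 * dsp_cleanup
 * Module exit: unregister the B-protocol, stop the mixer timer, warn if
 * dsp instances or conferences are still on their lists and release the
 * pipeline elements.
 */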
static void __exit dsp_cleanup(void)
{
mISDN_unregister_Bprotocol(&DSP);
del_timer_sync(&dsp_spl_tl);
if (!list_empty(&dsp_ilist)) {
printk(KERN_ERR "mISDN_dsp: Audio DSP object inst list not "
"empty.\n");
}
if (!list_empty(&conf_ilist)) {
printk(KERN_ERR "mISDN_dsp: Conference list not empty. Not "
"all memory freed.\n");
}
dsp_pipeline_module_exit();
}
module_init(dsp_init);
module_exit(dsp_cleanup);
| linux-master | drivers/isdn/mISDN/dsp_core.c |