python_code
stringlengths 0
1.8M
| repo_name
stringclasses 7
values | file_path
stringlengths 5
99
|
---|---|---|
// SPDX-License-Identifier: GPL-2.0+
/*
* Driver for Alauda-based card readers
*
* Current development and maintenance by:
* (c) 2005 Daniel Drake <[email protected]>
*
* The 'Alauda' is a chip manufacturered by RATOC for OEM use.
*
* Alauda implements a vendor-specific command set to access two media reader
* ports (XD, SmartMedia). This driver converts SCSI commands to the commands
* which are accepted by these devices.
*
* The driver was developed through reverse-engineering, with the help of the
* sddr09 driver which has many similarities, and with some help from the
* (very old) vendor-supplied GPL sma03 driver.
*
* For protocol info, see http://alauda.sourceforge.net
*/
#include <linux/module.h>
#include <linux/slab.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include "usb.h"
#include "transport.h"
#include "protocol.h"
#include "debug.h"
#include "scsiglue.h"
#define DRV_NAME "ums-alauda"
MODULE_DESCRIPTION("Driver for Alauda-based card readers");
MODULE_AUTHOR("Daniel Drake <[email protected]>");
MODULE_LICENSE("GPL");
MODULE_IMPORT_NS(USB_STORAGE);
/*
* Status bytes
*/
#define ALAUDA_STATUS_ERROR 0x01
#define ALAUDA_STATUS_READY 0x40
/*
* Control opcodes (for request field)
*/
#define ALAUDA_GET_XD_MEDIA_STATUS 0x08
#define ALAUDA_GET_SM_MEDIA_STATUS 0x98
#define ALAUDA_ACK_XD_MEDIA_CHANGE 0x0a
#define ALAUDA_ACK_SM_MEDIA_CHANGE 0x9a
#define ALAUDA_GET_XD_MEDIA_SIG 0x86
#define ALAUDA_GET_SM_MEDIA_SIG 0x96
/*
* Bulk command identity (byte 0)
*/
#define ALAUDA_BULK_CMD 0x40
/*
* Bulk opcodes (byte 1)
*/
#define ALAUDA_BULK_GET_REDU_DATA 0x85
#define ALAUDA_BULK_READ_BLOCK 0x94
#define ALAUDA_BULK_ERASE_BLOCK 0xa3
#define ALAUDA_BULK_WRITE_BLOCK 0xb4
#define ALAUDA_BULK_GET_STATUS2 0xb7
#define ALAUDA_BULK_RESET_MEDIA 0xe0
/*
* Port to operate on (byte 8)
*/
#define ALAUDA_PORT_XD 0x00
#define ALAUDA_PORT_SM 0x01
/*
* LBA and PBA are unsigned ints. Special values.
*/
#define UNDEF 0xffff
#define SPARE 0xfffe
#define UNUSABLE 0xfffd
struct alauda_media_info {
unsigned long capacity; /* total media size in bytes */
unsigned int pagesize; /* page size in bytes */
unsigned int blocksize; /* number of pages per block */
unsigned int uzonesize; /* number of usable blocks per zone */
unsigned int zonesize; /* number of blocks per zone */
unsigned int blockmask; /* mask to get page from address */
unsigned char pageshift;
unsigned char blockshift;
unsigned char zoneshift;
u16 **lba_to_pba; /* logical to physical block map */
u16 **pba_to_lba; /* physical to logical block map */
};
struct alauda_info {
struct alauda_media_info port[2];
int wr_ep; /* endpoint to write data out of */
unsigned char sense_key;
unsigned long sense_asc; /* additional sense code */
unsigned long sense_ascq; /* additional sense code qualifier */
};
#define short_pack(lsb,msb) ( ((u16)(lsb)) | ( ((u16)(msb))<<8 ) )
#define LSB_of(s) ((s)&0xFF)
#define MSB_of(s) ((s)>>8)
#define MEDIA_PORT(us) us->srb->device->lun
#define MEDIA_INFO(us) ((struct alauda_info *)us->extra)->port[MEDIA_PORT(us)]
#define PBA_LO(pba) ((pba & 0xF) << 5)
#define PBA_HI(pba) (pba >> 3)
#define PBA_ZONE(pba) (pba >> 11)
static int init_alauda(struct us_data *us);
/*
* The table of devices
*/
#define UNUSUAL_DEV(id_vendor, id_product, bcdDeviceMin, bcdDeviceMax, \
vendorName, productName, useProtocol, useTransport, \
initFunction, flags) \
{ USB_DEVICE_VER(id_vendor, id_product, bcdDeviceMin, bcdDeviceMax), \
.driver_info = (flags) }
static struct usb_device_id alauda_usb_ids[] = {
# include "unusual_alauda.h"
{ } /* Terminating entry */
};
MODULE_DEVICE_TABLE(usb, alauda_usb_ids);
#undef UNUSUAL_DEV
/*
* The flags table
*/
#define UNUSUAL_DEV(idVendor, idProduct, bcdDeviceMin, bcdDeviceMax, \
vendor_name, product_name, use_protocol, use_transport, \
init_function, Flags) \
{ \
.vendorName = vendor_name, \
.productName = product_name, \
.useProtocol = use_protocol, \
.useTransport = use_transport, \
.initFunction = init_function, \
}
static struct us_unusual_dev alauda_unusual_dev_list[] = {
# include "unusual_alauda.h"
{ } /* Terminating entry */
};
#undef UNUSUAL_DEV
/*
* Media handling
*/
struct alauda_card_info {
unsigned char id; /* id byte */
unsigned char chipshift; /* 1<<cs bytes total capacity */
unsigned char pageshift; /* 1<<ps bytes in a page */
unsigned char blockshift; /* 1<<bs pages per block */
unsigned char zoneshift; /* 1<<zs blocks per zone */
};
static struct alauda_card_info alauda_card_ids[] = {
/* NAND flash */
{ 0x6e, 20, 8, 4, 8}, /* 1 MB */
{ 0xe8, 20, 8, 4, 8}, /* 1 MB */
{ 0xec, 20, 8, 4, 8}, /* 1 MB */
{ 0x64, 21, 8, 4, 9}, /* 2 MB */
{ 0xea, 21, 8, 4, 9}, /* 2 MB */
{ 0x6b, 22, 9, 4, 9}, /* 4 MB */
{ 0xe3, 22, 9, 4, 9}, /* 4 MB */
{ 0xe5, 22, 9, 4, 9}, /* 4 MB */
{ 0xe6, 23, 9, 4, 10}, /* 8 MB */
{ 0x73, 24, 9, 5, 10}, /* 16 MB */
{ 0x75, 25, 9, 5, 10}, /* 32 MB */
{ 0x76, 26, 9, 5, 10}, /* 64 MB */
{ 0x79, 27, 9, 5, 10}, /* 128 MB */
{ 0x71, 28, 9, 5, 10}, /* 256 MB */
/* MASK ROM */
{ 0x5d, 21, 9, 4, 8}, /* 2 MB */
{ 0xd5, 22, 9, 4, 9}, /* 4 MB */
{ 0xd6, 23, 9, 4, 10}, /* 8 MB */
{ 0x57, 24, 9, 4, 11}, /* 16 MB */
{ 0x58, 25, 9, 4, 12}, /* 32 MB */
{ 0,}
};
static struct alauda_card_info *alauda_card_find_id(unsigned char id)
{
int i;
for (i = 0; alauda_card_ids[i].id != 0; i++)
if (alauda_card_ids[i].id == id)
return &(alauda_card_ids[i]);
return NULL;
}
/*
* ECC computation.
*/
static unsigned char parity[256];
static unsigned char ecc2[256];
/*
 * Precompute the NAND ECC lookup tables.
 *
 * parity[i] holds the even-parity bit (0 or 1) of byte value i.
 * ecc2[i] holds the third ECC byte for a 256-byte chunk whose
 * XOR-accumulated byte value is i.
 *
 * Must run once before nand_compute_ecc() is used (see init_alauda()).
 */
static void nand_init_ecc(void)
{
	int i, j, a;

	parity[0] = 0;
	/*
	 * Build parity incrementally: i & (i-1) clears the lowest set bit
	 * of i, so its parity differs from parity[i] by exactly one.
	 */
	for (i = 1; i < 256; i++)
		parity[i] = (parity[i&(i-1)] ^ 1);

	for (i = 0; i < 256; i++) {
		a = 0;
		/* fold the *clear* address bits of i into a bit pattern */
		for (j = 0; j < 8; j++) {
			if (i & (1<<j)) {
				if ((j & 1) == 0)
					a ^= 0x04;
				if ((j & 2) == 0)
					a ^= 0x10;
				if ((j & 4) == 0)
					a ^= 0x40;
			}
		}
		/* interleave with the complementary bits and invert */
		ecc2[i] = ~(a ^ (a<<1) ^ (parity[i] ? 0xa8 : 0));
	}
}
/*
 * Compute the 3-byte ECC for a 256-byte data chunk.
 *
 * ecc[0] and ecc[1] each pack 4 accumulated line-parity bits (with
 * their complements interleaved); ecc[2] comes from the precomputed
 * ecc2[] table indexed by the XOR of all 256 bytes.
 * nand_init_ecc() must have been called first.
 */
static void nand_compute_ecc(unsigned char *data, unsigned char *ecc)
{
	int i, j, a;
	unsigned char par = 0, bit, bits[8] = {0};

	/* collect 16 checksum bits */
	for (i = 0; i < 256; i++) {
		par ^= data[i];		/* running XOR of all data bytes */
		bit = parity[data[i]];
		/* accumulate this byte's parity under each clear index bit */
		for (j = 0; j < 8; j++)
			if ((i & (1<<j)) == 0)
				bits[j] ^= bit;
	}

	/* put 4+4+4 = 12 bits in the ecc */
	a = (bits[3] << 6) + (bits[2] << 4) + (bits[1] << 2) + bits[0];
	ecc[0] = ~(a ^ (a<<1) ^ (parity[par] ? 0xaa : 0));

	a = (bits[7] << 6) + (bits[6] << 4) + (bits[5] << 2) + bits[4];
	ecc[1] = ~(a ^ (a<<1) ^ (parity[par] ? 0xaa : 0));

	ecc[2] = ecc2[par];
}
/*
 * Compare a stored 3-byte ECC against a freshly computed one.
 * Returns nonzero (true) when all three bytes match.
 */
static int nand_compare_ecc(unsigned char *data, unsigned char *ecc)
{
	/* memcmp is the idiomatic fixed-length byte compare */
	return memcmp(data, ecc, 3) == 0;
}
/* Write a freshly computed 3-byte ECC into the redundancy area. */
static void nand_store_ecc(unsigned char *data, unsigned char *ecc)
{
	data[0] = ecc[0];
	data[1] = ecc[1];
	data[2] = ecc[2];
}
/*
* Alauda driver
*/
/*
 * Drop every per-zone LBA <-> PBA translation table for one media port.
 * The top-level arrays of zone pointers are kept, with each slot reset
 * to NULL so the maps can be rebuilt lazily on next access.
 */
static void alauda_free_maps (struct alauda_media_info *media_info)
{
	unsigned int total_shift = media_info->zoneshift
		+ media_info->blockshift + media_info->pageshift;
	unsigned int zones = media_info->capacity >> total_shift;
	unsigned int z;

	if (media_info->lba_to_pba != NULL) {
		for (z = 0; z < zones; z++) {
			kfree(media_info->lba_to_pba[z]);
			media_info->lba_to_pba[z] = NULL;
		}
	}

	if (media_info->pba_to_lba != NULL) {
		for (z = 0; z < zones; z++) {
			kfree(media_info->pba_to_lba[z]);
			media_info->pba_to_lba[z] = NULL;
		}
	}
}
/*
 * Returns 2 bytes of status data
 * The first byte describes media status, and second byte describes door status
 *
 * Picks the XD or SM variant of the control opcode based on the LUN
 * being addressed, and reads the two status bytes into @data.
 * Returns a USB_STOR_XFER_* code from usb_stor_ctrl_transfer().
 */
static int alauda_get_media_status(struct us_data *us, unsigned char *data)
{
	int rc;
	unsigned char command;

	/* LUN 0 is the xD slot, LUN 1 the SmartMedia slot */
	if (MEDIA_PORT(us) == ALAUDA_PORT_XD)
		command = ALAUDA_GET_XD_MEDIA_STATUS;
	else
		command = ALAUDA_GET_SM_MEDIA_STATUS;

	rc = usb_stor_ctrl_transfer(us, us->recv_ctrl_pipe,
		command, 0xc0, 0, 1, data, 2);
	if (rc == USB_STOR_XFER_GOOD)
		usb_stor_dbg(us, "Media status %02X %02X\n", data[0], data[1]);

	return rc;
}
/*
 * Clears the "media was changed" bit so that we know when it changes again
 * in the future. Uses the port-specific control opcode for the active LUN.
 */
static int alauda_ack_media(struct us_data *us)
{
	unsigned char opcode = (MEDIA_PORT(us) == ALAUDA_PORT_XD)
		? ALAUDA_ACK_XD_MEDIA_CHANGE
		: ALAUDA_ACK_SM_MEDIA_CHANGE;

	return usb_stor_ctrl_transfer(us, us->send_ctrl_pipe,
		opcode, 0x40, 0, 1, NULL, 0);
}
/*
 * Retrieves a 4-byte media signature, which indicates manufacturer, capacity,
 * and some other details. The opcode depends on which slot (LUN) is active.
 */
static int alauda_get_media_signature(struct us_data *us, unsigned char *data)
{
	unsigned char opcode = (MEDIA_PORT(us) == ALAUDA_PORT_XD)
		? ALAUDA_GET_XD_MEDIA_SIG
		: ALAUDA_GET_SM_MEDIA_SIG;

	return usb_stor_ctrl_transfer(us, us->recv_ctrl_pipe,
		opcode, 0xc0, 0, 0, data, 4);
}
/*
 * Resets the media status (but not the whole device?)
 * Sends a 9-byte bulk command with the reset opcode for the active port.
 */
static int alauda_reset_media(struct us_data *us)
{
	unsigned char *cmd = us->iobuf;

	memset(cmd, 0, 9);
	cmd[0] = ALAUDA_BULK_CMD;
	cmd[1] = ALAUDA_BULK_RESET_MEDIA;
	cmd[8] = MEDIA_PORT(us);

	return usb_stor_bulk_transfer_buf(us, us->send_bulk_pipe,
		cmd, 9, NULL);
}
/*
 * Examines the media and deduces capacity, etc.
 *
 * Waits (bounded) for the media to report ready, acknowledges the
 * insertion, identifies the card from its 4-byte signature, fills in
 * the per-port alauda_media_info, and allocates the per-zone
 * translation-map pointer arrays (the maps themselves are built
 * lazily by alauda_read_map()).
 *
 * Returns USB_STOR_TRANSPORT_GOOD or USB_STOR_TRANSPORT_ERROR.
 */
static int alauda_init_media(struct us_data *us)
{
	unsigned char *data = us->iobuf;
	int ready = 0;
	int tries;
	struct alauda_card_info *media_info;
	unsigned int num_zones;

	/*
	 * Poll for the ready bit, but bound the wait (~5 seconds) so a
	 * misbehaving device cannot stall this path forever.
	 */
	for (tries = 0; !ready && tries < 250; tries++) {
		msleep(20);
		if (alauda_get_media_status(us, data) != USB_STOR_XFER_GOOD)
			return USB_STOR_TRANSPORT_ERROR;
		if (data[0] & 0x10)
			ready = 1;
	}
	if (!ready) {
		usb_stor_dbg(us, "Timed out waiting for media ready\n");
		return USB_STOR_TRANSPORT_ERROR;
	}

	usb_stor_dbg(us, "We are ready for action!\n");

	if (alauda_ack_media(us) != USB_STOR_XFER_GOOD)
		return USB_STOR_TRANSPORT_ERROR;

	msleep(10);

	if (alauda_get_media_status(us, data) != USB_STOR_XFER_GOOD)
		return USB_STOR_TRANSPORT_ERROR;

	if (data[0] != 0x14) {
		usb_stor_dbg(us, "Media not ready after ack\n");
		return USB_STOR_TRANSPORT_ERROR;
	}

	if (alauda_get_media_signature(us, data) != USB_STOR_XFER_GOOD)
		return USB_STOR_TRANSPORT_ERROR;

	usb_stor_dbg(us, "Media signature: %4ph\n", data);
	/* byte 1 of the signature is the card ID */
	media_info = alauda_card_find_id(data[1]);
	if (media_info == NULL) {
		pr_warn("alauda_init_media: Unrecognised media signature: %4ph\n",
			data);
		return USB_STOR_TRANSPORT_ERROR;
	}

	MEDIA_INFO(us).capacity = 1 << media_info->chipshift;
	usb_stor_dbg(us, "Found media with capacity: %ldMB\n",
		     MEDIA_INFO(us).capacity >> 20);

	MEDIA_INFO(us).pageshift = media_info->pageshift;
	MEDIA_INFO(us).blockshift = media_info->blockshift;
	MEDIA_INFO(us).zoneshift = media_info->zoneshift;

	MEDIA_INFO(us).pagesize = 1 << media_info->pageshift;
	MEDIA_INFO(us).blocksize = 1 << media_info->blockshift;
	MEDIA_INFO(us).zonesize = 1 << media_info->zoneshift;

	/* 125 of every 128 blocks in a zone hold user data */
	MEDIA_INFO(us).uzonesize = ((1 << media_info->zoneshift) / 128) * 125;
	MEDIA_INFO(us).blockmask = MEDIA_INFO(us).blocksize - 1;

	num_zones = MEDIA_INFO(us).capacity >> (MEDIA_INFO(us).zoneshift
		+ MEDIA_INFO(us).blockshift + MEDIA_INFO(us).pageshift);
	MEDIA_INFO(us).pba_to_lba = kcalloc(num_zones, sizeof(u16*), GFP_NOIO);
	MEDIA_INFO(us).lba_to_pba = kcalloc(num_zones, sizeof(u16*), GFP_NOIO);
	/* both arrays are released by alauda_info_destructor() */
	if (MEDIA_INFO(us).pba_to_lba == NULL || MEDIA_INFO(us).lba_to_pba == NULL)
		return USB_STOR_TRANSPORT_ERROR;

	if (alauda_reset_media(us) != USB_STOR_XFER_GOOD)
		return USB_STOR_TRANSPORT_ERROR;

	return USB_STOR_TRANSPORT_GOOD;
}
/*
 * Examines the media status and does the right thing when the media has gone,
 * appeared, or changed.
 *
 * Sets the sense data in alauda_info and returns a USB_STOR_TRANSPORT_*
 * code; USB_STOR_TRANSPORT_GOOD means the media is present and mapped.
 */
static int alauda_check_media(struct us_data *us)
{
	struct alauda_info *info = (struct alauda_info *) us->extra;
	unsigned char *status = us->iobuf;
	int rc;

	rc = alauda_get_media_status(us, status);
	if (rc != USB_STOR_XFER_GOOD) {
		status[0] = 0xF0;	/* Pretend there's no media */
		status[1] = 0;
	}

	/* Check for no media or door open */
	if ((status[0] & 0x80) || ((status[0] & 0x1F) == 0x10)
		|| ((status[1] & 0x01) == 0)) {
		usb_stor_dbg(us, "No media, or door open\n");
		alauda_free_maps(&MEDIA_INFO(us));
		info->sense_key = 0x02;		/* NOT READY */
		info->sense_asc = 0x3A;		/* medium not present */
		info->sense_ascq = 0x00;
		return USB_STOR_TRANSPORT_FAILED;
	}

	/* Check for media change */
	if (status[0] & 0x08) {
		usb_stor_dbg(us, "Media change detected\n");
		alauda_free_maps(&MEDIA_INFO(us));
		/*
		 * Propagate a failed re-init instead of ignoring it;
		 * otherwise later commands would operate on
		 * uninitialized media info and translation maps.
		 */
		rc = alauda_init_media(us);
		if (rc != USB_STOR_TRANSPORT_GOOD)
			return rc;
		info->sense_key = UNIT_ATTENTION;
		info->sense_asc = 0x28;		/* medium may have changed */
		info->sense_ascq = 0x00;
		return USB_STOR_TRANSPORT_FAILED;
	}

	return USB_STOR_TRANSPORT_GOOD;
}
/*
 * Checks the status from the 2nd status register
 * Returns 3 bytes of status data, only the first is known
 *
 * Returns USB_STOR_XFER_GOOD on success, USB_STOR_XFER_ERROR when the
 * device flags an error, or the failing transfer's code.
 */
static int alauda_check_status2(struct us_data *us)
{
	int err;
	unsigned char cdb[] = {
		ALAUDA_BULK_CMD, ALAUDA_BULK_GET_STATUS2,
		0, 0, 0, 0, 3, 0, MEDIA_PORT(us)
	};
	unsigned char reply[3];

	err = usb_stor_bulk_transfer_buf(us, us->send_bulk_pipe,
		cdb, 9, NULL);
	if (err != USB_STOR_XFER_GOOD)
		return err;

	err = usb_stor_bulk_transfer_buf(us, us->recv_bulk_pipe,
		reply, 3, NULL);
	if (err != USB_STOR_XFER_GOOD)
		return err;

	usb_stor_dbg(us, "%3ph\n", reply);

	return (reply[0] & ALAUDA_STATUS_ERROR)
		? USB_STOR_XFER_ERROR : USB_STOR_XFER_GOOD;
}
/*
 * Gets the redundancy data for the first page of a PBA
 * Returns 16 bytes in @data.
 */
static int alauda_get_redu_data(struct us_data *us, u16 pba, unsigned char *data)
{
	int err;
	unsigned char cdb[] = {
		ALAUDA_BULK_CMD, ALAUDA_BULK_GET_REDU_DATA,
		PBA_HI(pba), PBA_ZONE(pba), 0, PBA_LO(pba), 0, 0, MEDIA_PORT(us)
	};

	err = usb_stor_bulk_transfer_buf(us, us->send_bulk_pipe,
		cdb, 9, NULL);
	if (err != USB_STOR_XFER_GOOD)
		return err;

	return usb_stor_bulk_transfer_buf(us, us->recv_bulk_pipe,
		data, 16, NULL);
}
/*
 * Finds the first unused PBA in a zone
 * Returns the absolute PBA of an unused PBA, or 0 if none found.
 */
static u16 alauda_find_unused_pba(struct alauda_media_info *info,
	unsigned int zone)
{
	u16 *map = info->pba_to_lba[zone];
	unsigned int idx;

	for (idx = 0; idx < info->zonesize; idx++) {
		if (map[idx] == UNDEF)
			return (zone << info->zoneshift) + idx;
	}

	return 0;
}
/*
 * Reads the redundancy data for all PBA's in a zone
 * Produces lba <--> pba mappings
 *
 * On success the freshly built per-zone tables are installed in
 * MEDIA_INFO(us) and 0 is returned; on failure both are freed and
 * USB_STOR_TRANSPORT_ERROR is returned.
 */
static int alauda_read_map(struct us_data *us, unsigned int zone)
{
	unsigned char *data = us->iobuf;
	int result;
	int i, j;
	unsigned int zonesize = MEDIA_INFO(us).zonesize;
	unsigned int uzonesize = MEDIA_INFO(us).uzonesize;
	unsigned int lba_offset, lba_real, blocknum;
	unsigned int zone_base_lba = zone * uzonesize;
	unsigned int zone_base_pba = zone * zonesize;
	u16 *lba_to_pba = kcalloc(zonesize, sizeof(u16), GFP_NOIO);
	u16 *pba_to_lba = kcalloc(zonesize, sizeof(u16), GFP_NOIO);
	if (lba_to_pba == NULL || pba_to_lba == NULL) {
		result = USB_STOR_TRANSPORT_ERROR;
		goto error;
	}

	usb_stor_dbg(us, "Mapping blocks for zone %d\n", zone);

	/* 1024 PBA's per zone */
	for (i = 0; i < zonesize; i++)
		lba_to_pba[i] = pba_to_lba[i] = UNDEF;

	/* read the 16-byte redundancy area of each block in the zone */
	for (i = 0; i < zonesize; i++) {
		blocknum = zone_base_pba + i;

		result = alauda_get_redu_data(us, blocknum, data);
		if (result != USB_STOR_XFER_GOOD) {
			result = USB_STOR_TRANSPORT_ERROR;
			goto error;
		}

		/* special PBAs have control field 0^16 */
		for (j = 0; j < 16; j++)
			if (data[j] != 0)
				goto nonz;
		pba_to_lba[i] = UNUSABLE;
		usb_stor_dbg(us, "PBA %d has no logical mapping\n", blocknum);
		continue;

		nonz:
		/* unwritten PBAs have control field FF^16 */
		for (j = 0; j < 16; j++)
			if (data[j] != 0xff)
				goto nonff;
		continue;	/* stays UNDEF: free for allocation */

		nonff:
		/* normal PBAs start with six FFs */
		if (j < 6) {
			usb_stor_dbg(us, "PBA %d has no logical mapping: reserved area = %02X%02X%02X%02X data status %02X block status %02X\n",
				     blocknum,
				     data[0], data[1], data[2], data[3],
				     data[4], data[5]);
			pba_to_lba[i] = UNUSABLE;
			continue;
		}

		/* address field must carry the 0001 marker in its top nibble */
		if ((data[6] >> 4) != 0x01) {
			usb_stor_dbg(us, "PBA %d has invalid address field %02X%02X/%02X%02X\n",
				     blocknum, data[6], data[7],
				     data[11], data[12]);
			pba_to_lba[i] = UNUSABLE;
			continue;
		}

		/* check even parity */
		if (parity[data[6] ^ data[7]]) {
			printk(KERN_WARNING
			       "alauda_read_map: Bad parity in LBA for block %d"
			       " (%02X %02X)\n", i, data[6], data[7]);
			pba_to_lba[i] = UNUSABLE;
			continue;
		}

		/* extract the 10-bit zone-relative LBA from bytes 6/7 */
		lba_offset = short_pack(data[7], data[6]);
		lba_offset = (lba_offset & 0x07FF) >> 1;
		lba_real = lba_offset + zone_base_lba;

		/*
		 * Every 1024 physical blocks ("zone"), the LBA numbers
		 * go back to zero, but are within a higher block of LBA's.
		 * Also, there is a maximum of 1000 LBA's per zone.
		 * In other words, in PBA 1024-2047 you will find LBA 0-999
		 * which are really LBA 1000-1999. This allows for 24 bad
		 * or special physical blocks per zone.
		 */

		if (lba_offset >= uzonesize) {
			printk(KERN_WARNING
			       "alauda_read_map: Bad low LBA %d for block %d\n",
			       lba_real, blocknum);
			continue;
		}

		/* ignore duplicate claims for the same LBA */
		if (lba_to_pba[lba_offset] != UNDEF) {
			printk(KERN_WARNING
			       "alauda_read_map: "
			       "LBA %d seen for PBA %d and %d\n",
			       lba_real, lba_to_pba[lba_offset], blocknum);
			continue;
		}

		pba_to_lba[i] = lba_real;
		lba_to_pba[lba_offset] = blocknum;
		continue;
	}

	/* install the finished maps for this zone */
	MEDIA_INFO(us).lba_to_pba[zone] = lba_to_pba;
	MEDIA_INFO(us).pba_to_lba[zone] = pba_to_lba;
	result = 0;
	goto out;

	error:
	kfree(lba_to_pba);
	kfree(pba_to_lba);
	out:
	return result;
}
/*
 * Checks to see whether we have already mapped a certain zone
 * If we haven't, the map is generated by alauda_read_map().
 */
static void alauda_ensure_map_for_zone(struct us_data *us, unsigned int zone)
{
	if (MEDIA_INFO(us).lba_to_pba[zone] != NULL
		&& MEDIA_INFO(us).pba_to_lba[zone] != NULL)
		return;

	alauda_read_map(us, zone);
}
/*
 * Erases an entire block
 * Sends the erase command, then reads back a 2-byte result.
 */
static int alauda_erase_block(struct us_data *us, u16 pba)
{
	int err;
	unsigned char cdb[] = {
		ALAUDA_BULK_CMD, ALAUDA_BULK_ERASE_BLOCK, PBA_HI(pba),
		PBA_ZONE(pba), 0, PBA_LO(pba), 0x02, 0, MEDIA_PORT(us)
	};
	unsigned char reply[2];

	usb_stor_dbg(us, "Erasing PBA %d\n", pba);

	err = usb_stor_bulk_transfer_buf(us, us->send_bulk_pipe,
		cdb, 9, NULL);
	if (err != USB_STOR_XFER_GOOD)
		return err;

	err = usb_stor_bulk_transfer_buf(us, us->recv_bulk_pipe,
		reply, 2, NULL);
	if (err != USB_STOR_XFER_GOOD)
		return err;

	usb_stor_dbg(us, "Erase result: %02X %02X\n", reply[0], reply[1]);
	return err;
}
/*
 * Reads data from a certain offset page inside a PBA, including interleaved
 * redundancy data. Returns (pagesize+64)*pages bytes in data.
 */
static int alauda_read_block_raw(struct us_data *us, u16 pba,
	unsigned int page, unsigned int pages, unsigned char *data)
{
	int err;
	unsigned char cdb[] = {
		ALAUDA_BULK_CMD, ALAUDA_BULK_READ_BLOCK, PBA_HI(pba),
		PBA_ZONE(pba), 0, PBA_LO(pba) + page, pages, 0, MEDIA_PORT(us)
	};

	usb_stor_dbg(us, "pba %d page %d count %d\n", pba, page, pages);

	err = usb_stor_bulk_transfer_buf(us, us->send_bulk_pipe,
		cdb, 9, NULL);
	if (err != USB_STOR_XFER_GOOD)
		return err;

	return usb_stor_bulk_transfer_buf(us, us->recv_bulk_pipe,
		data, (MEDIA_INFO(us).pagesize + 64) * pages, NULL);
}
/*
 * Reads data from a certain offset page inside a PBA, excluding redundancy
 * data. Returns pagesize*pages bytes in data. Note that data must be big enough
 * to hold (pagesize+64)*pages bytes of data, but you can ignore those 'extra'
 * trailing bytes outside this function.
 */
static int alauda_read_block(struct us_data *us, u16 pba,
	unsigned int page, unsigned int pages, unsigned char *data)
{
	int i, err;
	unsigned int pagesize = MEDIA_INFO(us).pagesize;
	unsigned char *dst = data;
	unsigned char *src = data;

	err = alauda_read_block_raw(us, pba, page, pages, data);
	if (err != USB_STOR_XFER_GOOD)
		return err;

	/*
	 * Compact the buffer in place: each page is followed by 64 bytes
	 * of redundancy data, which we squeeze out. memmove is required
	 * because source and destination overlap.
	 */
	for (i = 0; i < pages; i++) {
		memmove(dst, src, pagesize);
		dst += pagesize;
		src += pagesize + 64;
	}

	return err;
}
/*
 * Writes an entire block of data and checks status after write.
 * Redundancy data must be already included in data. Data should be
 * (pagesize+64)*blocksize bytes in length.
 *
 * Returns a USB_STOR_XFER_* code; write errors are surfaced via the
 * second status register (alauda_check_status2()).
 */
static int alauda_write_block(struct us_data *us, u16 pba, unsigned char *data)
{
	int rc;
	struct alauda_info *info = (struct alauda_info *) us->extra;
	unsigned char command[] = {
		ALAUDA_BULK_CMD, ALAUDA_BULK_WRITE_BLOCK, PBA_HI(pba),
		PBA_ZONE(pba), 0, PBA_LO(pba), 32, 0, MEDIA_PORT(us)
	};

	usb_stor_dbg(us, "pba %d\n", pba);

	/* send the 9-byte bulk command header */
	rc = usb_stor_bulk_transfer_buf(us, us->send_bulk_pipe,
		command, 9, NULL);
	if (rc != USB_STOR_XFER_GOOD)
		return rc;

	/* stream the whole block (pages plus redundancy) to the write ep */
	rc = usb_stor_bulk_transfer_buf(us, info->wr_ep, data,
		(MEDIA_INFO(us).pagesize + 64) * MEDIA_INFO(us).blocksize,
		NULL);
	if (rc != USB_STOR_XFER_GOOD)
		return rc;

	/* verify the write via the 2nd status register */
	return alauda_check_status2(us);
}
/*
 * Write some data to a specific LBA.
 *
 * Implements the usual NAND read-modify-write cycle: read the old
 * block (if any) into @blockbuffer, splice in @pages pages of new data
 * from @ptr starting at @page, recompute ECC and the LBA address
 * fields in the redundancy areas, write everything to a freshly
 * allocated PBA, update the translation maps, then erase the old PBA.
 *
 * @blockbuffer must hold (pagesize+64)*blocksize bytes.
 */
static int alauda_write_lba(struct us_data *us, u16 lba,
	unsigned int page, unsigned int pages,
	unsigned char *ptr, unsigned char *blockbuffer)
{
	u16 pba, lbap, new_pba;
	unsigned char *bptr, *cptr, *xptr;
	unsigned char ecc[3];
	int i, result;
	unsigned int uzonesize = MEDIA_INFO(us).uzonesize;
	unsigned int zonesize = MEDIA_INFO(us).zonesize;
	unsigned int pagesize = MEDIA_INFO(us).pagesize;
	unsigned int blocksize = MEDIA_INFO(us).blocksize;
	unsigned int lba_offset = lba % uzonesize;
	unsigned int new_pba_offset;
	unsigned int zone = lba / uzonesize;

	alauda_ensure_map_for_zone(us, zone);

	pba = MEDIA_INFO(us).lba_to_pba[zone][lba_offset];
	if (pba == 1) {
		/*
		 * Maybe it is impossible to write to PBA 1.
		 * Fake success, but don't do anything.
		 */
		printk(KERN_WARNING
		       "alauda_write_lba: avoid writing to pba 1\n");
		return USB_STOR_TRANSPORT_GOOD;
	}

	new_pba = alauda_find_unused_pba(&MEDIA_INFO(us), zone);
	if (!new_pba) {
		printk(KERN_WARNING
		       "alauda_write_lba: Out of unused blocks\n");
		return USB_STOR_TRANSPORT_ERROR;
	}

	/* read old contents */
	if (pba != UNDEF) {
		result = alauda_read_block_raw(us, pba, 0,
			blocksize, blockbuffer);
		if (result != USB_STOR_XFER_GOOD)
			return result;
	} else {
		/* never-written LBA: start from an all-zero block */
		memset(blockbuffer, 0, blocksize * (pagesize + 64));
	}

	/* build the address field: marker nibble 0001, LBA, parity bit */
	lbap = (lba_offset << 1) | 0x1000;
	if (parity[MSB_of(lbap) ^ LSB_of(lbap)])
		lbap ^= 1;

	/* check old contents and fill lba */
	for (i = 0; i < blocksize; i++) {
		bptr = blockbuffer + (i * (pagesize + 64));
		cptr = bptr + pagesize;	/* per-page redundancy area */
		nand_compute_ecc(bptr, ecc);
		if (!nand_compare_ecc(cptr+13, ecc)) {
			usb_stor_dbg(us, "Warning: bad ecc in page %d- of pba %d\n",
				     i, pba);
			nand_store_ecc(cptr+13, ecc);
		}
		nand_compute_ecc(bptr + (pagesize / 2), ecc);
		if (!nand_compare_ecc(cptr+8, ecc)) {
			usb_stor_dbg(us, "Warning: bad ecc in page %d+ of pba %d\n",
				     i, pba);
			nand_store_ecc(cptr+8, ecc);
		}
		/* address field is stored twice in the redundancy area */
		cptr[6] = cptr[11] = MSB_of(lbap);
		cptr[7] = cptr[12] = LSB_of(lbap);
	}

	/* copy in new stuff and compute ECC */
	xptr = ptr;
	for (i = page; i < page+pages; i++) {
		bptr = blockbuffer + (i * (pagesize + 64));
		cptr = bptr + pagesize;
		memcpy(bptr, xptr, pagesize);
		xptr += pagesize;
		nand_compute_ecc(bptr, ecc);
		nand_store_ecc(cptr+13, ecc);
		nand_compute_ecc(bptr + (pagesize / 2), ecc);
		nand_store_ecc(cptr+8, ecc);
	}

	result = alauda_write_block(us, new_pba, blockbuffer);
	if (result != USB_STOR_XFER_GOOD)
		return result;

	/* remap only after the write succeeded */
	new_pba_offset = new_pba - (zone * zonesize);
	MEDIA_INFO(us).pba_to_lba[zone][new_pba_offset] = lba;
	MEDIA_INFO(us).lba_to_pba[zone][lba_offset] = new_pba;
	usb_stor_dbg(us, "Remapped LBA %d to PBA %d\n", lba, new_pba);

	/* retire the old physical block */
	if (pba != UNDEF) {
		unsigned int pba_offset = pba - (zone * zonesize);
		result = alauda_erase_block(us, pba);
		if (result != USB_STOR_XFER_GOOD)
			return result;
		MEDIA_INFO(us).pba_to_lba[zone][pba_offset] = UNDEF;
	}

	return USB_STOR_TRANSPORT_GOOD;
}
/*
 * Read data from a specific sector address
 *
 * @address and @sectors are in units of pages (see alauda_transport(),
 * which passes READ_10 block numbers straight through). Reads at most
 * one block per iteration, translating each LBA to its PBA via the
 * per-zone maps; unmapped LBAs read back as zeroes.
 */
static int alauda_read_data(struct us_data *us, unsigned long address,
	unsigned int sectors)
{
	unsigned char *buffer;
	u16 lba, max_lba;
	unsigned int page, len, offset;
	unsigned int blockshift = MEDIA_INFO(us).blockshift;
	unsigned int pageshift = MEDIA_INFO(us).pageshift;
	unsigned int blocksize = MEDIA_INFO(us).blocksize;
	unsigned int pagesize = MEDIA_INFO(us).pagesize;
	unsigned int uzonesize = MEDIA_INFO(us).uzonesize;
	struct scatterlist *sg;
	int result;

	/*
	 * Since we only read in one block at a time, we have to create
	 * a bounce buffer and move the data a piece at a time between the
	 * bounce buffer and the actual transfer buffer.
	 * We make this buffer big enough to hold temporary redundancy data,
	 * which we use when reading the data blocks.
	 */
	len = min(sectors, blocksize) * (pagesize + 64);
	buffer = kmalloc(len, GFP_NOIO);
	if (!buffer)
		return USB_STOR_TRANSPORT_ERROR;

	/* Figure out the initial LBA and page */
	lba = address >> blockshift;
	page = (address & MEDIA_INFO(us).blockmask);
	max_lba = MEDIA_INFO(us).capacity >> (blockshift + pageshift);

	result = USB_STOR_TRANSPORT_GOOD;
	offset = 0;
	sg = NULL;

	while (sectors > 0) {
		unsigned int zone = lba / uzonesize; /* integer division */
		unsigned int lba_offset = lba - (zone * uzonesize);
		unsigned int pages;
		u16 pba;
		alauda_ensure_map_for_zone(us, zone);

		/* Not overflowing capacity? */
		if (lba >= max_lba) {
			usb_stor_dbg(us, "Error: Requested lba %u exceeds maximum %u\n",
				     lba, max_lba);
			result = USB_STOR_TRANSPORT_ERROR;
			break;
		}

		/* Find number of pages we can read in this block */
		pages = min(sectors, blocksize - page);
		len = pages << pageshift;

		/* Find where this lba lives on disk */
		pba = MEDIA_INFO(us).lba_to_pba[zone][lba_offset];

		if (pba == UNDEF) {	/* this lba was never written */
			usb_stor_dbg(us, "Read %d zero pages (LBA %d) page %d\n",
				     pages, lba, page);

			/*
			 * This is not really an error. It just means
			 * that the block has never been written.
			 * Instead of returning USB_STOR_TRANSPORT_ERROR
			 * it is better to return all zero data.
			 */

			memset(buffer, 0, len);
		} else {
			usb_stor_dbg(us, "Read %d pages, from PBA %d (LBA %d) page %d\n",
				     pages, pba, lba, page);

			result = alauda_read_block(us, pba, page, pages, buffer);
			if (result != USB_STOR_TRANSPORT_GOOD)
				break;
		}

		/* Store the data in the transfer buffer */
		usb_stor_access_xfer_buf(buffer, len, us->srb,
				&sg, &offset, TO_XFER_BUF);

		/* subsequent blocks start at their first page */
		page = 0;
		lba++;
		sectors -= pages;
	}

	kfree(buffer);
	return result;
}
/*
 * Write data to a specific sector address
 *
 * @address and @sectors are in units of pages. Pulls data out of the
 * SCSI transfer buffer one block's worth at a time and hands it to
 * alauda_write_lba(), which does the NAND read-modify-write cycle.
 */
static int alauda_write_data(struct us_data *us, unsigned long address,
	unsigned int sectors)
{
	unsigned char *buffer, *blockbuffer;
	unsigned int page, len, offset;
	unsigned int blockshift = MEDIA_INFO(us).blockshift;
	unsigned int pageshift = MEDIA_INFO(us).pageshift;
	unsigned int blocksize = MEDIA_INFO(us).blocksize;
	unsigned int pagesize = MEDIA_INFO(us).pagesize;
	struct scatterlist *sg;
	u16 lba, max_lba;
	int result;

	/*
	 * Since we don't write the user data directly to the device,
	 * we have to create a bounce buffer and move the data a piece
	 * at a time between the bounce buffer and the actual transfer buffer.
	 */
	len = min(sectors, blocksize) * pagesize;
	buffer = kmalloc(len, GFP_NOIO);
	if (!buffer)
		return USB_STOR_TRANSPORT_ERROR;

	/*
	 * We also need a temporary block buffer, where we read in the old data,
	 * overwrite parts with the new data, and manipulate the redundancy data
	 */
	blockbuffer = kmalloc_array(pagesize + 64, blocksize, GFP_NOIO);
	if (!blockbuffer) {
		kfree(buffer);
		return USB_STOR_TRANSPORT_ERROR;
	}

	/* Figure out the initial LBA and page */
	lba = address >> blockshift;
	page = (address & MEDIA_INFO(us).blockmask);
	max_lba = MEDIA_INFO(us).capacity >> (pageshift + blockshift);

	result = USB_STOR_TRANSPORT_GOOD;
	offset = 0;
	sg = NULL;

	while (sectors > 0) {
		/* Write as many sectors as possible in this block */
		unsigned int pages = min(sectors, blocksize - page);
		len = pages << pageshift;

		/* Not overflowing capacity? */
		if (lba >= max_lba) {
			usb_stor_dbg(us, "Requested lba %u exceeds maximum %u\n",
				     lba, max_lba);
			result = USB_STOR_TRANSPORT_ERROR;
			break;
		}

		/* Get the data from the transfer buffer */
		usb_stor_access_xfer_buf(buffer, len, us->srb,
				&sg, &offset, FROM_XFER_BUF);

		result = alauda_write_lba(us, lba, page, pages, buffer,
			blockbuffer);
		if (result != USB_STOR_TRANSPORT_GOOD)
			break;

		/* subsequent blocks start at their first page */
		page = 0;
		lba++;
		sectors -= pages;
	}

	kfree(buffer);
	kfree(blockbuffer);
	return result;
}
/*
* Our interface with the rest of the world
*/
static void alauda_info_destructor(void *extra)
{
struct alauda_info *info = (struct alauda_info *) extra;
int port;
if (!info)
return;
for (port = 0; port < 2; port++) {
struct alauda_media_info *media_info = &info->port[port];
alauda_free_maps(media_info);
kfree(media_info->lba_to_pba);
kfree(media_info->pba_to_lba);
}
}
/*
 * Initialize alauda_info struct and find the data-write endpoint
 *
 * Also primes the ECC lookup tables. Returns 0 on success or -ENOMEM.
 */
static int init_alauda(struct us_data *us)
{
	struct alauda_info *info;
	struct usb_host_interface *altsetting = us->pusb_intf->cur_altsetting;

	nand_init_ecc();

	us->extra = kzalloc(sizeof(struct alauda_info), GFP_NOIO);
	if (!us->extra)
		return -ENOMEM;

	info = (struct alauda_info *) us->extra;
	/* freed via this destructor by the usb-storage core */
	us->extra_destructor = alauda_info_destructor;

	/* block data is written through the first endpoint's bulk-out pipe */
	info->wr_ep = usb_sndbulkpipe(us->pusb_dev,
		altsetting->endpoint[0].desc.bEndpointAddress
		& USB_ENDPOINT_NUMBER_MASK);

	return 0;
}
/*
 * Transport entry point: translate the SCSI command in @srb into
 * Alauda vendor operations. Handles INQUIRY, TEST_UNIT_READY,
 * READ_CAPACITY, READ_10, WRITE_10, REQUEST_SENSE and
 * ALLOW_MEDIUM_REMOVAL; anything else fails with ILLEGAL REQUEST
 * sense data.
 */
static int alauda_transport(struct scsi_cmnd *srb, struct us_data *us)
{
	int rc;
	struct alauda_info *info = (struct alauda_info *) us->extra;
	unsigned char *ptr = us->iobuf;
	/* canned INQUIRY header: direct-access, removable, SCSI-1 */
	static unsigned char inquiry_response[36] = {
		0x00, 0x80, 0x00, 0x01, 0x1F, 0x00, 0x00, 0x00
	};

	if (srb->cmnd[0] == INQUIRY) {
		usb_stor_dbg(us, "INQUIRY - Returning bogus response\n");
		memcpy(ptr, inquiry_response, sizeof(inquiry_response));
		fill_inquiry_response(us, ptr, 36);
		return USB_STOR_TRANSPORT_GOOD;
	}

	if (srb->cmnd[0] == TEST_UNIT_READY) {
		usb_stor_dbg(us, "TEST_UNIT_READY\n");
		return alauda_check_media(us);
	}

	if (srb->cmnd[0] == READ_CAPACITY) {
		unsigned int num_zones;
		unsigned long capacity;

		rc = alauda_check_media(us);
		if (rc != USB_STOR_TRANSPORT_GOOD)
			return rc;

		num_zones = MEDIA_INFO(us).capacity >> (MEDIA_INFO(us).zoneshift
			+ MEDIA_INFO(us).blockshift + MEDIA_INFO(us).pageshift);

		/* usable capacity excludes the spare blocks in each zone */
		capacity = num_zones * MEDIA_INFO(us).uzonesize
			* MEDIA_INFO(us).blocksize;

		/* Report capacity and page size */
		((__be32 *) ptr)[0] = cpu_to_be32(capacity - 1);
		((__be32 *) ptr)[1] = cpu_to_be32(512);

		usb_stor_set_xfer_buf(ptr, 8, srb);
		return USB_STOR_TRANSPORT_GOOD;
	}

	if (srb->cmnd[0] == READ_10) {
		unsigned int page, pages;

		rc = alauda_check_media(us);
		if (rc != USB_STOR_TRANSPORT_GOOD)
			return rc;

		/* 32-bit big-endian LBA from CDB bytes 2-5 */
		page = short_pack(srb->cmnd[3], srb->cmnd[2]);
		page <<= 16;
		page |= short_pack(srb->cmnd[5], srb->cmnd[4]);
		/* 16-bit transfer length from CDB bytes 7-8 */
		pages = short_pack(srb->cmnd[8], srb->cmnd[7]);

		usb_stor_dbg(us, "READ_10: page %d pagect %d\n", page, pages);

		return alauda_read_data(us, page, pages);
	}

	if (srb->cmnd[0] == WRITE_10) {
		unsigned int page, pages;

		rc = alauda_check_media(us);
		if (rc != USB_STOR_TRANSPORT_GOOD)
			return rc;

		/* 32-bit big-endian LBA from CDB bytes 2-5 */
		page = short_pack(srb->cmnd[3], srb->cmnd[2]);
		page <<= 16;
		page |= short_pack(srb->cmnd[5], srb->cmnd[4]);
		/* 16-bit transfer length from CDB bytes 7-8 */
		pages = short_pack(srb->cmnd[8], srb->cmnd[7]);

		usb_stor_dbg(us, "WRITE_10: page %d pagect %d\n", page, pages);

		return alauda_write_data(us, page, pages);
	}

	if (srb->cmnd[0] == REQUEST_SENSE) {
		usb_stor_dbg(us, "REQUEST_SENSE\n");

		/* build fixed-format sense data from the saved sense state */
		memset(ptr, 0, 18);
		ptr[0] = 0xF0;
		ptr[2] = info->sense_key;
		ptr[7] = 11;
		ptr[12] = info->sense_asc;
		ptr[13] = info->sense_ascq;
		usb_stor_set_xfer_buf(ptr, 18, srb);

		return USB_STOR_TRANSPORT_GOOD;
	}

	if (srb->cmnd[0] == ALLOW_MEDIUM_REMOVAL) {
		/*
		 * sure. whatever. not like we can stop the user from popping
		 * the media out of the device (no locking doors, etc)
		 */
		return USB_STOR_TRANSPORT_GOOD;
	}

	usb_stor_dbg(us, "Gah! Unknown command: %d (0x%x)\n",
		     srb->cmnd[0], srb->cmnd[0]);
	info->sense_key = 0x05;		/* ILLEGAL REQUEST */
	info->sense_asc = 0x20;		/* invalid command operation code */
	info->sense_ascq = 0x00;
	return USB_STOR_TRANSPORT_FAILED;
}
static struct scsi_host_template alauda_host_template;
/*
 * USB probe entry point: let the usb-storage core allocate the host,
 * then wire in our transport callbacks. The device exposes two LUNs
 * (xD and SmartMedia slots), hence max_lun = 1.
 */
static int alauda_probe(struct usb_interface *intf,
			 const struct usb_device_id *id)
{
	struct us_data *us;
	int result;

	/* the id-table index selects the matching unusual_dev entry */
	result = usb_stor_probe1(&us, intf, id,
			(id - alauda_usb_ids) + alauda_unusual_dev_list,
			&alauda_host_template);
	if (result)
		return result;

	us->transport_name  = "Alauda Control/Bulk";
	us->transport = alauda_transport;
	us->transport_reset = usb_stor_Bulk_reset;
	us->max_lun = 1;

	result = usb_stor_probe2(us);
	return result;
}
/* USB driver glue; all callbacks except probe come from usb-storage core */
static struct usb_driver alauda_driver = {
	.name =		DRV_NAME,
	.probe =	alauda_probe,
	.disconnect =	usb_stor_disconnect,
	.suspend =	usb_stor_suspend,
	.resume =	usb_stor_resume,
	.reset_resume =	usb_stor_reset_resume,
	.pre_reset =	usb_stor_pre_reset,
	.post_reset =	usb_stor_post_reset,
	.id_table =	alauda_usb_ids,
	.soft_unbind =	1,
	.no_dynamic_id = 1,
};
module_usb_stor_driver(alauda_driver, alauda_host_template, DRV_NAME);
| linux-master | drivers/usb/storage/alauda.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* Driver for Datafab USB Compact Flash reader
*
* datafab driver v0.1:
*
* First release
*
* Current development and maintenance by:
* (c) 2000 Jimmie Mayfield ([email protected])
*
* Many thanks to Robert Baruch for the SanDisk SmartMedia reader driver
* which I used as a template for this driver.
*
* Some bugfixes and scatter-gather code by Gregory P. Smith
* ([email protected])
*
* Fix for media change by Joerg Schneider ([email protected])
*
* Other contributors:
* (c) 2002 Alan Stern <[email protected]>
*/
/*
* This driver attempts to support USB CompactFlash reader/writer devices
* based on Datafab USB-to-ATA chips. It was specifically developed for the
* Datafab MDCFE-B USB CompactFlash reader but has since been found to work
* with a variety of Datafab-based devices from a number of manufacturers.
* I've received a report of this driver working with a Datafab-based
* SmartMedia device though please be aware that I'm personally unable to
* test SmartMedia support.
*
* This driver supports reading and writing. If you're truly paranoid,
* however, you can force the driver into a write-protected state by setting
* the WP enable bits in datafab_handle_mode_sense(). See the comments
* in that routine.
*/
#include <linux/errno.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include "usb.h"
#include "transport.h"
#include "protocol.h"
#include "debug.h"
#include "scsiglue.h"
#define DRV_NAME "ums-datafab"
MODULE_DESCRIPTION("Driver for Datafab USB Compact Flash reader");
MODULE_AUTHOR("Jimmie Mayfield <[email protected]>");
MODULE_LICENSE("GPL");
MODULE_IMPORT_NS(USB_STORAGE);
/* Per-device state, allocated on first command and kept in us_data->extra */
struct datafab_info {
	unsigned long   sectors;	/* total sector count */
	unsigned long   ssize;		/* sector size in bytes */
	signed char	lun;		/* used for dual-slot readers; -1 = not yet probed */

	/* sense data returned by REQUEST_SENSE, set when a command fails */
	unsigned char   sense_key;
	unsigned long   sense_asc;	/* additional sense code */
	unsigned long   sense_ascq;	/* additional sense code qualifier */
};

static int datafab_determine_lun(struct us_data *us,
				 struct datafab_info *info);
/*
 * The table of devices
 *
 * unusual_datafab.h is expanded twice: here the UNUSUAL_DEV macro keeps
 * only the VID/PID/bcdDevice match and the flags, building the
 * usb_device_id table used for driver matching.
 */
#define UNUSUAL_DEV(id_vendor, id_product, bcdDeviceMin, bcdDeviceMax, \
		    vendorName, productName, useProtocol, useTransport, \
		    initFunction, flags) \
{ USB_DEVICE_VER(id_vendor, id_product, bcdDeviceMin, bcdDeviceMax), \
  .driver_info = (flags) }

static struct usb_device_id datafab_usb_ids[] = {
#	include "unusual_datafab.h"
	{ }		/* Terminating entry */
};
MODULE_DEVICE_TABLE(usb, datafab_usb_ids);

#undef UNUSUAL_DEV
/*
 * The flags table
 *
 * Second expansion of unusual_datafab.h: this time the macro keeps the
 * name/protocol/transport fields, producing an array that is line-for-line
 * parallel with datafab_usb_ids[] above (indexed by the same offset).
 */
#define UNUSUAL_DEV(idVendor, idProduct, bcdDeviceMin, bcdDeviceMax, \
		    vendor_name, product_name, use_protocol, use_transport, \
		    init_function, Flags) \
{ \
	.vendorName = vendor_name,	\
	.productName = product_name,	\
	.useProtocol = use_protocol,	\
	.useTransport = use_transport,	\
	.initFunction = init_function,	\
}

static struct us_unusual_dev datafab_unusual_dev_list[] = {
#	include "unusual_datafab.h"
	{ }		/* Terminating entry */
};

#undef UNUSUAL_DEV
/*
 * Read 'len' bytes from the bulk-in endpoint into 'data'.
 * A zero-length request succeeds immediately without touching the bus.
 */
static inline int
datafab_bulk_read(struct us_data *us, unsigned char *data, unsigned int len) {
	if (len != 0) {
		usb_stor_dbg(us, "len = %d\n", len);
		return usb_stor_bulk_transfer_buf(us, us->recv_bulk_pipe,
						  data, len, NULL);
	}
	return USB_STOR_XFER_GOOD;
}
/*
 * Write 'len' bytes from 'data' to the bulk-out endpoint.
 * A zero-length request succeeds immediately without touching the bus.
 */
static inline int
datafab_bulk_write(struct us_data *us, unsigned char *data, unsigned int len) {
	if (len != 0) {
		usb_stor_dbg(us, "len = %d\n", len);
		return usb_stor_bulk_transfer_buf(us, us->send_bulk_pipe,
						  data, len, NULL);
	}
	return USB_STOR_XFER_GOOD;
}
/*
 * Read 'sectors' sectors starting at LBA 'sector' from the card and
 * copy them into the scatter-gather transfer buffer of us->srb,
 * bouncing through a <= 64 KB kmalloc'd buffer.
 *
 * Returns USB_STOR_TRANSPORT_GOOD or USB_STOR_TRANSPORT_ERROR.
 */
static int datafab_read_data(struct us_data *us,
			     struct datafab_info *info,
			     u32 sector,
			     u32 sectors)
{
	unsigned char *command = us->iobuf;
	unsigned char *buffer;
	unsigned char  thistime;
	unsigned int totallen, alloclen;
	int len, result;
	unsigned int sg_offset = 0;
	struct scatterlist *sg = NULL;

	// we're working in LBA mode.  according to the ATA spec,
	// we can support up to 28-bit addressing.  I don't know if Datafab
	// supports beyond 24-bit addressing.  It's kind of hard to test
	// since it requires > 8GB CF card.
	//
	if (sectors > 0x0FFFFFFF)
		return USB_STOR_TRANSPORT_ERROR;

	/* first access since attach/media change: find the active slot */
	if (info->lun == -1) {
		result = datafab_determine_lun(us, info);
		if (result != USB_STOR_TRANSPORT_GOOD)
			return result;
	}

	totallen = sectors * info->ssize;

	// Since we don't read more than 64 KB at a time, we have to create
	// a bounce buffer and move the data a piece at a time between the
	// bounce buffer and the actual transfer buffer.

	alloclen = min(totallen, 65536u);
	buffer = kmalloc(alloclen, GFP_NOIO);
	if (buffer == NULL)
		return USB_STOR_TRANSPORT_ERROR;

	do {
		// loop, never allocate or transfer more than 64k at once
		// (min(128k, 255*info->ssize) is the real limit)

		len = min(totallen, alloclen);
		thistime = (len / info->ssize) & 0xff;

		/*
		 * 8-byte command block: [1] sector count, [2..4] LBA low/mid/high,
		 * [5] LBA bits 24-27 plus 0xE0 (LBA mode) and the slot select
		 * in bit 4.  Byte 6 = 0x20 -- presumably the ATA READ SECTORS
		 * opcode (matches the IDENTIFY 0xEC usage elsewhere in this
		 * driver); byte 7 semantics are vendor-specific.
		 */
		command[0] = 0;
		command[1] = thistime;
		command[2] = sector & 0xFF;
		command[3] = (sector >> 8) & 0xFF;
		command[4] = (sector >> 16) & 0xFF;

		command[5] = 0xE0 + (info->lun << 4);
		command[5] |= (sector >> 24) & 0x0F;
		command[6] = 0x20;
		command[7] = 0x01;

		// send the read command
		result = datafab_bulk_write(us, command, 8);
		if (result != USB_STOR_XFER_GOOD)
			goto leave;

		// read the result
		result = datafab_bulk_read(us, buffer, len);
		if (result != USB_STOR_XFER_GOOD)
			goto leave;

		// Store the data in the transfer buffer
		usb_stor_access_xfer_buf(buffer, len, us->srb,
				&sg, &sg_offset, TO_XFER_BUF);

		sector += thistime;
		totallen -= len;
	} while (totallen > 0);

	kfree(buffer);
	return USB_STOR_TRANSPORT_GOOD;

 leave:
	kfree(buffer);
	return USB_STOR_TRANSPORT_ERROR;
}
/*
 * Write 'sectors' sectors starting at LBA 'sector' to the card, pulling
 * the data from the scatter-gather transfer buffer of us->srb through a
 * <= 64 KB bounce buffer.  A 2-byte status reply is read after each chunk.
 *
 * Returns USB_STOR_TRANSPORT_GOOD or USB_STOR_TRANSPORT_ERROR.
 */
static int datafab_write_data(struct us_data *us,
			      struct datafab_info *info,
			      u32 sector,
			      u32 sectors)
{
	unsigned char *command = us->iobuf;
	unsigned char *reply = us->iobuf;
	unsigned char *buffer;
	unsigned char thistime;
	unsigned int totallen, alloclen;
	int len, result;
	unsigned int sg_offset = 0;
	struct scatterlist *sg = NULL;

	// we're working in LBA mode.  according to the ATA spec,
	// we can support up to 28-bit addressing.  I don't know if Datafab
	// supports beyond 24-bit addressing.  It's kind of hard to test
	// since it requires > 8GB CF card.
	//
	if (sectors > 0x0FFFFFFF)
		return USB_STOR_TRANSPORT_ERROR;

	/* first access since attach/media change: find the active slot */
	if (info->lun == -1) {
		result = datafab_determine_lun(us, info);
		if (result != USB_STOR_TRANSPORT_GOOD)
			return result;
	}

	totallen = sectors * info->ssize;

	// Since we don't write more than 64 KB at a time, we have to create
	// a bounce buffer and move the data a piece at a time between the
	// bounce buffer and the actual transfer buffer.

	alloclen = min(totallen, 65536u);
	buffer = kmalloc(alloclen, GFP_NOIO);
	if (buffer == NULL)
		return USB_STOR_TRANSPORT_ERROR;

	do {
		// loop, never allocate or transfer more than 64k at once
		// (min(128k, 255*info->ssize) is the real limit)

		len = min(totallen, alloclen);
		thistime = (len / info->ssize) & 0xff;

		// Get the data from the transfer buffer
		usb_stor_access_xfer_buf(buffer, len, us->srb,
				&sg, &sg_offset, FROM_XFER_BUF);

		/*
		 * Same command layout as the read path; byte 6 = 0x30 --
		 * presumably the ATA WRITE SECTORS opcode.
		 */
		command[0] = 0;
		command[1] = thistime;
		command[2] = sector & 0xFF;
		command[3] = (sector >> 8) & 0xFF;
		command[4] = (sector >> 16) & 0xFF;

		command[5] = 0xE0 + (info->lun << 4);
		command[5] |= (sector >> 24) & 0x0F;
		command[6] = 0x30;
		command[7] = 0x02;

		// send the command
		result = datafab_bulk_write(us, command, 8);
		if (result != USB_STOR_XFER_GOOD)
			goto leave;

		// send the data
		result = datafab_bulk_write(us, buffer, len);
		if (result != USB_STOR_XFER_GOOD)
			goto leave;

		// read the result
		result = datafab_bulk_read(us, reply, 2);
		if (result != USB_STOR_XFER_GOOD)
			goto leave;

		/*
		 * NOTE(review): with '&&' a reply such as 0x51/0x00 is treated
		 * as success; if 0x50/0x00 is the only good status this should
		 * be '||' -- confirm against the device's status semantics
		 * before changing, since it alters the error path.
		 */
		if (reply[0] != 0x50 && reply[1] != 0) {
			usb_stor_dbg(us, "Gah! write return code: %02x %02x\n",
				     reply[0], reply[1]);
			goto leave;
		}

		sector += thistime;
		totallen -= len;
	} while (totallen > 0);

	kfree(buffer);
	return USB_STOR_TRANSPORT_GOOD;

 leave:
	kfree(buffer);
	return USB_STOR_TRANSPORT_ERROR;
}
/*
 * Discover which card slot (LUN) currently holds media by issuing an
 * IDENTIFY DEVICE to each slot in turn and seeing which one answers.
 * On success info->lun is set to 0 or 1.  Up to three rounds are tried.
 *
 * Returns USB_STOR_TRANSPORT_GOOD or USB_STOR_TRANSPORT_ERROR.
 */
static int datafab_determine_lun(struct us_data *us,
				 struct datafab_info *info)
{
	// Dual-slot readers can be thought of as dual-LUN devices.
	// We need to determine which card slot is being used.
	// We'll send an IDENTIFY DEVICE command and see which LUN responds...
	//
	// There might be a better way of doing this?
	//
	/* 0xec = ATA IDENTIFY DEVICE; byte 5 selects the slot (0xa0/0xb0) */
	static unsigned char scommand[8] = { 0, 1, 0, 0, 0, 0xa0, 0xec, 1 };
	unsigned char *command = us->iobuf;
	unsigned char *buf;
	int count = 0, rc;

	if (!info)
		return USB_STOR_TRANSPORT_ERROR;

	memcpy(command, scommand, 8);

	buf = kmalloc(512, GFP_NOIO);
	if (!buf)
		return USB_STOR_TRANSPORT_ERROR;

	usb_stor_dbg(us, "locating...\n");

	// we'll try 3 times before giving up...
	//
	while (count++ < 3) {
		/* probe slot 0 first */
		command[5] = 0xa0;

		rc = datafab_bulk_write(us, command, 8);
		if (rc != USB_STOR_XFER_GOOD) {
			rc = USB_STOR_TRANSPORT_ERROR;
			goto leave;
		}

		rc = datafab_bulk_read(us, buf, 512);
		if (rc == USB_STOR_XFER_GOOD) {
			info->lun = 0;
			rc = USB_STOR_TRANSPORT_GOOD;
			goto leave;
		}

		/* no answer; try slot 1 */
		command[5] = 0xb0;

		rc = datafab_bulk_write(us, command, 8);
		if (rc != USB_STOR_XFER_GOOD) {
			rc = USB_STOR_TRANSPORT_ERROR;
			goto leave;
		}

		rc = datafab_bulk_read(us, buf, 512);
		if (rc == USB_STOR_XFER_GOOD) {
			info->lun = 1;
			rc = USB_STOR_TRANSPORT_GOOD;
			goto leave;
		}

		/* brief pause before retrying both slots */
		msleep(20);
	}

	rc = USB_STOR_TRANSPORT_ERROR;

 leave:
	kfree(buf);
	return rc;
}
/*
 * Issue an IDENTIFY DEVICE to the active slot and extract the media
 * capacity (in sectors) into info->sectors.  Also used as a cheap
 * "is media present" test (TEST_UNIT_READY, START_STOP).
 *
 * Returns USB_STOR_TRANSPORT_GOOD or USB_STOR_TRANSPORT_ERROR.
 */
static int datafab_id_device(struct us_data *us,
			     struct datafab_info *info)
{
	// this is a variation of the ATA "IDENTIFY DEVICE" command...according
	// to the ATA spec, 'Sector Count' isn't used but the Windows driver
	// sets this bit so we do too...
	//
	static unsigned char scommand[8] = { 0, 1, 0, 0, 0, 0xa0, 0xec, 1 };
	unsigned char *command = us->iobuf;
	unsigned char *reply;
	int rc;

	if (!info)
		return USB_STOR_TRANSPORT_ERROR;

	if (info->lun == -1) {
		rc = datafab_determine_lun(us, info);
		if (rc != USB_STOR_TRANSPORT_GOOD)
			return rc;
	}

	memcpy(command, scommand, 8);

	reply = kmalloc(512, GFP_NOIO);
	if (!reply)
		return USB_STOR_TRANSPORT_ERROR;

	/* select the slot: 0xa0 for LUN 0, 0xb0 for LUN 1 */
	command[5] += (info->lun << 4);

	rc = datafab_bulk_write(us, command, 8);
	if (rc != USB_STOR_XFER_GOOD) {
		rc = USB_STOR_TRANSPORT_ERROR;
		goto leave;
	}

	// we'll go ahead and extract the media capacity while we're here...
	//
	rc = datafab_bulk_read(us, reply, 512);
	if (rc == USB_STOR_XFER_GOOD) {
		// capacity is at word offset 57-58
		//
		/*
		 * NOTE(review): IDENTIFY words 57-58 are the "current CHS
		 * capacity"; total LBA capacity lives in words 60-61 --
		 * confirm which one this hardware actually fills in.
		 */
		info->sectors = ((u32)(reply[117]) << 24) |
				((u32)(reply[116]) << 16) |
				((u32)(reply[115]) << 8)  |
				((u32)(reply[114])      );
		rc = USB_STOR_TRANSPORT_GOOD;
		goto leave;
	}

	rc = USB_STOR_TRANSPORT_ERROR;

 leave:
	kfree(reply);
	return rc;
}
/*
 * Fake a MODE SENSE (6 or 10) reply, since the device has no real SCSI
 * mode pages.  'sense_6' selects the 4-byte (MODE SENSE 6) vs 8-byte
 * (MODE SENSE 10) header.  Only pages 0x01, 0x08, 0x1B, 0x1C and the
 * all-pages code 0x3F are supported; anything else fails with
 * ILLEGAL REQUEST / INVALID FIELD IN CDB.
 */
static int datafab_handle_mode_sense(struct us_data *us,
				     struct scsi_cmnd * srb,
				     int sense_6)
{
	/* canned read-write error recovery page (0x01) */
	static unsigned char rw_err_page[12] = {
		0x1, 0xA, 0x21, 1, 0, 0, 0, 0, 1, 0, 0, 0
	};
	/* canned caching page (0x08) */
	static unsigned char cache_page[12] = {
		0x8, 0xA, 0x1, 0, 0, 0, 0, 0, 0, 0, 0, 0
	};
	/* canned removable block access capabilities page (0x1B) */
	static unsigned char rbac_page[12] = {
		0x1B, 0xA, 0, 0x81, 0, 0, 0, 0, 0, 0, 0, 0
	};
	/* canned timer & protect page (0x1C); trailing bytes default to 0 */
	static unsigned char timer_page[8] = {
		0x1C, 0x6, 0, 0, 0, 0
	};
	unsigned char pc, page_code;
	unsigned int i = 0;
	struct datafab_info *info = (struct datafab_info *) (us->extra);
	unsigned char *ptr = us->iobuf;

	// most of this stuff is just a hack to get things working.  the
	// datafab reader doesn't present a SCSI interface so we
	// fudge the SCSI commands...
	//
	pc = srb->cmnd[2] >> 6;
	page_code = srb->cmnd[2] & 0x3F;

	/* PC field is decoded for the debug log only; all values return
	 * the same canned data */
	switch (pc) {
	case 0x0:
		usb_stor_dbg(us, "Current values\n");
		break;
	case 0x1:
		usb_stor_dbg(us, "Changeable values\n");
		break;
	case 0x2:
		usb_stor_dbg(us, "Default values\n");
		break;
	case 0x3:
		usb_stor_dbg(us, "Saves values\n");
		break;
	}

	memset(ptr, 0, 8);
	if (sense_6) {
		ptr[2] = 0x00;		// WP enable: 0x80
		i = 4;			/* 4-byte MODE SENSE 6 header */
	} else {
		ptr[3] = 0x00;		// WP enable: 0x80
		i = 8;			/* 8-byte MODE SENSE 10 header */
	}

	switch (page_code) {
	default:
		// vendor-specific mode
		info->sense_key = 0x05;		/* ILLEGAL REQUEST */
		info->sense_asc = 0x24;		/* INVALID FIELD IN CDB */
		info->sense_ascq = 0x00;
		return USB_STOR_TRANSPORT_FAILED;

	case 0x1:
		memcpy(ptr + i, rw_err_page, sizeof(rw_err_page));
		i += sizeof(rw_err_page);
		break;

	case 0x8:
		memcpy(ptr + i, cache_page, sizeof(cache_page));
		i += sizeof(cache_page);
		break;

	case 0x1B:
		memcpy(ptr + i, rbac_page, sizeof(rbac_page));
		i += sizeof(rbac_page);
		break;

	case 0x1C:
		memcpy(ptr + i, timer_page, sizeof(timer_page));
		i += sizeof(timer_page);
		break;

	case 0x3F:		// retrieve all pages
		memcpy(ptr + i, timer_page, sizeof(timer_page));
		i += sizeof(timer_page);
		memcpy(ptr + i, rbac_page, sizeof(rbac_page));
		i += sizeof(rbac_page);
		memcpy(ptr + i, cache_page, sizeof(cache_page));
		i += sizeof(cache_page);
		memcpy(ptr + i, rw_err_page, sizeof(rw_err_page));
		i += sizeof(rw_err_page);
		break;
	}

	/* fill in the mode data length field of the header */
	if (sense_6)
		ptr[0] = i - 1;
	else
		((__be16 *) ptr)[0] = cpu_to_be16(i - 2);
	usb_stor_set_xfer_buf(ptr, i, srb);

	return USB_STOR_TRANSPORT_GOOD;
}
/*
 * Destructor for us_data->extra.  Intentionally empty: datafab_info
 * holds no pointers of its own, so the usb-storage core's kfree of
 * 'extra' is all the cleanup needed.
 */
static void datafab_info_destructor(void *extra)
{
	// this routine is a placeholder...
	// currently, we don't allocate any extra memory so we're okay
}
// Transport for the Datafab MDCFE-B
//
/*
 * Top-level command translator: decodes the SCSI CDB in srb and either
 * services it with canned data (INQUIRY, MODE SENSE, REQUEST_SENSE, ...)
 * or converts it into the vendor read/write protocol.  Allocates the
 * per-device datafab_info on first use.
 */
static int datafab_transport(struct scsi_cmnd *srb, struct us_data *us)
{
	struct datafab_info *info;
	int rc;
	unsigned long block, blocks;
	unsigned char *ptr = us->iobuf;
	/* canned INQUIRY header: direct-access, removable, SCSI-2-ish */
	static unsigned char inquiry_reply[8] = {
		0x00, 0x80, 0x00, 0x01, 0x1F, 0x00, 0x00, 0x00
	};

	if (!us->extra) {
		us->extra = kzalloc(sizeof(struct datafab_info), GFP_NOIO);
		if (!us->extra)
			return USB_STOR_TRANSPORT_ERROR;

		us->extra_destructor = datafab_info_destructor;
		((struct datafab_info *)us->extra)->lun = -1;	/* slot not probed yet */
	}

	info = (struct datafab_info *) (us->extra);

	if (srb->cmnd[0] == INQUIRY) {
		usb_stor_dbg(us, "INQUIRY - Returning bogus response\n");
		memcpy(ptr, inquiry_reply, sizeof(inquiry_reply));
		fill_inquiry_response(us, ptr, 36);
		return USB_STOR_TRANSPORT_GOOD;
	}

	if (srb->cmnd[0] == READ_CAPACITY) {
		info->ssize = 0x200;  // hard coded 512 byte sectors as per ATA spec
		rc = datafab_id_device(us, info);
		if (rc != USB_STOR_TRANSPORT_GOOD)
			return rc;

		usb_stor_dbg(us, "READ_CAPACITY:  %ld sectors, %ld bytes per sector\n",
			     info->sectors, info->ssize);

		// build the reply
		// we need the last sector, not the number of sectors
		((__be32 *) ptr)[0] = cpu_to_be32(info->sectors - 1);
		((__be32 *) ptr)[1] = cpu_to_be32(info->ssize);
		usb_stor_set_xfer_buf(ptr, 8, srb);

		return USB_STOR_TRANSPORT_GOOD;
	}

	if (srb->cmnd[0] == MODE_SELECT_10) {
		usb_stor_dbg(us, "Gah! MODE_SELECT_10\n");
		return USB_STOR_TRANSPORT_ERROR;
	}

	// don't bother implementing READ_6 or WRITE_6.
	//
	if (srb->cmnd[0] == READ_10) {
		/* CDB bytes 2-5: 32-bit LBA; bytes 7-8: 16-bit count */
		block = ((u32)(srb->cmnd[2]) << 24) | ((u32)(srb->cmnd[3]) << 16) |
			((u32)(srb->cmnd[4]) <<  8) | ((u32)(srb->cmnd[5]));

		blocks = ((u32)(srb->cmnd[7]) << 8) | ((u32)(srb->cmnd[8]));

		usb_stor_dbg(us, "READ_10: read block 0x%04lx  count %ld\n",
			     block, blocks);
		return datafab_read_data(us, info, block, blocks);
	}

	if (srb->cmnd[0] == READ_12) {
		// we'll probably never see a READ_12 but we'll do it anyway...
		//
		block = ((u32)(srb->cmnd[2]) << 24) | ((u32)(srb->cmnd[3]) << 16) |
			((u32)(srb->cmnd[4]) <<  8) | ((u32)(srb->cmnd[5]));

		blocks = ((u32)(srb->cmnd[6]) << 24) | ((u32)(srb->cmnd[7]) << 16) |
			 ((u32)(srb->cmnd[8]) <<  8) | ((u32)(srb->cmnd[9]));

		usb_stor_dbg(us, "READ_12: read block 0x%04lx  count %ld\n",
			     block, blocks);
		return datafab_read_data(us, info, block, blocks);
	}

	if (srb->cmnd[0] == WRITE_10) {
		block = ((u32)(srb->cmnd[2]) << 24) | ((u32)(srb->cmnd[3]) << 16) |
			((u32)(srb->cmnd[4]) <<  8) | ((u32)(srb->cmnd[5]));

		blocks = ((u32)(srb->cmnd[7]) << 8) | ((u32)(srb->cmnd[8]));

		usb_stor_dbg(us, "WRITE_10: write block 0x%04lx  count %ld\n",
			     block, blocks);
		return datafab_write_data(us, info, block, blocks);
	}

	if (srb->cmnd[0] == WRITE_12) {
		// we'll probably never see a WRITE_12 but we'll do it anyway...
		//
		block = ((u32)(srb->cmnd[2]) << 24) | ((u32)(srb->cmnd[3]) << 16) |
			((u32)(srb->cmnd[4]) <<  8) | ((u32)(srb->cmnd[5]));

		blocks = ((u32)(srb->cmnd[6]) << 24) | ((u32)(srb->cmnd[7]) << 16) |
			 ((u32)(srb->cmnd[8]) <<  8) | ((u32)(srb->cmnd[9]));

		usb_stor_dbg(us, "WRITE_12: write block 0x%04lx  count %ld\n",
			     block, blocks);
		return datafab_write_data(us, info, block, blocks);
	}

	if (srb->cmnd[0] == TEST_UNIT_READY) {
		usb_stor_dbg(us, "TEST_UNIT_READY\n");
		/* an IDENTIFY that succeeds means media is present */
		return datafab_id_device(us, info);
	}

	if (srb->cmnd[0] == REQUEST_SENSE) {
		usb_stor_dbg(us, "REQUEST_SENSE - Returning faked response\n");

		// this response is pretty bogus right now.  eventually if necessary
		// we can set the correct sense data.  so far though it hasn't been
		// necessary
		//
		memset(ptr, 0, 18);
		ptr[0] = 0xF0;			/* fixed format, valid */
		ptr[2] = info->sense_key;
		ptr[7] = 11;			/* additional sense length */
		ptr[12] = info->sense_asc;
		ptr[13] = info->sense_ascq;
		usb_stor_set_xfer_buf(ptr, 18, srb);

		return USB_STOR_TRANSPORT_GOOD;
	}

	if (srb->cmnd[0] == MODE_SENSE) {
		usb_stor_dbg(us, "MODE_SENSE_6 detected\n");
		return datafab_handle_mode_sense(us, srb, 1);
	}

	if (srb->cmnd[0] == MODE_SENSE_10) {
		usb_stor_dbg(us, "MODE_SENSE_10 detected\n");
		return datafab_handle_mode_sense(us, srb, 0);
	}

	if (srb->cmnd[0] == ALLOW_MEDIUM_REMOVAL) {
		/*
		 * sure.  whatever.  not like we can stop the user from
		 * popping the media out of the device (no locking doors, etc)
		 */
		return USB_STOR_TRANSPORT_GOOD;
	}

	if (srb->cmnd[0] == START_STOP) {
		/*
		 * this is used by sd.c'check_scsidisk_media_change to detect
		 * media change
		 */
		usb_stor_dbg(us, "START_STOP\n");
		/*
		 * the first datafab_id_device after a media change returns
		 * an error (determined experimentally)
		 */
		rc = datafab_id_device(us, info);
		if (rc == USB_STOR_TRANSPORT_GOOD) {
			info->sense_key = NO_SENSE;
			srb->result = SUCCESS;
		} else {
			info->sense_key = UNIT_ATTENTION;
			srb->result = SAM_STAT_CHECK_CONDITION;
		}
		return rc;
	}

	usb_stor_dbg(us, "Gah! Unknown command: %d (0x%x)\n",
		     srb->cmnd[0], srb->cmnd[0]);
	/* ILLEGAL REQUEST / INVALID COMMAND OPERATION CODE */
	info->sense_key = 0x05;
	info->sense_asc = 0x20;
	info->sense_ascq = 0x00;
	return USB_STOR_TRANSPORT_FAILED;
}
static struct scsi_host_template datafab_host_template;

/*
 * Bind a Datafab reader: perform the generic first-stage probe, plug in
 * the Datafab command translator, then finish SCSI host registration.
 */
static int datafab_probe(struct usb_interface *intf,
			 const struct usb_device_id *id)
{
	struct us_data *us;
	int err;

	err = usb_stor_probe1(&us, intf, id,
			(id - datafab_usb_ids) + datafab_unusual_dev_list,
			&datafab_host_template);
	if (err)
		return err;

	/* Datafab-specific transport settings */
	us->transport_name  = "Datafab Bulk-Only";
	us->transport = datafab_transport;
	us->transport_reset = usb_stor_Bulk_reset;
	us->max_lun = 1;	/* dual-slot readers expose two LUNs */

	return usb_stor_probe2(us);
}
/*
 * USB driver glue: all callbacks are the generic usb-storage ones;
 * only the probe routine and the ID table are Datafab-specific.
 */
static struct usb_driver datafab_driver = {
	.name =		DRV_NAME,
	.probe =	datafab_probe,
	.disconnect =	usb_stor_disconnect,
	.suspend =	usb_stor_suspend,
	.resume =	usb_stor_resume,
	.reset_resume =	usb_stor_reset_resume,
	.pre_reset =	usb_stor_pre_reset,
	.post_reset =	usb_stor_post_reset,
	.id_table =	datafab_usb_ids,
	.soft_unbind =	1,
	.no_dynamic_id = 1,	/* IDs come only from unusual_datafab.h */
};

module_usb_stor_driver(datafab_driver, datafab_host_template, DRV_NAME);
| linux-master | drivers/usb/storage/datafab.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* Driver for USB Mass Storage compliant devices
*
* Current development and maintenance by:
* (c) 1999-2003 Matthew Dharm ([email protected])
*
* Developed with the assistance of:
* (c) 2000 David L. Brown, Jr. ([email protected])
* (c) 2003-2009 Alan Stern ([email protected])
*
* Initial work by:
* (c) 1999 Michael Gee ([email protected])
*
* usb_device_id support by Adam J. Richter ([email protected]):
* (c) 2000 Yggdrasil Computing, Inc.
*
* This driver is based on the 'USB Mass Storage Class' document. This
* describes in detail the protocol used to communicate with such
* devices. Clearly, the designers had SCSI and ATAPI commands in
* mind when they created this document. The commands are all very
* similar to commands in the SCSI-II and ATAPI specifications.
*
* It is important to note that in a number of cases this class
* exhibits class-specific exemptions from the USB specification.
* Notably the usage of NAK, STALL and ACK differs from the norm, in
* that they are used to communicate wait, failed and OK on commands.
*
* Also, for certain devices, the interrupt endpoint is used to convey
* status of a command.
*/
#ifdef CONFIG_USB_STORAGE_DEBUG
#define DEBUG
#endif
#include <linux/sched.h>
#include <linux/errno.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/kthread.h>
#include <linux/mutex.h>
#include <linux/utsname.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include "usb.h"
#include "scsiglue.h"
#include "transport.h"
#include "protocol.h"
#include "debug.h"
#include "initializers.h"
#include "sierra_ms.h"
#include "option_ms.h"
#if IS_ENABLED(CONFIG_USB_UAS)
#include "uas-detect.h"
#endif
#define DRV_NAME "usb-storage"
/* Some informational data */
MODULE_AUTHOR("Matthew Dharm <[email protected]>");
MODULE_DESCRIPTION("USB Mass Storage driver for Linux");
MODULE_LICENSE("GPL");
static unsigned int delay_use = 1;
module_param(delay_use, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(delay_use, "seconds to delay before using a new device");
static char quirks[128];
module_param_string(quirks, quirks, sizeof(quirks), S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(quirks, "supplemental list of device IDs and their quirks");
/*
* The entries in this table correspond, line for line,
* with the entries in usb_storage_usb_ids[], defined in usual-tables.c.
*/
/*
*The vendor name should be kept at eight characters or less, and
* the product name should be kept at 16 characters or less. If a device
* has the US_FL_FIX_INQUIRY flag, then the vendor and product names
* normally generated by a device through the INQUIRY response will be
* taken from this list, and this is the reason for the above size
* restriction. However, if the flag is not present, then you
* are free to use as many characters as you like.
*/
#define UNUSUAL_DEV(idVendor, idProduct, bcdDeviceMin, bcdDeviceMax, \
vendor_name, product_name, use_protocol, use_transport, \
init_function, Flags) \
{ \
.vendorName = vendor_name, \
.productName = product_name, \
.useProtocol = use_protocol, \
.useTransport = use_transport, \
.initFunction = init_function, \
}
#define COMPLIANT_DEV UNUSUAL_DEV
#define USUAL_DEV(use_protocol, use_transport) \
{ \
.useProtocol = use_protocol, \
.useTransport = use_transport, \
}
#define UNUSUAL_VENDOR_INTF(idVendor, cl, sc, pr, \
vendor_name, product_name, use_protocol, use_transport, \
init_function, Flags) \
{ \
.vendorName = vendor_name, \
.productName = product_name, \
.useProtocol = use_protocol, \
.useTransport = use_transport, \
.initFunction = init_function, \
}
static const struct us_unusual_dev us_unusual_dev_list[] = {
# include "unusual_devs.h"
{ } /* Terminating entry */
};
static const struct us_unusual_dev for_dynamic_ids =
USUAL_DEV(USB_SC_SCSI, USB_PR_BULK);
#undef UNUSUAL_DEV
#undef COMPLIANT_DEV
#undef USUAL_DEV
#undef UNUSUAL_VENDOR_INTF
#ifdef CONFIG_LOCKDEP

static struct lock_class_key us_interface_key[USB_MAXINTERFACES];

/*
 * Assign the dev_mutex a lockdep class keyed by the interface's index
 * within the active configuration, so mutexes of different interfaces
 * on the same device are distinguishable to lockdep.
 */
static void us_set_lock_class(struct mutex *mutex,
		struct usb_interface *intf)
{
	struct usb_device *udev = interface_to_usbdev(intf);
	struct usb_host_config *config = udev->actconfig;
	int i;

	/* find this interface's index in the active configuration */
	for (i = 0; i < config->desc.bNumInterfaces; i++) {
		if (config->interface[i] == intf)
			break;
	}

	/* the interface must belong to its own device's configuration */
	BUG_ON(i == config->desc.bNumInterfaces);

	lockdep_set_class(mutex, &us_interface_key[i]);
}

#else

/* no-op when lockdep is compiled out */
static void us_set_lock_class(struct mutex *mutex,
		struct usb_interface *intf)
{
}

#endif
#ifdef CONFIG_PM	/* Minimal support for suspend and resume */

/*
 * Suspend callback: waits for any in-flight command to finish (by taking
 * dev_mutex) and gives the subdriver's hook a chance to quiesce hardware.
 */
int usb_stor_suspend(struct usb_interface *iface, pm_message_t message)
{
	struct us_data *us = usb_get_intfdata(iface);

	/* Wait until no command is running */
	mutex_lock(&us->dev_mutex);

	if (us->suspend_resume_hook)
		(us->suspend_resume_hook)(us, US_SUSPEND);

	/*
	 * When runtime PM is working, we'll set a flag to indicate
	 * whether we should autoresume when a SCSI request arrives.
	 */

	mutex_unlock(&us->dev_mutex);
	return 0;
}
EXPORT_SYMBOL_GPL(usb_stor_suspend);
/*
 * Resume callback: mirror of usb_stor_suspend; lets the subdriver's hook
 * re-initialize hardware under dev_mutex.
 */
int usb_stor_resume(struct usb_interface *iface)
{
	struct us_data *us = usb_get_intfdata(iface);

	mutex_lock(&us->dev_mutex);

	if (us->suspend_resume_hook)
		(us->suspend_resume_hook)(us, US_RESUME);

	mutex_unlock(&us->dev_mutex);
	return 0;
}
EXPORT_SYMBOL_GPL(usb_stor_resume);
/*
 * Reset-resume callback: the device was reset while suspended, so tell
 * the SCSI core the bus was reset.  No dev_mutex is needed here since
 * no commands can be running during reset-resume.
 */
int usb_stor_reset_resume(struct usb_interface *iface)
{
	struct us_data *us = usb_get_intfdata(iface);

	/* Report the reset to the SCSI core */
	usb_stor_report_bus_reset(us);

	/*
	 * If any of the subdrivers implemented a reinitialization scheme,
	 * this is where the callback would be invoked.
	 */
	return 0;
}
EXPORT_SYMBOL_GPL(usb_stor_reset_resume);

#endif /* CONFIG_PM */
/*
 * The next two routines get called just before and just after
 * a USB port reset, whether from this driver or a different one.
 */

/* Pre-reset: take dev_mutex (released in usb_stor_post_reset) so that
 * no command can run while the port is being reset. */
int usb_stor_pre_reset(struct usb_interface *iface)
{
	struct us_data *us = usb_get_intfdata(iface);

	/* Make sure no command runs during the reset */
	mutex_lock(&us->dev_mutex);
	return 0;
}
EXPORT_SYMBOL_GPL(usb_stor_pre_reset);
/* Post-reset: notify the SCSI core and drop the dev_mutex taken in
 * usb_stor_pre_reset, allowing commands to run again. */
int usb_stor_post_reset(struct usb_interface *iface)
{
	struct us_data *us = usb_get_intfdata(iface);

	/* Report the reset to the SCSI core */
	usb_stor_report_bus_reset(us);

	/*
	 * If any of the subdrivers implemented a reinitialization scheme,
	 * this is where the callback would be invoked.
	 */

	mutex_unlock(&us->dev_mutex);
	return 0;
}
EXPORT_SYMBOL_GPL(usb_stor_post_reset);
/*
 * fill_inquiry_response takes an unsigned char array (which must
 * be at least 36 characters) and populates the vendor name,
 * product name, and revision fields. Then the array is copied
 * into the SCSI command's response buffer (oddly enough
 * called request_buffer). data_len contains the length of the
 * data array, which again must be at least 36.
 *
 * The revision field (bytes 32-35) is synthesized as four ASCII digits
 * from the device's BCD-encoded bcdDevice value.
 */
void fill_inquiry_response(struct us_data *us, unsigned char *data,
		unsigned int data_len)
{
	if (data_len < 36) /* You lose. */
		return;

	/* blank-fill bytes 8-35 (vendor/product/revision) */
	memset(data+8, ' ', 28);
	if (data[0]&0x20) { /*
			     * USB device currently not connected. Return
			     * peripheral qualifier 001b ("...however, the
			     * physical device is not currently connected
			     * to this logical unit") and leave vendor and
			     * product identification empty. ("If the target
			     * does store some of the INQUIRY data on the
			     * device, it may return zeros or ASCII spaces
			     * (20h) in those fields until the data is
			     * available from the device.").
			     */
	} else {
		u16 bcdDevice = le16_to_cpu(us->pusb_dev->descriptor.bcdDevice);
		int n;

		n = strlen(us->unusual_dev->vendorName);
		memcpy(data+8, us->unusual_dev->vendorName, min(8, n));
		n = strlen(us->unusual_dev->productName);
		memcpy(data+16, us->unusual_dev->productName, min(16, n));

		data[32] = 0x30 + ((bcdDevice>>12) & 0x0F);
		data[33] = 0x30 + ((bcdDevice>>8) & 0x0F);
		data[34] = 0x30 + ((bcdDevice>>4) & 0x0F);
		data[35] = 0x30 + ((bcdDevice) & 0x0F);
	}

	usb_stor_set_xfer_buf(data, data_len, us->srb);
}
EXPORT_SYMBOL_GPL(fill_inquiry_response);
/*
 * Main worker thread: sleeps until queuecommand signals cmnd_ready, then
 * runs one SCSI command at a time under dev_mutex.  State transitions on
 * us->srb and the abort flags are protected by the host lock; scsi_done
 * is called only after all locks are dropped.  A NULL us->srb wakeup is
 * the signal to exit.
 */
static int usb_stor_control_thread(void * __us)
{
	struct us_data *us = (struct us_data *)__us;
	struct Scsi_Host *host = us_to_host(us);
	struct scsi_cmnd *srb;

	for (;;) {
		usb_stor_dbg(us, "*** thread sleeping\n");
		if (wait_for_completion_interruptible(&us->cmnd_ready))
			break;

		usb_stor_dbg(us, "*** thread awakened\n");

		/* lock the device pointers */
		mutex_lock(&(us->dev_mutex));

		/* lock access to the state */
		scsi_lock(host);

		/* When we are called with no command pending, we're done */
		srb = us->srb;
		if (srb == NULL) {
			scsi_unlock(host);
			mutex_unlock(&us->dev_mutex);
			usb_stor_dbg(us, "-- exiting\n");
			break;
		}

		/* has the command timed out *already* ? */
		if (test_bit(US_FLIDX_TIMED_OUT, &us->dflags)) {
			srb->result = DID_ABORT << 16;
			goto SkipForAbort;
		}

		scsi_unlock(host);

		/*
		 * reject the command if the direction indicator
		 * is UNKNOWN
		 */
		if (srb->sc_data_direction == DMA_BIDIRECTIONAL) {
			usb_stor_dbg(us, "UNKNOWN data direction\n");
			srb->result = DID_ERROR << 16;
		}

		/*
		 * reject if target != 0 or if LUN is higher than
		 * the maximum known LUN
		 */
		else if (srb->device->id &&
				!(us->fflags & US_FL_SCM_MULT_TARG)) {
			usb_stor_dbg(us, "Bad target number (%d:%llu)\n",
				     srb->device->id,
				     srb->device->lun);
			srb->result = DID_BAD_TARGET << 16;
		}

		else if (srb->device->lun > us->max_lun) {
			usb_stor_dbg(us, "Bad LUN (%d:%llu)\n",
				     srb->device->id,
				     srb->device->lun);
			srb->result = DID_BAD_TARGET << 16;
		}

		/*
		 * Handle those devices which need us to fake
		 * their inquiry data
		 */
		else if ((srb->cmnd[0] == INQUIRY) &&
			    (us->fflags & US_FL_FIX_INQUIRY)) {
			unsigned char data_ptr[36] = {
			    0x00, 0x80, 0x02, 0x02,
			    0x1F, 0x00, 0x00, 0x00};

			usb_stor_dbg(us, "Faking INQUIRY command\n");
			fill_inquiry_response(us, data_ptr, 36);
			srb->result = SAM_STAT_GOOD;
		}

		/* we've got a command, let's do it! */
		else {
			US_DEBUG(usb_stor_show_command(us, srb));
			us->proto_handler(srb, us);
			usb_mark_last_busy(us->pusb_dev);
		}

		/* lock access to the state */
		scsi_lock(host);

		/* was the command aborted? */
		if (srb->result == DID_ABORT << 16) {
SkipForAbort:
			usb_stor_dbg(us, "scsi command aborted\n");
			srb = NULL;	/* Don't call scsi_done() */
		}

		/*
		 * If an abort request was received we need to signal that
		 * the abort has finished.  The proper test for this is
		 * the TIMED_OUT flag, not srb->result == DID_ABORT, because
		 * the timeout might have occurred after the command had
		 * already completed with a different result code.
		 */
		if (test_bit(US_FLIDX_TIMED_OUT, &us->dflags)) {
			complete(&(us->notify));

			/* Allow USB transfers to resume */
			clear_bit(US_FLIDX_ABORTING, &us->dflags);
			clear_bit(US_FLIDX_TIMED_OUT, &us->dflags);
		}

		/* finished working on this command */
		us->srb = NULL;
		scsi_unlock(host);

		/* unlock the device pointers */
		mutex_unlock(&us->dev_mutex);

		/* now that the locks are released, notify the SCSI core */
		if (srb) {
			usb_stor_dbg(us, "scsi cmd done, result=0x%x\n",
					srb->result);
			scsi_done_direct(srb);
		}
	} /* for (;;) */

	/* Wait until we are told to stop */
	for (;;) {
		set_current_state(TASK_INTERRUPTIBLE);
		if (kthread_should_stop())
			break;
		schedule();
	}
	__set_current_state(TASK_RUNNING);
	return 0;
}
/***********************************************************************
 * Device probing and disconnecting
 ***********************************************************************/

/*
 * Associate our private data with the USB device/interface and allocate
 * the control-request structure and the DMA-coherent I/O buffer.
 * Returns 0 or -ENOMEM; partial allocations are cleaned up by the
 * caller's error path.
 */
static int associate_dev(struct us_data *us, struct usb_interface *intf)
{
	/* Fill in the device-related fields */
	us->pusb_dev = interface_to_usbdev(intf);
	us->pusb_intf = intf;
	us->ifnum = intf->cur_altsetting->desc.bInterfaceNumber;
	usb_stor_dbg(us, "Vendor: 0x%04x, Product: 0x%04x, Revision: 0x%04x\n",
		     le16_to_cpu(us->pusb_dev->descriptor.idVendor),
		     le16_to_cpu(us->pusb_dev->descriptor.idProduct),
		     le16_to_cpu(us->pusb_dev->descriptor.bcdDevice));
	usb_stor_dbg(us, "Interface Subclass: 0x%02x, Protocol: 0x%02x\n",
		     intf->cur_altsetting->desc.bInterfaceSubClass,
		     intf->cur_altsetting->desc.bInterfaceProtocol);

	/* Store our private data in the interface */
	usb_set_intfdata(intf, us);

	/* Allocate the control/setup and DMA-mapped buffers */
	us->cr = kmalloc(sizeof(*us->cr), GFP_KERNEL);
	if (!us->cr)
		return -ENOMEM;

	us->iobuf = usb_alloc_coherent(us->pusb_dev, US_IOBUF_SIZE,
			GFP_KERNEL, &us->iobuf_dma);
	if (!us->iobuf) {
		usb_stor_dbg(us, "I/O buffer allocation failed\n");
		return -ENOMEM;
	}
	return 0;
}
/* Works only for digits and letters, but small and fast:
 * setting bit 5 maps 'A'-'Z' onto 'a'-'z' and leaves digits unchanged */
#define TOLOWER(x) ((x) | 0x20)
/*
 * Adjust device flags based on the "quirks=" module parameter.
 *
 * The parameter is a comma-separated list of VID:PID:flag-letters
 * entries.  If an entry matches this device, every flag in 'mask' is
 * first cleared from *fflags and then the flags selected by the entry's
 * letters are set, so a user-supplied entry fully overrides the
 * built-in quirks for those flags.
 */
void usb_stor_adjust_quirks(struct usb_device *udev, unsigned long *fflags)
{
	char *p;
	u16 vid = le16_to_cpu(udev->descriptor.idVendor);
	u16 pid = le16_to_cpu(udev->descriptor.idProduct);
	unsigned f = 0;
	/*
	 * Every flag settable by a letter below must also appear in this
	 * mask; US_FL_NO_SAME (letter 'k') was previously missing, which
	 * made a built-in NO_SAME quirk impossible to override.
	 */
	unsigned int mask = (US_FL_SANE_SENSE | US_FL_BAD_SENSE |
			US_FL_FIX_CAPACITY | US_FL_IGNORE_UAS |
			US_FL_CAPACITY_HEURISTICS | US_FL_IGNORE_DEVICE |
			US_FL_NOT_LOCKABLE | US_FL_MAX_SECTORS_64 |
			US_FL_CAPACITY_OK | US_FL_IGNORE_RESIDUE |
			US_FL_SINGLE_LUN | US_FL_NO_WP_DETECT |
			US_FL_NO_READ_DISC_INFO | US_FL_NO_READ_CAPACITY_16 |
			US_FL_INITIAL_READ10 | US_FL_WRITE_CACHE |
			US_FL_NO_ATA_1X | US_FL_NO_REPORT_OPCODES |
			US_FL_MAX_SECTORS_240 | US_FL_NO_REPORT_LUNS |
			US_FL_ALWAYS_SYNC | US_FL_NO_SAME);

	/* scan the quirks list for an entry matching this VID:PID */
	p = quirks;
	while (*p) {
		/* Each entry consists of VID:PID:flags */
		if (vid == simple_strtoul(p, &p, 16) &&
				*p == ':' &&
				pid == simple_strtoul(p+1, &p, 16) &&
				*p == ':')
			break;

		/* Move forward to the next entry */
		while (*p) {
			if (*p++ == ',')
				break;
		}
	}
	if (!*p)	/* No match */
		return;

	/* Collect the flags */
	while (*++p && *p != ',') {
		switch (TOLOWER(*p)) {
		case 'a':
			f |= US_FL_SANE_SENSE;
			break;
		case 'b':
			f |= US_FL_BAD_SENSE;
			break;
		case 'c':
			f |= US_FL_FIX_CAPACITY;
			break;
		case 'd':
			f |= US_FL_NO_READ_DISC_INFO;
			break;
		case 'e':
			f |= US_FL_NO_READ_CAPACITY_16;
			break;
		case 'f':
			f |= US_FL_NO_REPORT_OPCODES;
			break;
		case 'g':
			f |= US_FL_MAX_SECTORS_240;
			break;
		case 'h':
			f |= US_FL_CAPACITY_HEURISTICS;
			break;
		case 'i':
			f |= US_FL_IGNORE_DEVICE;
			break;
		case 'j':
			f |= US_FL_NO_REPORT_LUNS;
			break;
		case 'k':
			f |= US_FL_NO_SAME;
			break;
		case 'l':
			f |= US_FL_NOT_LOCKABLE;
			break;
		case 'm':
			f |= US_FL_MAX_SECTORS_64;
			break;
		case 'n':
			f |= US_FL_INITIAL_READ10;
			break;
		case 'o':
			f |= US_FL_CAPACITY_OK;
			break;
		case 'p':
			f |= US_FL_WRITE_CACHE;
			break;
		case 'r':
			f |= US_FL_IGNORE_RESIDUE;
			break;
		case 's':
			f |= US_FL_SINGLE_LUN;
			break;
		case 't':
			f |= US_FL_NO_ATA_1X;
			break;
		case 'u':
			f |= US_FL_IGNORE_UAS;
			break;
		case 'w':
			f |= US_FL_NO_WP_DETECT;
			break;
		case 'y':
			f |= US_FL_ALWAYS_SYNC;
			break;
		/* Ignore unrecognized flag characters */
		}
	}
	*fflags = (*fflags & ~mask) | f;
}
EXPORT_SYMBOL_GPL(usb_stor_adjust_quirks);
/*
 * Get the unusual_devs entries and the string descriptors.
 *
 * Records the unusual_dev entry and resolves the effective subclass
 * (protocol handler) and protocol (transport) for the device: the
 * USB_SC_DEVICE / USB_PR_DEVICE placeholders mean "take the value from
 * the interface descriptor".  Also applies module-parameter quirks.
 *
 * Returns 0 on success or -ENODEV if the device is flagged to be ignored.
 */
static int get_device_info(struct us_data *us, const struct usb_device_id *id,
			   const struct us_unusual_dev *unusual_dev)
{
	struct usb_device *dev = us->pusb_dev;
	struct usb_interface_descriptor *idesc =
		&us->pusb_intf->cur_altsetting->desc;
	struct device *pdev = &us->pusb_intf->dev;
	/* Store the entries */
	us->unusual_dev = unusual_dev;
	/* USB_SC_DEVICE / USB_PR_DEVICE mean "use the descriptor's value" */
	us->subclass = (unusual_dev->useProtocol == USB_SC_DEVICE) ?
			idesc->bInterfaceSubClass :
			unusual_dev->useProtocol;
	us->protocol = (unusual_dev->useTransport == USB_PR_DEVICE) ?
			idesc->bInterfaceProtocol :
			unusual_dev->useTransport;
	us->fflags = id->driver_info;
	usb_stor_adjust_quirks(us->pusb_dev, &us->fflags);
	if (us->fflags & US_FL_IGNORE_DEVICE) {
		dev_info(pdev, "device ignored\n");
		return -ENODEV;
	}
	/*
	 * This flag is only needed when we're in high-speed, so let's
	 * disable it if we're in full-speed
	 */
	if (dev->speed != USB_SPEED_HIGH)
		us->fflags &= ~US_FL_GO_SLOW;
	if (us->fflags)
		dev_info(pdev, "Quirks match for vid %04x pid %04x: %lx\n",
			 le16_to_cpu(dev->descriptor.idVendor),
			 le16_to_cpu(dev->descriptor.idProduct),
			 us->fflags);
	/*
	 * Log a message if a non-generic unusual_dev entry contains an
	 * unnecessary subclass or protocol override.  This may stimulate
	 * reports from users that will help us remove unneeded entries
	 * from the unusual_devs.h table.
	 */
	if (id->idVendor || id->idProduct) {
		static const char *msgs[3] = {
			"an unneeded SubClass entry",
			"an unneeded Protocol entry",
			"unneeded SubClass and Protocol entries"};
		struct usb_device_descriptor *ddesc = &dev->descriptor;
		int msg = -1;
		/* msg indexes msgs[]: 0 = subclass, 1 = protocol, 2 = both */
		if (unusual_dev->useProtocol != USB_SC_DEVICE &&
			us->subclass == idesc->bInterfaceSubClass)
			msg += 1;
		if (unusual_dev->useTransport != USB_PR_DEVICE &&
			us->protocol == idesc->bInterfaceProtocol)
			msg += 2;
		if (msg >= 0 && !(us->fflags & US_FL_NEED_OVERRIDE))
			dev_notice(pdev, "This device "
					"(%04x,%04x,%04x S %02x P %02x)"
					" has %s in unusual_devs.h (kernel"
					" %s)\n"
					"   Please send a copy of this message to "
					"<[email protected]> and "
					"<[email protected]>\n",
					le16_to_cpu(ddesc->idVendor),
					le16_to_cpu(ddesc->idProduct),
					le16_to_cpu(ddesc->bcdDevice),
					idesc->bInterfaceSubClass,
					idesc->bInterfaceProtocol,
					msgs[msg],
					utsname()->release);
	}
	return 0;
}
/*
 * Get the transport settings.
 *
 * Selects the transport routine, reset routine, name, and (for the
 * control/bulk variants) the max LUN implied by us->protocol.  An
 * unrecognized protocol leaves the fields untouched; usb_stor_probe2()
 * rejects such devices because us->transport stays NULL.
 */
static void get_transport(struct us_data *us)
{
	switch (us->protocol) {
	case USB_PR_CB:
	case USB_PR_CBI:
		/* CB and CBI share the same routines; only the name differs */
		us->transport_name = (us->protocol == USB_PR_CB) ?
				"Control/Bulk" : "Control/Bulk/Interrupt";
		us->transport = usb_stor_CB_transport;
		us->transport_reset = usb_stor_CB_reset;
		us->max_lun = 7;
		break;
	case USB_PR_BULK:
		us->transport_name = "Bulk";
		us->transport = usb_stor_Bulk_transport;
		us->transport_reset = usb_stor_Bulk_reset;
		break;
	}
}
/*
 * Get the protocol settings.
 *
 * Selects the SCSI-command handler and protocol name implied by
 * us->subclass.  The ATAPI-style subclasses (8020i, QIC-157, 8070i)
 * use the 12-byte-padded handler and support only LUN 0.  An
 * unrecognized subclass leaves us->proto_handler NULL, which
 * usb_stor_probe2() rejects.
 */
static void get_protocol(struct us_data *us)
{
	switch (us->subclass) {
	case USB_SC_RBC:
		us->protocol_name = "Reduced Block Commands (RBC)";
		us->proto_handler = usb_stor_transparent_scsi_command;
		break;
	case USB_SC_8020:
		us->protocol_name = "8020i";
		us->proto_handler = usb_stor_pad12_command;
		us->max_lun = 0;
		break;
	case USB_SC_QIC:
		us->protocol_name = "QIC-157";
		us->proto_handler = usb_stor_pad12_command;
		us->max_lun = 0;
		break;
	case USB_SC_8070:
		us->protocol_name = "8070i";
		us->proto_handler = usb_stor_pad12_command;
		us->max_lun = 0;
		break;
	case USB_SC_SCSI:
		us->protocol_name = "Transparent SCSI";
		us->proto_handler = usb_stor_transparent_scsi_command;
		break;
	case USB_SC_UFI:
		us->protocol_name = "Uniform Floppy Interface (UFI)";
		us->proto_handler = usb_stor_ufi_command;
		break;
	}
}
/*
 * Get the pipe settings.
 *
 * Locates the bulk-in/bulk-out endpoints (mandatory) and an optional
 * interrupt-in endpoint, then precomputes the pipe values stored in
 * @us.  The interrupt endpoint is required only for the CBI protocol.
 *
 * Returns 0 on success or the usb_find_*_endpoint error code.
 */
static int get_pipes(struct us_data *us)
{
	struct usb_host_interface *alt = us->pusb_intf->cur_altsetting;
	struct usb_endpoint_descriptor *ep_in;
	struct usb_endpoint_descriptor *ep_out;
	struct usb_endpoint_descriptor *ep_int;
	int res;
	/*
	 * Find the first endpoint of each type we need.
	 * We are expecting a minimum of 2 endpoints - in and out (bulk).
	 * An optional interrupt-in is OK (necessary for CBI protocol).
	 * We will ignore any others.
	 */
	res = usb_find_common_endpoints(alt, &ep_in, &ep_out, NULL, NULL);
	if (res) {
		usb_stor_dbg(us, "bulk endpoints not found\n");
		return res;
	}
	/* Missing interrupt endpoint is fatal only for CBI */
	res = usb_find_int_in_endpoint(alt, &ep_int);
	if (res && us->protocol == USB_PR_CBI) {
		usb_stor_dbg(us, "interrupt endpoint not found\n");
		return res;
	}
	/* Calculate and store the pipe values */
	us->send_ctrl_pipe = usb_sndctrlpipe(us->pusb_dev, 0);
	us->recv_ctrl_pipe = usb_rcvctrlpipe(us->pusb_dev, 0);
	us->send_bulk_pipe = usb_sndbulkpipe(us->pusb_dev,
		usb_endpoint_num(ep_out));
	us->recv_bulk_pipe = usb_rcvbulkpipe(us->pusb_dev,
		usb_endpoint_num(ep_in));
	if (ep_int) {
		us->recv_intr_pipe = usb_rcvintpipe(us->pusb_dev,
			usb_endpoint_num(ep_int));
		us->ep_bInterval = ep_int->bInterval;
	}
	return 0;
}
/*
 * Initialize all the dynamic resources we need.
 *
 * Allocates the scratch URB, runs the subdriver's init function (if
 * any), and starts the control thread.  On failure the caller unwinds
 * through usb_stor_release_resources(), which frees the URB and stops
 * the thread, so nothing is cleaned up here.
 *
 * Returns 0 on success or a negative error code.
 */
static int usb_stor_acquire_resources(struct us_data *us)
{
	int p;
	struct task_struct *th;
	us->current_urb = usb_alloc_urb(0, GFP_KERNEL);
	if (!us->current_urb)
		return -ENOMEM;
	/*
	 * Just before we start our control thread, initialize
	 * the device if it needs initialization
	 */
	if (us->unusual_dev->initFunction) {
		p = us->unusual_dev->initFunction(us);
		if (p)
			return p;
	}
	/* Start up our control thread */
	th = kthread_run(usb_stor_control_thread, us, "usb-storage");
	if (IS_ERR(th)) {
		dev_warn(&us->pusb_intf->dev,
				"Unable to start control thread\n");
		return PTR_ERR(th);
	}
	us->ctl_thread = th;
	return 0;
}
/*
 * Release all our dynamic resources.
 *
 * Stops the control thread (waking it first via cmnd_ready so it can
 * observe kthread_should_stop()), runs the subdriver destructor, and
 * frees the per-device extra data and scratch URB.  Safe to call even
 * if usb_stor_acquire_resources() failed part-way (NULL thread/URB are
 * handled).
 */
static void usb_stor_release_resources(struct us_data *us)
{
	/*
	 * Tell the control thread to exit.  The SCSI host must
	 * already have been removed and the DISCONNECTING flag set
	 * so that we won't accept any more commands.
	 */
	usb_stor_dbg(us, "-- sending exit command to thread\n");
	complete(&us->cmnd_ready);
	if (us->ctl_thread)
		kthread_stop(us->ctl_thread);
	/* Call the destructor routine, if it exists */
	if (us->extra_destructor) {
		usb_stor_dbg(us, "-- calling extra_destructor()\n");
		us->extra_destructor(us->extra);
	}
	/* Free the extra data and the URB */
	kfree(us->extra);
	usb_free_urb(us->current_urb);
}
/*
 * Dissociate from the USB device.
 *
 * Inverse of associate_dev(): frees the control-request and
 * DMA-coherent buffers and clears the interface's drvdata pointer.
 */
static void dissociate_dev(struct us_data *us)
{
	/* Free the buffers */
	kfree(us->cr);
	usb_free_coherent(us->pusb_dev, US_IOBUF_SIZE, us->iobuf, us->iobuf_dma);
	/* Remove our private data from the interface */
	usb_set_intfdata(us->pusb_intf, NULL);
}
/*
 * First stage of disconnect processing: stop SCSI scanning,
 * remove the host, and stop accepting new commands.
 *
 * Ordering matters here: the delayed scan work is cancelled before the
 * host is removed, and the DISCONNECTING flag is set under the host
 * lock only after scsi_remove_host() so the orderly shutdown can still
 * issue commands.
 */
static void quiesce_and_remove_host(struct us_data *us)
{
	struct Scsi_Host *host = us_to_host(us);
	/* If the device is really gone, cut short reset delays */
	if (us->pusb_dev->state == USB_STATE_NOTATTACHED) {
		set_bit(US_FLIDX_DISCONNECTING, &us->dflags);
		wake_up(&us->delay_wait);
	}
	/*
	 * Prevent SCSI scanning (if it hasn't started yet)
	 * or wait for the SCSI-scanning routine to stop.
	 */
	cancel_delayed_work_sync(&us->scan_dwork);
	/* Balance autopm calls if scanning was cancelled */
	if (test_bit(US_FLIDX_SCAN_PENDING, &us->dflags))
		usb_autopm_put_interface_no_suspend(us->pusb_intf);
	/*
	 * Removing the host will perform an orderly shutdown: caches
	 * synchronized, disks spun down, etc.
	 */
	scsi_remove_host(host);
	/*
	 * Prevent any new commands from being accepted and cut short
	 * reset delays.
	 */
	scsi_lock(host);
	set_bit(US_FLIDX_DISCONNECTING, &us->dflags);
	scsi_unlock(host);
	wake_up(&us->delay_wait);
}
/*
 * Second stage of disconnect processing: deallocate all resources.
 *
 * Also used as the error-unwind path by usb_stor_probe1/probe2.
 */
static void release_everything(struct us_data *us)
{
	usb_stor_release_resources(us);
	dissociate_dev(us);
	/*
	 * Drop our reference to the host; the SCSI core will free it
	 * (and "us" along with it) when the refcount becomes 0.
	 */
	scsi_host_put(us_to_host(us));
}
/*
 * Delayed-work routine to carry out SCSI-device scanning.
 *
 * For Bulk-Only devices it first queries the max LUN (unless quirks
 * forbid it), then scans the host.  Finishes by dropping the autopm
 * reference taken in usb_stor_probe2() and clearing SCAN_PENDING.
 */
static void usb_stor_scan_dwork(struct work_struct *work)
{
	struct us_data *us = container_of(work, struct us_data,
			scan_dwork.work);
	struct device *dev = &us->pusb_intf->dev;
	dev_dbg(dev, "starting scan\n");
	/* For bulk-only devices, determine the max LUN value */
	if (us->protocol == USB_PR_BULK &&
	    !(us->fflags & US_FL_SINGLE_LUN) &&
	    !(us->fflags & US_FL_SCM_MULT_TARG)) {
		/* dev_mutex serializes against command processing */
		mutex_lock(&us->dev_mutex);
		us->max_lun = usb_stor_Bulk_max_lun(us);
		/*
		 * Allow proper scanning of devices that present more than 8 LUNs
		 * While not affecting other devices that may need the previous
		 * behavior
		 */
		if (us->max_lun >= 8)
			us_to_host(us)->max_lun = us->max_lun+1;
		mutex_unlock(&us->dev_mutex);
	}
	scsi_scan_host(us_to_host(us));
	dev_dbg(dev, "scan complete\n");
	/* Should we unbind if no devices were detected? */
	usb_autopm_put_interface(us->pusb_intf);
	clear_bit(US_FLIDX_SCAN_PENDING, &us->dflags);
}
static unsigned int usb_stor_sg_tablesize(struct usb_interface *intf)
{
struct usb_device *usb_dev = interface_to_usbdev(intf);
if (usb_dev->bus->sg_tablesize) {
return usb_dev->bus->sg_tablesize;
}
return SG_ALL;
}
/*
 * First part of general USB mass-storage probing.
 *
 * Allocates the Scsi_Host (with the us_data embedded), associates it
 * with the USB device, resolves the unusual_devs entry, and fills in
 * the standard transport/protocol settings.  The caller may then
 * override transport or protocol before calling usb_stor_probe2().
 *
 * On success *@pus points at the new us_data and 0 is returned; on
 * failure everything allocated so far is released and a negative
 * error code is returned.
 */
int usb_stor_probe1(struct us_data **pus,
		struct usb_interface *intf,
		const struct usb_device_id *id,
		const struct us_unusual_dev *unusual_dev,
		const struct scsi_host_template *sht)
{
	struct Scsi_Host *host;
	struct us_data *us;
	int result;
	dev_info(&intf->dev, "USB Mass Storage device detected\n");
	/*
	 * Ask the SCSI layer to allocate a host structure, with extra
	 * space at the end for our private us_data structure.
	 */
	host = scsi_host_alloc(sht, sizeof(*us));
	if (!host) {
		dev_warn(&intf->dev, "Unable to allocate the scsi host\n");
		return -ENOMEM;
	}
	/*
	 * Allow 16-byte CDBs and thus > 2TB
	 */
	host->max_cmd_len = 16;
	host->sg_tablesize = usb_stor_sg_tablesize(intf);
	*pus = us = host_to_us(host);
	mutex_init(&(us->dev_mutex));
	us_set_lock_class(&us->dev_mutex, intf);
	init_completion(&us->cmnd_ready);
	init_completion(&(us->notify));
	init_waitqueue_head(&us->delay_wait);
	INIT_DELAYED_WORK(&us->scan_dwork, usb_stor_scan_dwork);
	/* Associate the us_data structure with the USB device */
	result = associate_dev(us, intf);
	if (result)
		goto BadDevice;
	/* Get the unusual_devs entries and the descriptors */
	result = get_device_info(us, id, unusual_dev);
	if (result)
		goto BadDevice;
	/* Get standard transport and protocol settings */
	get_transport(us);
	get_protocol(us);
	/*
	 * Give the caller a chance to fill in specialized transport
	 * or protocol settings.
	 */
	return 0;
BadDevice:
	usb_stor_dbg(us, "storage_probe() failed\n");
	release_everything(us);
	return result;
}
EXPORT_SYMBOL_GPL(usb_stor_probe1);
/*
 * Second part of general USB mass-storage probing.
 *
 * Validates that a transport and protocol handler were chosen,
 * configures target/LUN limits, finds the endpoints, starts the
 * control thread, registers the SCSI host, and queues the delayed
 * SCSI scan.  On failure the whole us_data (from probe1) is released.
 *
 * Returns 0 on success or a negative error code.
 */
int usb_stor_probe2(struct us_data *us)
{
	int result;
	struct device *dev = &us->pusb_intf->dev;
	/* Make sure the transport and protocol have both been set */
	if (!us->transport || !us->proto_handler) {
		result = -ENXIO;
		goto BadDevice;
	}
	usb_stor_dbg(us, "Transport: %s\n", us->transport_name);
	usb_stor_dbg(us, "Protocol: %s\n", us->protocol_name);
	if (us->fflags & US_FL_SCM_MULT_TARG) {
		/*
		 * SCM eUSCSI bridge devices can have different numbers
		 * of LUNs on different targets; allow all to be probed.
		 */
		us->max_lun = 7;
		/* The eUSCSI itself has ID 7, so avoid scanning that */
		us_to_host(us)->this_id = 7;
		/* max_id is 8 initially, so no need to set it here */
	} else {
		/* In the normal case there is only a single target */
		us_to_host(us)->max_id = 1;
		/*
		 * Like Windows, we won't store the LUN bits in CDB[1] for
		 * SCSI-2 devices using the Bulk-Only transport (even though
		 * this violates the SCSI spec).
		 */
		if (us->transport == usb_stor_Bulk_transport)
			us_to_host(us)->no_scsi2_lun_in_cdb = 1;
	}
	/* fix for single-lun devices */
	if (us->fflags & US_FL_SINGLE_LUN)
		us->max_lun = 0;
	/* Find the endpoints and calculate pipe values */
	result = get_pipes(us);
	if (result)
		goto BadDevice;
	/*
	 * If the device returns invalid data for the first READ(10)
	 * command, indicate the command should be retried.
	 */
	if (us->fflags & US_FL_INITIAL_READ10)
		set_bit(US_FLIDX_REDO_READ10, &us->dflags);
	/* Acquire all the other resources and add the host */
	result = usb_stor_acquire_resources(us);
	if (result)
		goto BadDevice;
	/* Autopm reference is dropped by the scan work when it completes */
	usb_autopm_get_interface_no_resume(us->pusb_intf);
	snprintf(us->scsi_name, sizeof(us->scsi_name), "usb-storage %s",
					dev_name(&us->pusb_intf->dev));
	result = scsi_add_host(us_to_host(us), dev);
	if (result) {
		dev_warn(dev,
				"Unable to add the scsi host\n");
		goto HostAddErr;
	}
	/* Submit the delayed_work for SCSI-device scanning */
	set_bit(US_FLIDX_SCAN_PENDING, &us->dflags);
	if (delay_use > 0)
		dev_dbg(dev, "waiting for device to settle before scanning\n");
	queue_delayed_work(system_freezable_wq, &us->scan_dwork,
			delay_use * HZ);
	return 0;
	/* We come here if there are any problems */
HostAddErr:
	usb_autopm_put_interface_no_suspend(us->pusb_intf);
BadDevice:
	usb_stor_dbg(us, "storage_probe() failed\n");
	release_everything(us);
	return result;
}
EXPORT_SYMBOL_GPL(usb_stor_probe2);
/*
 * Handle a USB mass-storage disconnect: quiesce and remove the SCSI
 * host, then release all resources (see the two-stage helpers above).
 */
void usb_stor_disconnect(struct usb_interface *intf)
{
	struct us_data *us = usb_get_intfdata(intf);
	quiesce_and_remove_host(us);
	release_everything(us);
}
EXPORT_SYMBOL_GPL(usb_stor_disconnect);
static struct scsi_host_template usb_stor_host_template;
/*
 * The main probe routine for standard devices.
 *
 * Rejects devices claimed by the UAS driver or by a subdriver module,
 * finds the matching unusual_devs entry (or the generic dynamic-ID
 * entry), and runs the two-stage probe.
 */
static int storage_probe(struct usb_interface *intf,
			 const struct usb_device_id *id)
{
	const struct us_unusual_dev *unusual_dev;
	struct us_data *us;
	int result;
	int size;
	/* If uas is enabled and this device can do uas then ignore it. */
#if IS_ENABLED(CONFIG_USB_UAS)
	if (uas_use_uas_driver(intf, id, NULL))
		return -ENXIO;
#endif
	/*
	 * If the device isn't standard (is handled by a subdriver
	 * module) then don't accept it.
	 */
	if (usb_usual_ignore_device(intf))
		return -ENXIO;
	/*
	 * Call the general probe procedures.
	 *
	 * The unusual_dev_list array is parallel to the usb_storage_usb_ids
	 * table, so we use the index of the id entry to find the
	 * corresponding unusual_devs entry.
	 */
	size = ARRAY_SIZE(us_unusual_dev_list);
	if (id >= usb_storage_usb_ids && id < usb_storage_usb_ids + size) {
		unusual_dev = (id - usb_storage_usb_ids) + us_unusual_dev_list;
	} else {
		/* id came from a dynamically added ID; use generic settings */
		unusual_dev = &for_dynamic_ids;
		dev_dbg(&intf->dev, "Use Bulk-Only transport with the Transparent SCSI protocol for dynamic id: 0x%04x 0x%04x\n",
			id->idVendor, id->idProduct);
	}
	result = usb_stor_probe1(&us, intf, id, unusual_dev,
				 &usb_stor_host_template);
	if (result)
		return result;
	/* No special transport or protocol settings in the main module */
	result = usb_stor_probe2(us);
	return result;
}
/* USB driver glue: probe/disconnect plus power-management callbacks */
static struct usb_driver usb_storage_driver = {
	.name = DRV_NAME,
	.probe = storage_probe,
	.disconnect = usb_stor_disconnect,
	.suspend = usb_stor_suspend,
	.resume = usb_stor_resume,
	.reset_resume = usb_stor_reset_resume,
	.pre_reset = usb_stor_pre_reset,
	.post_reset = usb_stor_post_reset,
	.id_table = usb_storage_usb_ids,
	.supports_autosuspend = 1,
	/* soft_unbind lets disconnect() still talk to the device */
	.soft_unbind = 1,
};
module_usb_stor_driver(usb_storage_driver, usb_stor_host_template, DRV_NAME);
| linux-master | drivers/usb/storage/usb.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* Driver for SCM Microsystems (a.k.a. Shuttle) USB-ATAPI cable
*
* Current development and maintenance by:
* (c) 2000, 2001 Robert Baruch ([email protected])
* (c) 2004, 2005 Daniel Drake <[email protected]>
*
* Developed with the assistance of:
* (c) 2002 Alan Stern <[email protected]>
*
* Flash support based on earlier work by:
* (c) 2002 Thomas Kreiling <[email protected]>
*
* Many originally ATAPI devices were slightly modified to meet the USB
* market by using some kind of translation from ATAPI to USB on the host,
* and the peripheral would translate from USB back to ATAPI.
*
* SCM Microsystems (www.scmmicro.com) makes a device, sold to OEM's only,
* which does the USB-to-ATAPI conversion. By obtaining the data sheet on
* their device under nondisclosure agreement, I have been able to write
* this driver for Linux.
*
* The chip used in the device can also be used for EPP and ISA translation
* as well. This driver is only guaranteed to work with the ATAPI
* translation.
*
* See the Kconfig help text for a list of devices known to be supported by
* this driver.
*/
#include <linux/errno.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/cdrom.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include "usb.h"
#include "transport.h"
#include "protocol.h"
#include "debug.h"
#include "scsiglue.h"
#define DRV_NAME "ums-usbat"
MODULE_DESCRIPTION("Driver for SCM Microsystems (a.k.a. Shuttle) USB-ATAPI cable");
MODULE_AUTHOR("Daniel Drake <[email protected]>, Robert Baruch <[email protected]>");
MODULE_LICENSE("GPL");
MODULE_IMPORT_NS(USB_STORAGE);
/* Supported device types */
#define USBAT_DEV_HP8200 0x01
#define USBAT_DEV_FLASH 0x02
#define USBAT_EPP_PORT 0x10
#define USBAT_EPP_REGISTER 0x30
#define USBAT_ATA 0x40
#define USBAT_ISA 0x50
/* Commands (need to be logically OR'd with an access type */
#define USBAT_CMD_READ_REG 0x00
#define USBAT_CMD_WRITE_REG 0x01
#define USBAT_CMD_READ_BLOCK 0x02
#define USBAT_CMD_WRITE_BLOCK 0x03
#define USBAT_CMD_COND_READ_BLOCK 0x04
#define USBAT_CMD_COND_WRITE_BLOCK 0x05
#define USBAT_CMD_WRITE_REGS 0x07
/* Commands (these don't need an access type) */
#define USBAT_CMD_EXEC_CMD 0x80
#define USBAT_CMD_SET_FEAT 0x81
#define USBAT_CMD_UIO 0x82
/* Methods of accessing UIO register */
#define USBAT_UIO_READ 1
#define USBAT_UIO_WRITE 0
/* Qualifier bits */
#define USBAT_QUAL_FCQ 0x20 /* full compare */
#define USBAT_QUAL_ALQ 0x10 /* auto load subcount */
/* USBAT Flash Media status types */
#define USBAT_FLASH_MEDIA_NONE 0
#define USBAT_FLASH_MEDIA_CF 1
/* USBAT Flash Media change types */
#define USBAT_FLASH_MEDIA_SAME 0
#define USBAT_FLASH_MEDIA_CHANGED 1
/* USBAT ATA registers */
#define USBAT_ATA_DATA 0x10 /* read/write data (R/W) */
#define USBAT_ATA_FEATURES 0x11 /* set features (W) */
#define USBAT_ATA_ERROR 0x11 /* error (R) */
#define USBAT_ATA_SECCNT 0x12 /* sector count (R/W) */
#define USBAT_ATA_SECNUM 0x13 /* sector number (R/W) */
#define USBAT_ATA_LBA_ME 0x14 /* cylinder low (R/W) */
#define USBAT_ATA_LBA_HI 0x15 /* cylinder high (R/W) */
#define USBAT_ATA_DEVICE 0x16 /* head/device selection (R/W) */
#define USBAT_ATA_STATUS 0x17 /* device status (R) */
#define USBAT_ATA_CMD 0x17 /* device command (W) */
#define USBAT_ATA_ALTSTATUS 0x0E /* status (no clear IRQ) (R) */
/* USBAT User I/O Data registers */
#define USBAT_UIO_EPAD 0x80 /* Enable Peripheral Control Signals */
#define USBAT_UIO_CDT 0x40 /* Card Detect (Read Only) */
/* CDT = ACKD & !UI1 & !UI0 */
#define USBAT_UIO_1 0x20 /* I/O 1 */
#define USBAT_UIO_0 0x10 /* I/O 0 */
#define USBAT_UIO_EPP_ATA 0x08 /* 1=EPP mode, 0=ATA mode */
#define USBAT_UIO_UI1 0x04 /* Input 1 */
#define USBAT_UIO_UI0 0x02 /* Input 0 */
#define USBAT_UIO_INTR_ACK 0x01 /* Interrupt (ATA/ISA)/Acknowledge (EPP) */
/* USBAT User I/O Enable registers */
#define USBAT_UIO_DRVRST 0x80 /* Reset Peripheral */
#define USBAT_UIO_ACKD 0x40 /* Enable Card Detect */
#define USBAT_UIO_OE1 0x20 /* I/O 1 set=output/clr=input */
/* If ACKD=1, set OE1 to 1 also. */
#define USBAT_UIO_OE0 0x10 /* I/O 0 set=output/clr=input */
#define USBAT_UIO_ADPRST 0x01 /* Reset SCM chip */
/* USBAT Features */
#define USBAT_FEAT_ETEN 0x80 /* External trigger enable */
#define USBAT_FEAT_U1 0x08
#define USBAT_FEAT_U0 0x04
#define USBAT_FEAT_ET1 0x02
#define USBAT_FEAT_ET2 0x01
/* Per-device state, stored in us->extra */
struct usbat_info {
	int devicetype;		/* USBAT_DEV_HP8200 or USBAT_DEV_FLASH */
	/* Used for Flash readers only */
	unsigned long sectors;     /* total sector count */
	unsigned long ssize;       /* sector size in bytes */
	unsigned char sense_key;
	unsigned long sense_asc;   /* additional sense code */
	unsigned long sense_ascq;  /* additional sense code qualifier */
};
#define short_pack(LSB,MSB) ( ((u16)(LSB)) | ( ((u16)(MSB))<<8 ) )
#define LSB_of(s) ((s)&0xFF)
#define MSB_of(s) ((s)>>8)
static int transferred = 0;
static int usbat_flash_transport(struct scsi_cmnd * srb, struct us_data *us);
static int usbat_hp8200e_transport(struct scsi_cmnd *srb, struct us_data *us);
static int init_usbat_cd(struct us_data *us);
static int init_usbat_flash(struct us_data *us);
/*
* The table of devices
*/
#define UNUSUAL_DEV(id_vendor, id_product, bcdDeviceMin, bcdDeviceMax, \
vendorName, productName, useProtocol, useTransport, \
initFunction, flags) \
{ USB_DEVICE_VER(id_vendor, id_product, bcdDeviceMin, bcdDeviceMax), \
.driver_info = (flags) }
static struct usb_device_id usbat_usb_ids[] = {
# include "unusual_usbat.h"
{ } /* Terminating entry */
};
MODULE_DEVICE_TABLE(usb, usbat_usb_ids);
#undef UNUSUAL_DEV
/*
* The flags table
*/
#define UNUSUAL_DEV(idVendor, idProduct, bcdDeviceMin, bcdDeviceMax, \
vendor_name, product_name, use_protocol, use_transport, \
init_function, Flags) \
{ \
.vendorName = vendor_name, \
.productName = product_name, \
.useProtocol = use_protocol, \
.useTransport = use_transport, \
.initFunction = init_function, \
}
static struct us_unusual_dev usbat_unusual_dev_list[] = {
# include "unusual_usbat.h"
{ } /* Terminating entry */
};
#undef UNUSUAL_DEV
/*
 * Convenience function to produce an ATA read/write sectors command.
 * Use cmd=0x20 for read, cmd=0x30 for write.
 *
 * Packs @thistime (sector count), the 28-bit @sector number
 * (little-end first, high nibble OR'd into 0xE0), and the command
 * opcode into the 7-byte buffer @buf.
 */
static void usbat_pack_ata_sector_cmd(unsigned char *buf,
					unsigned char thistime,
					u32 sector, unsigned char cmd)
{
	buf[0] = 0;
	buf[1] = thistime;
	buf[2] = (unsigned char)(sector & 0xFF);	/* bits 7:0 */
	buf[3] = (unsigned char)((sector >> 8) & 0xFF);	/* bits 15:8 */
	buf[4] = (unsigned char)((sector >> 16) & 0xFF);	/* bits 23:16 */
	buf[5] = (unsigned char)(0xE0 | ((sector >> 24) & 0x0F)); /* bits 27:24 */
	buf[6] = cmd;
}
/*
 * Convenience function to get the device type (flash or hp8200):
 * returns the USBAT_DEV_* value stored in the usbat_info at us->extra.
 */
static int usbat_get_device_type(struct us_data *us)
{
	return ((struct usbat_info*)us->extra)->devicetype;
}
/*
 * Read a register from the device.
 *
 * Issues a vendor control-IN request; @access selects the register
 * space (e.g. USBAT_ATA), @reg the register, and the single result
 * byte is stored in *content.  Returns a USB_STOR_XFER_* code.
 */
static int usbat_read(struct us_data *us,
		      unsigned char access,
		      unsigned char reg,
		      unsigned char *content)
{
	return usb_stor_ctrl_transfer(us,
		us->recv_ctrl_pipe,
		access | USBAT_CMD_READ_REG,
		0xC0,		/* bmRequestType: IN, vendor, device */
		(u16)reg,
		0,
		content,
		1);
}
/*
 * Write to a register on the device.
 *
 * Issues a vendor control-OUT request with no data stage; the target
 * register and the value are packed together into wValue.  Returns a
 * USB_STOR_XFER_* code.
 */
static int usbat_write(struct us_data *us,
		       unsigned char access,
		       unsigned char reg,
		       unsigned char content)
{
	return usb_stor_ctrl_transfer(us,
		us->send_ctrl_pipe,
		access | USBAT_CMD_WRITE_REG,
		0x40,		/* bmRequestType: OUT, vendor, device */
		short_pack(reg, content),
		0,
		NULL,
		0);
}
/*
 * Convenience function to perform a bulk read.
 *
 * @use_sg: nonzero if @buf is a scatter-gather list rather than a
 * contiguous buffer.  A zero-length transfer trivially succeeds.
 * Returns a USB_STOR_XFER_* code.
 */
static int usbat_bulk_read(struct us_data *us,
			   void* buf,
			   unsigned int len,
			   int use_sg)
{
	if (len == 0)
		return USB_STOR_XFER_GOOD;
	usb_stor_dbg(us, "len = %d\n", len);
	return usb_stor_bulk_transfer_sg(us, us->recv_bulk_pipe, buf, len, use_sg, NULL);
}
/*
 * Convenience function to perform a bulk write.
 *
 * Mirror of usbat_bulk_read() on the bulk-out pipe.  A zero-length
 * transfer trivially succeeds.  Returns a USB_STOR_XFER_* code.
 */
static int usbat_bulk_write(struct us_data *us,
			    void* buf,
			    unsigned int len,
			    int use_sg)
{
	if (len == 0)
		return USB_STOR_XFER_GOOD;
	usb_stor_dbg(us, "len = %d\n", len);
	return usb_stor_bulk_transfer_sg(us, us->send_bulk_pipe, buf, len, use_sg, NULL);
}
/*
 * Some USBAT-specific commands can only be executed over a command transport
 * This transport allows one (len=8) or two (len=16) vendor-specific commands
 * to be executed.
 *
 * Returns a USB_STOR_XFER_* code.
 */
static int usbat_execute_command(struct us_data *us,
				 unsigned char *commands,
				 unsigned int len)
{
	return usb_stor_ctrl_transfer(us, us->send_ctrl_pipe,
				      USBAT_CMD_EXEC_CMD, 0x40, 0, 0,
				      commands, len);
}
/*
 * Read the ATA status register into *status.
 * Returns the USB_STOR_XFER_* code of the underlying register read.
 */
static int usbat_get_status(struct us_data *us, unsigned char *status)
{
	int rc;
	rc = usbat_read(us, USBAT_ATA, USBAT_ATA_STATUS, status);
	usb_stor_dbg(us, "0x%02X\n", *status);
	return rc;
}
/*
 * Check the device status.
 *
 * Reads the ATA status register (into the scratch iobuf) and maps it
 * to a transport result: transfer failure, error/check-condition
 * (value 0x51 excepted), or device fault all yield
 * USB_STOR_TRANSPORT_FAILED.
 */
static int usbat_check_status(struct us_data *us)
{
	unsigned char *reply = us->iobuf;

	if (usbat_get_status(us, reply) != USB_STOR_XFER_GOOD)
		return USB_STOR_TRANSPORT_FAILED;

	/* error/check condition (0x51 is ok) */
	if ((*reply & 0x01) && *reply != 0x51)
		return USB_STOR_TRANSPORT_FAILED;

	/* device fault */
	if (*reply & 0x20)
		return USB_STOR_TRANSPORT_FAILED;

	return USB_STOR_TRANSPORT_GOOD;
}
/*
 * Stores critical information in internal registers in preparation for the execution
 * of a conditional usbat_read_blocks or usbat_write_blocks call.
 *
 * Builds an 8-byte USBAT_CMD_SET_FEAT command in the scratch iobuf and
 * sends it over the command transport.  Returns a USB_STOR_XFER_* code.
 */
static int usbat_set_shuttle_features(struct us_data *us,
				      unsigned char external_trigger,
				      unsigned char epp_control,
				      unsigned char mask_byte,
				      unsigned char test_pattern,
				      unsigned char subcountH,
				      unsigned char subcountL)
{
	unsigned char *command = us->iobuf;
	command[0] = 0x40;
	command[1] = USBAT_CMD_SET_FEAT;
	/*
	 * The only bit relevant to ATA access is bit 6
	 * which defines 8 bit data access (set) or 16 bit (unset)
	 */
	command[2] = epp_control;
	/*
	 * If FCQ is set in the qualifier (defined in R/W cmd), then bits U0, U1,
	 * ET1 and ET2 define an external event to be checked for on event of a
	 * _read_blocks or _write_blocks operation. The read/write will not take
	 * place unless the defined trigger signal is active.
	 */
	command[3] = external_trigger;
	/*
	 * The resultant byte of the mask operation (see mask_byte) is compared for
	 * equivalence with this test pattern. If equal, the read/write will take
	 * place.
	 */
	command[4] = test_pattern;
	/*
	 * This value is logically ANDed with the status register field specified
	 * in the read/write command.
	 */
	command[5] = mask_byte;
	/*
	 * If ALQ is set in the qualifier, this field contains the address of the
	 * registers where the byte count should be read for transferring the data.
	 * If ALQ is not set, then this field contains the number of bytes to be
	 * transferred.
	 */
	command[6] = subcountL;
	command[7] = subcountH;
	return usbat_execute_command(us, command, 8);
}
/*
 * Block, waiting for an ATA device to become not busy or to report
 * an error condition.
 *
 * Polls the status register with a progressively longer sleep between
 * attempts (10ms -> 50ms -> 100ms -> 1s), for up to roughly @minutes
 * minutes beyond the initial ~2-minute ramp.  Returns a
 * USB_STOR_TRANSPORT_* code.
 */
static int usbat_wait_not_busy(struct us_data *us, int minutes)
{
	int i;
	int result;
	unsigned char *status = us->iobuf;
	/*
	 * Synchronizing cache on a CDR could take a heck of a long time,
	 * but probably not more than 10 minutes or so. On the other hand,
	 * doing a full blank on a CDRW at speed 1 will take about 75
	 * minutes!
	 */
	for (i=0; i<1200+minutes*60; i++) {
		result = usbat_get_status(us, status);
		if (result!=USB_STOR_XFER_GOOD)
			return USB_STOR_TRANSPORT_ERROR;
		if (*status & 0x01) { /* check condition */
			/*
			 * NOTE(review): the register read's result is
			 * deliberately ignored here -- presumably it serves
			 * to clear the condition; confirm before changing.
			 */
			result = usbat_read(us, USBAT_ATA, 0x10, status);
			return USB_STOR_TRANSPORT_FAILED;
		}
		if (*status & 0x20) /* device fault */
			return USB_STOR_TRANSPORT_FAILED;
		if ((*status & 0x80)==0x00) { /* not busy */
			usb_stor_dbg(us, "Waited not busy for %d steps\n", i);
			return USB_STOR_TRANSPORT_GOOD;
		}
		if (i<500)
			msleep(10); /* 5 seconds */
		else if (i<700)
			msleep(50); /* 10 seconds */
		else if (i<1200)
			msleep(100); /* 50 seconds */
		else
			msleep(1000); /* X minutes */
	}
	usb_stor_dbg(us, "Waited not busy for %d minutes, timing out\n",
		     minutes);
	return USB_STOR_TRANSPORT_FAILED;
}
/*
 * Read block data from the data register.
 *
 * Sends a USBAT_CMD_READ_BLOCK command targeting USBAT_ATA_DATA and
 * then bulk-reads @len bytes into @buf (scatter-gather if @use_sg).
 * A zero-length request trivially succeeds.  Returns a
 * USB_STOR_TRANSPORT_* code.
 */
static int usbat_read_block(struct us_data *us,
			    void* buf,
			    unsigned short len,
			    int use_sg)
{
	int result;
	unsigned char *command = us->iobuf;
	if (!len)
		return USB_STOR_TRANSPORT_GOOD;
	command[0] = 0xC0;
	command[1] = USBAT_ATA | USBAT_CMD_READ_BLOCK;
	command[2] = USBAT_ATA_DATA;
	command[3] = 0;
	command[4] = 0;
	command[5] = 0;
	command[6] = LSB_of(len);
	command[7] = MSB_of(len);
	result = usbat_execute_command(us, command, 8);
	if (result != USB_STOR_XFER_GOOD)
		return USB_STOR_TRANSPORT_ERROR;
	result = usbat_bulk_read(us, buf, len, use_sg);
	return (result == USB_STOR_XFER_GOOD ?
			USB_STOR_TRANSPORT_GOOD : USB_STOR_TRANSPORT_ERROR);
}
/*
 * Write block data via the data register.
 *
 * Sends a USBAT_CMD_WRITE_BLOCK command for the register space given
 * by @access, bulk-writes @len bytes from @buf, then waits (up to
 * @minutes) for the device to become not-busy.  A zero-length request
 * trivially succeeds.  Returns a USB_STOR_TRANSPORT_* code.
 */
static int usbat_write_block(struct us_data *us,
			     unsigned char access,
			     void* buf,
			     unsigned short len,
			     int minutes,
			     int use_sg)
{
	int result;
	unsigned char *command = us->iobuf;
	if (!len)
		return USB_STOR_TRANSPORT_GOOD;
	command[0] = 0x40;
	command[1] = access | USBAT_CMD_WRITE_BLOCK;
	command[2] = USBAT_ATA_DATA;
	command[3] = 0;
	command[4] = 0;
	command[5] = 0;
	command[6] = LSB_of(len);
	command[7] = MSB_of(len);
	result = usbat_execute_command(us, command, 8);
	if (result != USB_STOR_XFER_GOOD)
		return USB_STOR_TRANSPORT_ERROR;
	result = usbat_bulk_write(us, buf, len, use_sg);
	if (result != USB_STOR_XFER_GOOD)
		return USB_STOR_TRANSPORT_ERROR;
	return usbat_wait_not_busy(us, minutes);
}
/*
 * Process read and write requests
 *
 * Downloads a SCSI command (as register/value pairs) followed by a
 * conditional bulk data phase, retrying the data phase -- but not the
 * command phase -- when the device stalls.  Returns USB_STOR_TRANSPORT_*.
 *
 * @access:        USBAT access mode bits OR'd into the opcode byte
 * @registers:     register addresses for the command download (first pass)
 * @data_out:      values to write into @registers
 * @num_registers: number of register/value pairs
 * @data_reg/@status_reg/@timeout/@qualifier: parameters of the
 *                 conditional read/write-block command
 * @direction:     DMA_TO_DEVICE or DMA_FROM_DEVICE for the data phase
 * @buf/@len/@use_sg: data-phase buffer description
 * @minutes:       busy-wait limit handed to usbat_wait_not_busy()
 */
static int usbat_hp8200e_rw_block_test(struct us_data *us,
				       unsigned char access,
				       unsigned char *registers,
				       unsigned char *data_out,
				       unsigned short num_registers,
				       unsigned char data_reg,
				       unsigned char status_reg,
				       unsigned char timeout,
				       unsigned char qualifier,
				       int direction,
				       void *buf,
				       unsigned short len,
				       int use_sg,
				       int minutes)
{
	int result;
	/* Bulk pipe for the data phase, chosen by transfer direction */
	unsigned int pipe = (direction == DMA_FROM_DEVICE) ?
			us->recv_bulk_pipe : us->send_bulk_pipe;

	/*
	 * command/data/status all alias us->iobuf; they are used at
	 * disjoint times, never simultaneously.
	 */
	unsigned char *command = us->iobuf;
	int i, j;
	int cmdlen;
	unsigned char *data = us->iobuf;
	unsigned char *status = us->iobuf;

	/* The reg/value pairs must fit in the DMA-safe iobuf */
	BUG_ON(num_registers > US_IOBUF_SIZE/2);

	for (i=0; i<20; i++) {

		/*
		 * The first time we send the full command, which consists
		 * of downloading the SCSI command followed by downloading
		 * the data via a write-and-test. Any other time we only
		 * send the command to download the data -- the SCSI command
		 * is still 'active' in some sense in the device.
		 *
		 * We're only going to try sending the data 20 times. After
		 * that, we just return a failure.
		 */

		if (i==0) {
			cmdlen = 16;
			/*
			 * Write to multiple registers
			 * Not really sure the 0x07, 0x17, 0xfc, 0xe7 is
			 * necessary here, but that's what came out of the
			 * trace every single time.
			 */
			command[0] = 0x40;
			command[1] = access | USBAT_CMD_WRITE_REGS;
			command[2] = 0x07;
			command[3] = 0x17;
			command[4] = 0xFC;
			command[5] = 0xE7;
			command[6] = LSB_of(num_registers*2);
			command[7] = MSB_of(num_registers*2);
		} else
			cmdlen = 8;

		/* Conditionally read or write blocks */
		command[cmdlen-8] = (direction==DMA_TO_DEVICE ? 0x40 : 0xC0);
		command[cmdlen-7] = access |
				(direction==DMA_TO_DEVICE ?
				 USBAT_CMD_COND_WRITE_BLOCK : USBAT_CMD_COND_READ_BLOCK);
		command[cmdlen-6] = data_reg;
		command[cmdlen-5] = status_reg;
		command[cmdlen-4] = timeout;
		command[cmdlen-3] = qualifier;
		command[cmdlen-2] = LSB_of(len);
		command[cmdlen-1] = MSB_of(len);

		result = usbat_execute_command(us, command, cmdlen);
		if (result != USB_STOR_XFER_GOOD)
			return USB_STOR_TRANSPORT_ERROR;

		if (i==0) {
			/* First pass: send the reg/value pairs themselves */
			for (j=0; j<num_registers; j++) {
				data[j<<1] = registers[j];
				data[1+(j<<1)] = data_out[j];
			}

			result = usbat_bulk_write(us, data, num_registers*2, 0);
			if (result != USB_STOR_XFER_GOOD)
				return USB_STOR_TRANSPORT_ERROR;
		}

		/* The data phase proper */
		result = usb_stor_bulk_transfer_sg(us,
			pipe, buf, len, use_sg, NULL);

		/*
		 * If we get a stall on the bulk download, we'll retry
		 * the bulk download -- but not the SCSI command because
		 * in some sense the SCSI command is still 'active' and
		 * waiting for the data. Don't ask me why this should be;
		 * I'm only following what the Windoze driver did.
		 *
		 * Note that a stall for the test-and-read/write command means
		 * that the test failed. In this case we're testing to make
		 * sure that the device is error-free
		 * (i.e. bit 0 -- CHK -- of status is 0). The most likely
		 * hypothesis is that the USBAT chip somehow knows what
		 * the device will accept, but doesn't give the device any
		 * data until all data is received. Thus, the device would
		 * still be waiting for the first byte of data if a stall
		 * occurs, even if the stall implies that some data was
		 * transferred.
		 */

		if (result == USB_STOR_XFER_SHORT ||
		    result == USB_STOR_XFER_STALLED) {

			/*
			 * If we're reading and we stalled, then clear
			 * the bulk output pipe only the first time.
			 */

			if (direction==DMA_FROM_DEVICE && i==0) {
				if (usb_stor_clear_halt(us,
						us->send_bulk_pipe) < 0)
					return USB_STOR_TRANSPORT_ERROR;
			}

			/*
			 * Read status: is the device angry, or just busy?
			 */

			result = usbat_read(us, USBAT_ATA,
				direction==DMA_TO_DEVICE ?
					USBAT_ATA_STATUS : USBAT_ATA_ALTSTATUS,
				status);

			if (result!=USB_STOR_XFER_GOOD)
				return USB_STOR_TRANSPORT_ERROR;
			if (*status & 0x01) /* check condition */
				return USB_STOR_TRANSPORT_FAILED;
			if (*status & 0x20) /* device fault */
				return USB_STOR_TRANSPORT_FAILED;

			usb_stor_dbg(us, "Redoing %s\n",
				     direction == DMA_TO_DEVICE
				     ? "write" : "read");

		} else if (result != USB_STOR_XFER_GOOD)
			return USB_STOR_TRANSPORT_ERROR;
		else
			/* Data phase succeeded; wait for the drive */
			return usbat_wait_not_busy(us, minutes);

	}

	usb_stor_dbg(us, "Bummer! %s bulk data 20 times failed\n",
		     direction == DMA_TO_DEVICE ? "Writing" : "Reading");

	return USB_STOR_TRANSPORT_FAILED;
}
/*
 * Write to multiple registers:
 * Allows us to write specific data to any registers. The data to be written
 * gets packed in this sequence: reg0, data0, reg1, data1, ..., regN, dataN
 * which gets sent through bulk out.
 * Not designed for large transfers of data!
 */
static int usbat_multiple_write(struct us_data *us,
				unsigned char *registers,
				unsigned char *data_out,
				unsigned short num_registers)
{
	unsigned char *data = us->iobuf;
	unsigned char *command = us->iobuf;
	int rc;
	int idx;

	/* Register/value pairs must fit in the DMA-safe iobuf */
	BUG_ON(num_registers > US_IOBUF_SIZE/2);

	/* Write to multiple registers, ATA access */
	command[0] = 0x40;
	command[1] = USBAT_ATA | USBAT_CMD_WRITE_REGS;

	/* No relevance */
	command[2] = 0;
	command[3] = 0;
	command[4] = 0;
	command[5] = 0;

	/* Number of bytes to be transferred (incl. addresses and data) */
	command[6] = LSB_of(num_registers*2);
	command[7] = MSB_of(num_registers*2);

	/* The setup command */
	rc = usbat_execute_command(us, command, 8);
	if (rc != USB_STOR_XFER_GOOD)
		return USB_STOR_TRANSPORT_ERROR;

	/* Create the reg/data, reg/data sequence */
	for (idx = 0; idx < num_registers; idx++) {
		data[2*idx] = registers[idx];
		data[2*idx + 1] = data_out[idx];
	}

	/* Send the data */
	rc = usbat_bulk_write(us, data, num_registers*2, 0);
	if (rc != USB_STOR_XFER_GOOD)
		return USB_STOR_TRANSPORT_ERROR;

	/* The HP8200 needs a busy-wait after the register writes */
	if (usbat_get_device_type(us) == USBAT_DEV_HP8200)
		return usbat_wait_not_busy(us, 0);

	return USB_STOR_TRANSPORT_GOOD;
}
/*
 * Conditionally read blocks from device:
 * Allows us to read blocks from a specific data register, based upon the
 * condition that a status register can be successfully masked with a status
 * qualifier. If this condition is not initially met, the read will wait
 * up until a maximum amount of time has elapsed, as specified by timeout.
 * The read will start when the condition is met, otherwise the command aborts.
 *
 * The qualifier defined here is not the value that is masked, it defines
 * conditions for the write to take place. The actual masked qualifier (and
 * other related details) are defined beforehand with _set_shuttle_features().
 */
static int usbat_read_blocks(struct us_data *us,
			     void* buffer,
			     int len,
			     int use_sg)
{
	unsigned char *command = us->iobuf;
	int rc;

	/* Build the conditional multiple-block-read setup command */
	command[0] = 0xC0;
	command[1] = USBAT_ATA | USBAT_CMD_COND_READ_BLOCK;
	command[2] = USBAT_ATA_DATA;
	command[3] = USBAT_ATA_STATUS;
	command[4] = 0xFD; /* Timeout (ms); */
	command[5] = USBAT_QUAL_FCQ;
	command[6] = LSB_of(len);
	command[7] = MSB_of(len);

	/* Multiple block read setup command */
	rc = usbat_execute_command(us, command, 8);
	if (rc != USB_STOR_XFER_GOOD)
		return USB_STOR_TRANSPORT_FAILED;

	/* Read the blocks we just asked for */
	rc = usbat_bulk_read(us, buffer, len, use_sg);
	return (rc == USB_STOR_XFER_GOOD) ?
		USB_STOR_TRANSPORT_GOOD : USB_STOR_TRANSPORT_FAILED;
}
/*
 * Conditionally write blocks to device:
 * Allows us to write blocks to a specific data register, based upon the
 * condition that a status register can be successfully masked with a status
 * qualifier. If this condition is not initially met, the write will wait
 * up until a maximum amount of time has elapsed, as specified by timeout.
 * The read will start when the condition is met, otherwise the command aborts.
 *
 * The qualifier defined here is not the value that is masked, it defines
 * conditions for the write to take place. The actual masked qualifier (and
 * other related details) are defined beforehand with _set_shuttle_features().
 */
static int usbat_write_blocks(struct us_data *us,
			      void* buffer,
			      int len,
			      int use_sg)
{
	unsigned char *command = us->iobuf;
	int rc;

	/* Build the conditional multiple-block-write setup command */
	command[0] = 0x40;
	command[1] = USBAT_ATA | USBAT_CMD_COND_WRITE_BLOCK;
	command[2] = USBAT_ATA_DATA;
	command[3] = USBAT_ATA_STATUS;
	command[4] = 0xFD; /* Timeout (ms) */
	command[5] = USBAT_QUAL_FCQ;
	command[6] = LSB_of(len);
	command[7] = MSB_of(len);

	/* Multiple block write setup command */
	rc = usbat_execute_command(us, command, 8);
	if (rc != USB_STOR_XFER_GOOD)
		return USB_STOR_TRANSPORT_FAILED;

	/* Write the data */
	rc = usbat_bulk_write(us, buffer, len, use_sg);
	return (rc == USB_STOR_XFER_GOOD) ?
		USB_STOR_TRANSPORT_GOOD : USB_STOR_TRANSPORT_FAILED;
}
/*
 * Read the User IO register
 *
 * Issues a device-to-host control transfer (bRequestType 0xC0, request
 * USBAT_CMD_UIO) and stores the single flags byte in *data_flags.
 * Returns a USB_STOR_XFER_* code from usb_stor_ctrl_transfer().
 */
static int usbat_read_user_io(struct us_data *us, unsigned char *data_flags)
{
	int result;

	result = usb_stor_ctrl_transfer(us,
					us->recv_ctrl_pipe,
					USBAT_CMD_UIO,
					0xC0,		/* bRequestType: vendor, dev-to-host */
					0,		/* wValue */
					0,		/* wIndex */
					data_flags,	/* one-byte result buffer */
					USBAT_UIO_READ);

	/* Note: flags are logged even if the transfer failed */
	usb_stor_dbg(us, "UIO register reads %02X\n", *data_flags);

	return result;
}
/*
 * Write to the User IO register
 *
 * Issues a host-to-device control transfer (bRequestType 0x40, request
 * USBAT_CMD_UIO).  @enable_flags selects which UIO bits are driven and
 * @data_flags supplies their values; the two are packed into wValue.
 * Returns a USB_STOR_XFER_* code from usb_stor_ctrl_transfer().
 */
static int usbat_write_user_io(struct us_data *us,
			       unsigned char enable_flags,
			       unsigned char data_flags)
{
	return usb_stor_ctrl_transfer(us,
				      us->send_ctrl_pipe,
				      USBAT_CMD_UIO,
				      0x40,	/* bRequestType: vendor, host-to-dev */
				      short_pack(enable_flags, data_flags),
				      0,	/* wIndex */
				      NULL,	/* no data stage */
				      USBAT_UIO_WRITE);
}
/*
 * Reset the device
 * Often needed on media change.
 *
 * Pulses the peripheral reset line via the User IO register: first with
 * DRVRST asserted, then without, leaving the control signals enabled.
 * Returns a USB_STOR_TRANSPORT_* code.
 */
static int usbat_device_reset(struct us_data *us)
{
	/*
	 * Reset peripheral, enable peripheral control signals
	 * (bring reset signal up)
	 */
	if (usbat_write_user_io(us,
			USBAT_UIO_DRVRST | USBAT_UIO_OE1 | USBAT_UIO_OE0,
			USBAT_UIO_EPAD | USBAT_UIO_1) != USB_STOR_XFER_GOOD)
		return USB_STOR_TRANSPORT_ERROR;

	/*
	 * Enable peripheral control signals
	 * (bring reset signal down)
	 */
	if (usbat_write_user_io(us,
			USBAT_UIO_OE1 | USBAT_UIO_OE0,
			USBAT_UIO_EPAD | USBAT_UIO_1) != USB_STOR_XFER_GOOD)
		return USB_STOR_TRANSPORT_ERROR;

	return USB_STOR_TRANSPORT_GOOD;
}
/*
 * Enable card detect
 *
 * Turns on the card-detect acknowledge bit (along with the peripheral
 * control signals) via the User IO register.
 * Returns a USB_STOR_TRANSPORT_* code.
 */
static int usbat_device_enable_cdt(struct us_data *us)
{
	/* Enable peripheral control signals and card detect */
	if (usbat_write_user_io(us,
			USBAT_UIO_ACKD | USBAT_UIO_OE1 | USBAT_UIO_OE0,
			USBAT_UIO_EPAD | USBAT_UIO_1) != USB_STOR_XFER_GOOD)
		return USB_STOR_TRANSPORT_ERROR;

	return USB_STOR_TRANSPORT_GOOD;
}
/*
 * Determine if media is present.
 *
 * Inspects the previously-read User IO flags byte: the UI0 bit set
 * means no card is inserted.  Returns USBAT_FLASH_MEDIA_*.
 */
static int usbat_flash_check_media_present(struct us_data *us,
					   unsigned char *uio)
{
	if (!(*uio & USBAT_UIO_UI0))
		return USBAT_FLASH_MEDIA_CF;

	usb_stor_dbg(us, "no media detected\n");
	return USBAT_FLASH_MEDIA_NONE;
}
/*
 * Determine if media has changed since last operation
 *
 * Inspects the previously-read User IO flags byte: the UIO_0 bit set
 * indicates a media change.  Returns USBAT_FLASH_MEDIA_*.
 */
static int usbat_flash_check_media_changed(struct us_data *us,
					   unsigned char *uio)
{
	if (!(*uio & USBAT_UIO_0))
		return USBAT_FLASH_MEDIA_SAME;

	usb_stor_dbg(us, "media change detected\n");
	return USBAT_FLASH_MEDIA_CHANGED;
}
/*
 * Check for media change / no media and handle the situation appropriately
 *
 * Reads the User IO register and, on a problem, stores SCSI sense data
 * in @info for a later REQUEST_SENSE and fails the command.  On a media
 * change the device is reset and card detect is re-enabled.
 * Returns a USB_STOR_TRANSPORT_* code.
 */
static int usbat_flash_check_media(struct us_data *us,
				   struct usbat_info *info)
{
	int rc;
	unsigned char *uio = us->iobuf;

	rc = usbat_read_user_io(us, uio);
	if (rc != USB_STOR_XFER_GOOD)
		return USB_STOR_TRANSPORT_ERROR;

	/* Check for media existence */
	rc = usbat_flash_check_media_present(us, uio);
	if (rc == USBAT_FLASH_MEDIA_NONE) {
		/* Sense: NOT READY / medium not present */
		info->sense_key = 0x02;
		info->sense_asc = 0x3A;
		info->sense_ascq = 0x00;
		return USB_STOR_TRANSPORT_FAILED;
	}

	/* Check for media change */
	rc = usbat_flash_check_media_changed(us, uio);
	if (rc == USBAT_FLASH_MEDIA_CHANGED) {

		/* Reset and re-enable card detect */
		rc = usbat_device_reset(us);
		if (rc != USB_STOR_TRANSPORT_GOOD)
			return rc;
		rc = usbat_device_enable_cdt(us);
		if (rc != USB_STOR_TRANSPORT_GOOD)
			return rc;

		/* Give the hardware a moment to settle after the reset */
		msleep(50);

		/* Re-read (and thereby acknowledge) the UIO flags */
		rc = usbat_read_user_io(us, uio);
		if (rc != USB_STOR_XFER_GOOD)
			return USB_STOR_TRANSPORT_ERROR;

		/* Sense: UNIT ATTENTION / not-ready-to-ready transition */
		info->sense_key = UNIT_ATTENTION;
		info->sense_asc = 0x28;
		info->sense_ascq = 0x00;
		return USB_STOR_TRANSPORT_FAILED;
	}

	return USB_STOR_TRANSPORT_GOOD;
}
/*
 * Determine whether we are controlling a flash-based reader/writer,
 * or a HP8200-based CD drive.
 * Sets transport functions as appropriate.
 *
 * Resets the device, then issues ATA command 0xA1 (IDENTIFY PACKET
 * DEVICE) and classifies the device by how the status register reacts.
 * Stores the result in info->devicetype; returns USB_STOR_TRANSPORT_*.
 */
static int usbat_identify_device(struct us_data *us,
				 struct usbat_info *info)
{
	int rc;
	unsigned char status;

	if (!us || !info)
		return USB_STOR_TRANSPORT_ERROR;

	rc = usbat_device_reset(us);
	if (rc != USB_STOR_TRANSPORT_GOOD)
		return rc;
	/* Let the device come back up after the reset */
	msleep(500);

	/*
	 * In attempt to distinguish between HP CDRW's and Flash readers, we now
	 * execute the IDENTIFY PACKET DEVICE command. On ATA devices (i.e. flash
	 * readers), this command should fail with error. On ATAPI devices (i.e.
	 * CDROM drives), it should succeed.
	 */
	rc = usbat_write(us, USBAT_ATA, USBAT_ATA_CMD, 0xA1);
	if (rc != USB_STOR_XFER_GOOD)
		return USB_STOR_TRANSPORT_ERROR;

	rc = usbat_get_status(us, &status);
	if (rc != USB_STOR_XFER_GOOD)
		return USB_STOR_TRANSPORT_ERROR;

	/* Check for error bit, or if the command 'fell through' */
	if (status == 0xA1 || !(status & 0x01)) {
		/* Device is HP 8200 */
		usb_stor_dbg(us, "Detected HP8200 CDRW\n");
		info->devicetype = USBAT_DEV_HP8200;
	} else {
		/* Device is a CompactFlash reader/writer */
		usb_stor_dbg(us, "Detected Flash reader/writer\n");
		info->devicetype = USBAT_DEV_FLASH;
	}

	return USB_STOR_TRANSPORT_GOOD;
}
/*
 * Set the transport function based on the device type
 *
 * Uses @devicetype if one was forced, otherwise auto-detects via
 * usbat_identify_device().  Returns 0 on success or
 * USB_STOR_TRANSPORT_ERROR if the type remains unknown.
 */
static int usbat_set_transport(struct us_data *us,
			       struct usbat_info *info,
			       int devicetype)
{
	/* Honor a caller-supplied type first, then fall back to probing */
	if (!info->devicetype)
		info->devicetype = devicetype;
	if (!info->devicetype)
		usbat_identify_device(us, info);

	if (info->devicetype == USBAT_DEV_HP8200)
		us->transport = usbat_hp8200e_transport;
	else if (info->devicetype == USBAT_DEV_FLASH)
		us->transport = usbat_flash_transport;
	else
		return USB_STOR_TRANSPORT_ERROR;

	return 0;
}
/*
 * Read the media capacity
 *
 * Issues ATA IDENTIFY DEVICE (0xEC), reads back the 512-byte identify
 * data, and stores the 32-bit sector count (assembled little-endian from
 * bytes 114-117 of that data) into info->sectors.
 * Returns a USB_STOR_TRANSPORT_* code.
 */
static int usbat_flash_get_sector_count(struct us_data *us,
					struct usbat_info *info)
{
	/* Register/value pairs for usbat_multiple_write() below */
	unsigned char registers[3] = {
		USBAT_ATA_SECCNT,
		USBAT_ATA_DEVICE,
		USBAT_ATA_CMD,
	};
	unsigned char command[3] = { 0x01, 0xA0, 0xEC };
	unsigned char *reply;
	unsigned char status;
	int rc;

	if (!us || !info)
		return USB_STOR_TRANSPORT_ERROR;

	/* Heap bounce buffer for the identify data (iobuf is too small) */
	reply = kmalloc(512, GFP_NOIO);
	if (!reply)
		return USB_STOR_TRANSPORT_ERROR;

	/* ATA command : IDENTIFY DEVICE */
	rc = usbat_multiple_write(us, registers, command, 3);
	if (rc != USB_STOR_XFER_GOOD) {
		usb_stor_dbg(us, "Gah! identify_device failed\n");
		rc = USB_STOR_TRANSPORT_ERROR;
		goto leave;
	}

	/* Read device status */
	if (usbat_get_status(us, &status) != USB_STOR_XFER_GOOD) {
		rc = USB_STOR_TRANSPORT_ERROR;
		goto leave;
	}

	msleep(100);

	/* Read the device identification data */
	rc = usbat_read_block(us, reply, 512, 0);
	if (rc != USB_STOR_TRANSPORT_GOOD)
		goto leave;

	/* Sector count: little-endian u32 at identify-data bytes 114-117 */
	info->sectors = ((u32)(reply[117]) << 24) |
		((u32)(reply[116]) << 16) |
		((u32)(reply[115]) << 8) |
		((u32)(reply[114]) );

	rc = USB_STOR_TRANSPORT_GOOD;

 leave:
	kfree(reply);
	return rc;
}
/*
 * Read data from device
 *
 * Reads @sectors sectors starting at LBA @sector, in chunks of at most
 * 64 KB, staging each chunk through a bounce buffer before copying it
 * into the SCSI transfer buffer.  Returns a USB_STOR_TRANSPORT_* code.
 */
static int usbat_flash_read_data(struct us_data *us,
				 struct usbat_info *info,
				 u32 sector,
				 u32 sectors)
{
	/* ATA taskfile registers written for each READ SECTORS command */
	unsigned char registers[7] = {
		USBAT_ATA_FEATURES,
		USBAT_ATA_SECCNT,
		USBAT_ATA_SECNUM,
		USBAT_ATA_LBA_ME,
		USBAT_ATA_LBA_HI,
		USBAT_ATA_DEVICE,
		USBAT_ATA_STATUS,
	};

	unsigned char command[7];
	unsigned char *buffer;
	unsigned char  thistime;
	unsigned int totallen, alloclen;
	int len, result;
	unsigned int sg_offset = 0;
	struct scatterlist *sg = NULL;

	/* Fail early (with sense data set) if no/changed media */
	result = usbat_flash_check_media(us, info);
	if (result != USB_STOR_TRANSPORT_GOOD)
		return result;

	/*
	 * we're working in LBA mode.  according to the ATA spec,
	 * we can support up to 28-bit addressing.  I don't know if Jumpshot
	 * supports beyond 24-bit addressing.  It's kind of hard to test
	 * since it requires > 8GB CF card.
	 */
	if (sector > 0x0FFFFFFF)
		return USB_STOR_TRANSPORT_ERROR;

	totallen = sectors * info->ssize;

	/*
	 * Since we don't read more than 64 KB at a time, we have to create
	 * a bounce buffer and move the data a piece at a time between the
	 * bounce buffer and the actual transfer buffer.
	 */
	alloclen = min(totallen, 65536u);
	buffer = kmalloc(alloclen, GFP_NOIO);
	if (buffer == NULL)
		return USB_STOR_TRANSPORT_ERROR;

	do {
		/*
		 * loop, never allocate or transfer more than 64k at once
		 * (min(128k, 255*info->ssize) is the real limit)
		 */
		len = min(totallen, alloclen);
		thistime = (len / info->ssize) & 0xff;

		/* ATA command 0x20 (READ SECTORS) */
		usbat_pack_ata_sector_cmd(command, thistime, sector, 0x20);

		/* Write/execute ATA read command */
		result = usbat_multiple_write(us, registers, command, 7);
		if (result != USB_STOR_TRANSPORT_GOOD)
			goto leave;

		/* Read the data we just requested */
		result = usbat_read_blocks(us, buffer, len, 0);
		if (result != USB_STOR_TRANSPORT_GOOD)
			goto leave;

		usb_stor_dbg(us, "%d bytes\n", len);

		/* Store the data in the transfer buffer */
		usb_stor_access_xfer_buf(buffer, len, us->srb,
					 &sg, &sg_offset, TO_XFER_BUF);

		/* Advance to the next chunk */
		sector += thistime;
		totallen -= len;
	} while (totallen > 0);

	kfree(buffer);
	return USB_STOR_TRANSPORT_GOOD;

leave:
	kfree(buffer);
	return USB_STOR_TRANSPORT_ERROR;
}
/*
 * Write data to device
 *
 * Writes @sectors sectors starting at LBA @sector, in chunks of at most
 * 64 KB, staging each chunk from the SCSI transfer buffer through a
 * bounce buffer.  Returns a USB_STOR_TRANSPORT_* code.
 */
static int usbat_flash_write_data(struct us_data *us,
				  struct usbat_info *info,
				  u32 sector,
				  u32 sectors)
{
	/* ATA taskfile registers written for each WRITE SECTORS command */
	unsigned char registers[7] = {
		USBAT_ATA_FEATURES,
		USBAT_ATA_SECCNT,
		USBAT_ATA_SECNUM,
		USBAT_ATA_LBA_ME,
		USBAT_ATA_LBA_HI,
		USBAT_ATA_DEVICE,
		USBAT_ATA_STATUS,
	};

	unsigned char command[7];
	unsigned char *buffer;
	unsigned char  thistime;
	unsigned int totallen, alloclen;
	int len, result;
	unsigned int sg_offset = 0;
	struct scatterlist *sg = NULL;

	/* Fail early (with sense data set) if no/changed media */
	result = usbat_flash_check_media(us, info);
	if (result != USB_STOR_TRANSPORT_GOOD)
		return result;

	/*
	 * we're working in LBA mode.  according to the ATA spec,
	 * we can support up to 28-bit addressing.  I don't know if the device
	 * supports beyond 24-bit addressing.  It's kind of hard to test
	 * since it requires > 8GB media.
	 */
	if (sector > 0x0FFFFFFF)
		return USB_STOR_TRANSPORT_ERROR;

	totallen = sectors * info->ssize;

	/*
	 * Since we don't write more than 64 KB at a time, we have to create
	 * a bounce buffer and move the data a piece at a time between the
	 * bounce buffer and the actual transfer buffer.
	 */
	alloclen = min(totallen, 65536u);
	buffer = kmalloc(alloclen, GFP_NOIO);
	if (buffer == NULL)
		return USB_STOR_TRANSPORT_ERROR;

	do {
		/*
		 * loop, never allocate or transfer more than 64k at once
		 * (min(128k, 255*info->ssize) is the real limit)
		 */
		len = min(totallen, alloclen);
		thistime = (len / info->ssize) & 0xff;

		/* Get the data from the transfer buffer */
		usb_stor_access_xfer_buf(buffer, len, us->srb,
					 &sg, &sg_offset, FROM_XFER_BUF);

		/* ATA command 0x30 (WRITE SECTORS) */
		usbat_pack_ata_sector_cmd(command, thistime, sector, 0x30);

		/* Write/execute ATA write command */
		result = usbat_multiple_write(us, registers, command, 7);
		if (result != USB_STOR_TRANSPORT_GOOD)
			goto leave;

		/* Write the data */
		result = usbat_write_blocks(us, buffer, len, 0);
		if (result != USB_STOR_TRANSPORT_GOOD)
			goto leave;

		/* Advance to the next chunk */
		sector += thistime;
		totallen -= len;
	} while (totallen > 0);

	kfree(buffer);
	return result;

leave:
	kfree(buffer);
	return USB_STOR_TRANSPORT_ERROR;
}
/*
 * Squeeze a potentially huge (> 65535 byte) read10 command into
 * a little ( <= 65535 byte) ATAPI pipe
 *
 * Small requests go straight through; larger ones are split into
 * sector-aligned sub-reads staged through a bounce buffer, with the
 * SCSI CDB (embedded in @data at offset 7) patched for each sub-read.
 *
 * NOTE(review): 'transferred' is a file-scope running total (it is also
 * reset to 0 on TEST_UNIT_READY in usbat_hp8200e_transport) -- confirm
 * in the full file.
 */
static int usbat_hp8200e_handle_read10(struct us_data *us,
				       unsigned char *registers,
				       unsigned char *data,
				       struct scsi_cmnd *srb)
{
	int result = USB_STOR_TRANSPORT_GOOD;
	unsigned char *buffer;
	unsigned int len;
	unsigned int sector;
	unsigned int sg_offset = 0;
	struct scatterlist *sg = NULL;

	usb_stor_dbg(us, "transfersize %d\n", srb->transfersize);

	/* Requests under 64 KB fit in a single command -- no splitting */
	if (scsi_bufflen(srb) < 0x10000) {

		result = usbat_hp8200e_rw_block_test(us, USBAT_ATA,
						     registers, data, 19,
						     USBAT_ATA_DATA, USBAT_ATA_STATUS, 0xFD,
						     (USBAT_QUAL_FCQ | USBAT_QUAL_ALQ),
						     DMA_FROM_DEVICE,
						     scsi_sglist(srb),
						     scsi_bufflen(srb), scsi_sg_count(srb), 1);

		return result;
	}

	/*
	 * Since we're requesting more data than we can handle in
	 * a single read command (max is 64k-1), we will perform
	 * multiple reads, but each read must be in multiples of
	 * a sector.  Luckily the sector size is in srb->transfersize
	 * (see linux/drivers/scsi/sr.c).
	 */

	if (data[7+0] == GPCMD_READ_CD) {
		/* READ CD carries a 24-bit block count; derive sector size */
		len = short_pack(data[7+9], data[7+8]);
		len <<= 16;
		len |= data[7+7];
		usb_stor_dbg(us, "GPCMD_READ_CD: len %d\n", len);
		srb->transfersize = scsi_bufflen(srb)/len;
	}

	if (!srb->transfersize)  {
		srb->transfersize = 2048; /* A guess */
		usb_stor_dbg(us, "transfersize 0, forcing %d\n",
			     srb->transfersize);
	}

	/*
	 * Since we only read in one block at a time, we have to create
	 * a bounce buffer and move the data a piece at a time between the
	 * bounce buffer and the actual transfer buffer.
	 */
	/* Largest whole number of sectors that fits in 64k-1 bytes */
	len = (65535/srb->transfersize) * srb->transfersize;
	usb_stor_dbg(us, "Max read is %d bytes\n", len);
	len = min(len, scsi_bufflen(srb));
	buffer = kmalloc(len, GFP_NOIO);
	if (buffer == NULL) /* bloody hell! */
		return USB_STOR_TRANSPORT_FAILED;
	/* Starting LBA from CDB bytes 2-5 (big-endian) */
	sector = short_pack(data[7+3], data[7+2]);
	sector <<= 16;
	sector |= short_pack(data[7+5], data[7+4]);
	transferred = 0;

	while (transferred != scsi_bufflen(srb)) {

		/* Clamp the final sub-read to the remaining bytes */
		if (len > scsi_bufflen(srb) - transferred)
			len = scsi_bufflen(srb) - transferred;

		data[3] = len&0xFF; 	  /* (cylL) = expected length (L) */
		data[4] = (len>>8)&0xFF;  /* (cylH) = expected length (H) */

		/* Fix up the SCSI command sector and num sectors */

		data[7+2] = MSB_of(sector>>16); /* SCSI command sector */
		data[7+3] = LSB_of(sector>>16);
		data[7+4] = MSB_of(sector&0xFFFF);
		data[7+5] = LSB_of(sector&0xFFFF);
		if (data[7+0] == GPCMD_READ_CD)
			data[7+6] = 0;
		data[7+7] = MSB_of(len / srb->transfersize); /* SCSI command */
		data[7+8] = LSB_of(len / srb->transfersize); /* num sectors */

		result = usbat_hp8200e_rw_block_test(us, USBAT_ATA,
						     registers, data, 19,
						     USBAT_ATA_DATA, USBAT_ATA_STATUS, 0xFD,
						     (USBAT_QUAL_FCQ | USBAT_QUAL_ALQ),
						     DMA_FROM_DEVICE,
						     buffer,
						     len, 0, 1);

		if (result != USB_STOR_TRANSPORT_GOOD)
			break;

		/* Store the data in the transfer buffer */
		usb_stor_access_xfer_buf(buffer, len, srb,
					 &sg, &sg_offset, TO_XFER_BUF);

		/* Update the amount transferred and the sector number */

		transferred += len;
		sector += len / srb->transfersize;

	} /* while transferred != scsi_bufflen(srb) */

	kfree(buffer);
	return result;
}
/*
 * Exercise the ATA taskfile registers for both possible devices.
 *
 * For device = master (0xA0) and device = slave (0xB0): select the
 * device, read back several registers, then write test patterns
 * (0x55/0xAA) to the LBA mid/high registers and read them back.  The
 * read-back values are discarded; only transfer success is checked.
 * This sequence came from reverse engineering -- do not reorder it.
 * Returns a USB_STOR_TRANSPORT_* code.
 */
static int usbat_select_and_test_registers(struct us_data *us)
{
	int selector;
	unsigned char *status = us->iobuf;

	/* try device = master, then device = slave. */
	for (selector = 0xA0; selector <= 0xB0; selector += 0x10) {
		/* Select the device */
		if (usbat_write(us, USBAT_ATA, USBAT_ATA_DEVICE, selector) !=
				USB_STOR_XFER_GOOD)
			return USB_STOR_TRANSPORT_ERROR;

		/* Dummy reads of status/device/LBA registers */
		if (usbat_read(us, USBAT_ATA, USBAT_ATA_STATUS, status) !=
				USB_STOR_XFER_GOOD)
			return USB_STOR_TRANSPORT_ERROR;

		if (usbat_read(us, USBAT_ATA, USBAT_ATA_DEVICE, status) !=
				USB_STOR_XFER_GOOD)
			return USB_STOR_TRANSPORT_ERROR;

		if (usbat_read(us, USBAT_ATA, USBAT_ATA_LBA_ME, status) !=
				USB_STOR_XFER_GOOD)
			return USB_STOR_TRANSPORT_ERROR;

		if (usbat_read(us, USBAT_ATA, USBAT_ATA_LBA_HI, status) !=
				USB_STOR_XFER_GOOD)
			return USB_STOR_TRANSPORT_ERROR;

		/* Write test patterns and read them back */
		if (usbat_write(us, USBAT_ATA, USBAT_ATA_LBA_ME, 0x55) !=
				USB_STOR_XFER_GOOD)
			return USB_STOR_TRANSPORT_ERROR;

		if (usbat_write(us, USBAT_ATA, USBAT_ATA_LBA_HI, 0xAA) !=
				USB_STOR_XFER_GOOD)
			return USB_STOR_TRANSPORT_ERROR;

		if (usbat_read(us, USBAT_ATA, USBAT_ATA_LBA_ME, status) !=
				USB_STOR_XFER_GOOD)
			return USB_STOR_TRANSPORT_ERROR;

		if (usbat_read(us, USBAT_ATA, USBAT_ATA_LBA_ME, status) !=
				USB_STOR_XFER_GOOD)
			return USB_STOR_TRANSPORT_ERROR;
	}

	return USB_STOR_TRANSPORT_GOOD;
}
/*
 * Initialize the USBAT processor and the storage device
 *
 * Allocates the per-device usbat_info, walks the reverse-engineered
 * initialization sequence (UIO pokes, register tests, card detect),
 * selects the transport based on @devicetype, and programs the shuttle
 * feature registers.  Returns 0 or a negative errno.
 */
static int init_usbat(struct us_data *us, int devicetype)
{
	int rc;
	struct usbat_info *info;
	unsigned char subcountH = USBAT_ATA_LBA_HI;
	unsigned char subcountL = USBAT_ATA_LBA_ME;
	unsigned char *status = us->iobuf;

	/* Freed by the usb-storage core when the device goes away */
	us->extra = kzalloc(sizeof(struct usbat_info), GFP_NOIO);
	if (!us->extra)
		return -ENOMEM;

	info = (struct usbat_info *) (us->extra);

	/* Enable peripheral control signals */
	rc = usbat_write_user_io(us,
				 USBAT_UIO_OE1 | USBAT_UIO_OE0,
				 USBAT_UIO_EPAD | USBAT_UIO_1);
	if (rc != USB_STOR_XFER_GOOD)
		return -EIO;

	usb_stor_dbg(us, "INIT 1\n");

	msleep(2000);

	/*
	 * NOTE(review): this check compares against TRANSPORT_GOOD while
	 * the later read_user_io calls compare against XFER_GOOD; both
	 * constants appear to denote success here -- confirm upstream.
	 */
	rc = usbat_read_user_io(us, status);
	if (rc != USB_STOR_TRANSPORT_GOOD)
		return -EIO;

	usb_stor_dbg(us, "INIT 2\n");

	/* Two reads in a row, per the traced init sequence */
	rc = usbat_read_user_io(us, status);
	if (rc != USB_STOR_XFER_GOOD)
		return -EIO;

	rc = usbat_read_user_io(us, status);
	if (rc != USB_STOR_XFER_GOOD)
		return -EIO;

	usb_stor_dbg(us, "INIT 3\n");

	rc = usbat_select_and_test_registers(us);
	if (rc != USB_STOR_TRANSPORT_GOOD)
		return -EIO;

	usb_stor_dbg(us, "INIT 4\n");

	rc = usbat_read_user_io(us, status);
	if (rc != USB_STOR_XFER_GOOD)
		return -EIO;

	usb_stor_dbg(us, "INIT 5\n");

	/* Enable peripheral control signals and card detect */
	rc = usbat_device_enable_cdt(us);
	if (rc != USB_STOR_TRANSPORT_GOOD)
		return -EIO;

	usb_stor_dbg(us, "INIT 6\n");

	rc = usbat_read_user_io(us, status);
	if (rc != USB_STOR_XFER_GOOD)
		return -EIO;

	usb_stor_dbg(us, "INIT 7\n");

	msleep(1400);

	rc = usbat_read_user_io(us, status);
	if (rc != USB_STOR_XFER_GOOD)
		return -EIO;

	usb_stor_dbg(us, "INIT 8\n");

	rc = usbat_select_and_test_registers(us);
	if (rc != USB_STOR_TRANSPORT_GOOD)
		return -EIO;

	usb_stor_dbg(us, "INIT 9\n");

	/* At this point, we need to detect which device we are using */
	if (usbat_set_transport(us, info, devicetype))
		return -EIO;

	usb_stor_dbg(us, "INIT 10\n");

	/* Flash devices use different comparison sub-count registers */
	if (usbat_get_device_type(us) == USBAT_DEV_FLASH) {
		subcountH = 0x02;
		subcountL = 0x00;
	}
	rc = usbat_set_shuttle_features(us, (USBAT_FEAT_ETEN | USBAT_FEAT_ET2 | USBAT_FEAT_ET1),
					0x00, 0x88, 0x08, subcountH, subcountL);
	if (rc != USB_STOR_XFER_GOOD)
		return -EIO;

	usb_stor_dbg(us, "INIT 11\n");

	return 0;
}
/*
 * Transport for the HP 8200e
 *
 * Wraps the SCSI CDB in an ATA PACKET COMMAND (0xA0) register sequence.
 * Writes go through the rw_block_test path; READ_10/READ_CD get special
 * splitting treatment; everything else is sent as a 12-byte packet with
 * an optional response read whose length comes from the cylinder
 * registers.  Returns a USB_STOR_TRANSPORT_* code.
 */
static int usbat_hp8200e_transport(struct scsi_cmnd *srb, struct us_data *us)
{
	int result;
	unsigned char *status = us->iobuf;
	unsigned char registers[32];
	unsigned char data[32];
	unsigned int len;
	int i;

	len = scsi_bufflen(srb);

	/*
	 * Send A0 (ATA PACKET COMMAND).
	 * Note: I guess we're never going to get any of the ATA
	 * commands... just ATA Packet Commands.
	 */

	/* Taskfile registers 0-6 plus 12 repeats of the data register */
	registers[0] = USBAT_ATA_FEATURES;
	registers[1] = USBAT_ATA_SECCNT;
	registers[2] = USBAT_ATA_SECNUM;
	registers[3] = USBAT_ATA_LBA_ME;
	registers[4] = USBAT_ATA_LBA_HI;
	registers[5] = USBAT_ATA_DEVICE;
	registers[6] = USBAT_ATA_CMD;
	data[0] = 0x00;
	data[1] = 0x00;
	data[2] = 0x00;
	data[3] = len&0xFF; 		/* (cylL) = expected length (L) */
	data[4] = (len>>8)&0xFF; 	/* (cylH) = expected length (H) */
	data[5] = 0xB0; 		/* (device sel) = slave */
	data[6] = 0xA0; 		/* (command) = ATA PACKET COMMAND */

	/* CDB bytes, zero-padded past srb->cmd_len */
	for (i=7; i<19; i++) {
		registers[i] = 0x10;
		data[i] = (i-7 >= srb->cmd_len) ? 0 : srb->cmnd[i-7];
	}

	result = usbat_get_status(us, status);
	usb_stor_dbg(us, "Status = %02X\n", *status);
	if (result != USB_STOR_XFER_GOOD)
		return USB_STOR_TRANSPORT_ERROR;
	/* Reset the file-scope byte counter on TEST_UNIT_READY */
	if (srb->cmnd[0] == TEST_UNIT_READY)
		transferred = 0;

	if (srb->sc_data_direction == DMA_TO_DEVICE) {

		result = usbat_hp8200e_rw_block_test(us, USBAT_ATA,
						     registers, data, 19,
						     USBAT_ATA_DATA, USBAT_ATA_STATUS, 0xFD,
						     (USBAT_QUAL_FCQ | USBAT_QUAL_ALQ),
						     DMA_TO_DEVICE,
						     scsi_sglist(srb),
						     len, scsi_sg_count(srb), 10);

		if (result == USB_STOR_TRANSPORT_GOOD) {
			transferred += len;
			usb_stor_dbg(us, "Wrote %08X bytes\n", transferred);
		}

		return result;

	} else if (srb->cmnd[0] == READ_10 ||
		   srb->cmnd[0] == GPCMD_READ_CD) {

		/* Reads may exceed 64 KB -- handled by the splitter */
		return usbat_hp8200e_handle_read10(us, registers, data, srb);

	}

	if (len > 0xFFFF) {
		usb_stor_dbg(us, "Error: len = %08X... what do I do now?\n",
			     len);
		return USB_STOR_TRANSPORT_ERROR;
	}

	/* Download the taskfile (registers 0-6 only) */
	result = usbat_multiple_write(us, registers, data, 7);
	if (result != USB_STOR_TRANSPORT_GOOD)
		return result;

	/*
	 * Write the 12-byte command header.
	 *
	 * If the command is BLANK then set the timer for 75 minutes.
	 * Otherwise set it for 10 minutes.
	 *
	 * NOTE: THE 8200 DOCUMENTATION STATES THAT BLANKING A CDRW
	 * AT SPEED 4 IS UNRELIABLE!!!
	 */
	result = usbat_write_block(us, USBAT_ATA, srb->cmnd, 12,
				   srb->cmnd[0] == GPCMD_BLANK ? 75 : 10, 0);

	if (result != USB_STOR_TRANSPORT_GOOD)
		return result;

	/* If there is response data to be read in then do it here. */
	if (len != 0 && (srb->sc_data_direction == DMA_FROM_DEVICE)) {

		/* How many bytes to read in? Check cylL register */
		if (usbat_read(us, USBAT_ATA, USBAT_ATA_LBA_ME, status) !=
				USB_STOR_XFER_GOOD) {
			return USB_STOR_TRANSPORT_ERROR;
		}

		if (len > 0xFF) { /* need to read cylH also */
			len = *status;
			if (usbat_read(us, USBAT_ATA, USBAT_ATA_LBA_HI, status) !=
					USB_STOR_XFER_GOOD) {
				return USB_STOR_TRANSPORT_ERROR;
			}
			len += ((unsigned int) *status)<<8;
		}
		else
			len = *status;


		result = usbat_read_block(us, scsi_sglist(srb), len,
					  scsi_sg_count(srb));
	}

	return result;
}
/*
 * Transport for USBAT02-based CompactFlash and similar storage devices
 *
 * Dispatches on the SCSI opcode: INQUIRY and READ_CAPACITY are answered
 * locally, READ/WRITE 10/12 are translated to ATA sector operations,
 * and REQUEST_SENSE replays the sense data cached in @info.  Unknown
 * opcodes fail with ILLEGAL REQUEST sense data.
 */
static int usbat_flash_transport(struct scsi_cmnd * srb, struct us_data *us)
{
	int rc;
	struct usbat_info *info = (struct usbat_info *) (us->extra);
	unsigned long block, blocks;
	unsigned char *ptr = us->iobuf;
	/* Canned INQUIRY data; the rest is filled in by the core */
	static unsigned char inquiry_response[36] = {
		0x00, 0x80, 0x00, 0x01, 0x1F, 0x00, 0x00, 0x00
	};

	if (srb->cmnd[0] == INQUIRY) {
		usb_stor_dbg(us, "INQUIRY - Returning bogus response\n");
		memcpy(ptr, inquiry_response, sizeof(inquiry_response));
		fill_inquiry_response(us, ptr, 36);
		return USB_STOR_TRANSPORT_GOOD;
	}

	if (srb->cmnd[0] == READ_CAPACITY) {
		rc = usbat_flash_check_media(us, info);
		if (rc != USB_STOR_TRANSPORT_GOOD)
			return rc;

		rc = usbat_flash_get_sector_count(us, info);
		if (rc != USB_STOR_TRANSPORT_GOOD)
			return rc;

		/* hard coded 512 byte sectors as per ATA spec */
		info->ssize = 0x200;
		usb_stor_dbg(us, "READ_CAPACITY: %ld sectors, %ld bytes per sector\n",
			     info->sectors, info->ssize);

		/*
		 * build the reply
		 * note: must return the sector number of the last sector,
		 * *not* the total number of sectors
		 */
		((__be32 *) ptr)[0] = cpu_to_be32(info->sectors - 1);
		((__be32 *) ptr)[1] = cpu_to_be32(info->ssize);
		usb_stor_set_xfer_buf(ptr, 8, srb);

		return USB_STOR_TRANSPORT_GOOD;
	}

	if (srb->cmnd[0] == MODE_SELECT_10) {
		usb_stor_dbg(us, "Gah! MODE_SELECT_10\n");
		return USB_STOR_TRANSPORT_ERROR;
	}

	if (srb->cmnd[0] == READ_10) {
		/* 32-bit LBA from CDB bytes 2-5, 16-bit count from 7-8 */
		block = ((u32)(srb->cmnd[2]) << 24) | ((u32)(srb->cmnd[3]) << 16) |
			((u32)(srb->cmnd[4]) <<  8) | ((u32)(srb->cmnd[5]));

		blocks = ((u32)(srb->cmnd[7]) << 8) | ((u32)(srb->cmnd[8]));

		usb_stor_dbg(us, "READ_10: read block 0x%04lx  count %ld\n",
			     block, blocks);
		return usbat_flash_read_data(us, info, block, blocks);
	}

	if (srb->cmnd[0] == READ_12) {
		/*
		 * I don't think we'll ever see a READ_12 but support it anyway
		 */
		block = ((u32)(srb->cmnd[2]) << 24) | ((u32)(srb->cmnd[3]) << 16) |
			((u32)(srb->cmnd[4]) <<  8) | ((u32)(srb->cmnd[5]));

		blocks = ((u32)(srb->cmnd[6]) << 24) | ((u32)(srb->cmnd[7]) << 16) |
			 ((u32)(srb->cmnd[8]) <<  8) | ((u32)(srb->cmnd[9]));

		usb_stor_dbg(us, "READ_12: read block 0x%04lx  count %ld\n",
			     block, blocks);
		return usbat_flash_read_data(us, info, block, blocks);
	}

	if (srb->cmnd[0] == WRITE_10) {
		/* 32-bit LBA from CDB bytes 2-5, 16-bit count from 7-8 */
		block = ((u32)(srb->cmnd[2]) << 24) | ((u32)(srb->cmnd[3]) << 16) |
			((u32)(srb->cmnd[4]) <<  8) | ((u32)(srb->cmnd[5]));

		blocks = ((u32)(srb->cmnd[7]) << 8) | ((u32)(srb->cmnd[8]));

		usb_stor_dbg(us, "WRITE_10: write block 0x%04lx  count %ld\n",
			     block, blocks);
		return usbat_flash_write_data(us, info, block, blocks);
	}

	if (srb->cmnd[0] == WRITE_12) {
		/*
		 * I don't think we'll ever see a WRITE_12 but support it anyway
		 */
		block = ((u32)(srb->cmnd[2]) << 24) | ((u32)(srb->cmnd[3]) << 16) |
			((u32)(srb->cmnd[4]) <<  8) | ((u32)(srb->cmnd[5]));

		blocks = ((u32)(srb->cmnd[6]) << 24) | ((u32)(srb->cmnd[7]) << 16) |
			 ((u32)(srb->cmnd[8]) <<  8) | ((u32)(srb->cmnd[9]));

		usb_stor_dbg(us, "WRITE_12: write block 0x%04lx  count %ld\n",
			     block, blocks);
		return usbat_flash_write_data(us, info, block, blocks);
	}


	if (srb->cmnd[0] == TEST_UNIT_READY) {
		usb_stor_dbg(us, "TEST_UNIT_READY\n");

		rc = usbat_flash_check_media(us, info);
		if (rc != USB_STOR_TRANSPORT_GOOD)
			return rc;

		return usbat_check_status(us);
	}

	if (srb->cmnd[0] == REQUEST_SENSE) {
		usb_stor_dbg(us, "REQUEST_SENSE\n");

		/* Build fixed-format sense data from the cached values */
		memset(ptr, 0, 18);
		ptr[0] = 0xF0;
		ptr[2] = info->sense_key;
		ptr[7] = 11;
		ptr[12] = info->sense_asc;
		ptr[13] = info->sense_ascq;
		usb_stor_set_xfer_buf(ptr, 18, srb);

		return USB_STOR_TRANSPORT_GOOD;
	}

	if (srb->cmnd[0] == ALLOW_MEDIUM_REMOVAL) {
		/*
		 * sure.  whatever.  not like we can stop the user from popping
		 * the media out of the device (no locking doors, etc)
		 */
		return USB_STOR_TRANSPORT_GOOD;
	}

	usb_stor_dbg(us, "Gah! Unknown command: %d (0x%x)\n",
		     srb->cmnd[0], srb->cmnd[0]);
	/* Sense: ILLEGAL REQUEST / invalid command operation code */
	info->sense_key = 0x05;
	info->sense_asc = 0x20;
	info->sense_ascq = 0x00;
	return USB_STOR_TRANSPORT_FAILED;
}
/* Initialize as an HP8200-based CD drive */
static int init_usbat_cd(struct us_data *us)
{
	return init_usbat(us, USBAT_DEV_HP8200);
}
/* Initialize as a flash reader/writer */
static int init_usbat_flash(struct us_data *us)
{
	return init_usbat(us, USBAT_DEV_FLASH);
}
static struct scsi_host_template usbat_host_template;

/*
 * USB probe entry point: hand off to the usb-storage core with
 * placeholder transport settings; the real transport is chosen later
 * by the per-device initialization routine.
 */
static int usbat_probe(struct usb_interface *intf,
		       const struct usb_device_id *id)
{
	struct us_data *us;
	int result;

	result = usb_stor_probe1(&us, intf, id,
				 (id - usbat_usb_ids) + usbat_unusual_dev_list,
				 &usbat_host_template);
	if (result)
		return result;

	/*
	 * The actual transport will be determined later by the
	 * initialization routine; this is just a placeholder.
	 */
	us->transport_name = "Shuttle USBAT";
	us->transport = usbat_flash_transport;
	us->transport_reset = usb_stor_CB_reset;
	us->max_lun = 0;

	result = usb_stor_probe2(us);
	return result;
}
/* USB driver glue; all callbacks are the generic usb-storage handlers. */
static struct usb_driver usbat_driver = {
	.name =		DRV_NAME,
	.probe =	usbat_probe,
	.disconnect =	usb_stor_disconnect,
	.suspend =	usb_stor_suspend,
	.resume =	usb_stor_resume,
	.reset_resume =	usb_stor_reset_resume,
	.pre_reset =	usb_stor_pre_reset,
	.post_reset =	usb_stor_post_reset,
	.id_table =	usbat_usb_ids,
	.soft_unbind =	1,
	.no_dynamic_id = 1,
};
module_usb_stor_driver(usbat_driver, usbat_host_template, DRV_NAME);
| linux-master | drivers/usb/storage/shuttle_usbat.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* Driver for Option High Speed Mobile Devices.
*
* (c) 2008 Dan Williams <[email protected]>
*
* Inspiration taken from sierra_ms.c by Kevin Lloyd <[email protected]>
*/
#include <linux/usb.h>
#include <linux/slab.h>
#include <linux/module.h>
#include "usb.h"
#include "transport.h"
#include "option_ms.h"
#include "debug.h"
#define ZCD_FORCE_MODEM 0x01
#define ZCD_ALLOW_MS 0x02
/* Module parameter selecting how a ZeroCD-capable Option device is handled. */
static unsigned int option_zero_cd = ZCD_FORCE_MODEM;
module_param(option_zero_cd, uint, S_IRUGO | S_IWUSR);
/* Fix: description string previously had an unbalanced parenthesis. */
MODULE_PARM_DESC(option_zero_cd, "ZeroCD mode (1=Force Modem (default),"
		 " 2=Allow CD-Rom)");
#define RESPONSE_LEN 1024
/*
 * Send the vendor mode-switch CBW that moves an Option device out of
 * ZeroCD (virtual CD-ROM) mode.  The data stage and the CSW are read
 * back but their contents are deliberately ignored -- some firmwares
 * apparently will not complete the switch until the reads happen.
 * Returns a USB_STOR_XFER_* code (USB_STOR_TRANSPORT_ERROR on OOM).
 */
static int option_rezero(struct us_data *us)
{
	/* Pre-built Bulk-Only CBW; CDB starts at byte 15, opcode 0x01. */
	static const unsigned char rezero_msg[] = {
	  0x55, 0x53, 0x42, 0x43, 0x78, 0x56, 0x34, 0x12,
	  0x01, 0x00, 0x00, 0x00, 0x80, 0x00, 0x06, 0x01,
	  0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
	  0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
	};
	char *buffer;
	int result;
	usb_stor_dbg(us, "Option MS: %s\n", "DEVICE MODE SWITCH");
	buffer = kzalloc(RESPONSE_LEN, GFP_KERNEL);
	if (buffer == NULL)
		return USB_STOR_TRANSPORT_ERROR;
	memcpy(buffer, rezero_msg, sizeof(rezero_msg));
	result = usb_stor_bulk_transfer_buf(us,
			us->send_bulk_pipe,
			buffer, sizeof(rezero_msg), NULL);
	if (result != USB_STOR_XFER_GOOD) {
		result = USB_STOR_XFER_ERROR;
		goto out;
	}
	/*
	 * Some of the devices need to be asked for a response, but we don't
	 * care what that response is.
	 */
	usb_stor_bulk_transfer_buf(us,
			us->recv_bulk_pipe,
			buffer, RESPONSE_LEN, NULL);
	/* Read the CSW */
	usb_stor_bulk_transfer_buf(us,
			us->recv_bulk_pipe,
			buffer, 13, NULL);
	result = USB_STOR_XFER_GOOD;
out:
	kfree(buffer);
	return result;
}
/*
 * Issue an INQUIRY over the Bulk-Only transport and check whether the
 * vendor string at offset 8 of the response is "Option" or "ZCOPTION".
 * Returns 0 when the device is a genuine Option device, non-zero (the
 * memcmp result or USB_STOR_XFER_ERROR) otherwise.
 */
static int option_inquiry(struct us_data *us)
{
	/* Pre-built Bulk-Only CBW: INQUIRY (0x12), 0x24-byte allocation. */
	static const unsigned char inquiry_msg[] = {
	  0x55, 0x53, 0x42, 0x43, 0x12, 0x34, 0x56, 0x78,
	  0x24, 0x00, 0x00, 0x00, 0x80, 0x00, 0x06, 0x12,
	  0x00, 0x00, 0x00, 0x24, 0x00, 0x00, 0x00, 0x00,
	  0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
	};
	char *buffer;
	int result;
	usb_stor_dbg(us, "Option MS: %s\n", "device inquiry for vendor name");
	buffer = kzalloc(0x24, GFP_KERNEL);
	if (buffer == NULL)
		return USB_STOR_TRANSPORT_ERROR;
	memcpy(buffer, inquiry_msg, sizeof(inquiry_msg));
	result = usb_stor_bulk_transfer_buf(us,
			us->send_bulk_pipe,
			buffer, sizeof(inquiry_msg), NULL);
	if (result != USB_STOR_XFER_GOOD) {
		result = USB_STOR_XFER_ERROR;
		goto out;
	}
	result = usb_stor_bulk_transfer_buf(us,
			us->recv_bulk_pipe,
			buffer, 0x24, NULL);
	if (result != USB_STOR_XFER_GOOD) {
		result = USB_STOR_XFER_ERROR;
		goto out;
	}
	/* Vendor id field of the INQUIRY data starts at byte 8. */
	result = memcmp(buffer+8, "Option", 6);
	if (result != 0)
		result = memcmp(buffer+8, "ZCOPTION", 8);
	/* Read the CSW */
	usb_stor_bulk_transfer_buf(us,
			us->recv_bulk_pipe,
			buffer, 13, NULL);
out:
	kfree(buffer);
	return result;
}
/*
 * usb-storage init hook for Option devices.  Verifies the vendor via
 * INQUIRY (the USB ids alone are ambiguous) and, depending on the
 * option_zero_cd module parameter, either forces the device into modem
 * mode or leaves the mass-storage (CD-ROM) interface alone.
 */
int option_ms_init(struct us_data *us)
{
	int result;
	usb_stor_dbg(us, "Option MS: %s\n", "option_ms_init called");
	/*
	 * Additional test for vendor information via INQUIRY,
	 * because some vendor/product IDs are ambiguous
	 */
	result = option_inquiry(us);
	if (result != 0) {
		usb_stor_dbg(us, "Option MS: %s\n",
			     "vendor is not Option or not determinable, no action taken");
		return 0;
	} else
		usb_stor_dbg(us, "Option MS: %s\n",
			     "this is a genuine Option device, proceeding");
	/* Force Modem mode */
	if (option_zero_cd == ZCD_FORCE_MODEM) {
		usb_stor_dbg(us, "Option MS: %s\n", "Forcing Modem Mode");
		result = option_rezero(us);
		if (result != USB_STOR_XFER_GOOD)
			usb_stor_dbg(us, "Option MS: %s\n",
				     "Failed to switch to modem mode");
		/*
		 * NOTE: -EIO is returned unconditionally here, even when the
		 * switch succeeded -- presumably because the device is about
		 * to drop its storage interface, so the usb-storage probe
		 * must be aborted either way.
		 */
		return -EIO;
	} else if (option_zero_cd == ZCD_ALLOW_MS) {
		/* Allow Mass Storage mode (keep CD-Rom) */
		usb_stor_dbg(us, "Option MS: %s\n",
			     "Allowing Mass Storage Mode if device requests it");
	}
	return 0;
}
| linux-master | drivers/usb/storage/option_ms.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* Driver for USB Mass Storage compliant devices
* SCSI layer glue code
*
* Current development and maintenance by:
* (c) 1999-2002 Matthew Dharm ([email protected])
*
* Developed with the assistance of:
* (c) 2000 David L. Brown, Jr. ([email protected])
* (c) 2000 Stephen J. Gowdy ([email protected])
*
* Initial work by:
* (c) 1999 Michael Gee ([email protected])
*
* This driver is based on the 'USB Mass Storage Class' document. This
* describes in detail the protocol used to communicate with such
* devices. Clearly, the designers had SCSI and ATAPI commands in
* mind when they created this document. The commands are all very
* similar to commands in the SCSI-II and ATAPI specifications.
*
* It is important to note that in a number of cases this class
* exhibits class-specific exemptions from the USB specification.
* Notably the usage of NAK, STALL and ACK differs from the norm, in
* that they are used to communicate wait, failed and OK on commands.
*
* Also, for certain devices, the interrupt endpoint is used to convey
* status of a command.
*/
#include <linux/blkdev.h>
#include <linux/dma-mapping.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_devinfo.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_eh.h>
#include "usb.h"
#include <linux/usb/hcd.h>
#include "scsiglue.h"
#include "debug.h"
#include "transport.h"
#include "protocol.h"
/*
* Vendor IDs for companies that seem to include the READ CAPACITY bug
* in all their devices
*/
#define VENDOR_ID_NOKIA 0x0421
#define VENDOR_ID_NIKON 0x04b0
#define VENDOR_ID_PENTAX 0x0a17
#define VENDOR_ID_MOTOROLA 0x22b8
/***********************************************************************
* Host functions
***********************************************************************/
static const char* host_info(struct Scsi_Host *host)
{
struct us_data *us = host_to_us(host);
return us->scsi_name;
}
/*
 * Early per-LUN setup (called before INQUIRY): fix the INQUIRY length,
 * set DMA alignment, and hint multi-LUN support to the SCSI layer.
 */
static int slave_alloc (struct scsi_device *sdev)
{
	struct us_data *us = host_to_us(sdev->host);
	/*
	 * Set the INQUIRY transfer length to 36.  We don't use any of
	 * the extra data and many devices choke if asked for more or
	 * less than 36 bytes.
	 */
	sdev->inquiry_len = 36;
	/*
	 * Some host controllers may have alignment requirements.
	 * We'll play it safe by requiring 512-byte alignment always.
	 */
	blk_queue_update_dma_alignment(sdev->request_queue, (512 - 1));
	/* Tell the SCSI layer if we know there is more than one LUN */
	if (us->protocol == USB_PR_BULK && us->max_lun > 0)
		sdev->sdev_bflags |= BLIST_FORCELUN;
	return 0;
}
/*
 * Per-LUN setup run after INQUIRY: apply transfer-size limits, DMA and
 * bounce-buffer constraints, and a large set of per-quirk (us->fflags)
 * workarounds to the SCSI device before commands start flowing.
 */
static int slave_configure(struct scsi_device *sdev)
{
	struct us_data *us = host_to_us(sdev->host);
	struct device *dev = us->pusb_dev->bus->sysdev;
	/*
	 * Many devices have trouble transferring more than 32KB at a time,
	 * while others have trouble with more than 64K. At this time we
	 * are limiting both to 32K (64 sectors).
	 */
	if (us->fflags & (US_FL_MAX_SECTORS_64 | US_FL_MAX_SECTORS_MIN)) {
		unsigned int max_sectors = 64;
		if (us->fflags & US_FL_MAX_SECTORS_MIN)
			max_sectors = PAGE_SIZE >> 9;
		if (queue_max_hw_sectors(sdev->request_queue) > max_sectors)
			blk_queue_max_hw_sectors(sdev->request_queue,
					      max_sectors);
	} else if (sdev->type == TYPE_TAPE) {
		/*
		 * Tapes need much higher max_sector limits, so just
		 * raise it to the maximum possible (4 GB / 512) and
		 * let the queue segment size sort out the real limit.
		 */
		blk_queue_max_hw_sectors(sdev->request_queue, 0x7FFFFF);
	} else if (us->pusb_dev->speed >= USB_SPEED_SUPER) {
		/*
		 * USB3 devices will be limited to 2048 sectors. This gives us
		 * better throughput on most devices.
		 */
		blk_queue_max_hw_sectors(sdev->request_queue, 2048);
	}
	/*
	 * The max_hw_sectors should be up to maximum size of a mapping for
	 * the device. Otherwise, a DMA API might fail on swiotlb environment.
	 */
	blk_queue_max_hw_sectors(sdev->request_queue,
		min_t(size_t, queue_max_hw_sectors(sdev->request_queue),
		      dma_max_mapping_size(dev) >> SECTOR_SHIFT));
	/*
	 * Some USB host controllers can't do DMA; they have to use PIO.
	 * For such controllers we need to make sure the block layer sets
	 * up bounce buffers in addressable memory.
	 */
	if (!hcd_uses_dma(bus_to_hcd(us->pusb_dev->bus)) ||
			(bus_to_hcd(us->pusb_dev->bus)->localmem_pool != NULL))
		blk_queue_bounce_limit(sdev->request_queue, BLK_BOUNCE_HIGH);
	/*
	 * We can't put these settings in slave_alloc() because that gets
	 * called before the device type is known.  Consequently these
	 * settings can't be overridden via the scsi devinfo mechanism.
	 */
	if (sdev->type == TYPE_DISK) {
		/*
		 * Some vendors seem to put the READ CAPACITY bug into
		 * all their devices -- primarily makers of cell phones
		 * and digital cameras.  Since these devices always use
		 * flash media and can be expected to have an even number
		 * of sectors, we will always enable the CAPACITY_HEURISTICS
		 * flag unless told otherwise.
		 */
		switch (le16_to_cpu(us->pusb_dev->descriptor.idVendor)) {
		case VENDOR_ID_NOKIA:
		case VENDOR_ID_NIKON:
		case VENDOR_ID_PENTAX:
		case VENDOR_ID_MOTOROLA:
			if (!(us->fflags & (US_FL_FIX_CAPACITY |
					US_FL_CAPACITY_OK)))
				us->fflags |= US_FL_CAPACITY_HEURISTICS;
			break;
		}
		/*
		 * Disk-type devices use MODE SENSE(6) if the protocol
		 * (SubClass) is Transparent SCSI, otherwise they use
		 * MODE SENSE(10).
		 */
		if (us->subclass != USB_SC_SCSI && us->subclass != USB_SC_CYP_ATACB)
			sdev->use_10_for_ms = 1;
		/*
		 * Many disks only accept MODE SENSE transfer lengths of
		 * 192 bytes (that's what Windows uses).
		 */
		sdev->use_192_bytes_for_3f = 1;
		/*
		 * Some devices don't like MODE SENSE with page=0x3f,
		 * which is the command used for checking if a device
		 * is write-protected.  Now that we tell the sd driver
		 * to do a 192-byte transfer with this command the
		 * majority of devices work fine, but a few still can't
		 * handle it.  The sd driver will simply assume those
		 * devices are write-enabled.
		 */
		if (us->fflags & US_FL_NO_WP_DETECT)
			sdev->skip_ms_page_3f = 1;
		/*
		 * A number of devices have problems with MODE SENSE for
		 * page x08, so we will skip it.
		 */
		sdev->skip_ms_page_8 = 1;
		/*
		 * Some devices don't handle VPD pages correctly, so skip vpd
		 * pages if not forced by SCSI layer.
		 */
		sdev->skip_vpd_pages = !sdev->try_vpd_pages;
		/* Do not attempt to use REPORT SUPPORTED OPERATION CODES */
		sdev->no_report_opcodes = 1;
		/* Do not attempt to use WRITE SAME */
		sdev->no_write_same = 1;
		/*
		 * Some disks return the total number of blocks in response
		 * to READ CAPACITY rather than the highest block number.
		 * If this device makes that mistake, tell the sd driver.
		 */
		if (us->fflags & US_FL_FIX_CAPACITY)
			sdev->fix_capacity = 1;
		/*
		 * A few disks have two indistinguishable version, one of
		 * which reports the correct capacity and the other does not.
		 * The sd driver has to guess which is the case.
		 */
		if (us->fflags & US_FL_CAPACITY_HEURISTICS)
			sdev->guess_capacity = 1;
		/* Some devices cannot handle READ_CAPACITY_16 */
		if (us->fflags & US_FL_NO_READ_CAPACITY_16)
			sdev->no_read_capacity_16 = 1;
		/*
		 * Many devices do not respond properly to READ_CAPACITY_16.
		 * Tell the SCSI layer to try READ_CAPACITY_10 first.
		 * However some USB 3.0 drive enclosures return capacity
		 * modulo 2TB. Those must use READ_CAPACITY_16
		 */
		if (!(us->fflags & US_FL_NEEDS_CAP16))
			sdev->try_rc_10_first = 1;
		/*
		 * assume SPC3 or latter devices support sense size > 18
		 * unless US_FL_BAD_SENSE quirk is specified.
		 */
		if (sdev->scsi_level > SCSI_SPC_2 &&
		    !(us->fflags & US_FL_BAD_SENSE))
			us->fflags |= US_FL_SANE_SENSE;
		/*
		 * USB-IDE bridges tend to report SK = 0x04 (Non-recoverable
		 * Hardware Error) when any low-level error occurs,
		 * recoverable or not.  Setting this flag tells the SCSI
		 * midlayer to retry such commands, which frequently will
		 * succeed and fix the error.  The worst this can lead to
		 * is an occasional series of retries that will all fail.
		 */
		sdev->retry_hwerror = 1;
		/*
		 * USB disks should allow restart.  Some drives spin down
		 * automatically, requiring a START-STOP UNIT command.
		 */
		sdev->allow_restart = 1;
		/*
		 * Some USB cardreaders have trouble reading an sdcard's last
		 * sector in a larger then 1 sector read, since the performance
		 * impact is negligible we set this flag for all USB disks
		 */
		sdev->last_sector_bug = 1;
		/*
		 * Enable last-sector hacks for single-target devices using
		 * the Bulk-only transport, unless we already know the
		 * capacity will be decremented or is correct.
		 */
		if (!(us->fflags & (US_FL_FIX_CAPACITY | US_FL_CAPACITY_OK |
					US_FL_SCM_MULT_TARG)) &&
				us->protocol == USB_PR_BULK)
			us->use_last_sector_hacks = 1;
		/* Check if write cache default on flag is set or not */
		if (us->fflags & US_FL_WRITE_CACHE)
			sdev->wce_default_on = 1;
		/* A few buggy USB-ATA bridges don't understand FUA */
		if (us->fflags & US_FL_BROKEN_FUA)
			sdev->broken_fua = 1;
		/* Some even totally fail to indicate a cache */
		if (us->fflags & US_FL_ALWAYS_SYNC) {
			/* don't read caching information */
			sdev->skip_ms_page_8 = 1;
			sdev->skip_ms_page_3f = 1;
			/* assume sync is needed */
			sdev->wce_default_on = 1;
		}
	} else {
		/*
		 * Non-disk-type devices don't need to ignore any pages
		 * or to force 192-byte transfer lengths for MODE SENSE.
		 * But they do need to use MODE SENSE(10).
		 */
		sdev->use_10_for_ms = 1;
		/* Some (fake) usb cdrom devices don't like READ_DISC_INFO */
		if (us->fflags & US_FL_NO_READ_DISC_INFO)
			sdev->no_read_disc_info = 1;
	}
	/*
	 * The CB and CBI transports have no way to pass LUN values
	 * other than the bits in the second byte of a CDB.  But those
	 * bits don't get set to the LUN value if the device reports
	 * scsi_level == 0 (UNKNOWN).  Hence such devices must necessarily
	 * be single-LUN.
	 */
	if ((us->protocol == USB_PR_CB || us->protocol == USB_PR_CBI) &&
			sdev->scsi_level == SCSI_UNKNOWN)
		us->max_lun = 0;
	/*
	 * Some devices choke when they receive a PREVENT-ALLOW MEDIUM
	 * REMOVAL command, so suppress those commands.
	 */
	if (us->fflags & US_FL_NOT_LOCKABLE)
		sdev->lockable = 0;
	/*
	 * this is to satisfy the compiler, tho I don't think the
	 * return code is ever checked anywhere.
	 */
	return 0;
}
/*
 * Per-target setup: disable REPORT LUNS scanning and handle the UFI
 * peripheral-qualifier quirk before LUN scanning begins.
 */
static int target_alloc(struct scsi_target *starget)
{
	struct us_data *us = host_to_us(dev_to_shost(starget->dev.parent));
	/*
	 * Some USB drives don't support REPORT LUNS, even though they
	 * report a SCSI revision level above 2.  Tell the SCSI layer
	 * not to issue that command; it will perform a normal sequential
	 * scan instead.
	 */
	starget->no_report_luns = 1;
	/*
	 * The UFI spec treats the Peripheral Qualifier bits in an
	 * INQUIRY result as reserved and requires devices to set them
	 * to 0.  However the SCSI spec requires these bits to be set
	 * to 3 to indicate when a LUN is not present.
	 *
	 * Let the scanning code know if this target merely sets
	 * Peripheral Device Type to 0x1f to indicate no LUN.
	 */
	if (us->subclass == USB_SC_UFI)
		starget->pdt_1f_for_no_lun = 1;
	return 0;
}
/* queue a command */
/* This is always called with scsi_lock(host) held */
/*
 * Hand one SCSI command to the driver's control thread.  Only a single
 * command may be outstanding at a time (tracked in us->srb); commands
 * arriving during disconnect, or filtered ATA pass-through commands,
 * are completed immediately without being queued.
 */
static int queuecommand_lck(struct scsi_cmnd *srb)
{
	void (*done)(struct scsi_cmnd *) = scsi_done;
	struct us_data *us = host_to_us(srb->device->host);
	/* check for state-transition errors */
	if (us->srb != NULL) {
		/* the midlayer should never send a second command while
		 * one is still pending; report busy so it retries */
		dev_err(&us->pusb_intf->dev,
			"Error in %s: us->srb = %p\n", __func__, us->srb);
		return SCSI_MLQUEUE_HOST_BUSY;
	}
	/* fail the command if we are disconnecting */
	if (test_bit(US_FLIDX_DISCONNECTING, &us->dflags)) {
		usb_stor_dbg(us, "Fail command during disconnect\n");
		srb->result = DID_NO_CONNECT << 16;
		done(srb);
		return 0;
	}
	/* devices with the NO_ATA_1X quirk reject ATA pass-through:
	 * answer with an "invalid field in CDB" CHECK CONDITION */
	if ((us->fflags & US_FL_NO_ATA_1X) &&
			(srb->cmnd[0] == ATA_12 || srb->cmnd[0] == ATA_16)) {
		memcpy(srb->sense_buffer, usb_stor_sense_invalidCDB,
		       sizeof(usb_stor_sense_invalidCDB));
		srb->result = SAM_STAT_CHECK_CONDITION;
		done(srb);
		return 0;
	}
	/* enqueue the command and wake up the control thread */
	us->srb = srb;
	complete(&us->cmnd_ready);
	return 0;
}
static DEF_SCSI_QCMD(queuecommand)
/***********************************************************************
* Error handling functions
***********************************************************************/
/* Command timeout and abort */
/*
 * Abort the currently pending command.  If @srb_match is non-NULL the
 * pending command must be that exact srb (returns FAILED otherwise);
 * a NULL @srb_match aborts whatever command is pending.  Blocks until
 * the aborted command has actually completed.
 */
static int command_abort_matching(struct us_data *us, struct scsi_cmnd *srb_match)
{
	/*
	 * us->srb together with the TIMED_OUT, RESETTING, and ABORTING
	 * bits are protected by the host lock.
	 */
	scsi_lock(us_to_host(us));
	/* is there any active pending command to abort ? */
	if (!us->srb) {
		scsi_unlock(us_to_host(us));
		usb_stor_dbg(us, "-- nothing to abort\n");
		return SUCCESS;
	}
	/* Does the command match the passed srb if any ? */
	if (srb_match && us->srb != srb_match) {
		scsi_unlock(us_to_host(us));
		usb_stor_dbg(us, "-- pending command mismatch\n");
		return FAILED;
	}
	/*
	 * Set the TIMED_OUT bit.  Also set the ABORTING bit, but only if
	 * a device reset isn't already in progress (to avoid interfering
	 * with the reset).  Note that we must retain the host lock while
	 * calling usb_stor_stop_transport(); otherwise it might interfere
	 * with an auto-reset that begins as soon as we release the lock.
	 */
	set_bit(US_FLIDX_TIMED_OUT, &us->dflags);
	if (!test_bit(US_FLIDX_RESETTING, &us->dflags)) {
		set_bit(US_FLIDX_ABORTING, &us->dflags);
		usb_stor_stop_transport(us);
	}
	scsi_unlock(us_to_host(us));
	/* Wait for the aborted command to finish */
	wait_for_completion(&us->notify);
	return SUCCESS;
}
/* SCSI error-handler abort callback: abort exactly this command. */
static int command_abort(struct scsi_cmnd *srb)
{
	struct us_data *us = host_to_us(srb->device->host);
	usb_stor_dbg(us, "%s called\n", __func__);
	return command_abort_matching(us, srb);
}
/*
 * This invokes the transport reset mechanism to reset the state of the
 * device
 */
static int device_reset(struct scsi_cmnd *srb)
{
	struct us_data *us = host_to_us(srb->device->host);
	int result;
	usb_stor_dbg(us, "%s called\n", __func__);
	/* abort any pending command before reset */
	command_abort_matching(us, NULL);
	/* lock the device pointers and do the reset */
	mutex_lock(&(us->dev_mutex));
	result = us->transport_reset(us);
	mutex_unlock(&us->dev_mutex);
	return result < 0 ? FAILED : SUCCESS;
}
/* Simulate a SCSI bus reset by resetting the device's USB port. */
static int bus_reset(struct scsi_cmnd *srb)
{
	struct us_data *us = host_to_us(srb->device->host);
	int result;
	usb_stor_dbg(us, "%s called\n", __func__);
	result = usb_stor_port_reset(us);
	/* map the USB-level result onto the SCSI eh convention */
	return result < 0 ? FAILED : SUCCESS;
}
/*
 * Report a driver-initiated device reset to the SCSI layer.
 * Calling this for a SCSI-initiated reset is unnecessary but harmless.
 * The caller must own the SCSI host lock.
 */
void usb_stor_report_device_reset(struct us_data *us)
{
	int i;
	struct Scsi_Host *host = us_to_host(us);
	scsi_report_device_reset(host, 0, 0);
	/* multi-target bridges: report the reset for every target id */
	if (us->fflags & US_FL_SCM_MULT_TARG) {
		for (i = 1; i < host->max_id; ++i)
			scsi_report_device_reset(host, 0, i);
	}
}
/*
 * Report a driver-initiated bus reset to the SCSI layer.
 * Calling this for a SCSI-initiated reset is unnecessary but harmless.
 * The caller must not own the SCSI host lock.
 */
void usb_stor_report_bus_reset(struct us_data *us)
{
	struct Scsi_Host *host = us_to_host(us);
	scsi_lock(host);
	scsi_report_bus_reset(host, 0);
	scsi_unlock(host);
}
/***********************************************************************
* /proc/scsi/ functions
***********************************************************************/
/* /proc write handler: input is accepted but deliberately discarded. */
static int write_info(struct Scsi_Host *host, char *buffer, int length)
{
	/* if someone is sending us data, just throw it away */
	return length;
}
/*
 * /proc read handler: emit vendor/product/serial strings (preferring the
 * device's own USB descriptors over the unusual_devs table), the
 * protocol/transport names, and the active quirk flags.
 */
static int show_info (struct seq_file *m, struct Scsi_Host *host)
{
	struct us_data *us = host_to_us(host);
	const char *string;
	/* print the controller name */
	seq_printf(m, "   Host scsi%d: usb-storage\n", host->host_no);
	/* print product, vendor, and serial number strings */
	if (us->pusb_dev->manufacturer)
		string = us->pusb_dev->manufacturer;
	else if (us->unusual_dev->vendorName)
		string = us->unusual_dev->vendorName;
	else
		string = "Unknown";
	seq_printf(m, "       Vendor: %s\n", string);
	if (us->pusb_dev->product)
		string = us->pusb_dev->product;
	else if (us->unusual_dev->productName)
		string = us->unusual_dev->productName;
	else
		string = "Unknown";
	seq_printf(m, "      Product: %s\n", string);
	if (us->pusb_dev->serial)
		string = us->pusb_dev->serial;
	else
		string = "None";
	seq_printf(m, "Serial Number: %s\n", string);
	/* show the protocol and transport */
	seq_printf(m, "     Protocol: %s\n", us->protocol_name);
	seq_printf(m, "    Transport: %s\n", us->transport_name);
	/* show the device flags */
	seq_printf(m, "       Quirks:");
	/* expand every known US_FL_* flag and print the set ones by name */
#define US_FLAG(name, value) \
	if (us->fflags & value) seq_printf(m, " " #name);
US_DO_ALL_FLAGS
#undef US_FLAG
	seq_putc(m, '\n');
	return 0;
}
/***********************************************************************
* Sysfs interface
***********************************************************************/
/* Output routine for the sysfs max_sectors file */
static ssize_t max_sectors_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct scsi_device *sdev = to_scsi_device(dev);
	/* report the current per-queue hardware sector limit */
	return sprintf(buf, "%u\n", queue_max_hw_sectors(sdev->request_queue));
}
/* Input routine for the sysfs max_sectors file */
static ssize_t max_sectors_store(struct device *dev, struct device_attribute *attr, const char *buf,
		size_t count)
{
	struct scsi_device *sdev = to_scsi_device(dev);
	unsigned short ms;
	/* accept any string with a leading unsigned short value */
	if (sscanf(buf, "%hu", &ms) > 0) {
		blk_queue_max_hw_sectors(sdev->request_queue, ms);
		return count;
	}
	return -EINVAL;
}
static DEVICE_ATTR_RW(max_sectors);
/* sysfs attributes attached to every usb-storage SCSI device */
static struct attribute *usb_sdev_attrs[] = {
	&dev_attr_max_sectors.attr,
	NULL,
};
ATTRIBUTE_GROUPS(usb_sdev);
/*
 * this defines our host template, with which we'll allocate hosts
 */
static const struct scsi_host_template usb_stor_host_template = {
	/* basic userland interface stuff */
	.name =				"usb-storage",
	.proc_name =			"usb-storage",
	.show_info =			show_info,
	.write_info =			write_info,
	.info =				host_info,
	/* command interface -- queued only */
	.queuecommand =			queuecommand,
	/* error and abort handlers */
	.eh_abort_handler =		command_abort,
	.eh_device_reset_handler =	device_reset,
	.eh_bus_reset_handler =		bus_reset,
	/* queue commands only, only one command per LUN */
	.can_queue =			1,
	/* unknown initiator id */
	.this_id =			-1,
	.slave_alloc =			slave_alloc,
	.slave_configure =		slave_configure,
	.target_alloc =			target_alloc,
	/* lots of sg segments can be handled */
	.sg_tablesize =			SG_MAX_SEGMENTS,
	/*
	 * Limit the total size of a transfer to 120 KB.
	 *
	 * Some devices are known to choke with anything larger. It seems like
	 * the problem stems from the fact that original IDE controllers had
	 * only an 8-bit register to hold the number of sectors in one transfer
	 * and even those couldn't handle a full 256 sectors.
	 *
	 * Because we want to make sure we interoperate with as many devices as
	 * possible, we will maintain a 240 sector transfer size limit for USB
	 * Mass Storage devices.
	 *
	 * Tests show that other operating systems have similar limits with
	 * Microsoft Windows 7 limiting transfers to 128 sectors for both USB2
	 * and USB3 and Apple Mac OS X 10.11 limiting transfers to 256 sectors
	 * for USB2 and 2048 for USB3 devices.
	 */
	.max_sectors =                  240,
	/* emulated HBA */
	.emulated =			1,
	/* we do our own delay after a device or bus reset */
	.skip_settle_delay =		1,
	/* sysfs device attributes */
	.sdev_groups =			usb_sdev_groups,
	/* module management */
	.module =			THIS_MODULE
};
/*
 * Copy the generic usb-storage host template for a sub-driver,
 * overriding only the host/proc name and the owning module.
 */
void usb_stor_host_template_init(struct scsi_host_template *sht,
				 const char *name, struct module *owner)
{
	*sht = usb_stor_host_template;
	sht->name = name;
	sht->proc_name = name;
	sht->module = owner;
}
EXPORT_SYMBOL_GPL(usb_stor_host_template_init);
/* To Report "Illegal Request: Invalid Field in CDB" */
/* Canned fixed-format sense data used whenever a CDB must be rejected. */
unsigned char usb_stor_sense_invalidCDB[18] = {
	[0]	= 0x70,			    /* current error */
	[2]	= ILLEGAL_REQUEST,	    /* Illegal Request = 0x05 */
	[7]	= 0x0a,			    /* additional length */
	[12]	= 0x24			    /* Invalid Field in CDB */
};
EXPORT_SYMBOL_GPL(usb_stor_sense_invalidCDB);
| linux-master | drivers/usb/storage/scsiglue.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* Special Initializers for certain USB Mass Storage devices
*
* Current development and maintenance by:
* (c) 1999, 2000 Matthew Dharm ([email protected])
*
* This driver is based on the 'USB Mass Storage Class' document. This
* describes in detail the protocol used to communicate with such
* devices. Clearly, the designers had SCSI and ATAPI commands in
* mind when they created this document. The commands are all very
* similar to commands in the SCSI-II and ATAPI specifications.
*
* It is important to note that in a number of cases this class
* exhibits class-specific exemptions from the USB specification.
* Notably the usage of NAK, STALL and ACK differs from the norm, in
* that they are used to communicate wait, failed and OK on commands.
*
* Also, for certain devices, the interrupt endpoint is used to convey
* status of a command.
*/
#include <linux/errno.h>
#include "usb.h"
#include "initializers.h"
#include "debug.h"
#include "transport.h"
/*
 * This places the Shuttle/SCM USB<->SCSI bridge devices in multi-target
 * mode
 */
int usb_stor_euscsi_init(struct us_data *us)
{
	int result;
	usb_stor_dbg(us, "Attempting to init eUSCSI bridge...\n");
	result = usb_stor_control_msg(us, us->send_ctrl_pipe,
			0x0C, USB_RECIP_INTERFACE | USB_TYPE_VENDOR,
			0x01, 0x0, NULL, 0x0, 5 * HZ);
	usb_stor_dbg(us, "-- result is %d\n", result);
	/* result is only logged; failure of this init is not treated as fatal */
	return 0;
}
/*
 * This function is required to activate all four slots on the UCR-61S2B
 * flash reader.
 *
 * Sends a vendor-specific Bulk-Only CBW (the "$PCCHIPS" magic) through
 * the bulk-out pipe and reads the CSW back.  Returns 0 on success or
 * -EIO if either bulk transfer fails.
 */
int usb_stor_ucr61s2b_init(struct us_data *us)
{
	struct bulk_cb_wrap *bcb = (struct bulk_cb_wrap*) us->iobuf;
	struct bulk_cs_wrap *bcs = (struct bulk_cs_wrap*) us->iobuf;
	int res;
	unsigned int partial;
	/* vendor magic CDB payload; never modified, so keep it in rodata */
	static const char init_string[] = "\xec\x0a\x06\x00$PCCHIPS";

	usb_stor_dbg(us, "Sending UCR-61S2B initialization packet...\n");

	bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN);
	bcb->Tag = 0;
	bcb->DataTransferLength = cpu_to_le32(0);
	bcb->Flags = bcb->Lun = 0;
	bcb->Length = sizeof(init_string) - 1;	/* CDB length, minus the NUL */
	memset(bcb->CDB, 0, sizeof(bcb->CDB));
	memcpy(bcb->CDB, init_string, sizeof(init_string) - 1);

	res = usb_stor_bulk_transfer_buf(us, us->send_bulk_pipe, bcb,
					 US_BULK_CB_WRAP_LEN, &partial);
	if (res)
		return -EIO;

	usb_stor_dbg(us, "Getting status packet...\n");
	res = usb_stor_bulk_transfer_buf(us, us->recv_bulk_pipe, bcs,
					 US_BULK_CS_WRAP_LEN, &partial);
	if (res)
		return -EIO;

	return 0;
}
/* This places the HUAWEI E220 devices in multi-port mode */
int usb_stor_huawei_e220_init(struct us_data *us)
{
	int result;
	result = usb_stor_control_msg(us, us->send_ctrl_pipe,
				      USB_REQ_SET_FEATURE,
				      USB_TYPE_STANDARD | USB_RECIP_DEVICE,
				      0x01, 0x0, NULL, 0x0, 1 * HZ);
	usb_stor_dbg(us, "Huawei mode set result is %d\n", result);
	/* result is only logged; failure of this init is not treated as fatal */
	return 0;
}
| linux-master | drivers/usb/storage/initializers.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* Support for emulating SAT (ata pass through) on devices based
* on the Cypress USB/ATA bridge supporting ATACB.
*
* Copyright (c) 2008 Matthieu Castet ([email protected])
*/
#include <linux/module.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_eh.h>
#include <linux/ata.h>
#include "usb.h"
#include "protocol.h"
#include "scsiglue.h"
#include "debug.h"
#define DRV_NAME "ums-cypress"
MODULE_DESCRIPTION("SAT support for Cypress USB/ATA bridges with ATACB");
MODULE_AUTHOR("Matthieu Castet <[email protected]>");
MODULE_LICENSE("GPL");
MODULE_IMPORT_NS(USB_STORAGE);
/*
* The table of devices
*/
#define UNUSUAL_DEV(id_vendor, id_product, bcdDeviceMin, bcdDeviceMax, \
vendorName, productName, useProtocol, useTransport, \
initFunction, flags) \
{ USB_DEVICE_VER(id_vendor, id_product, bcdDeviceMin, bcdDeviceMax), \
.driver_info = (flags) }
/* USB id match table, generated from unusual_cypress.h (ids + flags). */
static struct usb_device_id cypress_usb_ids[] = {
#	include "unusual_cypress.h"
	{ }		/* Terminating entry */
};
MODULE_DEVICE_TABLE(usb, cypress_usb_ids);
#undef UNUSUAL_DEV
/*
* The flags table
*/
#define UNUSUAL_DEV(idVendor, idProduct, bcdDeviceMin, bcdDeviceMax, \
vendor_name, product_name, use_protocol, use_transport, \
init_function, Flags) \
{ \
.vendorName = vendor_name, \
.productName = product_name, \
.useProtocol = use_protocol, \
.useTransport = use_transport, \
.initFunction = init_function, \
}
/* Parallel table of per-device names/protocols, same order as the ids. */
static struct us_unusual_dev cypress_unusual_dev_list[] = {
#	include "unusual_cypress.h"
	{ }		/* Terminating entry */
};
/*
 * ATACB is a protocol used on cypress usb<->ata bridge to
 * send raw ATA command over mass storage
 * There is a ATACB2 protocol that support LBA48 on newer chip.
 * More info that be found on cy7c68310_8.pdf and cy7c68300c_8.pdf
 * datasheet from cypress.com.
 */
/*
 * Translate SAT ATA_12/ATA_16 pass-through CDBs into the Cypress vendor
 * ATACB command (signature/subcommand 0x24/0x24) and run them through
 * the transparent SCSI transport.  Non-pass-through commands go through
 * unchanged.  When the caller set the ck_cond bit, the ATA task-file
 * registers are read back and packed into descriptor-format sense data.
 * The original CDB is restored before returning.
 */
static void cypress_atacb_passthrough(struct scsi_cmnd *srb, struct us_data *us)
{
	/* original CDB, saved so it can be restored before returning */
	unsigned char save_cmnd[MAX_COMMAND_SIZE];
	if (likely(srb->cmnd[0] != ATA_16 && srb->cmnd[0] != ATA_12)) {
		usb_stor_transparent_scsi_command(srb, us);
		return;
	}
	memcpy(save_cmnd, srb->cmnd, sizeof(save_cmnd));
	memset(srb->cmnd, 0, MAX_COMMAND_SIZE);
	/* check if we support the command */
	if (save_cmnd[1] >> 5) /* MULTIPLE_COUNT */
		goto invalid_fld;
	/* check protocol */
	switch ((save_cmnd[1] >> 1) & 0xf) {
	case 3: /*no DATA */
	case 4: /* PIO in */
	case 5: /* PIO out */
		break;
	default:
		goto invalid_fld;
	}
	/* first build the ATACB command */
	srb->cmd_len = 16;
	srb->cmnd[0] = 0x24; /*
			      * bVSCBSignature : vendor-specific command
			      * this value can change, but most(all ?) manufacturers
			      * keep the cypress default : 0x24
			      */
	srb->cmnd[1] = 0x24; /* bVSCBSubCommand : 0x24 for ATACB */
	srb->cmnd[3] = 0xff - 1; /*
				  * features, sector count, lba low, lba med
				  * lba high, device, command are valid
				  */
	srb->cmnd[4] = 1; /* TransferBlockCount : 512 */
	if (save_cmnd[0] == ATA_16) {
		srb->cmnd[ 6] = save_cmnd[ 4]; /* features */
		srb->cmnd[ 7] = save_cmnd[ 6]; /* sector count */
		srb->cmnd[ 8] = save_cmnd[ 8]; /* lba low */
		srb->cmnd[ 9] = save_cmnd[10]; /* lba med */
		srb->cmnd[10] = save_cmnd[12]; /* lba high */
		srb->cmnd[11] = save_cmnd[13]; /* device */
		srb->cmnd[12] = save_cmnd[14]; /* command */
		if (save_cmnd[1] & 0x01) {/* extended bit set for LBA48 */
			/* this could be supported by atacb2 */
			if (save_cmnd[3] || save_cmnd[5] || save_cmnd[7] || save_cmnd[9]
					|| save_cmnd[11])
				goto invalid_fld;
		}
	} else { /* ATA12 */
		srb->cmnd[ 6] = save_cmnd[3]; /* features */
		srb->cmnd[ 7] = save_cmnd[4]; /* sector count */
		srb->cmnd[ 8] = save_cmnd[5]; /* lba low */
		srb->cmnd[ 9] = save_cmnd[6]; /* lba med */
		srb->cmnd[10] = save_cmnd[7]; /* lba high */
		srb->cmnd[11] = save_cmnd[8]; /* device */
		srb->cmnd[12] = save_cmnd[9]; /* command */
	}
	/* Filter SET_FEATURES - XFER MODE command */
	if ((srb->cmnd[12] == ATA_CMD_SET_FEATURES)
			&& (srb->cmnd[6] == SETFEATURES_XFER))
		goto invalid_fld;
	if (srb->cmnd[12] == ATA_CMD_ID_ATA || srb->cmnd[12] == ATA_CMD_ID_ATAPI)
		srb->cmnd[2] |= (1<<7); /* set  IdentifyPacketDevice for these cmds */
	usb_stor_transparent_scsi_command(srb, us);
	/* if the device doesn't support ATACB */
	if (srb->result == SAM_STAT_CHECK_CONDITION &&
			memcmp(srb->sense_buffer, usb_stor_sense_invalidCDB,
				sizeof(usb_stor_sense_invalidCDB)) == 0) {
		usb_stor_dbg(us, "cypress atacb not supported ???\n");
		goto end;
	}
	/*
	 * if ck_cond flags is set, and there wasn't critical error,
	 * build the special sense
	 */
	if ((srb->result != (DID_ERROR << 16) &&
			srb->result != (DID_ABORT << 16)) &&
			save_cmnd[2] & 0x20) {
		struct scsi_eh_save ses;
		unsigned char regs[8];
		unsigned char *sb = srb->sense_buffer;
		unsigned char *desc = sb + 8;
		int tmp_result;
		/* build the command for reading the ATA registers */
		scsi_eh_prep_cmnd(srb, &ses, NULL, 0, sizeof(regs));
		/*
		 * we use the same command as before, but we set
		 * the read taskfile bit, for not executing atacb command,
		 * but reading register selected in srb->cmnd[4]
		 */
		srb->cmd_len = 16;
		srb->cmnd[2] = 1;
		usb_stor_transparent_scsi_command(srb, us);
		memcpy(regs, srb->sense_buffer, sizeof(regs));
		tmp_result = srb->result;
		scsi_eh_restore_cmnd(srb, &ses);
		/* we fail to get registers, report invalid command */
		if (tmp_result != SAM_STAT_GOOD)
			goto invalid_fld;
		/* build the sense */
		memset(sb, 0, SCSI_SENSE_BUFFERSIZE);
		/* set sk, asc for a good command */
		sb[1] = RECOVERED_ERROR;
		sb[2] = 0; /* ATA PASS THROUGH INFORMATION AVAILABLE */
		sb[3] = 0x1D;
		/*
		 * XXX we should generate sk, asc, ascq from status and error
		 * regs
		 * (see 11.1 Error translation ATA device error to SCSI error
		 * map, and ata_to_sense_error from libata.)
		 */
		/* Sense data is current and format is descriptor. */
		sb[0] = 0x72;
		desc[0] = 0x09; /* ATA_RETURN_DESCRIPTOR */
		/* set length of additional sense data */
		sb[7] = 14;
		desc[1] = 12;
		/* Copy registers into sense buffer. */
		desc[ 2] = 0x00;
		desc[ 3] = regs[1];  /* features */
		desc[ 5] = regs[2];  /* sector count */
		desc[ 7] = regs[3];  /* lba low */
		desc[ 9] = regs[4];  /* lba med */
		desc[11] = regs[5];  /* lba high */
		desc[12] = regs[6];  /* device */
		desc[13] = regs[7];  /* command */
		srb->result = SAM_STAT_CHECK_CONDITION;
	}
	goto end;
invalid_fld:
	/* reject the CDB with "Illegal Request: Invalid Field in CDB" */
	srb->result = SAM_STAT_CHECK_CONDITION;
	memcpy(srb->sense_buffer,
			usb_stor_sense_invalidCDB,
			sizeof(usb_stor_sense_invalidCDB));
end:
	memcpy(srb->cmnd, save_cmnd, sizeof(save_cmnd));
	if (srb->cmnd[0] == ATA_12)
		srb->cmd_len = 12;
}
static struct scsi_host_template cypress_host_template;
/*
 * Probe callback: bind the generic usb-storage core to this interface and
 * select the protocol handler (ATACB passthrough vs. plain transparent SCSI).
 */
static int cypress_probe(struct usb_interface *intf,
			 const struct usb_device_id *id)
{
	struct us_data *us;
	struct usb_device *device;
	int result;

	result = usb_stor_probe1(&us, intf, id,
			(id - cypress_usb_ids) + cypress_unusual_dev_list,
			&cypress_host_template);
	if (result)
		return result;

	/*
	 * Among CY7C68300 chips, the A revision does not support Cypress
	 * ATACB.  That revision is recognised by the EEPROM default string
	 * descriptor indices below.
	 */
	device = interface_to_usbdev(intf);
	if (device->descriptor.iManufacturer == 0x38 &&
	    device->descriptor.iProduct == 0x4e &&
	    device->descriptor.iSerialNumber == 0x64) {
		/* Revision A: fall back to plain transparent SCSI */
		us->protocol_name = "Transparent SCSI";
		us->proto_handler = usb_stor_transparent_scsi_command;
	} else {
		us->protocol_name = "Transparent SCSI with Cypress ATACB";
		us->proto_handler = cypress_atacb_passthrough;
	}

	return usb_stor_probe2(us);
}
/*
 * USB driver glue: all PM and reset callbacks are the stock usb-storage
 * handlers; only probe is driver-specific (protocol selection above).
 */
static struct usb_driver cypress_driver = {
	.name = DRV_NAME,
	.probe = cypress_probe,
	.disconnect = usb_stor_disconnect,
	.suspend = usb_stor_suspend,
	.resume = usb_stor_resume,
	.reset_resume = usb_stor_reset_resume,
	.pre_reset = usb_stor_pre_reset,
	.post_reset = usb_stor_post_reset,
	.id_table = cypress_usb_ids,
	.soft_unbind = 1,
	.no_dynamic_id = 1,
};
module_usb_stor_driver(cypress_driver, cypress_host_template, DRV_NAME);
| linux-master | drivers/usb/storage/cypress_atacb.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* Driver for Realtek RTS51xx USB card reader
*
* Copyright(c) 2009 Realtek Semiconductor Corp. All rights reserved.
*
* Author:
* wwang ([email protected])
* No. 450, Shenhu Road, Suzhou Industry Park, Suzhou, China
*/
#include <linux/module.h>
#include <linux/blkdev.h>
#include <linux/kthread.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <linux/cdrom.h>
#include <linux/usb.h>
#include <linux/slab.h>
#include <linux/usb_usual.h>
#include "usb.h"
#include "transport.h"
#include "protocol.h"
#include "debug.h"
#include "scsiglue.h"
#define DRV_NAME "ums-realtek"
MODULE_DESCRIPTION("Driver for Realtek USB Card Reader");
MODULE_AUTHOR("wwang <[email protected]>");
MODULE_LICENSE("GPL");
MODULE_IMPORT_NS(USB_STORAGE);
static int auto_delink_en = 1;
module_param(auto_delink_en, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(auto_delink_en, "auto delink mode (0=firmware, 1=software [default])");
#ifdef CONFIG_REALTEK_AUTOPM
static int ss_en = 1;
module_param(ss_en, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(ss_en, "enable selective suspend");
static int ss_delay = 50;
module_param(ss_delay, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(ss_delay,
"seconds to delay before entering selective suspend");
/* Power-management state machine driven by the suspend timer. */
enum RTS51X_STAT {
	RTS51X_STAT_INIT,	/* set during autosuspend setup */
	RTS51X_STAT_IDLE,	/* only housekeeping commands observed */
	RTS51X_STAT_RUN,	/* real SCSI traffic in flight */
	RTS51X_STAT_SS		/* selective suspend entered */
};
#define POLLING_INTERVAL 50
#define rts51x_set_stat(chip, stat) \
((chip)->state = (enum RTS51X_STAT)(stat))
#define rts51x_get_stat(chip) ((chip)->state)
#define SET_LUN_READY(chip, lun) ((chip)->lun_ready |= ((u8)1 << (lun)))
#define CLR_LUN_READY(chip, lun) ((chip)->lun_ready &= ~((u8)1 << (lun)))
#define TST_LUN_READY(chip, lun) ((chip)->lun_ready & ((u8)1 << (lun)))
#endif
/*
 * Per-LUN status block parsed from the vendor "read status" command
 * (0xF0 0x09); see rts51x_read_status()/rts51x_check_status() for the
 * byte layout.  The response is 13 or 16 bytes; the last three fields
 * are only filled in when the device returns the full 16 bytes.
 */
struct rts51x_status {
	u16 vid;		/* vendor ID reported by the reader */
	u16 pid;		/* product ID reported by the reader */
	u8 cur_lun;
	u8 card_type;
	u8 total_lun;
	u16 fw_ver;		/* firmware version, e.g. 0x5888 */
	u8 phy_exist;
	u8 multi_flag;
	u8 multi_card;
	u8 log_exist;
	union {
		u8 detailed_type1;	/* valid when status_len == 16 */
		u8 detailed_type2;
	} detailed_type;
	u8 function[2];		/* capability bits, see SUPPORT_* macros */
};
/* Per-device driver state, stored in us->extra. */
struct rts51x_chip {
	u16 vendor_id;
	u16 product_id;
	char max_lun;			/* highest LUN, from GET_MAX_LUN */
	struct rts51x_status *status;	/* array of max_lun + 1 entries */
	int status_len;			/* 13 or 16, actual bytes received */
	u32 flag;			/* FLIDX_* bits, see below */
	struct us_data *us;		/* back-pointer to the core state */
#ifdef CONFIG_REALTEK_AUTOPM
	struct timer_list rts51x_suspend_timer;	/* drives the SS state machine */
	unsigned long timer_expires;	/* jiffies of next timer firing */
	int pwr_state;			/* US_RESUME / US_SUSPEND */
	u8 lun_ready;			/* bitmask, one bit per LUN with media */
	enum RTS51X_STAT state;
	int support_auto_delink;
#endif
	/* used to back up the protocol chosen in probe1 phase */
	proto_cmnd proto_handler_backup;
};
/* flag definition */
#define FLIDX_AUTO_DELINK 0x01
#define SCSI_LUN(srb) ((srb)->device->lun)
/* Bit Operation */
#define SET_BIT(data, idx) ((data) |= 1 << (idx))
#define CLR_BIT(data, idx) ((data) &= ~(1 << (idx)))
#define CHK_BIT(data, idx) ((data) & (1 << (idx)))
#define SET_AUTO_DELINK(chip) ((chip)->flag |= FLIDX_AUTO_DELINK)
#define CLR_AUTO_DELINK(chip) ((chip)->flag &= ~FLIDX_AUTO_DELINK)
#define CHK_AUTO_DELINK(chip) ((chip)->flag & FLIDX_AUTO_DELINK)
#define RTS51X_GET_VID(chip) ((chip)->vendor_id)
#define RTS51X_GET_PID(chip) ((chip)->product_id)
#define VENDOR_ID(chip) ((chip)->status[0].vid)
#define PRODUCT_ID(chip) ((chip)->status[0].pid)
#define FW_VERSION(chip) ((chip)->status[0].fw_ver)
#define STATUS_LEN(chip) ((chip)->status_len)
#define STATUS_SUCCESS 0
#define STATUS_FAIL 1
/* Check card reader function */
#define SUPPORT_DETAILED_TYPE1(chip) \
CHK_BIT((chip)->status[0].function[0], 1)
#define SUPPORT_OT(chip) \
CHK_BIT((chip)->status[0].function[0], 2)
#define SUPPORT_OC(chip) \
CHK_BIT((chip)->status[0].function[0], 3)
#define SUPPORT_AUTO_DELINK(chip) \
CHK_BIT((chip)->status[0].function[0], 4)
#define SUPPORT_SDIO(chip) \
CHK_BIT((chip)->status[0].function[1], 0)
#define SUPPORT_DETAILED_TYPE2(chip) \
CHK_BIT((chip)->status[0].function[1], 1)
#define CHECK_PID(chip, pid) (RTS51X_GET_PID(chip) == (pid))
#define CHECK_FW_VER(chip, fw_ver) (FW_VERSION(chip) == (fw_ver))
#define CHECK_ID(chip, pid, fw_ver) \
(CHECK_PID((chip), (pid)) && CHECK_FW_VER((chip), (fw_ver)))
static int init_realtek_cr(struct us_data *us);
/*
* The table of devices
*/
#define UNUSUAL_DEV(id_vendor, id_product, bcdDeviceMin, bcdDeviceMax, \
vendorName, productName, useProtocol, useTransport, \
initFunction, flags) \
{\
USB_DEVICE_VER(id_vendor, id_product, bcdDeviceMin, bcdDeviceMax), \
.driver_info = (flags) \
}
static const struct usb_device_id realtek_cr_ids[] = {
# include "unusual_realtek.h"
{} /* Terminating entry */
};
MODULE_DEVICE_TABLE(usb, realtek_cr_ids);
#undef UNUSUAL_DEV
/*
* The flags table
*/
#define UNUSUAL_DEV(idVendor, idProduct, bcdDeviceMin, bcdDeviceMax, \
vendor_name, product_name, use_protocol, use_transport, \
init_function, Flags) \
{ \
.vendorName = vendor_name, \
.productName = product_name, \
.useProtocol = use_protocol, \
.useTransport = use_transport, \
.initFunction = init_function, \
}
static struct us_unusual_dev realtek_cr_unusual_dev_list[] = {
# include "unusual_realtek.h"
{} /* Terminating entry */
};
#undef UNUSUAL_DEV
/*
 * Run one vendor command over Bulk-Only Transport: send the CBW, move the
 * optional data stage, then read and validate the CSW.
 *
 * @lun:     logical unit number placed in the CBW
 * @cmd:     CDB bytes (@cmd_len of them) copied into the CBW
 * @buf:     data-stage buffer; may be NULL when @buf_len is 0
 * @dir:     DMA_FROM_DEVICE selects the IN pipe, anything else OUT
 * @act_len: optional out-parameter; receives @buf_len minus the residue
 *           reported in the CSW (clamped to @buf_len)
 *
 * Returns USB_STOR_TRANSPORT_GOOD, _FAILED, or _ERROR.
 */
static int rts51x_bulk_transport(struct us_data *us, u8 lun,
				 u8 *cmd, int cmd_len, u8 *buf, int buf_len,
				 enum dma_data_direction dir, int *act_len)
{
	/* bcb and bcs alias us->iobuf: the CBW is overwritten once the CSW lands */
	struct bulk_cb_wrap *bcb = (struct bulk_cb_wrap *)us->iobuf;
	struct bulk_cs_wrap *bcs = (struct bulk_cs_wrap *)us->iobuf;
	int result;
	unsigned int residue;
	unsigned int cswlen;
	unsigned int cbwlen = US_BULK_CB_WRAP_LEN;
	/* set up the command wrapper */
	bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN);
	bcb->DataTransferLength = cpu_to_le32(buf_len);
	bcb->Flags = (dir == DMA_FROM_DEVICE) ? US_BULK_FLAG_IN : 0;
	/*
	 * NOTE(review): Tag and (below) Residue are __le32 wire fields but are
	 * assigned/compared without byte swapping; correct only on
	 * little-endian hosts — confirm whether BE support matters here.
	 */
	bcb->Tag = ++us->tag;
	bcb->Lun = lun;
	bcb->Length = cmd_len;
	/* copy the command payload */
	memset(bcb->CDB, 0, sizeof(bcb->CDB));
	memcpy(bcb->CDB, cmd, bcb->Length);
	/* send it to out endpoint */
	result = usb_stor_bulk_transfer_buf(us, us->send_bulk_pipe,
					    bcb, cbwlen, NULL);
	if (result != USB_STOR_XFER_GOOD)
		return USB_STOR_TRANSPORT_ERROR;
	/* DATA STAGE */
	/* send/receive data payload, if there is any */
	if (buf && buf_len) {
		unsigned int pipe = (dir == DMA_FROM_DEVICE) ?
		    us->recv_bulk_pipe : us->send_bulk_pipe;
		result = usb_stor_bulk_transfer_buf(us, pipe,
						    buf, buf_len, NULL);
		if (result == USB_STOR_XFER_ERROR)
			return USB_STOR_TRANSPORT_ERROR;
	}
	/* get CSW for device status */
	result = usb_stor_bulk_transfer_buf(us, us->recv_bulk_pipe,
					    bcs, US_BULK_CS_WRAP_LEN, &cswlen);
	if (result != USB_STOR_XFER_GOOD)
		return USB_STOR_TRANSPORT_ERROR;
	/* check bulk status */
	if (bcs->Signature != cpu_to_le32(US_BULK_CS_SIGN)) {
		usb_stor_dbg(us, "Signature mismatch: got %08X, expecting %08X\n",
			     le32_to_cpu(bcs->Signature), US_BULK_CS_SIGN);
		return USB_STOR_TRANSPORT_ERROR;
	}
	residue = bcs->Residue;
	if (bcs->Tag != us->tag)
		return USB_STOR_TRANSPORT_ERROR;
	/*
	 * try to compute the actual residue, based on how much data
	 * was really transferred and what the device tells us
	 */
	if (residue)
		residue = residue < buf_len ? residue : buf_len;
	if (act_len)
		*act_len = buf_len - residue;
	/* based on the status code, we report good or bad */
	switch (bcs->Status) {
	case US_BULK_STAT_OK:
		/* command good -- note that data could be short */
		return USB_STOR_TRANSPORT_GOOD;
	case US_BULK_STAT_FAIL:
		/* command failed */
		return USB_STOR_TRANSPORT_FAILED;
	case US_BULK_STAT_PHASE:
		/*
		 * phase error -- note that a transport reset will be
		 * invoked by the invoke_transport() function
		 */
		return USB_STOR_TRANSPORT_ERROR;
	}
	/* we should never get here, but if we do, we're in trouble */
	return USB_STOR_TRANSPORT_ERROR;
}
/*
 * Variant of rts51x_bulk_transport() used for commands that may make the
 * device drop off the bus (auto-delink register writes): the CSW is read
 * with a raw usb_bulk_msg() and a short 250 ms timeout, and its contents
 * (signature/tag/status/residue) are NOT validated.  Returns the raw
 * usb_bulk_msg() result: 0 on success, a negative errno otherwise —
 * which also happens to equal USB_STOR_TRANSPORT_GOOD on success, as
 * callers rely on.
 */
static int rts51x_bulk_transport_special(struct us_data *us, u8 lun,
				u8 *cmd, int cmd_len, u8 *buf, int buf_len,
				enum dma_data_direction dir, int *act_len)
{
	struct bulk_cb_wrap *bcb = (struct bulk_cb_wrap *) us->iobuf;
	struct bulk_cs_wrap *bcs = (struct bulk_cs_wrap *) us->iobuf;
	int result;
	unsigned int cswlen;
	unsigned int cbwlen = US_BULK_CB_WRAP_LEN;
	/* set up the command wrapper */
	bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN);
	bcb->DataTransferLength = cpu_to_le32(buf_len);
	bcb->Flags = (dir == DMA_FROM_DEVICE) ? US_BULK_FLAG_IN : 0;
	bcb->Tag = ++us->tag;
	bcb->Lun = lun;
	bcb->Length = cmd_len;
	/* copy the command payload */
	memset(bcb->CDB, 0, sizeof(bcb->CDB));
	memcpy(bcb->CDB, cmd, bcb->Length);
	/* send it to out endpoint */
	result = usb_stor_bulk_transfer_buf(us, us->send_bulk_pipe,
					    bcb, cbwlen, NULL);
	if (result != USB_STOR_XFER_GOOD)
		return USB_STOR_TRANSPORT_ERROR;
	/* DATA STAGE */
	/* send/receive data payload, if there is any */
	if (buf && buf_len) {
		unsigned int pipe = (dir == DMA_FROM_DEVICE) ?
		    us->recv_bulk_pipe : us->send_bulk_pipe;
		result = usb_stor_bulk_transfer_buf(us, pipe,
						    buf, buf_len, NULL);
		if (result == USB_STOR_XFER_ERROR)
			return USB_STOR_TRANSPORT_ERROR;
	}
	/* get CSW for device status; short timeout, result returned raw */
	result = usb_bulk_msg(us->pusb_dev, us->recv_bulk_pipe, bcs,
			      US_BULK_CS_WRAP_LEN, &cswlen, 250);
	return result;
}
/*
 * Query the device for the highest LUN it supports via the class-specific
 * GET_MAX_LUN control request.  Returns 0 when the request fails, so the
 * device is treated as single-LUN in that case.
 */
static int rts51x_get_max_lun(struct us_data *us)
{
	int status;

	/* issue the command */
	us->iobuf[0] = 0;
	status = usb_stor_control_msg(us, us->recv_ctrl_pipe,
				      US_BULK_GET_MAX_LUN,
				      USB_DIR_IN | USB_TYPE_CLASS |
				      USB_RECIP_INTERFACE,
				      0, us->ifnum, us->iobuf, 1, 10 * HZ);

	usb_stor_dbg(us, "GetMaxLUN command result is %d, data is %d\n",
		     status, us->iobuf[0]);

	/* anything other than a successful 1-byte transfer means LUN 0 only */
	if (status <= 0)
		return 0;

	return us->iobuf[0];
}
/*
 * Read @len bytes of device-internal memory/registers at @addr into @data
 * using the vendor read-memory command (0xF0 0x0D).  A bounce buffer is
 * used because @data may not be DMA-able.  Returns 0 on success,
 * -ENOMEM/-EIO on failure.
 *
 * NOTE(review): act_len is not requested, so a short transfer would copy
 * uninitialized bounce-buffer bytes into @data — confirm the device always
 * returns the full length on TRANSPORT_GOOD.
 */
static int rts51x_read_mem(struct us_data *us, u16 addr, u8 *data, u16 len)
{
	int retval;
	u8 cmnd[12] = { 0 };
	u8 *buf;
	buf = kmalloc(len, GFP_NOIO);
	if (buf == NULL)
		return -ENOMEM;
	usb_stor_dbg(us, "addr = 0x%x, len = %d\n", addr, len);
	/* vendor CDB: 0xF0 0x0D, big-endian address, big-endian length */
	cmnd[0] = 0xF0;
	cmnd[1] = 0x0D;
	cmnd[2] = (u8) (addr >> 8);
	cmnd[3] = (u8) addr;
	cmnd[4] = (u8) (len >> 8);
	cmnd[5] = (u8) len;
	retval = rts51x_bulk_transport(us, 0, cmnd, 12,
				       buf, len, DMA_FROM_DEVICE, NULL);
	if (retval != USB_STOR_TRANSPORT_GOOD) {
		kfree(buf);
		return -EIO;
	}
	memcpy(data, buf, len);
	kfree(buf);
	return 0;
}
/*
 * Write @len bytes from @data to device-internal memory/registers at @addr
 * using the vendor write-memory command (0xF0 0x0E).  A bounce buffer is
 * used because @data may not be DMA-able.  Returns 0 on success,
 * -ENOMEM/-EIO on failure.
 *
 * Bug fix: the allocation-failure path used to return
 * USB_STOR_TRANSPORT_ERROR, which is a positive value; every caller
 * (enable_oscillator(), do_config_autodelink(), the config_autodelink_*
 * helpers) only checks "retval < 0", so an OOM was silently treated as
 * success.  Return -ENOMEM instead, matching rts51x_read_mem().
 */
static int rts51x_write_mem(struct us_data *us, u16 addr, u8 *data, u16 len)
{
	int retval;
	u8 cmnd[12] = { 0 };
	u8 *buf;
	buf = kmemdup(data, len, GFP_NOIO);
	if (buf == NULL)
		return -ENOMEM;
	usb_stor_dbg(us, "addr = 0x%x, len = %d\n", addr, len);
	/* vendor CDB: 0xF0 0x0E, big-endian address, big-endian length */
	cmnd[0] = 0xF0;
	cmnd[1] = 0x0E;
	cmnd[2] = (u8) (addr >> 8);
	cmnd[3] = (u8) addr;
	cmnd[4] = (u8) (len >> 8);
	cmnd[5] = (u8) len;
	retval = rts51x_bulk_transport(us, 0, cmnd, 12,
				       buf, len, DMA_TO_DEVICE, NULL);
	kfree(buf);
	if (retval != USB_STOR_TRANSPORT_GOOD)
		return -EIO;
	return 0;
}
/*
 * Fetch the status block for @lun via vendor command 0xF0 0x09 into
 * @status (@len bytes requested); *@actlen receives the byte count the
 * device actually returned (13 or 16).  Returns 0 on success, -EIO on
 * transport failure, and USB_STOR_TRANSPORT_ERROR (positive) when the
 * bounce-buffer allocation fails — callers compare against STATUS_SUCCESS
 * (0), so both failure values are caught.
 */
static int rts51x_read_status(struct us_data *us,
			      u8 lun, u8 *status, int len, int *actlen)
{
	int retval;
	u8 cmnd[12] = { 0 };
	u8 *buf;
	buf = kmalloc(len, GFP_NOIO);
	if (buf == NULL)
		return USB_STOR_TRANSPORT_ERROR;
	usb_stor_dbg(us, "lun = %d\n", lun);
	cmnd[0] = 0xF0;
	cmnd[1] = 0x09;
	retval = rts51x_bulk_transport(us, lun, cmnd, 12,
				       buf, len, DMA_FROM_DEVICE, actlen);
	if (retval != USB_STOR_TRANSPORT_GOOD) {
		kfree(buf);
		return -EIO;
	}
	/* copy even the bytes past *actlen; callers gate on status_len */
	memcpy(status, buf, len);
	kfree(buf);
	return 0;
}
/*
 * Refresh chip->status[lun] from the device.  Parses the 13-byte common
 * part of the status block unconditionally and the trailing three bytes
 * only when the device returned all 16.
 *
 * NOTE(review): @lun indexes chip->status without a bounds check against
 * chip->max_lun, and when status_len < 16 the tail of buf[] is copied
 * from uninitialized bounce-buffer memory in rts51x_read_status() (only
 * parsed when status_len == 16, so harmless today) — confirm.
 */
static int rts51x_check_status(struct us_data *us, u8 lun)
{
	struct rts51x_chip *chip = (struct rts51x_chip *)(us->extra);
	int retval;
	u8 buf[16];
	retval = rts51x_read_status(us, lun, buf, 16, &(chip->status_len));
	if (retval != STATUS_SUCCESS)
		return -EIO;
	usb_stor_dbg(us, "chip->status_len = %d\n", chip->status_len);
	/* bytes 0-12: common part of the status block (big-endian u16s) */
	chip->status[lun].vid = ((u16) buf[0] << 8) | buf[1];
	chip->status[lun].pid = ((u16) buf[2] << 8) | buf[3];
	chip->status[lun].cur_lun = buf[4];
	chip->status[lun].card_type = buf[5];
	chip->status[lun].total_lun = buf[6];
	chip->status[lun].fw_ver = ((u16) buf[7] << 8) | buf[8];
	chip->status[lun].phy_exist = buf[9];
	chip->status[lun].multi_flag = buf[10];
	chip->status[lun].multi_card = buf[11];
	chip->status[lun].log_exist = buf[12];
	/* bytes 13-15: only present in the extended 16-byte response */
	if (chip->status_len == 16) {
		chip->status[lun].detailed_type.detailed_type1 = buf[13];
		chip->status[lun].function[0] = buf[14];
		chip->status[lun].function[1] = buf[15];
	}
	return 0;
}
/*
 * Set bit 2 of register 0xFE77 (oscillator enable) and read it back to
 * verify the bit actually stuck.  Returns 0 on success, -EIO on any
 * transport failure or if the read-back shows the bit still clear.
 */
static int enable_oscillator(struct us_data *us)
{
	u8 reg;

	if (rts51x_read_mem(us, 0xFE77, &reg, 1) < 0)
		return -EIO;

	reg |= 0x04;
	if (rts51x_write_mem(us, 0xFE77, &reg, 1) < 0)
		return -EIO;

	/* read back and confirm the oscillator bit is now set */
	if (rts51x_read_mem(us, 0xFE77, &reg, 1) < 0)
		return -EIO;

	if (!(reg & 0x04))
		return -EIO;

	return 0;
}
/*
 * Write @len bytes to the auto-delink register 0xFE47 using the special
 * bulk path (short CSW timeout, no CSW validation), since this write can
 * make the device delink from the bus before it answers.  Returns 0 on
 * success, -EIO on transport failure, USB_STOR_TRANSPORT_ERROR on OOM.
 */
static int __do_config_autodelink(struct us_data *us, u8 *data, u16 len)
{
	u8 cdb[12] = {0};
	u8 *bounce;
	int status;

	usb_stor_dbg(us, "addr = 0xfe47, len = %d\n", len);

	bounce = kmemdup(data, len, GFP_NOIO);
	if (!bounce)
		return USB_STOR_TRANSPORT_ERROR;

	/* vendor write-memory command, hard-wired to address 0xFE47 */
	cdb[0] = 0xF0;
	cdb[1] = 0x0E;
	cdb[2] = 0xfe;
	cdb[3] = 0x47;
	cdb[4] = (u8)(len >> 8);
	cdb[5] = (u8)len;

	status = rts51x_bulk_transport_special(us, 0, cdb, 12, bounce, len,
					       DMA_TO_DEVICE, NULL);
	kfree(bounce);

	return (status == USB_STOR_TRANSPORT_GOOD) ? 0 : -EIO;
}
/*
 * Read-modify-write the auto-delink bits (bits 0-1) of register 0xFE47:
 * @enable turns auto-delink on (bit 0, plus bit 1 when @force), otherwise
 * both bits are cleared.  Returns 0 on success, -EIO on failure.
 */
static int do_config_autodelink(struct us_data *us, int enable, int force)
{
	u8 reg;

	if (rts51x_read_mem(us, 0xFE47, &reg, 1) < 0)
		return -EIO;

	if (enable)
		reg |= force ? 0x03 : 0x01;
	else
		reg &= ~0x03;

	usb_stor_dbg(us, "set 0xfe47 to 0x%x\n", reg);

	/* must go through the special path, not rts51x_write_mem() */
	if (__do_config_autodelink(us, &reg, 1) < 0)
		return -EIO;

	return 0;
}
/*
 * Post-power-on auto-delink setup.  When the module parameter
 * auto_delink_en is set, delink is driven by software (clear bits 0-1,
 * set bit 7, enable the oscillator, then arm delink); otherwise the
 * firmware handles it, with per-chip-ID register quirks.  No-op unless
 * the chip was flagged auto-delink capable in init_realtek_cr().
 */
static int config_autodelink_after_power_on(struct us_data *us)
{
	struct rts51x_chip *chip = (struct rts51x_chip *)(us->extra);
	int retval;
	u8 value;
	if (!CHK_AUTO_DELINK(chip))
		return 0;
	retval = rts51x_read_mem(us, 0xFE47, &value, 1);
	if (retval < 0)
		return -EIO;
	if (auto_delink_en) {
		/* software-controlled delink: start with delink disarmed */
		CLR_BIT(value, 0);
		CLR_BIT(value, 1);
		SET_BIT(value, 2);
		if (CHECK_ID(chip, 0x0138, 0x3882))
			CLR_BIT(value, 2);	/* chip quirk: bit 2 must stay clear */
		SET_BIT(value, 7);
		/* retval = rts51x_write_mem(us, 0xFE47, &value, 1); */
		retval = __do_config_autodelink(us, &value, 1);
		if (retval < 0)
			return -EIO;
		retval = enable_oscillator(us);
		if (retval == 0)
			(void)do_config_autodelink(us, 1, 0);
	} else {
		/* Autodelink controlled by firmware */
		SET_BIT(value, 2);
		if (CHECK_ID(chip, 0x0138, 0x3882))
			CLR_BIT(value, 2);
		if (CHECK_ID(chip, 0x0159, 0x5889) ||
		    CHECK_ID(chip, 0x0138, 0x3880)) {
			CLR_BIT(value, 0);
			CLR_BIT(value, 7);
		}
		/* retval = rts51x_write_mem(us, 0xFE47, &value, 1); */
		retval = __do_config_autodelink(us, &value, 1);
		if (retval < 0)
			return -EIO;
		if (CHECK_ID(chip, 0x0159, 0x5888)) {
			/* extra register pokes required by fw 0x5888 */
			value = 0xFF;
			retval = rts51x_write_mem(us, 0xFE79, &value, 1);
			if (retval < 0)
				return -EIO;
			value = 0x01;
			retval = rts51x_write_mem(us, 0x48, &value, 1);
			if (retval < 0)
				return -EIO;
		}
	}
	return 0;
}
#ifdef CONFIG_PM
/*
 * Pre-suspend auto-delink setup, mirroring
 * config_autodelink_after_power_on(): in software mode the wake/delink
 * bits are armed; in firmware mode only specific chip IDs need register
 * adjustments.  No-op unless the chip supports auto-delink.
 */
static int config_autodelink_before_power_down(struct us_data *us)
{
	struct rts51x_chip *chip = (struct rts51x_chip *)(us->extra);
	int retval;
	u8 value;
	if (!CHK_AUTO_DELINK(chip))
		return 0;
	if (auto_delink_en) {
		retval = rts51x_read_mem(us, 0xFE77, &value, 1);
		if (retval < 0)
			return -EIO;
		SET_BIT(value, 2);
		retval = rts51x_write_mem(us, 0xFE77, &value, 1);
		if (retval < 0)
			return -EIO;
		if (CHECK_ID(chip, 0x0159, 0x5888)) {
			value = 0x01;
			retval = rts51x_write_mem(us, 0x48, &value, 1);
			if (retval < 0)
				return -EIO;
		}
		retval = rts51x_read_mem(us, 0xFE47, &value, 1);
		if (retval < 0)
			return -EIO;
		SET_BIT(value, 0);
		if (CHECK_ID(chip, 0x0138, 0x3882))
			SET_BIT(value, 2);
		/*
		 * NOTE(review): the value just read and modified came from
		 * register 0xFE47 but is written to 0xFE77 here — looks like
		 * a register mismatch; confirm against vendor documentation
		 * before changing, since shipping firmware may depend on it.
		 */
		retval = rts51x_write_mem(us, 0xFE77, &value, 1);
		if (retval < 0)
			return -EIO;
	} else {
		if (CHECK_ID(chip, 0x0159, 0x5889) ||
		    CHECK_ID(chip, 0x0138, 0x3880) ||
		    CHECK_ID(chip, 0x0138, 0x3882)) {
			retval = rts51x_read_mem(us, 0xFE47, &value, 1);
			if (retval < 0)
				return -EIO;
			if (CHECK_ID(chip, 0x0159, 0x5889) ||
			    CHECK_ID(chip, 0x0138, 0x3880)) {
				SET_BIT(value, 0);
				SET_BIT(value, 7);
			}
			if (CHECK_ID(chip, 0x0138, 0x3882))
				SET_BIT(value, 2);
			/* retval = rts51x_write_mem(us, 0xFE47, &value, 1); */
			retval = __do_config_autodelink(us, &value, 1);
			if (retval < 0)
				return -EIO;
		}
		if (CHECK_ID(chip, 0x0159, 0x5888)) {
			value = 0x01;
			retval = rts51x_write_mem(us, 0x48, &value, 1);
			if (retval < 0)
				return -EIO;
		}
	}
	return 0;
}
/*
 * Workaround for readers with PID 0x0158 / firmware 0x5895: if the low
 * five bits of register 0xFD6F read back as zero, write 0x1F to 0xFD70.
 * Best-effort: failures are only logged, never propagated.
 */
static void fw5895_init(struct us_data *us)
{
	struct rts51x_chip *chip = (struct rts51x_chip *)(us->extra);
	int retval;
	u8 val;

	/* only the 0x0158 / fw 0x5895 combination needs this fixup */
	if ((PRODUCT_ID(chip) != 0x0158) || (FW_VERSION(chip) != 0x5895)) {
		usb_stor_dbg(us, "Not the specified device, return immediately!\n");
		return;
	}

	retval = rts51x_read_mem(us, 0xFD6F, &val, 1);
	if (retval == STATUS_SUCCESS && (val & 0x1F) == 0) {
		val = 0x1F;
		retval = rts51x_write_mem(us, 0xFD70, &val, 1);
		if (retval != STATUS_SUCCESS)
			usb_stor_dbg(us, "Write memory fail\n");
	} else {
		usb_stor_dbg(us, "Read memory fail, OR (val & 0x1F) != 0\n");
	}
}
#endif
#ifdef CONFIG_REALTEK_AUTOPM
/*
 * Companion fixup to fw5895_init() for PID 0x0158 / firmware 0x5895:
 * when register 0xFD6F shows bits 0x24 set (commented in the original as
 * "SD Exist and SD WP"), propagate bit 2 of register 0xD04E into 0xFD70.
 * Best-effort: failures are only logged.
 */
static void fw5895_set_mmc_wp(struct us_data *us)
{
	struct rts51x_chip *chip = (struct rts51x_chip *)(us->extra);
	int retval;
	u8 buf[13];
	if ((PRODUCT_ID(chip) != 0x0158) || (FW_VERSION(chip) != 0x5895)) {
		usb_stor_dbg(us, "Not the specified device, return immediately!\n");
	} else {
		retval = rts51x_read_mem(us, 0xFD6F, buf, 1);
		if (retval == STATUS_SUCCESS && (buf[0] & 0x24) == 0x24) {
			/* SD Exist and SD WP */
			retval = rts51x_read_mem(us, 0xD04E, buf, 1);
			if (retval == STATUS_SUCCESS) {
				buf[0] |= 0x04;
				retval = rts51x_write_mem(us, 0xFD70, buf, 1);
				if (retval != STATUS_SUCCESS)
					usb_stor_dbg(us, "Write memory fail\n");
			} else {
				usb_stor_dbg(us, "Read memory fail\n");
			}
		} else {
			usb_stor_dbg(us, "Read memory fail, OR (buf[0]&0x24)!=0x24\n");
		}
	}
}
/* Re-arm the suspend timer for ss_delay seconds from now. */
static void rts51x_modi_suspend_timer(struct rts51x_chip *chip)
{
	struct us_data *us = chip->us;
	usb_stor_dbg(us, "state:%d\n", rts51x_get_stat(chip));
	chip->timer_expires = jiffies + msecs_to_jiffies(1000*ss_delay);
	mod_timer(&chip->rts51x_suspend_timer, chip->timer_expires);
}
/*
 * Suspend-timer callback.  While the device is INIT/RUN, keep re-arming;
 * once it has gone IDLE (or is already in SS), drop our autopm reference
 * so runtime PM can actually suspend the interface.
 */
static void rts51x_suspend_timer_fn(struct timer_list *t)
{
	struct rts51x_chip *chip = from_timer(chip, t, rts51x_suspend_timer);
	struct us_data *us = chip->us;
	switch (rts51x_get_stat(chip)) {
	case RTS51X_STAT_INIT:
	case RTS51X_STAT_RUN:
		/* still active: check again after another ss_delay */
		rts51x_modi_suspend_timer(chip);
		break;
	case RTS51X_STAT_IDLE:
	case RTS51X_STAT_SS:
		usb_stor_dbg(us, "RTS51X_STAT_SS, power.usage:%d\n",
			     atomic_read(&us->pusb_intf->dev.power.usage_count));
		/* only release the reference we still hold (usage > 0) */
		if (atomic_read(&us->pusb_intf->dev.power.usage_count) > 0) {
			usb_stor_dbg(us, "Ready to enter SS state\n");
			rts51x_set_stat(chip, RTS51X_STAT_SS);
			/* ignore mass storage interface's children */
			pm_suspend_ignore_children(&us->pusb_intf->dev, true);
			/* async put: safe from timer (atomic) context */
			usb_autopm_put_interface_async(us->pusb_intf);
			usb_stor_dbg(us, "RTS51X_STAT_SS 01, power.usage:%d\n",
				     atomic_read(&us->pusb_intf->dev.power.usage_count));
		}
		break;
	default:
		usb_stor_dbg(us, "Unknown state !!!\n");
		break;
	}
}
static inline int working_scsi(struct scsi_cmnd *srb)
{
if ((srb->cmnd[0] == TEST_UNIT_READY) ||
(srb->cmnd[0] == ALLOW_MEDIUM_REMOVAL)) {
return 0;
}
return 1;
}
/*
 * Protocol handler installed over the original one (kept in
 * chip->proto_handler_backup) when autosuspend is enabled.  Real commands
 * wake the device and go through; while selectively suspended, the
 * housekeeping commands TEST_UNIT_READY / ALLOW_MEDIUM_REMOVAL are
 * answered locally from cached state so polling does not wake the device.
 *
 * NOTE(review): card_first_show is a function-static, so it is shared
 * across all plugged readers — confirm multi-device behavior is intended.
 */
static void rts51x_invoke_transport(struct scsi_cmnd *srb, struct us_data *us)
{
	struct rts51x_chip *chip = (struct rts51x_chip *)(us->extra);
	static int card_first_show = 1;
	/* canned sense: NOT READY / MEDIUM NOT PRESENT (0x02/0x3A) */
	static u8 media_not_present[] = { 0x70, 0, 0x02, 0, 0, 0, 0,
		10, 0, 0, 0, 0, 0x3A, 0, 0, 0, 0, 0
	};
	/* canned sense: ILLEGAL REQUEST / INVALID FIELD IN CDB (0x05/0x24) */
	static u8 invalid_cmd_field[] = { 0x70, 0, 0x05, 0, 0, 0, 0,
		10, 0, 0, 0, 0, 0x24, 0, 0, 0, 0, 0
	};
	int ret;
	if (working_scsi(srb)) {
		usb_stor_dbg(us, "working scsi, power.usage:%d\n",
			     atomic_read(&us->pusb_intf->dev.power.usage_count));
		/* re-take the autopm reference the timer dropped, if needed */
		if (atomic_read(&us->pusb_intf->dev.power.usage_count) <= 0) {
			ret = usb_autopm_get_interface(us->pusb_intf);
			usb_stor_dbg(us, "working scsi, ret=%d\n", ret);
		}
		if (rts51x_get_stat(chip) != RTS51X_STAT_RUN)
			rts51x_set_stat(chip, RTS51X_STAT_RUN);
		chip->proto_handler_backup(srb, us);
	} else {
		if (rts51x_get_stat(chip) == RTS51X_STAT_SS) {
			/* suspended: answer housekeeping from cached state */
			usb_stor_dbg(us, "NOT working scsi\n");
			if ((srb->cmnd[0] == TEST_UNIT_READY) &&
			    (chip->pwr_state == US_SUSPEND)) {
				if (TST_LUN_READY(chip, srb->device->lun)) {
					srb->result = SAM_STAT_GOOD;
				} else {
					srb->result = SAM_STAT_CHECK_CONDITION;
					memcpy(srb->sense_buffer,
					       media_not_present,
					       US_SENSE_SIZE);
				}
				usb_stor_dbg(us, "TEST_UNIT_READY\n");
				goto out;
			}
			if (srb->cmnd[0] == ALLOW_MEDIUM_REMOVAL) {
				int prevent = srb->cmnd[4] & 0x1;
				if (prevent) {
					/* cannot honor PREVENT while suspended */
					srb->result = SAM_STAT_CHECK_CONDITION;
					memcpy(srb->sense_buffer,
					       invalid_cmd_field,
					       US_SENSE_SIZE);
				} else {
					srb->result = SAM_STAT_GOOD;
				}
				usb_stor_dbg(us, "ALLOW_MEDIUM_REMOVAL\n");
				goto out;
			}
		} else {
			usb_stor_dbg(us, "NOT working scsi, not SS\n");
			chip->proto_handler_backup(srb, us);
			/* Check whether card is plugged in */
			if (srb->cmnd[0] == TEST_UNIT_READY) {
				if (srb->result == SAM_STAT_GOOD) {
					SET_LUN_READY(chip, srb->device->lun);
					if (card_first_show) {
						card_first_show = 0;
						fw5895_set_mmc_wp(us);
					}
				} else {
					CLR_LUN_READY(chip, srb->device->lun);
					card_first_show = 1;
				}
			}
			if (rts51x_get_stat(chip) != RTS51X_STAT_IDLE)
				rts51x_set_stat(chip, RTS51X_STAT_IDLE);
		}
	}
out:
	usb_stor_dbg(us, "state:%d\n", rts51x_get_stat(chip));
	/* only re-arm the suspend timer while traffic keeps us in RUN */
	if (rts51x_get_stat(chip) == RTS51X_STAT_RUN)
		rts51x_modi_suspend_timer(chip);
}
/*
 * One-time autosuspend setup: snapshot LUN-0 status into chip->status[0],
 * back up the protocol handler chosen in probe1 and interpose
 * rts51x_invoke_transport() in its place, arm the suspend timer, apply
 * the fw5895 workaround, and enable USB autosuspend for the device.
 * Returns 0 on success, -EIO when the initial status read fails.
 */
static int realtek_cr_autosuspend_setup(struct us_data *us)
{
	struct rts51x_chip *chip;
	struct rts51x_status *status = NULL;
	u8 buf[16];
	int retval;
	chip = (struct rts51x_chip *)us->extra;
	chip->support_auto_delink = 0;
	chip->pwr_state = US_RESUME;
	chip->lun_ready = 0;
	rts51x_set_stat(chip, RTS51X_STAT_INIT);
	retval = rts51x_read_status(us, 0, buf, 16, &(chip->status_len));
	if (retval != STATUS_SUCCESS) {
		usb_stor_dbg(us, "Read status fail\n");
		return -EIO;
	}
	/* same layout as rts51x_check_status(), parsed for LUN 0 */
	status = chip->status;
	status->vid = ((u16) buf[0] << 8) | buf[1];
	status->pid = ((u16) buf[2] << 8) | buf[3];
	status->cur_lun = buf[4];
	status->card_type = buf[5];
	status->total_lun = buf[6];
	status->fw_ver = ((u16) buf[7] << 8) | buf[8];
	status->phy_exist = buf[9];
	status->multi_flag = buf[10];
	status->multi_card = buf[11];
	status->log_exist = buf[12];
	if (chip->status_len == 16) {
		status->detailed_type.detailed_type1 = buf[13];
		status->function[0] = buf[14];
		status->function[1] = buf[15];
	}
	/* back up the proto_handler in us->extra */
	chip = (struct rts51x_chip *)(us->extra);
	chip->proto_handler_backup = us->proto_handler;
	/* Set the autosuspend_delay to 0 */
	pm_runtime_set_autosuspend_delay(&us->pusb_dev->dev, 0);
	/* override us->proto_handler setted in get_protocol() */
	us->proto_handler = rts51x_invoke_transport;
	chip->timer_expires = 0;
	timer_setup(&chip->rts51x_suspend_timer, rts51x_suspend_timer_fn, 0);
	fw5895_init(us);
	/* enable autosuspend function of the usb device */
	usb_enable_autosuspend(us->pusb_dev);
	return 0;
}
#endif
/*
 * extra_destructor callback: tear down the per-device state allocated in
 * init_realtek_cr().  The chip struct itself is freed by the usb-storage
 * core (it owns us->extra); only chip->status is freed here.
 *
 * NOTE(review): del_timer() does not wait for a running handler;
 * del_timer_sync() would be the conservative choice — confirm the core
 * guarantees the timer cannot be in flight at destructor time.
 */
static void realtek_cr_destructor(void *extra)
{
	struct rts51x_chip *chip = extra;
	if (!chip)
		return;
#ifdef CONFIG_REALTEK_AUTOPM
	if (ss_en) {
		del_timer(&chip->rts51x_suspend_timer);
		chip->timer_expires = 0;
	}
#endif
	kfree(chip->status);
}
#ifdef CONFIG_PM
/*
 * Custom suspend callback (replaces usb_stor_suspend): arm the
 * auto-delink registers before power-down, under dev_mutex so no
 * command is mid-flight.  Always reports success.
 */
static int realtek_cr_suspend(struct usb_interface *iface, pm_message_t message)
{
	struct us_data *us = usb_get_intfdata(iface);
	/* wait until no command is running */
	mutex_lock(&us->dev_mutex);
	config_autodelink_before_power_down(us);
	mutex_unlock(&us->dev_mutex);
	return 0;
}
/*
 * Custom resume callback: re-apply the fw5895 workaround and the
 * post-power-on auto-delink configuration.  Always reports success.
 */
static int realtek_cr_resume(struct usb_interface *iface)
{
	struct us_data *us = usb_get_intfdata(iface);
	fw5895_init(us);
	config_autodelink_after_power_on(us);
	return 0;
}
#else
#define realtek_cr_suspend NULL
#define realtek_cr_resume NULL
#endif
/*
 * Device init hook (referenced from the unusual-devs table): allocate the
 * chip state, discover max LUN, read per-LUN status, decide auto-delink
 * support from PID/firmware version or the capability bits, optionally
 * set up autosuspend, then apply the power-on auto-delink config.
 * Returns 0 on success, -ENOMEM/-EIO on failure (state freed on the
 * error path, us->extra reset to NULL).
 */
static int init_realtek_cr(struct us_data *us)
{
	struct rts51x_chip *chip;
	int size, i, retval;
	chip = kzalloc(sizeof(struct rts51x_chip), GFP_KERNEL);
	if (!chip)
		return -ENOMEM;
	us->extra = chip;
	us->extra_destructor = realtek_cr_destructor;
	us->max_lun = chip->max_lun = rts51x_get_max_lun(us);
	chip->us = us;
	usb_stor_dbg(us, "chip->max_lun = %d\n", chip->max_lun);
	size = (chip->max_lun + 1) * sizeof(struct rts51x_status);
	chip->status = kzalloc(size, GFP_KERNEL);
	if (!chip->status)
		goto INIT_FAIL;
	for (i = 0; i <= (int)(chip->max_lun); i++) {
		retval = rts51x_check_status(us, (u8) i);
		if (retval < 0)
			goto INIT_FAIL;
	}
	/* auto-delink: known PID/firmware combos, or capability bit 4 */
	if (CHECK_PID(chip, 0x0138) || CHECK_PID(chip, 0x0158) ||
	    CHECK_PID(chip, 0x0159)) {
		if (CHECK_FW_VER(chip, 0x5888) || CHECK_FW_VER(chip, 0x5889) ||
		    CHECK_FW_VER(chip, 0x5901))
			SET_AUTO_DELINK(chip);
		if (STATUS_LEN(chip) == 16) {
			if (SUPPORT_AUTO_DELINK(chip))
				SET_AUTO_DELINK(chip);
		}
	}
#ifdef CONFIG_REALTEK_AUTOPM
	/* return value deliberately ignored: autosuspend is best-effort */
	if (ss_en)
		realtek_cr_autosuspend_setup(us);
#endif
	usb_stor_dbg(us, "chip->flag = 0x%x\n", chip->flag);
	(void)config_autodelink_after_power_on(us);
	return 0;
INIT_FAIL:
	/* free both allocations and clear extra so the destructor is a no-op */
	if (us->extra) {
		kfree(chip->status);
		kfree(us->extra);
		us->extra = NULL;
	}
	return -EIO;
}
static struct scsi_host_template realtek_cr_host_template;
/*
 * Probe callback: hand the interface to the usb-storage core in two
 * phases; init_realtek_cr() runs from the unusual-devs init hook during
 * probe2.
 */
static int realtek_cr_probe(struct usb_interface *intf,
			    const struct usb_device_id *id)
{
	struct us_data *us;
	int result;

	dev_dbg(&intf->dev, "Probe Realtek Card Reader!\n");

	result = usb_stor_probe1(&us, intf, id,
				 (id - realtek_cr_ids) +
				 realtek_cr_unusual_dev_list,
				 &realtek_cr_host_template);
	if (result)
		return result;

	return usb_stor_probe2(us);
}
/*
 * USB driver glue.  Unlike most usb-storage subdrivers, suspend/resume
 * are driver-specific (auto-delink register handling) instead of the
 * stock usb_stor_suspend/usb_stor_resume, and runtime autosuspend is
 * advertised for the CONFIG_REALTEK_AUTOPM machinery.
 */
static struct usb_driver realtek_cr_driver = {
	.name = DRV_NAME,
	.probe = realtek_cr_probe,
	.disconnect = usb_stor_disconnect,
	/* .suspend = usb_stor_suspend, */
	/* .resume = usb_stor_resume, */
	.reset_resume = usb_stor_reset_resume,
	.suspend = realtek_cr_suspend,
	.resume = realtek_cr_resume,
	.pre_reset = usb_stor_pre_reset,
	.post_reset = usb_stor_post_reset,
	.id_table = realtek_cr_ids,
	.soft_unbind = 1,
	.supports_autosuspend = 1,
	.no_dynamic_id = 1,
};
module_usb_stor_driver(realtek_cr_driver, realtek_cr_host_template, DRV_NAME);
| linux-master | drivers/usb/storage/realtek_cr.c |
// SPDX-License-Identifier: GPL-2.0+
#include <linux/jiffies.h>
#include <linux/errno.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <linux/firmware.h>
#include "usb.h"
#include "transport.h"
#include "protocol.h"
#include "debug.h"
#include "scsiglue.h"
#define SD_INIT1_FIRMWARE "ene-ub6250/sd_init1.bin"
#define SD_INIT2_FIRMWARE "ene-ub6250/sd_init2.bin"
#define SD_RW_FIRMWARE "ene-ub6250/sd_rdwr.bin"
#define MS_INIT_FIRMWARE "ene-ub6250/ms_init.bin"
#define MSP_RW_FIRMWARE "ene-ub6250/msp_rdwr.bin"
#define MS_RW_FIRMWARE "ene-ub6250/ms_rdwr.bin"
#define DRV_NAME "ums_eneub6250"
MODULE_DESCRIPTION("Driver for ENE UB6250 reader");
MODULE_LICENSE("GPL");
MODULE_IMPORT_NS(USB_STORAGE);
MODULE_FIRMWARE(SD_INIT1_FIRMWARE);
MODULE_FIRMWARE(SD_INIT2_FIRMWARE);
MODULE_FIRMWARE(SD_RW_FIRMWARE);
MODULE_FIRMWARE(MS_INIT_FIRMWARE);
MODULE_FIRMWARE(MSP_RW_FIRMWARE);
MODULE_FIRMWARE(MS_RW_FIRMWARE);
/*
* The table of devices
*/
#define UNUSUAL_DEV(id_vendor, id_product, bcdDeviceMin, bcdDeviceMax, \
vendorName, productName, useProtocol, useTransport, \
initFunction, flags) \
{ USB_DEVICE_VER(id_vendor, id_product, bcdDeviceMin, bcdDeviceMax), \
.driver_info = (flags)}
/* Device-ID table generated from unusual_ene_ub6250.h via UNUSUAL_DEV. */
static struct usb_device_id ene_ub6250_usb_ids[] = {
#	include "unusual_ene_ub6250.h"
	{ }			/* Terminating entry */
};
MODULE_DEVICE_TABLE(usb, ene_ub6250_usb_ids);
#undef UNUSUAL_DEV
/*
* The flags table
*/
#define UNUSUAL_DEV(idVendor, idProduct, bcdDeviceMin, bcdDeviceMax, \
vendor_name, product_name, use_protocol, use_transport, \
init_function, Flags) \
{ \
.vendorName = vendor_name, \
.productName = product_name, \
.useProtocol = use_protocol, \
.useTransport = use_transport, \
.initFunction = init_function, \
}
/* Parallel flags table: same header expanded with the second UNUSUAL_DEV. */
static struct us_unusual_dev ene_ub6250_unusual_dev_list[] = {
#	include "unusual_ene_ub6250.h"
	{ }			/* Terminating entry */
};
#undef UNUSUAL_DEV
/* ENE bin code len */
#define ENE_BIN_CODE_LEN 0x800
/* EnE HW Register */
#define REG_CARD_STATUS 0xFF83
#define REG_HW_TRAP1 0xFF89
/* SRB Status */
#define SS_SUCCESS 0x000000 /* No Sense */
#define SS_NOT_READY 0x023A00 /* Medium not present */
#define SS_MEDIUM_ERR 0x031100 /* Unrecovered read error */
#define SS_HW_ERR 0x040800 /* Communication failure */
#define SS_ILLEGAL_REQUEST 0x052000 /* Invalid command */
#define SS_UNIT_ATTENTION 0x062900 /* Reset occurred */
/* ENE Load FW Pattern */
#define SD_INIT1_PATTERN 1
#define SD_INIT2_PATTERN 2
#define SD_RW_PATTERN 3
#define MS_INIT_PATTERN 4
#define MSP_RW_PATTERN 5
#define MS_RW_PATTERN 6
#define SM_INIT_PATTERN 7
#define SM_RW_PATTERN 8
#define FDIR_WRITE 0
#define FDIR_READ 1
/* For MS Card */
/* Status Register 1 */
#define MS_REG_ST1_MB 0x80 /* media busy */
#define MS_REG_ST1_FB1 0x40 /* flush busy 1 */
#define MS_REG_ST1_DTER 0x20 /* error on data(corrected) */
#define MS_REG_ST1_UCDT 0x10 /* unable to correct data */
#define MS_REG_ST1_EXER 0x08 /* error on extra(corrected) */
#define MS_REG_ST1_UCEX 0x04 /* unable to correct extra */
#define MS_REG_ST1_FGER 0x02 /* error on overwrite flag(corrected) */
#define MS_REG_ST1_UCFG 0x01 /* unable to correct overwrite flag */
#define MS_REG_ST1_DEFAULT (MS_REG_ST1_MB | MS_REG_ST1_FB1 | MS_REG_ST1_DTER | MS_REG_ST1_UCDT | MS_REG_ST1_EXER | MS_REG_ST1_UCEX | MS_REG_ST1_FGER | MS_REG_ST1_UCFG)
/* Overwrite Area */
#define MS_REG_OVR_BKST 0x80 /* block status */
#define MS_REG_OVR_BKST_OK MS_REG_OVR_BKST /* OK */
#define MS_REG_OVR_BKST_NG 0x00 /* NG */
#define MS_REG_OVR_PGST0 0x40 /* page status */
#define MS_REG_OVR_PGST1 0x20
#define MS_REG_OVR_PGST_MASK (MS_REG_OVR_PGST0 | MS_REG_OVR_PGST1)
#define MS_REG_OVR_PGST_OK (MS_REG_OVR_PGST0 | MS_REG_OVR_PGST1) /* OK */
#define MS_REG_OVR_PGST_NG MS_REG_OVR_PGST1 /* NG */
#define MS_REG_OVR_PGST_DATA_ERROR 0x00 /* data error */
#define MS_REG_OVR_UDST 0x10 /* update status */
#define MS_REG_OVR_UDST_UPDATING 0x00 /* updating */
#define MS_REG_OVR_UDST_NO_UPDATE MS_REG_OVR_UDST
#define MS_REG_OVR_RESERVED 0x08
#define MS_REG_OVR_DEFAULT (MS_REG_OVR_BKST_OK | MS_REG_OVR_PGST_OK | MS_REG_OVR_UDST_NO_UPDATE | MS_REG_OVR_RESERVED)
/* Management Flag */
#define MS_REG_MNG_SCMS0 0x20 /* serial copy management system */
#define MS_REG_MNG_SCMS1 0x10
#define MS_REG_MNG_SCMS_MASK (MS_REG_MNG_SCMS0 | MS_REG_MNG_SCMS1)
#define MS_REG_MNG_SCMS_COPY_OK (MS_REG_MNG_SCMS0 | MS_REG_MNG_SCMS1)
#define MS_REG_MNG_SCMS_ONE_COPY MS_REG_MNG_SCMS1
#define MS_REG_MNG_SCMS_NO_COPY 0x00
#define MS_REG_MNG_ATFLG 0x08 /* address transfer table flag */
#define MS_REG_MNG_ATFLG_OTHER MS_REG_MNG_ATFLG /* other */
#define MS_REG_MNG_ATFLG_ATTBL 0x00 /* address transfer table */
#define MS_REG_MNG_SYSFLG 0x04 /* system flag */
#define MS_REG_MNG_SYSFLG_USER MS_REG_MNG_SYSFLG /* user block */
#define MS_REG_MNG_SYSFLG_BOOT 0x00 /* system block */
#define MS_REG_MNG_RESERVED 0xc3
#define MS_REG_MNG_DEFAULT (MS_REG_MNG_SCMS_COPY_OK | MS_REG_MNG_ATFLG_OTHER | MS_REG_MNG_SYSFLG_USER | MS_REG_MNG_RESERVED)
#define MS_MAX_PAGES_PER_BLOCK 32
#define MS_MAX_INITIAL_ERROR_BLOCKS 10
#define MS_LIB_BITS_PER_BYTE 8
#define MS_SYSINF_FORMAT_FAT 1
#define MS_SYSINF_USAGE_GENERAL 0
#define MS_SYSINF_MSCLASS_TYPE_1 1
#define MS_SYSINF_PAGE_SIZE MS_BYTES_PER_PAGE /* fixed */
#define MS_SYSINF_CARDTYPE_RDONLY 1
#define MS_SYSINF_CARDTYPE_RDWR 2
#define MS_SYSINF_CARDTYPE_HYBRID 3
#define MS_SYSINF_SECURITY 0x01
#define MS_SYSINF_SECURITY_NO_SUPPORT MS_SYSINF_SECURITY
#define MS_SYSINF_SECURITY_SUPPORT 0
#define MS_SYSINF_RESERVED1 1
#define MS_SYSINF_RESERVED2 1
#define MS_SYSENT_TYPE_INVALID_BLOCK 0x01
#define MS_SYSENT_TYPE_CIS_IDI 0x0a /* CIS/IDI */
#define SIZE_OF_KIRO 1024
#define BYTE_MASK 0xff
/*
 * ms error code.
 * The 0xfffX values double as block-state markers stored directly in the
 * Phy2LogMap/Log2PhyMap tables (see ms_lib_erase_phyblock() and friends),
 * which is why they sit above any valid logical block number.
 */
#define MS_STATUS_WRITE_PROTECT	0x0106
#define MS_STATUS_SUCCESS	0x0000
#define MS_ERROR_FLASH_READ	0x8003
#define MS_ERROR_FLASH_ERASE	0x8005
#define MS_LB_ERROR		0xfff0
#define MS_LB_BOOT_BLOCK	0xfff1
#define MS_LB_INITIAL_ERROR	0xfff2
#define MS_STATUS_SUCCESS_WITH_ECC 0xfff3
#define MS_LB_ACQUIRED_ERROR	0xfff4
#define MS_LB_NOT_USED_ERASED	0xfff5
#define MS_NOCARD_ERROR		0xfff8
#define MS_NO_MEMORY_ERROR	0xfff9
#define MS_STATUS_INT_ERROR	0xfffa
#define MS_STATUS_ERROR		0xfffe
#define MS_LB_NOT_USED		0xffff
/*
 * Boot-block layout constants.
 *
 * The identical redefinitions of MS_REG_MNG_SYSFLG*, MS_REG_OVR_BKST* and
 * MS_REG_ST1_{DTER,EXER,FGER} that used to be repeated here were removed;
 * the definitions earlier in this file are the single source of truth.
 */
#define MS_BOOT_BLOCK_ID		0x0001
#define MS_BOOT_BLOCK_FORMAT_VERSION	0x0100
#define MS_BOOT_BLOCK_DATA_ENTRIES	2

#define MS_NUMBER_OF_SYSTEM_ENTRY	4
#define MS_NUMBER_OF_BOOT_BLOCK		2
#define MS_BYTES_PER_PAGE		512
#define MS_LOGICAL_BLOCKS_PER_SEGMENT	496
#define MS_LOGICAL_BLOCKS_IN_1ST_SEGMENT 494

#define MS_PHYSICAL_BLOCKS_PER_SEGMENT	0x200	/* 512 */
#define MS_PHYSICAL_BLOCKS_PER_SEGMENT_MASK 0x1ff
/* MemoryStick Register */
/* Status Register 0 */
#define MS_REG_ST0_WP		0x01	/* write protected */
#define MS_REG_ST0_WP_ON	MS_REG_ST0_WP

/* Bit numbers within ms_lib_ctrl.flags (used via ms_lib_ctrl_set/reset/check) */
#define MS_LIB_CTRL_RDONLY	0
#define MS_LIB_CTRL_WRPROTECT	1

/* phy->log table lookups: out-of-range indices yield MS_STATUS_ERROR */
/*dphy->log table */
#define ms_libconv_to_logical(pdx, PhyBlock) (((PhyBlock) >= (pdx)->MS_Lib.NumberOfPhyBlock) ? MS_STATUS_ERROR : (pdx)->MS_Lib.Phy2LogMap[PhyBlock])
#define ms_libconv_to_physical(pdx, LogBlock) (((LogBlock) >= (pdx)->MS_Lib.NumberOfLogBlock) ? MS_STATUS_ERROR : (pdx)->MS_Lib.Log2PhyMap[LogBlock])

#define ms_lib_ctrl_set(pdx, Flag)	((pdx)->MS_Lib.flags |= (1 << (Flag)))
#define ms_lib_ctrl_reset(pdx, Flag)	((pdx)->MS_Lib.flags &= ~(1 << (Flag)))
#define ms_lib_ctrl_check(pdx, Flag)	((pdx)->MS_Lib.flags & (1 << (Flag)))

/* writable only when neither the card type nor its WP switch forbids it */
#define ms_lib_iswritable(pdx) ((ms_lib_ctrl_check((pdx), MS_LIB_CTRL_RDONLY) == 0) && (ms_lib_ctrl_check(pdx, MS_LIB_CTRL_WRPROTECT) == 0))
#define ms_lib_clear_pagemap(pdx) memset((pdx)->MS_Lib.pagemap, 0, sizeof((pdx)->MS_Lib.pagemap))

/* Build a 16-bit logical address from the two extra-area bytes */
#define memstick_logaddr(logadr1, logadr0) ((((u16)(logadr1)) << 8) | (logadr0))

/* SD_STATUS bits */
#define SD_Insert	BIT(0)
#define SD_Ready	BIT(1)
#define SD_MediaChange	BIT(2)
#define SD_IsMMC	BIT(3)
#define SD_HiCapacity	BIT(4)
#define SD_HiSpeed	BIT(5)
#define SD_WtP		BIT(6)
			/* Bit 7 reserved */

/* MS_STATUS bits */
#define MS_Insert	BIT(0)
#define MS_Ready	BIT(1)
#define MS_MediaChange	BIT(2)
#define MS_IsMSPro	BIT(3)
#define MS_IsMSPHG	BIT(4)
			/* Bit 5 reserved */
#define MS_WtP		BIT(6)
			/* Bit 7 reserved */

/* SM_STATUS bits */
#define SM_Insert	BIT(0)
#define SM_Ready	BIT(1)
#define SM_MediaChange	BIT(2)
			/* Bits 3-5 reserved */
#define SM_WtP		BIT(6)
#define SM_IsMS		BIT(7)
/*
 * CIS (Card Information Structure) entry of the MS boot block; raw byte
 * layout, offsets noted per field. Read as part of the CIS/IDI system
 * entry in ms_lib_process_bootblock().
 */
struct ms_bootblock_cis {
	u8 bCistplDEVICE[6];	/* 0 */
	u8 bCistplDEVICE0C[6];	/* 6 */
	u8 bCistplJEDECC[4];	/* 12 */
	u8 bCistplMANFID[6];	/* 16 */
	u8 bCistplVER1[32];	/* 22 */
	u8 bCistplFUNCID[4];	/* 54 */
	u8 bCistplFUNCE0[4];	/* 58 */
	u8 bCistplFUNCE1[5];	/* 62 */
	u8 bCistplCONF[7];	/* 67 */
	u8 bCistplCFTBLENT0[10];/* 74 */
	u8 bCistplCFTBLENT1[8];	/* 84 */
	u8 bCistplCFTBLENT2[12];/* 92 */
	u8 bCistplCFTBLENT3[8];	/* 104 */
	u8 bCistplCFTBLENT4[17];/* 112 */
	u8 bCistplCFTBLENT5[8];	/* 129 */
	u8 bCistplCFTBLENT6[17];/* 137 */
	u8 bCistplCFTBLENT7[8];	/* 154 */
	u8 bCistplNOLINK[3];	/* 162 */
} ;
/*
 * IDI (Identify Device Information) entry of the MS boot block.
 * Fields are little-endian on the card (accessed with le16_to_cpu() in
 * ms_lib_process_bootblock()); offsets in the comments are 16-bit words.
 */
struct ms_bootblock_idi {
#define MS_IDI_GENERAL_CONF 0x848A	/* expected wIDIgeneralConfiguration */
	u16 wIDIgeneralConfiguration;	/* 0 */
	u16 wIDInumberOfCylinder;	/* 1 */
	u16 wIDIreserved0;		/* 2 */
	u16 wIDInumberOfHead;		/* 3 */
	u16 wIDIbytesPerTrack;		/* 4 */
	u16 wIDIbytesPerSector;		/* 5 */
	u16 wIDIsectorsPerTrack;	/* 6 */
	u16 wIDItotalSectors[2];	/* 7-8  high,low */
	u16 wIDIreserved1[11];		/* 9-19 */
	u16 wIDIbufferType;		/* 20 */
	u16 wIDIbufferSize;		/* 21 */
	u16 wIDIlongCmdECC;		/* 22 */
	u16 wIDIfirmVersion[4];		/* 23-26 */
	u16 wIDImodelName[20];		/* 27-46 */
	u16 wIDIreserved2;		/* 47 */
	u16 wIDIlongWordSupported;	/* 48 */
	u16 wIDIdmaSupported;		/* 49 */
	u16 wIDIreserved3;		/* 50 */
	u16 wIDIpioTiming;		/* 51 */
	u16 wIDIdmaTiming;		/* 52 */
	u16 wIDItransferParameter;	/* 53 */
	u16 wIDIformattedCylinder;	/* 54 */
	u16 wIDIformattedHead;		/* 55 */
	u16 wIDIformattedSectorsPerTrack;/* 56 */
	u16 wIDIformattedTotalSectors[2];/* 57-58 */
	u16 wIDImultiSector;		/* 59 */
	u16 wIDIlbaSectors[2];		/* 60-61 */
	u16 wIDIsingleWordDMA;		/* 62 */
	u16 wIDImultiWordDMA;		/* 63 */
	u16 wIDIreserved4[192];		/* 64-255 */
};
/*
 * One system-entry record of the boot block: a big-endian byte range
 * (dwStart/dwSize, converted with be32_to_cpu()) plus its type
 * (MS_SYSENT_TYPE_*).
 */
struct ms_bootblock_sysent_rec {
	u32 dwStart;
	u32 dwSize;
	u8 bType;
	u8 bReserved[3];
};

/* Fixed-size table of MS_NUMBER_OF_SYSTEM_ENTRY records in boot-block page 0 */
struct ms_bootblock_sysent {
	struct ms_bootblock_sysent_rec entry[MS_NUMBER_OF_SYSTEM_ENTRY];
};
/*
 * System-information section of boot-block page 0. Multi-byte fields are
 * big-endian on the card (read with be16_to_cpu() in
 * ms_lib_process_bootblock()).
 */
struct ms_bootblock_sysinf {
	u8 bMsClass;		/* must be 1 */
	u8 bCardType;		/* see below */
	u16 wBlockSize;		/* n KB */
	u16 wBlockNumber;	/* number of physical block */
	u16 wTotalBlockNumber;	/* number of logical block */
	u16 wPageSize;		/* must be 0x200 */
	u8 bExtraSize;		/* 0x10 */
	u8 bSecuritySupport;
	u8 bAssemblyDate[8];
	u8 bFactoryArea[4];
	u8 bAssemblyMakerCode;
	u8 bAssemblyMachineCode[3];
	u16 wMemoryMakerCode;
	u16 wMemoryDeviceCode;
	u16 wMemorySize;
	u8 bReserved1;
	u8 bReserved2;
	u8 bVCC;
	u8 bVPP;
	u16 wControllerChipNumber;
	u16 wControllerFunction;	/* New MS */
	u8 bReserved3[9];		/* New MS */
	u8 bParallelSupport;		/* New MS */
	u16 wFormatValue;		/* New MS */
	u8 bFormatType;
	u8 bUsage;
	u8 bDeviceType;
	u8 bReserved4[22];
	u8 bFUValue3;
	u8 bFUValue4;
	u8 bReserved5[15];
};
/* Header of boot-block page 0; wBlockID should equal MS_BOOT_BLOCK_ID */
struct ms_bootblock_header {
	u16 wBlockID;
	u16 wFormatVersion;
	u8 bReserved1[184];
	u8 bNumberOfDataEntry;
	u8 bReserved2[179];
};

/* Full layout of the first page of the boot block */
struct ms_bootblock_page0 {
	struct ms_bootblock_header header;
	struct ms_bootblock_sysent sysent;
	struct ms_bootblock_sysinf sysinf;
};
/*
 * CIS/IDI system entry as stored on the card: each part padded to 256
 * bytes via the union with the dmy[] filler.
 */
struct ms_bootblock_cis_idi {
	union {
		struct ms_bootblock_cis cis;
		u8 dmy[256];
	} cis;
	union {
		struct ms_bootblock_idi idi;
		u8 dmy[256];
	} idi;
};
/* ENE MS Lib struct */

/* Per-page extra-area data cached by the MS library */
struct ms_lib_type_extdat {
	u8 reserved;
	u8 intr;
	u8 status0;
	u8 status1;
	u8 ovrflg;	/* overwrite flag (MS_REG_OVR_*) */
	u8 mngflg;	/* management flag (MS_REG_MNG_*) */
	u16 logadr;	/* logical address, MS_LB_NOT_USED if unmapped */
};

/* In-memory state for the MemoryStick translation layer */
struct ms_lib_ctrl {
	u32 flags;		/* MS_LIB_CTRL_* bit flags */
	u32 BytesPerSector;
	u32 NumberOfCylinder;
	u32 SectorsPerCylinder;
	u16 cardType;		/* R/W, RO, Hybrid */
	u16 blockSize;		/* in KiB (see ms_lib_process_bootblock) */
	u16 PagesPerBlock;
	u16 NumberOfPhyBlock;
	u16 NumberOfLogBlock;
	u16 NumberOfSegment;
	u16 *Phy2LogMap;	/* phy2log table */
	u16 *Log2PhyMap;	/* log2phy table */
	u16 wrtblk;		/* block being written, (u16)-1 if none */
	/* one dirty bit per page of the write block */
	unsigned char *pagemap[(MS_MAX_PAGES_PER_BLOCK + (MS_LIB_BITS_PER_BYTE-1)) / MS_LIB_BITS_PER_BYTE];
	unsigned char *blkpag;	/* page-data buffer for the write block */
	struct ms_lib_type_extdat *blkext;	/* extra data per page of the write block */
	unsigned char copybuf[512];
};
/* SD Block Length */
/* 2^9 = 512 Bytes, The HW maximum read/write data length */
#define SD_BLOCK_LEN	9

/* Per-device driver state, stored in us->extra */
struct ene_ub6250_info {

	/* I/O bounce buffer */
	u8		*bbuf;

	/* for 6250 code */
	u8		SD_Status;	/* SD_* status bits */
	u8		MS_Status;	/* MS_* status bits */
	u8		SM_Status;	/* SM_* status bits */

	/* ----- SD Control Data ---------------- */
	/*SD_REGISTER SD_Regs; */
	u16		SD_Block_Mult;
	u8		SD_READ_BL_LEN;
	u16		SD_C_SIZE;
	u8		SD_C_SIZE_MULT;

	/* SD/MMC New spec. */
	u8		SD_SPEC_VER;
	u8		SD_CSD_VER;
	u8		SD20_HIGH_CAPACITY;
	u32		HC_C_SIZE;
	u8		MMC_SPEC_VER;
	u8		MMC_BusWidth;
	u8		MMC_HIGH_CAPACITY;

	/*----- MS Control Data ---------------- */
	bool		MS_SWWP;	/* software write protect */
	u32		MSP_TotalBlock;
	struct ms_lib_ctrl MS_Lib;	/* MemoryStick translation-layer state */
	bool		MS_IsRWPage;
	u16		MS_Model;

	/*----- SM Control Data ---------------- */
	u8		SM_DeviceID;
	u8		SM_CardID;

	unsigned char	*testbuf;
	u8		BIN_FLAG;	/* which firmware pattern is currently loaded */
	u32		bl_num;		/* last LBA of the SD medium (set by read_capacity) */
	int		SrbStatus;	/* packed sense data, see SS_* */

	/*------Power Managerment ---------------*/
	bool		Power_IsResum;
};
static int ene_sd_init(struct us_data *us);
static int ene_ms_init(struct us_data *us);
static int ene_load_bincode(struct us_data *us, unsigned char flag);

/*
 * Free the resources owned by the per-device info structure; the
 * structure itself (us->extra) is released by the usb-storage core.
 */
static void ene_ub6250_info_destructor(void *extra)
{
	struct ene_ub6250_info *info = extra;

	if (!info)
		return;
	kfree(info->bbuf);
}
/*
 * Execute the vendor command already prepared in the CBW at us->iobuf:
 * send the CBW, run the optional data stage, then collect the CSW.
 *
 * @fDir:   FDIR_READ or FDIR_WRITE — selects the bulk pipe for the data stage
 * @buf:    data buffer, or NULL for a command with no data stage
 * @use_sg: nonzero to transfer via the scatter-gather list of us->srb
 *          instead of @buf
 *
 * Returns USB_STOR_TRANSPORT_GOOD or USB_STOR_TRANSPORT_ERROR.
 */
static int ene_send_scsi_cmd(struct us_data *us, u8 fDir, void *buf, int use_sg)
{
	/* bcb and bcs alias the same iobuf; the CBW is consumed before the CSW lands */
	struct bulk_cb_wrap *bcb = (struct bulk_cb_wrap *) us->iobuf;
	struct bulk_cs_wrap *bcs = (struct bulk_cs_wrap *) us->iobuf;
	int result;
	unsigned int residue;
	unsigned int cswlen = 0, partial = 0;
	unsigned int transfer_length = bcb->DataTransferLength;

	/* usb_stor_dbg(us, "transport --- ene_send_scsi_cmd\n"); */
	/* send cmd to out endpoint */
	result = usb_stor_bulk_transfer_buf(us, us->send_bulk_pipe,
					    bcb, US_BULK_CB_WRAP_LEN, NULL);
	if (result != USB_STOR_XFER_GOOD) {
		usb_stor_dbg(us, "send cmd to out endpoint fail ---\n");
		return USB_STOR_TRANSPORT_ERROR;
	}

	if (buf) {
		unsigned int pipe = fDir;

		if (fDir == FDIR_READ)
			pipe = us->recv_bulk_pipe;
		else
			pipe = us->send_bulk_pipe;

		/* Bulk */
		if (use_sg) {
			result = usb_stor_bulk_srb(us, pipe, us->srb);
		} else {
			result = usb_stor_bulk_transfer_sg(us, pipe, buf,
						transfer_length, 0, &partial);
		}
		if (result != USB_STOR_XFER_GOOD) {
			usb_stor_dbg(us, "data transfer fail ---\n");
			return USB_STOR_TRANSPORT_ERROR;
		}
	}

	/* Get CSW for device status */
	result = usb_stor_bulk_transfer_buf(us, us->recv_bulk_pipe, bcs,
					    US_BULK_CS_WRAP_LEN, &cswlen);

	/* Some devices emit a spurious zero-length packet first; retry once */
	if (result == USB_STOR_XFER_SHORT && cswlen == 0) {
		usb_stor_dbg(us, "Received 0-length CSW; retrying...\n");
		result = usb_stor_bulk_transfer_buf(us, us->recv_bulk_pipe,
					    bcs, US_BULK_CS_WRAP_LEN, &cswlen);
	}

	if (result == USB_STOR_XFER_STALLED) {
		/* get the status again */
		usb_stor_dbg(us, "Attempting to get CSW (2nd try)...\n");
		result = usb_stor_bulk_transfer_buf(us, us->recv_bulk_pipe,
						    bcs, US_BULK_CS_WRAP_LEN, NULL);
	}

	if (result != USB_STOR_XFER_GOOD)
		return USB_STOR_TRANSPORT_ERROR;

	/* check bulk status */
	residue = le32_to_cpu(bcs->Residue);

	/*
	 * try to compute the actual residue, based on how much data
	 * was really transferred and what the device tells us
	 */
	if (residue && !(us->fflags & US_FL_IGNORE_RESIDUE)) {
		residue = min(residue, transfer_length);
		if (us->srb != NULL)
			scsi_set_resid(us->srb, max(scsi_get_resid(us->srb),
						    residue));
	}

	if (bcs->Status != US_BULK_STAT_OK)
		return USB_STOR_TRANSPORT_ERROR;

	return USB_STOR_TRANSPORT_GOOD;
}
/*
 * Build a fixed-format REQUEST SENSE response from the packed SrbStatus
 * value (0x00KKAAQQ: sense key / ASC / ASCQ) and copy it to the SCSI
 * command's transfer buffer.
 */
static int do_scsi_request_sense(struct us_data *us, struct scsi_cmnd *srb)
{
	struct ene_ub6250_info *info = (struct ene_ub6250_info *) us->extra;
	unsigned char sense[18] = {0};

	sense[0] = 0x70;			/* current error, fixed format */
	sense[2] = info->SrbStatus >> 16;	/* sense key */
	sense[7] = 10;				/* additional sense length */
	sense[12] = info->SrbStatus >> 8;	/* ASC */
	sense[13] = info->SrbStatus;		/* ASCQ */

	usb_stor_set_xfer_buf(sense, sizeof(sense), srb);
	return USB_STOR_TRANSPORT_GOOD;
}
/* Answer INQUIRY with a canned "USB2.0  CardReader" descriptor. */
static int do_scsi_inquiry(struct us_data *us, struct scsi_cmnd *srb)
{
	/* 36-byte standard INQUIRY data: direct-access device, SCSI-2 */
	unsigned char inquiry[36] = {
		0x00, 0x00, 0x02, 0x00, 0x1F, 0x00, 0x00, 0x00, 0x55,
		0x53, 0x42, 0x32, 0x2E, 0x30, 0x20, 0x20, 0x43, 0x61,
		0x72, 0x64, 0x52, 0x65, 0x61, 0x64, 0x65, 0x72, 0x20,
		0x20, 0x20, 0x20, 0x20, 0x20, 0x30, 0x31, 0x30, 0x30 };

	usb_stor_set_xfer_buf(inquiry, sizeof(inquiry), srb);
	return USB_STOR_TRANSPORT_GOOD;
}
/*
 * TEST UNIT READY for the SD slot. Always reports success; when the card
 * is not both inserted and ready, a re-init of the slot is attempted as a
 * side effect (matching the original control flow, which returned GOOD on
 * every path).
 */
static int sd_scsi_test_unit_ready(struct us_data *us, struct scsi_cmnd *srb)
{
	struct ene_ub6250_info *info = (struct ene_ub6250_info *) us->extra;

	if (!(info->SD_Status & SD_Insert) || !(info->SD_Status & SD_Ready))
		ene_sd_init(us);

	return USB_STOR_TRANSPORT_GOOD;
}
/*
 * MODE SENSE for the SD slot: return a canned mode page whose byte 2
 * carries the write-protect bit (0x80) when the card's WP switch is set.
 */
static int sd_scsi_mode_sense(struct us_data *us, struct scsi_cmnd *srb)
{
	struct ene_ub6250_info *info = (struct ene_ub6250_info *) us->extra;
	unsigned char mediaNoWP[12] = {
		0x0b, 0x00, 0x00, 0x08, 0x00, 0x00,
		0x71, 0xc0, 0x00, 0x00, 0x02, 0x00 };
	unsigned char mediaWP[12]   = {
		0x0b, 0x00, 0x80, 0x08, 0x00, 0x00,
		0x71, 0xc0, 0x00, 0x00, 0x02, 0x00 };
	unsigned char *page;

	page = (info->SD_Status & SD_WtP) ? mediaWP : mediaNoWP;
	usb_stor_set_xfer_buf(page, 12, srb);
	return USB_STOR_TRANSPORT_GOOD;
}
/*
 * READ CAPACITY(10) for the SD slot: compute the last LBA and block size
 * from the cached CSD fields and return them big-endian, caching the last
 * LBA in info->bl_num for later range checks.
 */
static int sd_scsi_read_capacity(struct us_data *us, struct scsi_cmnd *srb)
{
	u32 bl_num;
	u32 bl_len;
	unsigned int offset = 0;
	unsigned char capacity[8];
	struct scatterlist *sg = NULL;
	struct ene_ub6250_info *info = (struct ene_ub6250_info *) us->extra;

	usb_stor_dbg(us, "sd_scsi_read_capacity\n");

	if (info->SD_Status & SD_HiCapacity) {
		/* High-capacity cards always use 512-byte sectors */
		bl_len = 0x200;
		if (info->SD_Status & SD_IsMMC)
			bl_num = info->HC_C_SIZE - 1;
		else
			bl_num = (info->HC_C_SIZE + 1) * 1024 - 1;
	} else {
		/* Standard-capacity: derive geometry from the CSD fields */
		bl_len = 1 << (info->SD_READ_BL_LEN);
		bl_num = info->SD_Block_Mult * (info->SD_C_SIZE + 1)
				* (1 << (info->SD_C_SIZE_MULT + 2)) - 1;
	}
	info->bl_num = bl_num;
	usb_stor_dbg(us, "bl_len = %x\n", bl_len);
	usb_stor_dbg(us, "bl_num = %x\n", bl_num);

	/* Response: big-endian last LBA followed by big-endian block length */
	capacity[0] = bl_num >> 24;
	capacity[1] = bl_num >> 16;
	capacity[2] = bl_num >> 8;
	capacity[3] = bl_num;
	capacity[4] = bl_len >> 24;
	capacity[5] = bl_len >> 16;
	capacity[6] = bl_len >> 8;
	capacity[7] = bl_len;

	usb_stor_access_xfer_buf(capacity, 8, srb, &sg, &offset, TO_XFER_BUF);
	return USB_STOR_TRANSPORT_GOOD;
}
/*
 * READ(10) on the SD slot: translate the SCSI CDB into the vendor 0xF1
 * read command and run it with the data going straight to the SRB's
 * scatter-gather list.
 */
static int sd_scsi_read(struct us_data *us, struct scsi_cmnd *srb)
{
	int result;
	unsigned char *cdb = srb->cmnd;
	struct bulk_cb_wrap *bcb = (struct bulk_cb_wrap *) us->iobuf;
	struct ene_ub6250_info *info = (struct ene_ub6250_info *) us->extra;

	/* Big-endian LBA from CDB bytes 2-5, transfer length from bytes 7-8 */
	u32 bn = ((cdb[2] << 24) & 0xff000000) | ((cdb[3] << 16) & 0x00ff0000) |
		((cdb[4] << 8) & 0x0000ff00) | ((cdb[5] << 0) & 0x000000ff);
	u16 blen = ((cdb[7] << 8) & 0xff00) | ((cdb[8] << 0) & 0x00ff);
	u32 bnByte = bn * 0x200;
	u32 blenByte = blen * 0x200;

	/*
	 * NOTE(review): only the start LBA is range-checked; a transfer that
	 * starts in range but extends past bl_num is not rejected — confirm
	 * the device tolerates this.
	 */
	if (bn > info->bl_num)
		return USB_STOR_TRANSPORT_ERROR;

	/*
	 * NOTE(review): ene_load_bincode() returns USB_STOR_TRANSPORT_* codes
	 * but is compared against USB_STOR_XFER_GOOD; this works only because
	 * both success values are 0 — confirm.
	 */
	result = ene_load_bincode(us, SD_RW_PATTERN);
	if (result != USB_STOR_XFER_GOOD) {
		usb_stor_dbg(us, "Load SD RW pattern Fail !!\n");
		return USB_STOR_TRANSPORT_ERROR;
	}

	/* High-capacity cards are addressed by sector, not by byte */
	if (info->SD_Status & SD_HiCapacity)
		bnByte = bn;

	/* set up the command wrapper */
	memset(bcb, 0, sizeof(struct bulk_cb_wrap));
	bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN);
	bcb->DataTransferLength = blenByte;
	bcb->Flags  = US_BULK_FLAG_IN;
	bcb->CDB[0] = 0xF1;
	bcb->CDB[5] = (unsigned char)(bnByte);
	bcb->CDB[4] = (unsigned char)(bnByte>>8);
	bcb->CDB[3] = (unsigned char)(bnByte>>16);
	bcb->CDB[2] = (unsigned char)(bnByte>>24);

	result = ene_send_scsi_cmd(us, FDIR_READ, scsi_sglist(srb), 1);
	return result;
}
/*
 * WRITE(10) on the SD slot: mirror image of sd_scsi_read() using the
 * vendor 0xF0 write command (host-to-device data stage).
 */
static int sd_scsi_write(struct us_data *us, struct scsi_cmnd *srb)
{
	int result;
	unsigned char *cdb = srb->cmnd;
	struct bulk_cb_wrap *bcb = (struct bulk_cb_wrap *) us->iobuf;
	struct ene_ub6250_info *info = (struct ene_ub6250_info *) us->extra;

	/* Big-endian LBA from CDB bytes 2-5, transfer length from bytes 7-8 */
	u32 bn = ((cdb[2] << 24) & 0xff000000) | ((cdb[3] << 16) & 0x00ff0000) |
		((cdb[4] << 8) & 0x0000ff00) | ((cdb[5] << 0) & 0x000000ff);
	u16 blen = ((cdb[7] << 8) & 0xff00) | ((cdb[8] << 0) & 0x00ff);
	u32 bnByte = bn * 0x200;
	u32 blenByte = blen * 0x200;

	/* NOTE(review): as in sd_scsi_read(), only the start LBA is checked */
	if (bn > info->bl_num)
		return USB_STOR_TRANSPORT_ERROR;

	/* See NOTE in sd_scsi_read() about the XFER vs TRANSPORT code compare */
	result = ene_load_bincode(us, SD_RW_PATTERN);
	if (result != USB_STOR_XFER_GOOD) {
		usb_stor_dbg(us, "Load SD RW pattern Fail !!\n");
		return USB_STOR_TRANSPORT_ERROR;
	}

	/* High-capacity cards are addressed by sector, not by byte */
	if (info->SD_Status & SD_HiCapacity)
		bnByte = bn;

	/* set up the command wrapper */
	memset(bcb, 0, sizeof(struct bulk_cb_wrap));
	bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN);
	bcb->DataTransferLength = blenByte;
	bcb->Flags  = 0x00;
	bcb->CDB[0] = 0xF0;
	bcb->CDB[5] = (unsigned char)(bnByte);
	bcb->CDB[4] = (unsigned char)(bnByte>>8);
	bcb->CDB[3] = (unsigned char)(bnByte>>16);
	bcb->CDB[2] = (unsigned char)(bnByte>>24);

	result = ene_send_scsi_cmd(us, FDIR_WRITE, scsi_sglist(srb), 1);
	return result;
}
/*
* ENE MS Card
*/
/* Record the bidirectional logblk <-> phyblk mapping; (u32)-1 on bad range. */
static int ms_lib_set_logicalpair(struct us_data *us, u16 logblk, u16 phyblk)
{
	struct ene_ub6250_info *info = (struct ene_ub6250_info *) us->extra;
	struct ms_lib_ctrl *lib = &info->MS_Lib;

	if (logblk >= lib->NumberOfLogBlock || phyblk >= lib->NumberOfPhyBlock)
		return (u32)-1;

	lib->Phy2LogMap[phyblk] = logblk;
	lib->Log2PhyMap[logblk] = phyblk;
	return 0;
}
/* Store a raw marker value (MS_LB_*) in the phy->log map for @phyblk. */
static int ms_lib_set_logicalblockmark(struct us_data *us, u16 phyblk, u16 mark)
{
	struct ene_ub6250_info *info = (struct ene_ub6250_info *) us->extra;
	struct ms_lib_ctrl *lib = &info->MS_Lib;

	if (phyblk >= lib->NumberOfPhyBlock)
		return (u32)-1;

	lib->Phy2LogMap[phyblk] = mark;
	return 0;
}
/* Mark @phyblk as defective-from-manufacture in the phy->log map. */
static int ms_lib_set_initialerrorblock(struct us_data *us, u16 phyblk)
{
	return ms_lib_set_logicalblockmark(us, phyblk, MS_LB_INITIAL_ERROR);
}
/* Mark @phyblk as the boot block in the phy->log map. */
static int ms_lib_set_bootblockmark(struct us_data *us, u16 phyblk)
{
	return ms_lib_set_logicalblockmark(us, phyblk, MS_LB_BOOT_BLOCK);
}
static int ms_lib_free_logicalmap(struct us_data *us)
{
struct ene_ub6250_info *info = (struct ene_ub6250_info *) us->extra;
kfree(info->MS_Lib.Phy2LogMap);
info->MS_Lib.Phy2LogMap = NULL;
kfree(info->MS_Lib.Log2PhyMap);
info->MS_Lib.Log2PhyMap = NULL;
return 0;
}
static int ms_lib_alloc_logicalmap(struct us_data *us)
{
u32 i;
struct ene_ub6250_info *info = (struct ene_ub6250_info *) us->extra;
info->MS_Lib.Phy2LogMap = kmalloc_array(info->MS_Lib.NumberOfPhyBlock,
sizeof(u16),
GFP_KERNEL);
info->MS_Lib.Log2PhyMap = kmalloc_array(info->MS_Lib.NumberOfLogBlock,
sizeof(u16),
GFP_KERNEL);
if ((info->MS_Lib.Phy2LogMap == NULL) || (info->MS_Lib.Log2PhyMap == NULL)) {
ms_lib_free_logicalmap(us);
return (u32)-1;
}
for (i = 0; i < info->MS_Lib.NumberOfPhyBlock; i++)
info->MS_Lib.Phy2LogMap[i] = MS_LB_NOT_USED;
for (i = 0; i < info->MS_Lib.NumberOfLogBlock; i++)
info->MS_Lib.Log2PhyMap[i] = MS_LB_NOT_USED;
return 0;
}
static void ms_lib_clear_writebuf(struct us_data *us)
{
int i;
struct ene_ub6250_info *info = (struct ene_ub6250_info *) us->extra;
info->MS_Lib.wrtblk = (u16)-1;
ms_lib_clear_pagemap(info);
if (info->MS_Lib.blkpag)
memset(info->MS_Lib.blkpag, 0xff, info->MS_Lib.PagesPerBlock * info->MS_Lib.BytesPerSector);
if (info->MS_Lib.blkext) {
for (i = 0; i < info->MS_Lib.PagesPerBlock; i++) {
info->MS_Lib.blkext[i].status1 = MS_REG_ST1_DEFAULT;
info->MS_Lib.blkext[i].ovrflg = MS_REG_OVR_DEFAULT;
info->MS_Lib.blkext[i].mngflg = MS_REG_MNG_DEFAULT;
info->MS_Lib.blkext[i].logadr = MS_LB_NOT_USED;
}
}
}
/*
 * Count the free (unused or erased) physical blocks in the segment that
 * starts at @PhyBlock.
 */
static int ms_count_freeblock(struct us_data *us, u16 PhyBlock)
{
	struct ene_ub6250_info *info = (struct ene_ub6250_info *) us->extra;
	u32 end = PhyBlock + MS_PHYSICAL_BLOCKS_PER_SEGMENT;
	u32 count = 0;

	for (; PhyBlock < end; PhyBlock++) {
		u16 state = info->MS_Lib.Phy2LogMap[PhyBlock];

		if (state == MS_LB_NOT_USED || state == MS_LB_NOT_USED_ERASED)
			count++;
	}

	return count;
}
/*
 * Read one 512-byte page of @PhyBlockAddr into @PageBuf plus its 4-byte
 * extra area (overwrite flag, management flag, logical address) into
 * @ExtraDat. Two vendor commands are issued: 0xF1/0x02 for page data and
 * 0xF1/0x03 for the extra area.
 */
static int ms_read_readpage(struct us_data *us, u32 PhyBlockAddr,
		u8 PageNum, u32 *PageBuf, struct ms_lib_type_extdat *ExtraDat)
{
	struct bulk_cb_wrap *bcb = (struct bulk_cb_wrap *) us->iobuf;
	struct ene_ub6250_info *info = (struct ene_ub6250_info *) us->extra;
	u8 *bbuf = info->bbuf;
	int result;
	/*
	 * NOTE(review): page index assumes 32 pages per block (0x20) even
	 * though MS_Lib.PagesPerBlock is computed from the boot block —
	 * confirm the firmware expects this fixed stride.
	 */
	u32 bn = PhyBlockAddr * 0x20 + PageNum;

	result = ene_load_bincode(us, MS_RW_PATTERN);
	if (result != USB_STOR_XFER_GOOD)
		return USB_STOR_TRANSPORT_ERROR;

	/* Read Page Data */
	memset(bcb, 0, sizeof(struct bulk_cb_wrap));
	bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN);
	bcb->DataTransferLength = 0x200;
	bcb->Flags = US_BULK_FLAG_IN;
	bcb->CDB[0] = 0xF1;

	bcb->CDB[1] = 0x02; /* in init.c ENE_MSInit() is 0x01 */

	bcb->CDB[5] = (unsigned char)(bn);
	bcb->CDB[4] = (unsigned char)(bn>>8);
	bcb->CDB[3] = (unsigned char)(bn>>16);
	bcb->CDB[2] = (unsigned char)(bn>>24);

	result = ene_send_scsi_cmd(us, FDIR_READ, PageBuf, 0);
	if (result != USB_STOR_XFER_GOOD)
		return USB_STOR_TRANSPORT_ERROR;


	/* Read Extra Data */
	memset(bcb, 0, sizeof(struct bulk_cb_wrap));
	bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN);
	bcb->DataTransferLength = 0x4;
	bcb->Flags = US_BULK_FLAG_IN;
	bcb->CDB[0] = 0xF1;
	bcb->CDB[1] = 0x03;
	bcb->CDB[5] = (unsigned char)(PageNum);
	bcb->CDB[4] = (unsigned char)(PhyBlockAddr);
	bcb->CDB[3] = (unsigned char)(PhyBlockAddr>>8);
	bcb->CDB[2] = (unsigned char)(PhyBlockAddr>>16);
	bcb->CDB[6] = 0x01;

	result = ene_send_scsi_cmd(us, FDIR_READ, bbuf, 0);
	if (result != USB_STOR_XFER_GOOD)
		return USB_STOR_TRANSPORT_ERROR;

	ExtraDat->reserved = 0;
	ExtraDat->intr = 0x80; /* Not yet,fireware support */
	ExtraDat->status0 = 0x10; /* Not yet,fireware support */
	ExtraDat->status1 = 0x00; /* Not yet,fireware support */
	ExtraDat->ovrflg = bbuf[0];
	ExtraDat->mngflg = bbuf[1];
	ExtraDat->logadr = memstick_logaddr(bbuf[2], bbuf[3]);

	return USB_STOR_TRANSPORT_GOOD;
}
/*
 * Parse boot-block page 0 (@PageData) found at physical block @PhyBlock:
 * validate the system-information section, size the translation tables,
 * then walk the system entries (initial defect list, CIS/IDI) reading
 * additional pages from the card as needed.
 */
static int ms_lib_process_bootblock(struct us_data *us, u16 PhyBlock, u8 *PageData)
{
	struct ms_bootblock_sysent *SysEntry;
	struct ms_bootblock_sysinf *SysInfo;
	u32 i, result;
	u8 PageNumber;
	u8 *PageBuffer;
	struct ms_lib_type_extdat ExtraData;
	struct ene_ub6250_info *info = (struct ene_ub6250_info *) us->extra;

	PageBuffer = kzalloc(MS_BYTES_PER_PAGE * 2, GFP_KERNEL);
	if (PageBuffer == NULL)
		return (u32)-1;

	result = (u32)-1;

	SysInfo = &(((struct ms_bootblock_page0 *)PageData)->sysinf);

	/* Reject anything that is not a plain FAT-formatted MS class-1 card */
	if ((SysInfo->bMsClass != MS_SYSINF_MSCLASS_TYPE_1) ||
		(be16_to_cpu(SysInfo->wPageSize) != MS_SYSINF_PAGE_SIZE) ||
		((SysInfo->bSecuritySupport & MS_SYSINF_SECURITY) == MS_SYSINF_SECURITY_SUPPORT) ||
		(SysInfo->bReserved1 != MS_SYSINF_RESERVED1) ||
		(SysInfo->bReserved2 != MS_SYSINF_RESERVED2) ||
		(SysInfo->bFormatType != MS_SYSINF_FORMAT_FAT) ||
		(SysInfo->bUsage != MS_SYSINF_USAGE_GENERAL))
		goto exit;

	/* Latch read-only vs read-write; hybrid cards are not supported */
	switch (info->MS_Lib.cardType = SysInfo->bCardType) {
	case MS_SYSINF_CARDTYPE_RDONLY:
		ms_lib_ctrl_set(info, MS_LIB_CTRL_RDONLY);
		break;
	case MS_SYSINF_CARDTYPE_RDWR:
		ms_lib_ctrl_reset(info, MS_LIB_CTRL_RDONLY);
		break;
	case MS_SYSINF_CARDTYPE_HYBRID:
	default:
		goto exit;
	}

	/* Cache the card geometry (boot block stores counts big-endian) */
	info->MS_Lib.blockSize = be16_to_cpu(SysInfo->wBlockSize);
	info->MS_Lib.NumberOfPhyBlock = be16_to_cpu(SysInfo->wBlockNumber);
	info->MS_Lib.NumberOfLogBlock = be16_to_cpu(SysInfo->wTotalBlockNumber)-2;
	info->MS_Lib.PagesPerBlock = info->MS_Lib.blockSize * SIZE_OF_KIRO / MS_BYTES_PER_PAGE;
	info->MS_Lib.NumberOfSegment = info->MS_Lib.NumberOfPhyBlock / MS_PHYSICAL_BLOCKS_PER_SEGMENT;
	info->MS_Model = be16_to_cpu(SysInfo->wMemorySize);

	/*Allocate to all number of logicalblock and physicalblock */
	if (ms_lib_alloc_logicalmap(us))
		goto exit;

	/* Mark the book block */
	ms_lib_set_bootblockmark(us, PhyBlock);

	SysEntry = &(((struct ms_bootblock_page0 *)PageData)->sysent);

	for (i = 0; i < MS_NUMBER_OF_SYSTEM_ENTRY; i++) {
		u32 EntryOffset, EntrySize;

		/* 0xffffff marks an unused system entry */
		EntryOffset = be32_to_cpu(SysEntry->entry[i].dwStart);

		if (EntryOffset == 0xffffff)
			continue;
		EntrySize = be32_to_cpu(SysEntry->entry[i].dwSize);

		if (EntrySize == 0)
			continue;

		/* Skip entries extending past the block's data area */
		if (EntryOffset + MS_BYTES_PER_PAGE + EntrySize > info->MS_Lib.blockSize * (u32)SIZE_OF_KIRO)
			continue;

		if (i == 0) {
			/* Entry 0: list of factory-marked defective blocks */
			u8 PrevPageNumber = 0;
			u16 phyblk;

			if (SysEntry->entry[i].bType != MS_SYSENT_TYPE_INVALID_BLOCK)
				goto exit;

			/* Each list element is a big-endian 16-bit block number */
			while (EntrySize > 0) {
				PageNumber = (u8)(EntryOffset / MS_BYTES_PER_PAGE + 1);
				if (PageNumber != PrevPageNumber) {
					switch (ms_read_readpage(us, PhyBlock, PageNumber, (u32 *)PageBuffer, &ExtraData)) {
					case MS_STATUS_SUCCESS:
						break;
					case MS_STATUS_WRITE_PROTECT:
					case MS_ERROR_FLASH_READ:
					case MS_STATUS_ERROR:
					default:
						goto exit;
					}

					PrevPageNumber = PageNumber;
				}

				phyblk = be16_to_cpu(*(u16 *)(PageBuffer + (EntryOffset % MS_BYTES_PER_PAGE)));
				if (phyblk < 0x0fff)
					ms_lib_set_initialerrorblock(us, phyblk);

				EntryOffset += 2;
				EntrySize -= 2;
			}
		} else if (i == 1) {  /* CIS/IDI */
			struct ms_bootblock_idi *idi;

			if (SysEntry->entry[i].bType != MS_SYSENT_TYPE_CIS_IDI)
				goto exit;

			switch (ms_read_readpage(us, PhyBlock, (u8)(EntryOffset / MS_BYTES_PER_PAGE + 1), (u32 *)PageBuffer, &ExtraData)) {
			case MS_STATUS_SUCCESS:
				break;
			case MS_STATUS_WRITE_PROTECT:
			case MS_ERROR_FLASH_READ:
			case MS_STATUS_ERROR:
			default:
				goto exit;
			}

			idi = &((struct ms_bootblock_cis_idi *)(PageBuffer + (EntryOffset % MS_BYTES_PER_PAGE)))->idi.idi;
			if (le16_to_cpu(idi->wIDIgeneralConfiguration) != MS_IDI_GENERAL_CONF)
				goto exit;

			info->MS_Lib.BytesPerSector = le16_to_cpu(idi->wIDIbytesPerSector);
			if (info->MS_Lib.BytesPerSector != MS_BYTES_PER_PAGE)
				goto exit;
		}
	} /* End for .. */

	result = 0;

exit:
	if (result)
		ms_lib_free_logicalmap(us);

	kfree(PageBuffer);

	/*
	 * NOTE(review): result is unconditionally reset to 0 here, so this
	 * function reports success to its caller even when validation failed
	 * (the logical map has already been freed above in that case) —
	 * confirm callers detect failure some other way.
	 */
	result = 0;
	return result;
}
/*
 * Tear down the write-block staging area: deselect the write block, clear
 * the per-page dirty map, and release the page-data and extra-data buffers.
 */
static void ms_lib_free_writebuf(struct us_data *us)
{
	struct ene_ub6250_info *info = (struct ene_ub6250_info *) us->extra;

	info->MS_Lib.wrtblk = (u16)-1; /* set to -1 */

	/* memset((fdoExt)->MS_Lib.pagemap, 0, sizeof((fdoExt)->MS_Lib.pagemap)) */
	ms_lib_clear_pagemap(info); /* (pdx)->MS_Lib.pagemap memset 0 in ms.h */

	/* kfree(NULL) is a no-op, so the former NULL guards were redundant */
	kfree(info->MS_Lib.blkpag);
	info->MS_Lib.blkpag = NULL;
	kfree(info->MS_Lib.blkext);
	info->MS_Lib.blkext = NULL;
}
static void ms_lib_free_allocatedarea(struct us_data *us)
{
struct ene_ub6250_info *info = (struct ene_ub6250_info *) us->extra;
ms_lib_free_writebuf(us); /* Free MS_Lib.pagemap */
ms_lib_free_logicalmap(us); /* kfree MS_Lib.Phy2LogMap and MS_Lib.Log2PhyMap */
/* set struct us point flag to 0 */
info->MS_Lib.flags = 0;
info->MS_Lib.BytesPerSector = 0;
info->MS_Lib.SectorsPerCylinder = 0;
info->MS_Lib.cardType = 0;
info->MS_Lib.blockSize = 0;
info->MS_Lib.PagesPerBlock = 0;
info->MS_Lib.NumberOfPhyBlock = 0;
info->MS_Lib.NumberOfLogBlock = 0;
}
static int ms_lib_alloc_writebuf(struct us_data *us)
{
struct ene_ub6250_info *info = (struct ene_ub6250_info *) us->extra;
info->MS_Lib.wrtblk = (u16)-1;
info->MS_Lib.blkpag = kmalloc_array(info->MS_Lib.PagesPerBlock,
info->MS_Lib.BytesPerSector,
GFP_KERNEL);
info->MS_Lib.blkext = kmalloc_array(info->MS_Lib.PagesPerBlock,
sizeof(struct ms_lib_type_extdat),
GFP_KERNEL);
if ((info->MS_Lib.blkpag == NULL) || (info->MS_Lib.blkext == NULL)) {
ms_lib_free_writebuf(us);
return (u32)-1;
}
ms_lib_clear_writebuf(us);
return 0;
}
/*
 * Like ms_lib_set_logicalpair() but silently ignores the MS_LB_NOT_USED
 * sentinel instead of treating it as an error.
 */
static int ms_lib_force_setlogical_pair(struct us_data *us, u16 logblk, u16 phyblk)
{
	struct ene_ub6250_info *info = (struct ene_ub6250_info *) us->extra;
	struct ms_lib_ctrl *lib = &info->MS_Lib;

	if (logblk == MS_LB_NOT_USED)
		return 0;

	if (logblk >= lib->NumberOfLogBlock || phyblk >= lib->NumberOfPhyBlock)
		return (u32)-1;

	lib->Phy2LogMap[phyblk] = logblk;
	lib->Log2PhyMap[logblk] = phyblk;
	return 0;
}
/*
 * Vendor 0xF0/0x08 command: copy a block from @oldphy to @newphy on the
 * card while writing @len pages of new data from @buf, starting at
 * @PhyBlockAddr/@PageNum.
 */
static int ms_read_copyblock(struct us_data *us, u16 oldphy, u16 newphy,
			u16 PhyBlockAddr, u8 PageNum, unsigned char *buf, u16 len)
{
	struct bulk_cb_wrap *bcb = (struct bulk_cb_wrap *) us->iobuf;
	int result;

	result = ene_load_bincode(us, MS_RW_PATTERN);
	if (result != USB_STOR_XFER_GOOD)
		return USB_STOR_TRANSPORT_ERROR;

	memset(bcb, 0, sizeof(struct bulk_cb_wrap));
	bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN);
	bcb->DataTransferLength = 0x200*len;	/* len pages of 512 bytes */
	bcb->Flags = 0x00;
	bcb->CDB[0] = 0xF0;
	bcb->CDB[1] = 0x08;
	/* source and destination physical blocks, little-endian in the CDB */
	bcb->CDB[4] = (unsigned char)(oldphy);
	bcb->CDB[3] = (unsigned char)(oldphy>>8);
	bcb->CDB[2] = 0; /* (BYTE)(oldphy>>16) */
	bcb->CDB[7] = (unsigned char)(newphy);
	bcb->CDB[6] = (unsigned char)(newphy>>8);
	bcb->CDB[5] = 0; /* (BYTE)(newphy>>16) */
	bcb->CDB[9] = (unsigned char)(PhyBlockAddr);
	bcb->CDB[8] = (unsigned char)(PhyBlockAddr>>8);
	bcb->CDB[10] = PageNum;

	result = ene_send_scsi_cmd(us, FDIR_WRITE, buf, 0);
	if (result != USB_STOR_XFER_GOOD)
		return USB_STOR_TRANSPORT_ERROR;

	return USB_STOR_TRANSPORT_GOOD;
}
/* Vendor 0xF2/0x06 command: erase physical block @PhyBlockAddr. */
static int ms_read_eraseblock(struct us_data *us, u32 PhyBlockAddr)
{
	struct bulk_cb_wrap *bcb = (struct bulk_cb_wrap *) us->iobuf;
	int result;
	u32 bn = PhyBlockAddr;

	result = ene_load_bincode(us, MS_RW_PATTERN);
	if (result != USB_STOR_XFER_GOOD)
		return USB_STOR_TRANSPORT_ERROR;

	memset(bcb, 0, sizeof(struct bulk_cb_wrap));
	bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN);
	bcb->DataTransferLength = 0x200;
	bcb->Flags = US_BULK_FLAG_IN;
	bcb->CDB[0] = 0xF2;
	bcb->CDB[1] = 0x06;
	bcb->CDB[4] = (unsigned char)(bn);
	bcb->CDB[3] = (unsigned char)(bn>>8);
	bcb->CDB[2] = (unsigned char)(bn>>16);

	/* No data stage despite the advertised transfer length */
	result = ene_send_scsi_cmd(us, FDIR_READ, NULL, 0);
	if (result != USB_STOR_XFER_GOOD)
		return USB_STOR_TRANSPORT_ERROR;

	return USB_STOR_TRANSPORT_GOOD;
}
/*
 * Scan the disabled-block list stored in page 1 of @PhyBlock and fail with
 * MS_ERROR_FLASH_READ if it names the physical block currently mapped to
 * logical block 0.
 */
static int ms_lib_check_disableblock(struct us_data *us, u16 PhyBlock)
{
	unsigned char *PageBuf = NULL;
	u16 result = MS_STATUS_SUCCESS;
	u16 blk, index = 0;
	struct ms_lib_type_extdat extdat;
	struct ene_ub6250_info *info = (struct ene_ub6250_info *) us->extra;

	PageBuf = kmalloc(MS_BYTES_PER_PAGE, GFP_KERNEL);
	if (PageBuf == NULL) {
		result = MS_NO_MEMORY_ERROR;
		goto exit;
	}

	/* NOTE(review): the return value of this read is ignored */
	ms_read_readpage(us, PhyBlock, 1, (u32 *)PageBuf, &extdat);
	do {
		/*
		 * NOTE(review): PageBuf is unsigned char *, so PageBuf[index]
		 * is a single byte fed to be16_to_cpu(), and index advances by
		 * one byte rather than one 16-bit entry — this looks like it
		 * was meant to read big-endian u16 list entries; confirm
		 * against the on-card format before changing.
		 */
		blk = be16_to_cpu(PageBuf[index]);
		if (blk == MS_LB_NOT_USED)
			break;
		if (blk == info->MS_Lib.Log2PhyMap[0]) {
			result = MS_ERROR_FLASH_READ;
			break;
		}
		index++;
	} while (1);

exit:
	kfree(PageBuf);
	return result;
}
/*
 * Mark @phyblk as an error block acquired at runtime: drop any
 * logical mapping to it and record MS_LB_ACQUIRED_ERROR unless the block
 * already carries the stronger MS_LB_INITIAL_ERROR marking.
 */
static int ms_lib_setacquired_errorblock(struct us_data *us, u16 phyblk)
{
	struct ene_ub6250_info *info = (struct ene_ub6250_info *) us->extra;
	struct ms_lib_ctrl *lib = &info->MS_Lib;
	u16 mapped;

	if (phyblk >= lib->NumberOfPhyBlock)
		return (u32)-1;

	mapped = lib->Phy2LogMap[phyblk];
	if (mapped < lib->NumberOfLogBlock)
		lib->Log2PhyMap[mapped] = MS_LB_NOT_USED;

	if (lib->Phy2LogMap[phyblk] != MS_LB_INITIAL_ERROR)
		lib->Phy2LogMap[phyblk] = MS_LB_ACQUIRED_ERROR;

	return 0;
}
/*
 * Vendor 0xF2/0x05 command: rewrite the extra-area overwrite flag of one
 * page; the remaining three extra bytes are left untouched (0xFF).
 */
static int ms_lib_overwrite_extra(struct us_data *us, u32 PhyBlockAddr,
				u8 PageNum, u8 OverwriteFlag)
{
	struct bulk_cb_wrap *bcb = (struct bulk_cb_wrap *) us->iobuf;
	int result;

	result = ene_load_bincode(us, MS_RW_PATTERN);
	if (result != USB_STOR_XFER_GOOD)
		return USB_STOR_TRANSPORT_ERROR;

	memset(bcb, 0, sizeof(struct bulk_cb_wrap));
	bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN);
	bcb->DataTransferLength = 0x4;
	bcb->Flags = US_BULK_FLAG_IN;
	bcb->CDB[0] = 0xF2;
	bcb->CDB[1] = 0x05;
	bcb->CDB[5] = (unsigned char)(PageNum);
	bcb->CDB[4] = (unsigned char)(PhyBlockAddr);
	bcb->CDB[3] = (unsigned char)(PhyBlockAddr>>8);
	bcb->CDB[2] = (unsigned char)(PhyBlockAddr>>16);
	bcb->CDB[6] = OverwriteFlag;
	bcb->CDB[7] = 0xFF;
	bcb->CDB[8] = 0xFF;
	bcb->CDB[9] = 0xFF;

	/* No data stage despite the advertised transfer length */
	result = ene_send_scsi_cmd(us, FDIR_READ, NULL, 0);
	if (result != USB_STOR_XFER_GOOD)
		return USB_STOR_TRANSPORT_ERROR;

	return USB_STOR_TRANSPORT_GOOD;
}
/*
 * Record @phyblk as an acquired error block; when the card is writable,
 * also clear the on-card block-status bit in its overwrite area.
 */
static int ms_lib_error_phyblock(struct us_data *us, u16 phyblk)
{
	struct ene_ub6250_info *info = (struct ene_ub6250_info *) us->extra;

	if (phyblk >= info->MS_Lib.NumberOfPhyBlock)
		return MS_STATUS_ERROR;

	ms_lib_setacquired_errorblock(us, phyblk);

	if (!ms_lib_iswritable(info))
		return MS_STATUS_SUCCESS;

	return ms_lib_overwrite_extra(us, phyblk, 0,
				      (u8)(~MS_REG_OVR_BKST & BYTE_MASK));
}
/*
 * Unmap and erase physical block @phyblk. On a successful erase the block
 * becomes MS_LB_NOT_USED_ERASED; an erase failure marks it as an acquired
 * error block (and a hard error additionally forces the card read-only).
 * Non-writable cards skip the on-card erase entirely.
 */
static int ms_lib_erase_phyblock(struct us_data *us, u16 phyblk)
{
	u16 log;
	struct ene_ub6250_info *info = (struct ene_ub6250_info *) us->extra;

	if (phyblk >= info->MS_Lib.NumberOfPhyBlock)
		return MS_STATUS_ERROR;

	/* Drop any logical mapping before touching the card */
	log = info->MS_Lib.Phy2LogMap[phyblk];
	if (log < info->MS_Lib.NumberOfLogBlock)
		info->MS_Lib.Log2PhyMap[log] = MS_LB_NOT_USED;

	info->MS_Lib.Phy2LogMap[phyblk] = MS_LB_NOT_USED;

	if (ms_lib_iswritable(info)) {
		switch (ms_read_eraseblock(us, phyblk)) {
		case MS_STATUS_SUCCESS:
			info->MS_Lib.Phy2LogMap[phyblk] = MS_LB_NOT_USED_ERASED;
			return MS_STATUS_SUCCESS;
		case MS_ERROR_FLASH_ERASE:
		case MS_STATUS_INT_ERROR:
			ms_lib_error_phyblock(us, phyblk);
			return MS_ERROR_FLASH_ERASE;
		case MS_STATUS_ERROR:
		default:
			ms_lib_ctrl_set(info, MS_LIB_CTRL_RDONLY); /* MS_LibCtrlSet will used by ENE_MSInit ,need check, and why us to info*/
			ms_lib_setacquired_errorblock(us, phyblk);
			return MS_STATUS_ERROR;
		}
	}

	ms_lib_setacquired_errorblock(us, phyblk);

	return MS_STATUS_SUCCESS;
}
/*
 * Read the 4-byte "extra" area of one page (vendor CDB 0xF1/0x03) into the
 * bounce buffer and unpack it into *ExtraDat.  The intr/status0/status1
 * fields are filled with fixed placeholder values because the firmware
 * does not report them (see inline comments).
 */
static int ms_lib_read_extra(struct us_data *us, u32 PhyBlock,
		u8 PageNum, struct ms_lib_type_extdat *ExtraDat)
{
	struct bulk_cb_wrap *bcb = (struct bulk_cb_wrap *) us->iobuf;
	struct ene_ub6250_info *info = (struct ene_ub6250_info *) us->extra;
	u8 *bbuf = info->bbuf;
	int result;

	memset(bcb, 0, sizeof(struct bulk_cb_wrap));
	bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN);
	bcb->DataTransferLength = 0x4;
	bcb->Flags = US_BULK_FLAG_IN;
	bcb->CDB[0] = 0xF1;
	bcb->CDB[1] = 0x03;
	bcb->CDB[5] = (unsigned char)(PageNum);
	/* 24-bit physical block address, big-endian in CDB[2..4]. */
	bcb->CDB[4] = (unsigned char)(PhyBlock);
	bcb->CDB[3] = (unsigned char)(PhyBlock>>8);
	bcb->CDB[2] = (unsigned char)(PhyBlock>>16);
	bcb->CDB[6] = 0x01;	/* one page's worth of extra data */

	result = ene_send_scsi_cmd(us, FDIR_READ, bbuf, 0);
	if (result != USB_STOR_XFER_GOOD)
		return USB_STOR_TRANSPORT_ERROR;

	ExtraDat->reserved = 0;
	ExtraDat->intr = 0x80; /* Not yet, waiting for firmware support */
	ExtraDat->status0 = 0x10; /* Not yet, waiting for firmware support */
	ExtraDat->status1 = 0x00; /* Not yet, waiting for firmware support */
	ExtraDat->ovrflg = bbuf[0];
	ExtraDat->mngflg = bbuf[1];
	ExtraDat->logadr = memstick_logaddr(bbuf[2], bbuf[3]);

	return USB_STOR_TRANSPORT_GOOD;
}
/*
 * Starting just after @phyblk and wrapping within the same segment, search
 * for a free physical block usable for a write.  "Not used" candidates are
 * validated through their extra data and erased before being handed out;
 * blocks that fail validation are retired.  Returns the block number, or
 * MS_LB_ERROR / MS_NOCARD_ERROR on failure.
 */
static int ms_libsearch_block_from_physical(struct us_data *us, u16 phyblk)
{
	u16 blk;
	struct ms_lib_type_extdat extdat; /* need check */
	struct ene_ub6250_info *info = (struct ene_ub6250_info *) us->extra;

	if (phyblk >= info->MS_Lib.NumberOfPhyBlock)
		return MS_LB_ERROR;

	for (blk = phyblk + 1; blk != phyblk; blk++) {
		/* Crossed a segment boundary: wrap back to segment start. */
		if ((blk & MS_PHYSICAL_BLOCKS_PER_SEGMENT_MASK) == 0)
			blk -= MS_PHYSICAL_BLOCKS_PER_SEGMENT;

		if (info->MS_Lib.Phy2LogMap[blk] == MS_LB_NOT_USED_ERASED) {
			return blk;	/* already erased: ready to use */
		} else if (info->MS_Lib.Phy2LogMap[blk] == MS_LB_NOT_USED) {
			switch (ms_lib_read_extra(us, blk, 0, &extdat)) {
			case MS_STATUS_SUCCESS:
			case MS_STATUS_SUCCESS_WITH_ECC:
				break;
			case MS_NOCARD_ERROR:
				return MS_NOCARD_ERROR;
			case MS_STATUS_INT_ERROR:
				return MS_LB_ERROR;
			case MS_ERROR_FLASH_READ:
			default:
				/* Unreadable extra data: retire the block. */
				ms_lib_setacquired_errorblock(us, blk);
				continue;
			} /* End switch */

			/* Skip blocks marked bad on the medium itself. */
			if ((extdat.ovrflg & MS_REG_OVR_BKST) != MS_REG_OVR_BKST_OK) {
				ms_lib_setacquired_errorblock(us, blk);
				continue;
			}

			switch (ms_lib_erase_phyblock(us, blk)) {
			case MS_STATUS_SUCCESS:
				return blk;
			case MS_STATUS_ERROR:
				return MS_LB_ERROR;
			case MS_ERROR_FLASH_ERASE:
			default:
				ms_lib_error_phyblock(us, blk);
				break;
			}
		}
	} /* End for */

	return MS_LB_ERROR;
}
/*
 * Find a free physical block for writing logical block @logblk.  If the
 * logical block currently has a physical mapping, search near that block;
 * otherwise start the search from the last physical block of the segment
 * the logical block belongs to.
 */
static int ms_libsearch_block_from_logical(struct us_data *us, u16 logblk)
{
	u16 phyblk;
	struct ene_ub6250_info *info = (struct ene_ub6250_info *) us->extra;

	phyblk = ms_libconv_to_physical(info, logblk);
	if (phyblk >= MS_LB_ERROR) {	/* no valid mapping yet */
		if (logblk >= info->MS_Lib.NumberOfLogBlock)
			return MS_LB_ERROR;

		/* Last physical block of the segment that owns @logblk. */
		phyblk = (logblk + MS_NUMBER_OF_BOOT_BLOCK) / MS_LOGICAL_BLOCKS_PER_SEGMENT;
		phyblk *= MS_PHYSICAL_BLOCKS_PER_SEGMENT;
		phyblk += MS_PHYSICAL_BLOCKS_PER_SEGMENT - 1;
	}

	return ms_libsearch_block_from_physical(us, phyblk);
}
/*
 * TEST UNIT READY for the MemoryStick slot.
 *
 * If the card is not both inserted and initialized, (re)run MS
 * initialization; in every case the command reports success, matching
 * the historical behavior.  (The original had two identical-return
 * branches plus an unreachable trailing return; simplified here with
 * no behavior change.)
 */
static int ms_scsi_test_unit_ready(struct us_data *us, struct scsi_cmnd *srb)
{
	struct ene_ub6250_info *info = (struct ene_ub6250_info *)(us->extra);

	/* pr_info("MS_SCSI_Test_Unit_Ready\n"); */
	if (!(info->MS_Status & MS_Insert) || !(info->MS_Status & MS_Ready))
		ene_ms_init(us);

	return USB_STOR_TRANSPORT_GOOD;
}
/*
 * MODE SENSE(6) for the MemoryStick slot: return a canned 12-byte reply,
 * with bit 7 of byte 2 reflecting the card's write-protect state.
 */
static int ms_scsi_mode_sense(struct us_data *us, struct scsi_cmnd *srb)
{
	struct ene_ub6250_info *info = (struct ene_ub6250_info *) us->extra;
	unsigned char mediaNoWP[12] = {
		0x0b, 0x00, 0x00, 0x08, 0x00, 0x00,
		0x71, 0xc0, 0x00, 0x00, 0x02, 0x00 };
	unsigned char mediaWP[12] = {
		0x0b, 0x00, 0x80, 0x08, 0x00, 0x00,
		0x71, 0xc0, 0x00, 0x00, 0x02, 0x00 };
	unsigned char *reply;

	/* Pick the reply matching the write-protect flag. */
	reply = (info->MS_Status & MS_WtP) ? mediaWP : mediaNoWP;
	usb_stor_set_xfer_buf(reply, 12, srb);

	return USB_STOR_TRANSPORT_GOOD;
}
/*
 * READ CAPACITY(10) for the MemoryStick slot: report the last LBA and a
 * fixed 512-byte sector size, big-endian as the SCSI spec requires.
 */
static int ms_scsi_read_capacity(struct us_data *us, struct scsi_cmnd *srb)
{
	u32 bl_num;
	u16 bl_len;
	unsigned int offset = 0;
	unsigned char buf[8];
	struct scatterlist *sg = NULL;
	struct ene_ub6250_info *info = (struct ene_ub6250_info *) us->extra;

	usb_stor_dbg(us, "ms_scsi_read_capacity\n");
	bl_len = 0x200;		/* sector size is always 512 bytes */
	if (info->MS_Status & MS_IsMSPro)
		bl_num = info->MSP_TotalBlock - 1;
	else
		bl_num = info->MS_Lib.NumberOfLogBlock * info->MS_Lib.blockSize * 2 - 1;

	info->bl_num = bl_num;	/* cached for range checks in read/write */
	usb_stor_dbg(us, "bl_len = %x\n", bl_len);
	usb_stor_dbg(us, "bl_num = %x\n", bl_num);

	/*srb->request_bufflen = 8; */
	/* Big-endian last LBA ... */
	buf[0] = (bl_num >> 24) & 0xff;
	buf[1] = (bl_num >> 16) & 0xff;
	buf[2] = (bl_num >> 8) & 0xff;
	buf[3] = (bl_num >> 0) & 0xff;
	/* ... then big-endian block length (bl_len is u16, so the two
	 * high bytes below are always zero). */
	buf[4] = (bl_len >> 24) & 0xff;
	buf[5] = (bl_len >> 16) & 0xff;
	buf[6] = (bl_len >> 8) & 0xff;
	buf[7] = (bl_len >> 0) & 0xff;

	usb_stor_access_xfer_buf(buf, 8, srb, &sg, &offset, TO_XFER_BUF);

	return USB_STOR_TRANSPORT_GOOD;
}
/*
 * Compute the half-open logical block range [*LogStart, *LogEnde) served
 * by the segment containing physical block @PhyBlock.  Segment 0 holds
 * the boot blocks and therefore maps fewer logical blocks.
 */
static void ms_lib_phy_to_log_range(u16 PhyBlock, u16 *LogStart, u16 *LogEnde)
{
	u16 seg = PhyBlock / MS_PHYSICAL_BLOCKS_PER_SEGMENT;

	if (seg == 0) {
		*LogStart = 0;
		*LogEnde = MS_LOGICAL_BLOCKS_IN_1ST_SEGMENT;	/* 494 */
	} else {
		*LogStart = MS_LOGICAL_BLOCKS_IN_1ST_SEGMENT +
			(seg - 1) * MS_LOGICAL_BLOCKS_PER_SEGMENT;	/* 496 */
		*LogEnde = *LogStart + MS_LOGICAL_BLOCKS_PER_SEGMENT;	/* 496 */
	}
}
/*
 * Read the 4-byte extra areas of @blen consecutive blocks starting at
 * @PhyBlock/@PageNum (vendor CDB 0xF1/0x03) into @buf in one transfer.
 * Used to batch extra-data reads during the logical-block scan.
 */
static int ms_lib_read_extrablock(struct us_data *us, u32 PhyBlock,
		u8 PageNum, u8 blen, void *buf)
{
	struct bulk_cb_wrap *bcb = (struct bulk_cb_wrap *) us->iobuf;
	int result;

	/* Read Extra Data */
	memset(bcb, 0, sizeof(struct bulk_cb_wrap));
	bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN);
	bcb->DataTransferLength = 0x4 * blen;	/* 4 bytes per block */
	bcb->Flags = US_BULK_FLAG_IN;
	bcb->CDB[0] = 0xF1;
	bcb->CDB[1] = 0x03;
	bcb->CDB[5] = (unsigned char)(PageNum);
	/* 24-bit physical block address, big-endian in CDB[2..4]. */
	bcb->CDB[4] = (unsigned char)(PhyBlock);
	bcb->CDB[3] = (unsigned char)(PhyBlock>>8);
	bcb->CDB[2] = (unsigned char)(PhyBlock>>16);
	bcb->CDB[6] = blen;

	result = ene_send_scsi_cmd(us, FDIR_READ, buf, 0);
	if (result != USB_STOR_XFER_GOOD)
		return USB_STOR_TRANSPORT_ERROR;

	return USB_STOR_TRANSPORT_GOOD;
}
/*
 * Walk every physical block on the card, read its extra data (batched in
 * 0x80-block chunks into info->bbuf) and build the logical-to-physical
 * mapping.  Bad blocks are retired, attribute-table blocks are recycled,
 * and duplicate logical addresses are resolved by erasing the block that
 * was still mid-update.  @btBlk1st is the first boot block, used by the
 * disable-block check.
 */
static int ms_lib_scan_logicalblocknumber(struct us_data *us, u16 btBlk1st)
{
	u16 PhyBlock, newblk, i;
	u16 LogStart, LogEnde;
	struct ms_lib_type_extdat extdat;
	u32 count = 0, index = 0;
	struct ene_ub6250_info *info = (struct ene_ub6250_info *) us->extra;
	u8 *bbuf = info->bbuf;

	for (PhyBlock = 0; PhyBlock < info->MS_Lib.NumberOfPhyBlock;) {
		/* Valid logical range for the current segment. */
		ms_lib_phy_to_log_range(PhyBlock, &LogStart, &LogEnde);

		for (i = 0; i < MS_PHYSICAL_BLOCKS_PER_SEGMENT; i++, PhyBlock++) {
			switch (ms_libconv_to_logical(info, PhyBlock)) {
			case MS_STATUS_ERROR:
				continue;
			default:
				break;
			}

			/* Refill the extra-data cache every 0x80 blocks. */
			if (count == PhyBlock) {
				ms_lib_read_extrablock(us, PhyBlock, 0, 0x80,
						bbuf);
				count += 0x80;
			}
			index = (PhyBlock % 0x80) * 4;	/* 4 bytes per block */

			extdat.ovrflg = bbuf[index];
			extdat.mngflg = bbuf[index+1];
			extdat.logadr = memstick_logaddr(bbuf[index+2],
					bbuf[index+3]);

			/* Bad-block flag set on the medium: retire it. */
			if ((extdat.ovrflg & MS_REG_OVR_BKST) != MS_REG_OVR_BKST_OK) {
				ms_lib_setacquired_errorblock(us, PhyBlock);
				continue;
			}

			/* Attribute-table blocks are recycled as free space. */
			if ((extdat.mngflg & MS_REG_MNG_ATFLG) == MS_REG_MNG_ATFLG_ATTBL) {
				ms_lib_erase_phyblock(us, PhyBlock);
				continue;
			}

			if (extdat.logadr != MS_LB_NOT_USED) {
				/* Logical address must fall in this segment. */
				if ((extdat.logadr < LogStart) || (LogEnde <= extdat.logadr)) {
					ms_lib_erase_phyblock(us, PhyBlock);
					continue;
				}

				/* Duplicate mapping: decide which copy wins. */
				newblk = ms_libconv_to_physical(info, extdat.logadr);
				if (newblk != MS_LB_NOT_USED) {
					if (extdat.logadr == 0) {
						ms_lib_set_logicalpair(us, extdat.logadr, PhyBlock);
						if (ms_lib_check_disableblock(us, btBlk1st)) {
							ms_lib_set_logicalpair(us, extdat.logadr, newblk);
							continue;
						}
					}

					/* Keep the copy not flagged as mid-update. */
					ms_lib_read_extra(us, newblk, 0, &extdat);
					if ((extdat.ovrflg & MS_REG_OVR_UDST) == MS_REG_OVR_UDST_UPDATING) {
						ms_lib_erase_phyblock(us, PhyBlock);
						continue;
					} else {
						ms_lib_erase_phyblock(us, newblk);
					}
				}

				ms_lib_set_logicalpair(us, extdat.logadr, PhyBlock);
			}
		}
	} /* End for ... */

	return MS_STATUS_SUCCESS;
}
/*
 * READ(10) for the MemoryStick slot.
 *
 * MS Pro cards are read with a single vendor command (0xF1/0x02) straight
 * into the SCSI scatter-gather list.  Plain MS cards are read block by
 * block through a kmalloc'd bounce buffer: each logical block is mapped
 * to its physical block, and at most one block's remaining pages are
 * transferred per vendor command.
 */
static int ms_scsi_read(struct us_data *us, struct scsi_cmnd *srb)
{
	int result;
	unsigned char *cdb = srb->cmnd;
	struct bulk_cb_wrap *bcb = (struct bulk_cb_wrap *) us->iobuf;
	struct ene_ub6250_info *info = (struct ene_ub6250_info *) us->extra;

	/* Big-endian LBA (CDB[2..5]) and transfer length (CDB[7..8]). */
	u32 bn = ((cdb[2] << 24) & 0xff000000) | ((cdb[3] << 16) & 0x00ff0000) |
		((cdb[4] << 8) & 0x0000ff00) | ((cdb[5] << 0) & 0x000000ff);
	u16 blen = ((cdb[7] << 8) & 0xff00) | ((cdb[8] << 0) & 0x00ff);
	u32 blenByte = blen * 0x200;	/* total bytes (512 per sector) */

	if (bn > info->bl_num)
		return USB_STOR_TRANSPORT_ERROR;

	if (info->MS_Status & MS_IsMSPro) {
		result = ene_load_bincode(us, MSP_RW_PATTERN);
		if (result != USB_STOR_XFER_GOOD) {
			usb_stor_dbg(us, "Load MPS RW pattern Fail !!\n");
			return USB_STOR_TRANSPORT_ERROR;
		}

		/* set up the command wrapper */
		memset(bcb, 0, sizeof(struct bulk_cb_wrap));
		bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN);
		bcb->DataTransferLength = blenByte;
		bcb->Flags = US_BULK_FLAG_IN;
		bcb->CDB[0] = 0xF1;
		bcb->CDB[1] = 0x02;
		/* 32-bit sector number, big-endian in CDB[2..5]. */
		bcb->CDB[5] = (unsigned char)(bn);
		bcb->CDB[4] = (unsigned char)(bn>>8);
		bcb->CDB[3] = (unsigned char)(bn>>16);
		bcb->CDB[2] = (unsigned char)(bn>>24);

		result = ene_send_scsi_cmd(us, FDIR_READ, scsi_sglist(srb), 1);
	} else {
		void *buf;
		int offset = 0;
		u16 phyblk, logblk;
		u8 PageNum;
		u16 len;
		u32 blkno;

		buf = kmalloc(blenByte, GFP_KERNEL);
		if (buf == NULL)
			return USB_STOR_TRANSPORT_ERROR;

		result = ene_load_bincode(us, MS_RW_PATTERN);
		if (result != USB_STOR_XFER_GOOD) {
			pr_info("Load MS RW pattern Fail !!\n");
			result = USB_STOR_TRANSPORT_ERROR;
			goto exit;
		}

		/* Sector number -> (logical block, page within block). */
		logblk = (u16)(bn / info->MS_Lib.PagesPerBlock);
		PageNum = (u8)(bn % info->MS_Lib.PagesPerBlock);

		while (1) {
			/* Transfer at most the rest of this block. */
			if (blen > (info->MS_Lib.PagesPerBlock-PageNum))
				len = info->MS_Lib.PagesPerBlock-PageNum;
			else
				len = blen;

			phyblk = ms_libconv_to_physical(info, logblk);
			blkno = phyblk * 0x20 + PageNum;

			/* set up the command wrapper */
			memset(bcb, 0, sizeof(struct bulk_cb_wrap));
			bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN);
			bcb->DataTransferLength = 0x200 * len;
			bcb->Flags = US_BULK_FLAG_IN;
			bcb->CDB[0] = 0xF1;
			bcb->CDB[1] = 0x02;
			bcb->CDB[5] = (unsigned char)(blkno);
			bcb->CDB[4] = (unsigned char)(blkno>>8);
			bcb->CDB[3] = (unsigned char)(blkno>>16);
			bcb->CDB[2] = (unsigned char)(blkno>>24);

			result = ene_send_scsi_cmd(us, FDIR_READ, buf+offset, 0);
			if (result != USB_STOR_XFER_GOOD) {
				pr_info("MS_SCSI_Read --- result = %x\n", result);
				result = USB_STOR_TRANSPORT_ERROR;
				goto exit;
			}

			blen -= len;
			if (blen <= 0)	/* blen is u16: only 0 terminates */
				break;
			logblk++;
			PageNum = 0;	/* later blocks start at page 0 */
			offset += MS_BYTES_PER_PAGE*len;
		}

		/* Copy the assembled data into the SCSI buffer. */
		usb_stor_set_xfer_buf(buf, blenByte, srb);
exit:
		kfree(buf);
	}
	return result;
}
/*
 * WRITE(10) for the MemoryStick slot.
 *
 * MS Pro cards are written with a single vendor command (0xF0/0x04) from
 * the SCSI scatter-gather list.  Plain MS cards use copy-on-write: for
 * each logical block touched, a fresh physical block is found, old and
 * new data are merged via ms_read_copyblock(), and the mapping tables
 * are updated to point at the new block.
 */
static int ms_scsi_write(struct us_data *us, struct scsi_cmnd *srb)
{
	int result;
	struct bulk_cb_wrap *bcb = (struct bulk_cb_wrap *) us->iobuf;
	unsigned char *cdb = srb->cmnd;
	struct ene_ub6250_info *info = (struct ene_ub6250_info *) us->extra;

	/* Big-endian LBA (CDB[2..5]) and transfer length (CDB[7..8]). */
	u32 bn = ((cdb[2] << 24) & 0xff000000) |
			((cdb[3] << 16) & 0x00ff0000) |
			((cdb[4] << 8) & 0x0000ff00) |
			((cdb[5] << 0) & 0x000000ff);
	u16 blen = ((cdb[7] << 8) & 0xff00) | ((cdb[8] << 0) & 0x00ff);
	u32 blenByte = blen * 0x200;	/* total bytes (512 per sector) */

	if (bn > info->bl_num)
		return USB_STOR_TRANSPORT_ERROR;

	if (info->MS_Status & MS_IsMSPro) {
		result = ene_load_bincode(us, MSP_RW_PATTERN);
		if (result != USB_STOR_XFER_GOOD) {
			pr_info("Load MSP RW pattern Fail !!\n");
			return USB_STOR_TRANSPORT_ERROR;
		}

		/* set up the command wrapper */
		memset(bcb, 0, sizeof(struct bulk_cb_wrap));
		bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN);
		bcb->DataTransferLength = blenByte;
		bcb->Flags = 0x00;	/* host-to-device */
		bcb->CDB[0] = 0xF0;
		bcb->CDB[1] = 0x04;
		/* 32-bit sector number, big-endian in CDB[2..5]. */
		bcb->CDB[5] = (unsigned char)(bn);
		bcb->CDB[4] = (unsigned char)(bn>>8);
		bcb->CDB[3] = (unsigned char)(bn>>16);
		bcb->CDB[2] = (unsigned char)(bn>>24);

		result = ene_send_scsi_cmd(us, FDIR_WRITE, scsi_sglist(srb), 1);
	} else {
		void *buf;
		int offset = 0;
		u16 PhyBlockAddr;	/* actually the LOGICAL block number */
		u8 PageNum;
		u16 len, oldphy, newphy;

		buf = kmalloc(blenByte, GFP_KERNEL);
		if (buf == NULL)
			return USB_STOR_TRANSPORT_ERROR;

		/* Pull the outgoing data out of the SCSI buffer first. */
		usb_stor_set_xfer_buf(buf, blenByte, srb);

		result = ene_load_bincode(us, MS_RW_PATTERN);
		if (result != USB_STOR_XFER_GOOD) {
			pr_info("Load MS RW pattern Fail !!\n");
			result = USB_STOR_TRANSPORT_ERROR;
			goto exit;
		}

		/* Sector number -> (logical block, page within block). */
		PhyBlockAddr = (u16)(bn / info->MS_Lib.PagesPerBlock);
		PageNum = (u8)(bn % info->MS_Lib.PagesPerBlock);

		while (1) {
			/* Write at most the rest of this block. */
			if (blen > (info->MS_Lib.PagesPerBlock-PageNum))
				len = info->MS_Lib.PagesPerBlock-PageNum;
			else
				len = blen;

			/* Copy-on-write: merge into a freshly found block. */
			oldphy = ms_libconv_to_physical(info, PhyBlockAddr); /* need check us <-> info */
			newphy = ms_libsearch_block_from_logical(us, PhyBlockAddr);

			result = ms_read_copyblock(us, oldphy, newphy, PhyBlockAddr, PageNum, buf+offset, len);

			if (result != USB_STOR_XFER_GOOD) {
				pr_info("MS_SCSI_Write --- result = %x\n", result);
				result =  USB_STOR_TRANSPORT_ERROR;
				goto exit;
			}

			/* Old block is free again; map logical -> new block. */
			info->MS_Lib.Phy2LogMap[oldphy] = MS_LB_NOT_USED_ERASED;
			ms_lib_force_setlogical_pair(us, PhyBlockAddr, newphy);

			blen -= len;
			if (blen <= 0)	/* blen is u16: only 0 terminates */
				break;
			PhyBlockAddr++;
			PageNum = 0;	/* later blocks start at page 0 */
			offset += MS_BYTES_PER_PAGE*len;
		}
exit:
		kfree(buf);
	}
	return result;
}
/*
* ENE MS Card
*/
/*
 * Read one status byte from the reader's register @index (vendor command
 * 0xED) into @buf.  Returns the transfer status from ene_send_scsi_cmd().
 */
static int ene_get_card_type(struct us_data *us, u16 index, void *buf)
{
	struct bulk_cb_wrap *bcb = (struct bulk_cb_wrap *) us->iobuf;

	memset(bcb, 0, sizeof(struct bulk_cb_wrap));
	bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN);
	bcb->DataTransferLength	= 0x01;
	bcb->Flags = US_BULK_FLAG_IN;
	bcb->CDB[0] = 0xED;
	bcb->CDB[2] = (unsigned char)(index >> 8);	/* register, big-endian */
	bcb->CDB[3] = (unsigned char)index;

	return ene_send_scsi_cmd(us, FDIR_READ, buf, 0);
}
/*
 * Parse the SD status/CSD block previously read into @buf and fill in the
 * capacity-related fields (READ_BL_LEN, C_SIZE, C_SIZE_MULT, HC_C_SIZE,
 * Block_Mult) of the per-device info.
 *
 * NOTE(review): the u32 loads below read byte offsets 0x14/0x18/0x100 via
 * pointer casts, assuming suitable alignment and that the buffer layout
 * matches the firmware's word order -- inherited behavior, confirm before
 * changing.
 */
static int ene_get_card_status(struct us_data *us, u8 *buf)
{
	u16 tmpreg;
	u32 reg4b;
	struct ene_ub6250_info *info = (struct ene_ub6250_info *) us->extra;

	/*usb_stor_dbg(us, "transport --- ENE_ReadSDReg\n");*/
	reg4b = *(u32 *)&buf[0x18];
	info->SD_READ_BL_LEN = (u8)((reg4b >> 8) & 0x0f);

	tmpreg = (u16) reg4b;
	reg4b = *(u32 *)(&buf[0x14]);
	/* SDHC (non-MMC) cards report capacity in the 22-bit HC_C_SIZE. */
	if ((info->SD_Status & SD_HiCapacity) && !(info->SD_Status & SD_IsMMC))
		info->HC_C_SIZE = (reg4b >> 8) & 0x3fffff;

	info->SD_C_SIZE = ((tmpreg & 0x03) << 10) | (u16)(reg4b >> 22);
	info->SD_C_SIZE_MULT = (u8)(reg4b >> 7) & 0x07;
	/* High-capacity MMC keeps its sector count at offset 0x100. */
	if ((info->SD_Status & SD_HiCapacity) && (info->SD_Status & SD_IsMMC))
		info->HC_C_SIZE = *(u32 *)(&buf[0x100]);

	if (info->SD_READ_BL_LEN > SD_BLOCK_LEN) {
		/* Clamp the block length to 512 bytes; scale the count. */
		info->SD_Block_Mult = 1 << (info->SD_READ_BL_LEN-SD_BLOCK_LEN);
		info->SD_READ_BL_LEN = SD_BLOCK_LEN;
	} else {
		info->SD_Block_Mult = 1;
	}

	return USB_STOR_TRANSPORT_GOOD;
}
/*
 * Upload the firmware blob selected by @flag into the reader (vendor
 * command 0xEF).  Uploads are cached: if the requested pattern is already
 * resident (info->BIN_FLAG) nothing is sent.
 *
 * Returns USB_STOR_TRANSPORT_GOOD on success; on any failure the last
 * transfer status (initialized to USB_STOR_TRANSPORT_ERROR) is returned.
 */
static int ene_load_bincode(struct us_data *us, unsigned char flag)
{
	int err;
	char *fw_name = NULL;
	unsigned char *buf = NULL;
	const struct firmware *sd_fw = NULL;
	int result = USB_STOR_TRANSPORT_ERROR;
	struct bulk_cb_wrap *bcb = (struct bulk_cb_wrap *) us->iobuf;
	struct ene_ub6250_info *info = (struct ene_ub6250_info *) us->extra;

	/* Already loaded: nothing to do. */
	if (info->BIN_FLAG == flag)
		return USB_STOR_TRANSPORT_GOOD;

	/* Map the requested pattern to its firmware file name. */
	switch (flag) {
	/* For SD */
	case SD_INIT1_PATTERN:
		usb_stor_dbg(us, "SD_INIT1_PATTERN\n");
		fw_name = SD_INIT1_FIRMWARE;
		break;
	case SD_INIT2_PATTERN:
		usb_stor_dbg(us, "SD_INIT2_PATTERN\n");
		fw_name = SD_INIT2_FIRMWARE;
		break;
	case SD_RW_PATTERN:
		usb_stor_dbg(us, "SD_RW_PATTERN\n");
		fw_name = SD_RW_FIRMWARE;
		break;
	/* For MS */
	case MS_INIT_PATTERN:
		usb_stor_dbg(us, "MS_INIT_PATTERN\n");
		fw_name = MS_INIT_FIRMWARE;
		break;
	case MSP_RW_PATTERN:
		usb_stor_dbg(us, "MSP_RW_PATTERN\n");
		fw_name = MSP_RW_FIRMWARE;
		break;
	case MS_RW_PATTERN:
		usb_stor_dbg(us, "MS_RW_PATTERN\n");
		fw_name = MS_RW_FIRMWARE;
		break;
	default:
		usb_stor_dbg(us, "----------- Unknown PATTERN ----------\n");
		goto nofw;
	}

	err = request_firmware(&sd_fw, fw_name, &us->pusb_dev->dev);
	if (err) {
		usb_stor_dbg(us, "load firmware %s failed\n", fw_name);
		goto nofw;
	}
	/* Copy to a DMA-able buffer; firmware data itself is read-only. */
	buf = kmemdup(sd_fw->data, sd_fw->size, GFP_KERNEL);
	if (buf == NULL)
		goto nofw;

	memset(bcb, 0, sizeof(struct bulk_cb_wrap));
	bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN);
	bcb->DataTransferLength = sd_fw->size;
	bcb->Flags = 0x00;	/* host-to-device */
	bcb->CDB[0] = 0xEF;

	result = ene_send_scsi_cmd(us, FDIR_WRITE, buf, 0);
	if (us->srb != NULL)
		scsi_set_resid(us->srb, 0);
	/*
	 * NOTE(review): BIN_FLAG is updated even when the upload transfer
	 * failed, so a retry would be skipped -- confirm intended.
	 */
	info->BIN_FLAG = flag;
	kfree(buf);

nofw:
	release_firmware(sd_fw);
	return result;
}
/*
 * Initialize a plain (non-Pro) MemoryStick: locate the two boot blocks,
 * parse one of them, build the logical-to-physical block map, detect
 * write protection, and allocate the write buffer.
 * Returns MS_STATUS_SUCCESS or an MS_*_ERROR code.
 */
static int ms_card_init(struct us_data *us)
{
	u32 result;
	u16 TmpBlock;
	unsigned char *PageBuffer0 = NULL, *PageBuffer1 = NULL;
	struct ms_lib_type_extdat extdat;
	u16 btBlk1st, btBlk2nd;
	u32 btBlk1stErred;
	struct ene_ub6250_info *info = (struct ene_ub6250_info *) us->extra;

	printk(KERN_INFO "MS_CardInit start\n");

	ms_lib_free_allocatedarea(us);	/* Clean buffer and set struct us_data flag to 0 */

	/* get two PageBuffer */
	PageBuffer0 = kmalloc(MS_BYTES_PER_PAGE, GFP_KERNEL);
	PageBuffer1 = kmalloc(MS_BYTES_PER_PAGE, GFP_KERNEL);
	if ((PageBuffer0 == NULL) || (PageBuffer1 == NULL)) {
		result = MS_NO_MEMORY_ERROR;
		goto exit;
	}

	btBlk1st = btBlk2nd = MS_LB_NOT_USED;
	btBlk1stErred = 0;

	/*
	 * Scan the first few physical blocks for the (up to two) boot
	 * blocks, identified by header ID/version/entry-count checks.
	 */
	for (TmpBlock = 0; TmpBlock < MS_MAX_INITIAL_ERROR_BLOCKS+2; TmpBlock++) {

		switch (ms_read_readpage(us, TmpBlock, 0, (u32 *)PageBuffer0, &extdat)) {
		case MS_STATUS_SUCCESS:
			break;
		case MS_STATUS_INT_ERROR:
			break;
		case MS_STATUS_ERROR:
		default:
			continue;
		}

		if ((extdat.ovrflg & MS_REG_OVR_BKST) == MS_REG_OVR_BKST_NG)
			continue;

		if (((extdat.mngflg & MS_REG_MNG_SYSFLG) == MS_REG_MNG_SYSFLG_USER) ||
			(be16_to_cpu(((struct ms_bootblock_page0 *)PageBuffer0)->header.wBlockID) != MS_BOOT_BLOCK_ID) ||
			(be16_to_cpu(((struct ms_bootblock_page0 *)PageBuffer0)->header.wFormatVersion) != MS_BOOT_BLOCK_FORMAT_VERSION) ||
			(((struct ms_bootblock_page0 *)PageBuffer0)->header.bNumberOfDataEntry != MS_BOOT_BLOCK_DATA_ENTRIES))
				continue;

		if (btBlk1st != MS_LB_NOT_USED) {
			btBlk2nd = TmpBlock;
			break;	/* both boot blocks found */
		}

		btBlk1st = TmpBlock;
		/* Keep a copy of the first boot block's page 0. */
		memcpy(PageBuffer1, PageBuffer0, MS_BYTES_PER_PAGE);
		if (extdat.status1 & (MS_REG_ST1_DTER | MS_REG_ST1_EXER | MS_REG_ST1_FGER))
			btBlk1stErred = 1;
	}

	if (btBlk1st == MS_LB_NOT_USED) {
		result = MS_STATUS_ERROR;
		goto exit;
	}

	/* write protect */
	if ((extdat.status0 & MS_REG_ST0_WP) == MS_REG_ST0_WP_ON)
		ms_lib_ctrl_set(info, MS_LIB_CTRL_WRPROTECT);

	result = MS_STATUS_ERROR;
	/* 1st Boot Block */
	if (btBlk1stErred == 0)
		result = ms_lib_process_bootblock(us, btBlk1st, PageBuffer1);
		/* 1st */
	/* 2nd Boot Block (fallback if the first had read errors) */
	if (result && (btBlk2nd != MS_LB_NOT_USED))
		result = ms_lib_process_bootblock(us, btBlk2nd, PageBuffer0);

	if (result) {
		result = MS_STATUS_ERROR;
		goto exit;
	}

	/* Blocks before/between the boot blocks are unusable by design. */
	for (TmpBlock = 0; TmpBlock < btBlk1st; TmpBlock++)
		info->MS_Lib.Phy2LogMap[TmpBlock] = MS_LB_INITIAL_ERROR;

	info->MS_Lib.Phy2LogMap[btBlk1st] = MS_LB_BOOT_BLOCK;

	if (btBlk2nd != MS_LB_NOT_USED) {
		for (TmpBlock = btBlk1st + 1; TmpBlock < btBlk2nd; TmpBlock++)
			info->MS_Lib.Phy2LogMap[TmpBlock] = MS_LB_INITIAL_ERROR;

		info->MS_Lib.Phy2LogMap[btBlk2nd] = MS_LB_BOOT_BLOCK;
	}

	result = ms_lib_scan_logicalblocknumber(us, btBlk1st);
	if (result)
		goto exit;

	/* Any later segment without a free block forces read-only mode. */
	for (TmpBlock = MS_PHYSICAL_BLOCKS_PER_SEGMENT;
		TmpBlock < info->MS_Lib.NumberOfPhyBlock;
		TmpBlock += MS_PHYSICAL_BLOCKS_PER_SEGMENT) {
		if (ms_count_freeblock(us, TmpBlock) == 0) {
			ms_lib_ctrl_set(info, MS_LIB_CTRL_WRPROTECT);
			break;
		}
	}

	/* write */
	if (ms_lib_alloc_writebuf(us)) {
		result = MS_NO_MEMORY_ERROR;
		goto exit;
	}

	result = MS_STATUS_SUCCESS;

exit:
	kfree(PageBuffer1);
	kfree(PageBuffer0);

	printk(KERN_INFO "MS_CardInit end\n");
	return result;
}
/*
 * Initialize the MemoryStick slot: load the MS init firmware, run it
 * (vendor CDB 0xF1/0x01) to get a 512-byte status block, and then either
 * record MS Pro geometry directly or run the full plain-MS card init.
 */
static int ene_ms_init(struct us_data *us)
{
	struct bulk_cb_wrap *bcb = (struct bulk_cb_wrap *) us->iobuf;
	int result;
	u16 MSP_BlockSize, MSP_UserAreaBlocks;
	struct ene_ub6250_info *info = (struct ene_ub6250_info *) us->extra;
	u8 *bbuf = info->bbuf;
	unsigned int s;

	printk(KERN_INFO "transport --- ENE_MSInit\n");

	/* the same part to test ENE */

	result = ene_load_bincode(us, MS_INIT_PATTERN);
	if (result != USB_STOR_XFER_GOOD) {
		printk(KERN_ERR "Load MS Init Code Fail !!\n");
		return USB_STOR_TRANSPORT_ERROR;
	}

	memset(bcb, 0, sizeof(struct bulk_cb_wrap));
	bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN);
	bcb->DataTransferLength = 0x200;
	bcb->Flags      = US_BULK_FLAG_IN;
	bcb->CDB[0]     = 0xF1;
	bcb->CDB[1]     = 0x01;

	result = ene_send_scsi_cmd(us, FDIR_READ, bbuf, 0);
	if (result != USB_STOR_XFER_GOOD) {
		printk(KERN_ERR "Execution MS Init Code Fail !!\n");
		return USB_STOR_TRANSPORT_ERROR;
	}

	/* the same part to test ENE */
	info->MS_Status = bbuf[0];	/* status flags byte */

	s = info->MS_Status;
	if ((s & MS_Insert) && (s & MS_Ready)) {
		printk(KERN_INFO "Insert     = %x\n", !!(s & MS_Insert));
		printk(KERN_INFO "Ready      = %x\n", !!(s & MS_Ready));
		printk(KERN_INFO "IsMSPro    = %x\n", !!(s & MS_IsMSPro));
		printk(KERN_INFO "IsMSPHG    = %x\n", !!(s & MS_IsMSPHG));
		printk(KERN_INFO "WtP= %x\n", !!(s & MS_WtP));
		if (s & MS_IsMSPro) {
			/* MS Pro: total capacity straight from the reply. */
			MSP_BlockSize      = (bbuf[6] << 8) | bbuf[7];
			MSP_UserAreaBlocks = (bbuf[10] << 8) | bbuf[11];
			info->MSP_TotalBlock = MSP_BlockSize * MSP_UserAreaBlocks;
		} else {
			ms_card_init(us); /* Card is MS (to ms.c)*/
		}
		usb_stor_dbg(us, "MS Init Code OK !!\n");
	} else {
		usb_stor_dbg(us, "MS Card Not Ready --- %x\n", bbuf[0]);
		return USB_STOR_TRANSPORT_ERROR;
	}

	return USB_STOR_TRANSPORT_GOOD;
}
/*
 * Initialize the SD slot: run the two-stage SD init firmware (part 1 with
 * vendor CDB 0xF2, part 2 with 0xF1), then read the 512-byte status block
 * and parse the card's capacity registers.
 */
static int ene_sd_init(struct us_data *us)
{
	int result;
	struct bulk_cb_wrap *bcb = (struct bulk_cb_wrap *) us->iobuf;
	struct ene_ub6250_info *info = (struct ene_ub6250_info *) us->extra;
	u8 *bbuf = info->bbuf;

	usb_stor_dbg(us, "transport --- ENE_SDInit\n");
	/* SD Init Part-1 */
	result = ene_load_bincode(us, SD_INIT1_PATTERN);
	if (result != USB_STOR_XFER_GOOD) {
		usb_stor_dbg(us, "Load SD Init Code Part-1 Fail !!\n");
		return USB_STOR_TRANSPORT_ERROR;
	}

	memset(bcb, 0, sizeof(struct bulk_cb_wrap));
	bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN);
	bcb->Flags = US_BULK_FLAG_IN;
	bcb->CDB[0] = 0xF2;	/* run init part 1, no data phase */

	result = ene_send_scsi_cmd(us, FDIR_READ, NULL, 0);
	if (result != USB_STOR_XFER_GOOD) {
		usb_stor_dbg(us, "Execution SD Init Code Fail !!\n");
		return USB_STOR_TRANSPORT_ERROR;
	}

	/* SD Init Part-2 */
	result = ene_load_bincode(us, SD_INIT2_PATTERN);
	if (result != USB_STOR_XFER_GOOD) {
		usb_stor_dbg(us, "Load SD Init Code Part-2 Fail !!\n");
		return USB_STOR_TRANSPORT_ERROR;
	}

	memset(bcb, 0, sizeof(struct bulk_cb_wrap));
	bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN);
	bcb->DataTransferLength = 0x200;
	bcb->Flags              = US_BULK_FLAG_IN;
	bcb->CDB[0]             = 0xF1;	/* run init part 2, read status */

	result = ene_send_scsi_cmd(us, FDIR_READ, bbuf, 0);
	if (result != USB_STOR_XFER_GOOD) {
		usb_stor_dbg(us, "Execution SD Init Code Fail !!\n");
		return USB_STOR_TRANSPORT_ERROR;
	}

	info->SD_Status = bbuf[0];	/* status flags byte */
	if ((info->SD_Status & SD_Insert) && (info->SD_Status & SD_Ready)) {
		unsigned int s = info->SD_Status;

		/* Fill in capacity fields from the returned registers. */
		ene_get_card_status(us, bbuf);
		usb_stor_dbg(us, "Insert     = %x\n", !!(s & SD_Insert));
		usb_stor_dbg(us, "Ready      = %x\n", !!(s & SD_Ready));
		usb_stor_dbg(us, "IsMMC      = %x\n", !!(s & SD_IsMMC));
		usb_stor_dbg(us, "HiCapacity = %x\n", !!(s & SD_HiCapacity));
		usb_stor_dbg(us, "HiSpeed    = %x\n", !!(s & SD_HiSpeed));
		usb_stor_dbg(us, "WtP        = %x\n", !!(s & SD_WtP));
	} else {
		usb_stor_dbg(us, "SD Card Not Ready --- %x\n", bbuf[0]);
		return USB_STOR_TRANSPORT_ERROR;
	}
	return USB_STOR_TRANSPORT_GOOD;
}
/*
 * Detect which media slots are populated (register REG_CARD_STATUS:
 * bit 0 = SD, bit 1 = MS) and run the matching initialization for any
 * populated slot that is not already marked ready.
 */
static int ene_init(struct us_data *us)
{
	int result;
	u8  misc_reg03;
	struct ene_ub6250_info *info = (struct ene_ub6250_info *)(us->extra);
	u8 *bbuf = info->bbuf;

	result = ene_get_card_type(us, REG_CARD_STATUS, bbuf);
	if (result != USB_STOR_XFER_GOOD)
		return USB_STOR_TRANSPORT_ERROR;

	misc_reg03 = bbuf[0];
	if (misc_reg03 & 0x01) {	/* SD slot populated */
		if (!(info->SD_Status & SD_Ready)) {
			result = ene_sd_init(us);
			if (result != USB_STOR_XFER_GOOD)
				return USB_STOR_TRANSPORT_ERROR;
		}
	}
	if (misc_reg03 & 0x02) {	/* MS slot populated */
		if (!(info->MS_Status & MS_Ready)) {
			result = ene_ms_init(us);
			if (result != USB_STOR_XFER_GOOD)
				return USB_STOR_TRANSPORT_ERROR;
		}
	}
	return result;
}
/*----- sd_scsi_irp() ---------*/
/*----- sd_scsi_irp() ---------*/
/*
 * Dispatch one SCSI command to the SD-specific handler and record the
 * resulting sense status in info->SrbStatus.
 */
static int sd_scsi_irp(struct us_data *us, struct scsi_cmnd *srb)
{
	struct ene_ub6250_info *info = (struct ene_ub6250_info *)us->extra;
	unsigned char op = srb->cmnd[0];
	int result;

	if (op == TEST_UNIT_READY)		/* 0x00 */
		result = sd_scsi_test_unit_ready(us, srb);
	else if (op == REQUEST_SENSE)		/* 0x03 */
		result = do_scsi_request_sense(us, srb);
	else if (op == INQUIRY)			/* 0x12 */
		result = do_scsi_inquiry(us, srb);
	else if (op == MODE_SENSE)		/* 0x1A */
		result = sd_scsi_mode_sense(us, srb);
	else if (op == READ_CAPACITY)		/* 0x25 */
		result = sd_scsi_read_capacity(us, srb);
	else if (op == READ_10)			/* 0x28 */
		result = sd_scsi_read(us, srb);
	else if (op == WRITE_10)		/* 0x2A */
		result = sd_scsi_write(us, srb);
	else {
		/* START_STOP (0x1B) and everything else is unsupported. */
		info->SrbStatus = SS_ILLEGAL_REQUEST;
		result = USB_STOR_TRANSPORT_FAILED;
	}

	if (result == USB_STOR_TRANSPORT_GOOD)
		info->SrbStatus = SS_SUCCESS;

	return result;
}
/*
* ms_scsi_irp()
*/
/*
 * ms_scsi_irp()
 *
 * Dispatch one SCSI command to the MemoryStick-specific handler and
 * record the resulting sense status in info->SrbStatus.
 */
static int ms_scsi_irp(struct us_data *us, struct scsi_cmnd *srb)
{
	struct ene_ub6250_info *info = (struct ene_ub6250_info *)us->extra;
	unsigned char op = srb->cmnd[0];
	int result;

	if (op == TEST_UNIT_READY)		/* 0x00 */
		result = ms_scsi_test_unit_ready(us, srb);
	else if (op == REQUEST_SENSE)		/* 0x03 */
		result = do_scsi_request_sense(us, srb);
	else if (op == INQUIRY)			/* 0x12 */
		result = do_scsi_inquiry(us, srb);
	else if (op == MODE_SENSE)		/* 0x1A */
		result = ms_scsi_mode_sense(us, srb);
	else if (op == READ_CAPACITY)		/* 0x25 */
		result = ms_scsi_read_capacity(us, srb);
	else if (op == READ_10)			/* 0x28 */
		result = ms_scsi_read(us, srb);
	else if (op == WRITE_10)		/* 0x2A */
		result = ms_scsi_write(us, srb);
	else {
		info->SrbStatus = SS_ILLEGAL_REQUEST;
		result = USB_STOR_TRANSPORT_FAILED;
	}

	if (result == USB_STOR_TRANSPORT_GOOD)
		info->SrbStatus = SS_SUCCESS;

	return result;
}
/*
 * Top-level transport hook: (re)initialize the reader if no medium is
 * ready yet, then route the command to the SD or MS handler.
 *
 * Fix: the ready test previously applied '!' to the SD term only
 * ("!(SD ready) || (MS ready)"), which re-ran ene_init() on every command
 * while an MS card was ready.  The pre-flags-conversion code tested
 * "neither card ready"; that intent is restored here (confirm against
 * the original bitfield version if in doubt).
 */
static int ene_transport(struct scsi_cmnd *srb, struct us_data *us)
{
	int result = USB_STOR_XFER_GOOD;
	struct ene_ub6250_info *info = (struct ene_ub6250_info *)(us->extra);

	/*US_DEBUG(usb_stor_show_command(us, srb)); */
	scsi_set_resid(srb, 0);
	if (unlikely(!((info->SD_Status & SD_Ready) ||
		       (info->MS_Status & MS_Ready))))
		result = ene_init(us);
	if (result == USB_STOR_XFER_GOOD) {
		result = USB_STOR_TRANSPORT_ERROR;
		if (info->SD_Status & SD_Ready)
			result = sd_scsi_irp(us, srb);

		if (info->MS_Status & MS_Ready)
			result = ms_scsi_irp(us, srb);
	}
	return result;
}
static struct scsi_host_template ene_ub6250_host_template;
/*
 * Probe: register with the usb-storage core, allocate the per-device
 * info structure and its 512-byte bounce buffer, then read the card-slot
 * status register to see which media types are present.
 */
static int ene_ub6250_probe(struct usb_interface *intf,
			 const struct usb_device_id *id)
{
	int result;
	u8  misc_reg03;
	struct us_data *us;
	struct ene_ub6250_info *info;

	result = usb_stor_probe1(&us, intf, id,
			(id - ene_ub6250_usb_ids) + ene_ub6250_unusual_dev_list,
			&ene_ub6250_host_template);
	if (result)
		return result;

	/* FIXME: where should the code alloc extra buf ? */
	us->extra = kzalloc(sizeof(struct ene_ub6250_info), GFP_KERNEL);
	if (!us->extra)
		return -ENOMEM;
	us->extra_destructor = ene_ub6250_info_destructor;

	info = (struct ene_ub6250_info *)(us->extra);
	info->bbuf = kmalloc(512, GFP_KERNEL);
	if (!info->bbuf) {
		/*
		 * NOTE(review): us->extra is freed here but left non-NULL
		 * with extra_destructor already installed; confirm the core
		 * cannot run the destructor on this failure path.
		 */
		kfree(us->extra);
		return -ENOMEM;
	}

	us->transport_name = "ene_ub6250";
	us->transport = ene_transport;
	us->max_lun = 0;

	result = usb_stor_probe2(us);
	if (result)
		return result;

	/* probe card type */
	result = ene_get_card_type(us, REG_CARD_STATUS, info->bbuf);
	if (result != USB_STOR_XFER_GOOD) {
		usb_stor_disconnect(intf);
		return USB_STOR_TRANSPORT_ERROR;
	}

	misc_reg03 = info->bbuf[0];
	/* Bit 0 = SD slot; SmartMedia-only readers are not supported. */
	if (!(misc_reg03 & 0x01)) {
		pr_info("ums_eneub6250: This driver only supports SD/MS cards. "
			"It does not support SM cards.\n");
	}

	return result;
}
#ifdef CONFIG_PM
/*
 * PM resume: run the usb-storage resume hook, then clear all cached
 * media state so the next command forces a full re-initialization.
 */
static int ene_ub6250_resume(struct usb_interface *iface)
{
	struct us_data *us = usb_get_intfdata(iface);
	struct ene_ub6250_info *info = (struct ene_ub6250_info *)(us->extra);

	mutex_lock(&us->dev_mutex);

	if (us->suspend_resume_hook)
		(us->suspend_resume_hook)(us, US_RESUME);

	mutex_unlock(&us->dev_mutex);

	info->Power_IsResum = true;
	/* info->SD_Status &= ~SD_Ready; */
	info->SD_Status = 0;
	info->MS_Status = 0;
	info->SM_Status = 0;

	return 0;
}
/*
 * PM reset-resume: report the reset to the SCSI core and clear all
 * cached media state so the next command re-initializes the device.
 */
static int ene_ub6250_reset_resume(struct usb_interface *iface)
{
	struct us_data *us = usb_get_intfdata(iface);
	struct ene_ub6250_info *info = (struct ene_ub6250_info *)(us->extra);

	/* Report the reset to the SCSI core */
	usb_stor_reset_resume(iface);

	/*
	 * FIXME: Notify the subdrivers that they need to reinitialize
	 * the device
	 */
	info->Power_IsResum = true;
	/* info->SD_Status &= ~SD_Ready; */
	info->SD_Status = 0;
	info->MS_Status = 0;
	info->SM_Status = 0;

	return 0;
}
#else
#define ene_ub6250_resume NULL
#define ene_ub6250_reset_resume NULL
#endif
/*
 * USB driver glue: standard usb-storage callbacks plus the custom
 * resume hooks above, which force media re-detection after power events.
 */
static struct usb_driver ene_ub6250_driver = {
	.name =		DRV_NAME,
	.probe =	ene_ub6250_probe,
	.disconnect =	usb_stor_disconnect,
	.suspend =	usb_stor_suspend,
	.resume =	ene_ub6250_resume,
	.reset_resume =	ene_ub6250_reset_resume,
	.pre_reset =	usb_stor_pre_reset,
	.post_reset =	usb_stor_post_reset,
	.id_table =	ene_ub6250_usb_ids,
	.soft_unbind =	1,
	.no_dynamic_id = 1,
};

module_usb_stor_driver(ene_ub6250_driver, ene_ub6250_host_template, DRV_NAME);
| linux-master | drivers/usb/storage/ene_ub6250.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* Driver for SanDisk SDDR-09 SmartMedia reader
*
* (c) 2000, 2001 Robert Baruch ([email protected])
* (c) 2002 Andries Brouwer ([email protected])
* Developed with the assistance of:
* (c) 2002 Alan Stern <[email protected]>
*
* The SanDisk SDDR-09 SmartMedia reader uses the Shuttle EUSB-01 chip.
* This chip is a programmable USB controller. In the SDDR-09, it has
* been programmed to obey a certain limited set of SCSI commands.
* This driver translates the "real" SCSI commands to the SDDR-09 SCSI
* commands.
*/
/*
* Known vendor commands: 12 bytes, first byte is opcode
*
* E7: read scatter gather
* E8: read
* E9: write
* EA: erase
* EB: reset
* EC: read status
* ED: read ID
* EE: write CIS (?)
* EF: compute checksum (?)
*/
#include <linux/errno.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include "usb.h"
#include "transport.h"
#include "protocol.h"
#include "debug.h"
#include "scsiglue.h"
#define DRV_NAME "ums-sddr09"
MODULE_DESCRIPTION("Driver for SanDisk SDDR-09 SmartMedia reader");
MODULE_AUTHOR("Andries Brouwer <[email protected]>, Robert Baruch <[email protected]>");
MODULE_LICENSE("GPL");
MODULE_IMPORT_NS(USB_STORAGE);
static int usb_stor_sddr09_dpcm_init(struct us_data *us);
static int sddr09_transport(struct scsi_cmnd *srb, struct us_data *us);
static int usb_stor_sddr09_init(struct us_data *us);
/*
* The table of devices
*/
#define UNUSUAL_DEV(id_vendor, id_product, bcdDeviceMin, bcdDeviceMax, \
vendorName, productName, useProtocol, useTransport, \
initFunction, flags) \
{ USB_DEVICE_VER(id_vendor, id_product, bcdDeviceMin, bcdDeviceMax), \
.driver_info = (flags) }
static struct usb_device_id sddr09_usb_ids[] = {
# include "unusual_sddr09.h"
{ } /* Terminating entry */
};
MODULE_DEVICE_TABLE(usb, sddr09_usb_ids);
#undef UNUSUAL_DEV
/*
* The flags table
*/
#define UNUSUAL_DEV(idVendor, idProduct, bcdDeviceMin, bcdDeviceMax, \
vendor_name, product_name, use_protocol, use_transport, \
init_function, Flags) \
{ \
.vendorName = vendor_name, \
.productName = product_name, \
.useProtocol = use_protocol, \
.useTransport = use_transport, \
.initFunction = init_function, \
}
static struct us_unusual_dev sddr09_unusual_dev_list[] = {
# include "unusual_sddr09.h"
{ } /* Terminating entry */
};
#undef UNUSUAL_DEV
#define short_pack(lsb,msb) ( ((u16)(lsb)) | ( ((u16)(msb))<<8 ) )
#define LSB_of(s) ((s)&0xFF)
#define MSB_of(s) ((s)>>8)
/*
* First some stuff that does not belong here:
* data on SmartMedia and other cards, completely
* unrelated to this driver.
* Similar stuff occurs in <linux/mtd/nand_ids.h>.
*/
/*
 * Geometry of one supported NAND/SmartMedia chip, keyed by its ID byte.
 * All sizes are stored as log2 shift amounts.
 */
struct nand_flash_dev {
	int model_id;
	int chipshift;		/* 1<<cs bytes total capacity */
	char pageshift;		/* 1<<ps bytes in a page */
	char blockshift;	/* 1<<bs pages in an erase block */
	char zoneshift;		/* 1<<zs blocks in a zone */
				/* # of logical blocks is 125/128 of this */
	char pageadrlen;	/* length of an address in bytes - 1 */
};
/*
* NAND Flash Manufacturer ID Codes
*/
#define NAND_MFR_AMD 0x01
#define NAND_MFR_NATSEMI 0x8f
#define NAND_MFR_TOSHIBA 0x98
#define NAND_MFR_SAMSUNG 0xec
static inline char *nand_flash_manufacturer(int manuf_id) {
switch(manuf_id) {
case NAND_MFR_AMD:
return "AMD";
case NAND_MFR_NATSEMI:
return "NATSEMI";
case NAND_MFR_TOSHIBA:
return "Toshiba";
case NAND_MFR_SAMSUNG:
return "Samsung";
default:
return "unknown";
}
}
/*
* It looks like it is unnecessary to attach manufacturer to the
* remaining data: SSFDC prescribes manufacturer-independent id codes.
*
* 256 MB NAND flash has a 5-byte ID with 2nd byte 0xaa, 0xba, 0xca or 0xda.
*/
static struct nand_flash_dev nand_flash_ids[] = {
/* NAND flash */
{ 0x6e, 20, 8, 4, 8, 2}, /* 1 MB */
{ 0xe8, 20, 8, 4, 8, 2}, /* 1 MB */
{ 0xec, 20, 8, 4, 8, 2}, /* 1 MB */
{ 0x64, 21, 8, 4, 9, 2}, /* 2 MB */
{ 0xea, 21, 8, 4, 9, 2}, /* 2 MB */
{ 0x6b, 22, 9, 4, 9, 2}, /* 4 MB */
{ 0xe3, 22, 9, 4, 9, 2}, /* 4 MB */
{ 0xe5, 22, 9, 4, 9, 2}, /* 4 MB */
{ 0xe6, 23, 9, 4, 10, 2}, /* 8 MB */
{ 0x73, 24, 9, 5, 10, 2}, /* 16 MB */
{ 0x75, 25, 9, 5, 10, 2}, /* 32 MB */
{ 0x76, 26, 9, 5, 10, 3}, /* 64 MB */
{ 0x79, 27, 9, 5, 10, 3}, /* 128 MB */
/* MASK ROM */
{ 0x5d, 21, 9, 4, 8, 2}, /* 2 MB */
{ 0xd5, 22, 9, 4, 9, 2}, /* 4 MB */
{ 0xd6, 23, 9, 4, 10, 2}, /* 8 MB */
{ 0x57, 24, 9, 4, 11, 2}, /* 16 MB */
{ 0x58, 25, 9, 4, 12, 2}, /* 32 MB */
{ 0,}
};
static struct nand_flash_dev *
nand_find_id(unsigned char id) {
int i;
for (i = 0; i < ARRAY_SIZE(nand_flash_ids); i++)
if (nand_flash_ids[i].model_id == id)
return &(nand_flash_ids[i]);
return NULL;
}
/*
* ECC computation.
*/
static unsigned char parity[256];       /* parity[i] = popcount(i) & 1 */
static unsigned char ecc2[256];         /* precomputed third ECC byte per parity byte */

/*
 * Build the parity[] and ecc2[] lookup tables used by the ECC helpers.
 * Must run once (see sddr09_common_init) before nand_compute_ecc().
 */
static void nand_init_ecc(void) {
        int i, j, a;

        /* Even/odd popcount of every byte value. */
        for (i = 0; i < 256; i++) {
                int ones = 0;

                for (j = 0; j < 8; j++)
                        if (i & (1 << j))
                                ones++;
                parity[i] = ones & 1;
        }

        /* Third ECC byte: column parities of the bit positions, packed
         * into pairs of complementary bits. */
        for (i = 0; i < 256; i++) {
                a = 0;
                for (j = 0; j < 8; j++) {
                        if (!(i & (1 << j)))
                                continue;
                        if (!(j & 1))
                                a ^= 0x04;
                        if (!(j & 2))
                                a ^= 0x10;
                        if (!(j & 4))
                                a ^= 0x40;
                }
                ecc2[i] = ~(a ^ (a << 1) ^ (parity[i] ? 0xa8 : 0));
        }
}
/*
 * compute 3-byte ecc on 256 bytes
 *
 * Standard SmartMedia/SSFDC ECC: 256 data bytes are condensed into
 * two bytes of interleaved line parities plus one byte of column
 * parity (from the precomputed ecc2[] table). nand_init_ecc() must
 * have been called first to fill parity[] and ecc2[].
 */
static void nand_compute_ecc(unsigned char *data, unsigned char *ecc) {
        int i, j, a;
        unsigned char par = 0, bit, bits[8] = {0};

        /* collect 16 checksum bits: par accumulates the XOR of all data
         * bytes; bits[j] accumulates the parity of the bytes whose index
         * has bit j clear. */
        for (i = 0; i < 256; i++) {
                par ^= data[i];
                bit = parity[data[i]];
                for (j = 0; j < 8; j++)
                        if ((i & (1<<j)) == 0)
                                bits[j] ^= bit;
        }

        /* put 4+4+4 = 12 bits in the ecc; each parity bit is stored next
         * to its complement (hence the a ^ (a<<1) ^ mask form). */
        a = (bits[3] << 6) + (bits[2] << 4) + (bits[1] << 2) + bits[0];
        ecc[0] = ~(a ^ (a<<1) ^ (parity[par] ? 0xaa : 0));

        a = (bits[7] << 6) + (bits[6] << 4) + (bits[5] << 2) + bits[4];
        ecc[1] = ~(a ^ (a<<1) ^ (parity[par] ? 0xaa : 0));

        ecc[2] = ecc2[par];
}
/* Return nonzero iff the 3 stored ECC bytes equal the 3 computed ones. */
static int nand_compare_ecc(unsigned char *data, unsigned char *ecc) {
        return memcmp(data, ecc, 3) == 0;
}
/* Write the 3 computed ECC bytes into the page's control area. */
static void nand_store_ecc(unsigned char *data, unsigned char *ecc) {
        data[0] = ecc[0];
        data[1] = ecc[1];
        data[2] = ecc[2];
}
/*
 * The actual driver starts here.
 */

/* Per-device state, hung off us->extra; describes the inserted card
 * and caches the logical<->physical block maps built by sddr09_read_map. */
struct sddr09_card_info {
        unsigned long   capacity;       /* Size of card in bytes */
        int             pagesize;       /* Size of page in bytes */
        int             pageshift;      /* log2 of pagesize */
        int             blocksize;      /* Size of block in pages */
        int             blockshift;     /* log2 of blocksize */
        int             blockmask;      /* 2^blockshift - 1 */
        int             *lba_to_pba;    /* logical to physical map */
        int             *pba_to_lba;    /* physical to logical map */
        int             lbact;          /* number of available pages */
        int             flags;
#define SDDR09_WP       1               /* write protected */
};

/*
 * On my 16MB card, control blocks have size 64 (16 real control bytes,
 * and 48 junk bytes). In reality of course the card uses 16 control bytes,
 * so the reader makes up the remaining 48. Don't know whether these numbers
 * depend on the card. For now a constant.
 */
#define CONTROL_SHIFT 6

/*
 * On my Combo CF/SM reader, the SM reader has LUN 1.
 * (and things fail with LUN 0).
 * It seems LUN is irrelevant for others.
 */
#define LUN 1
#define LUNBITS (LUN << 5)

/*
 * LBA and PBA are unsigned ints. Special values.
 */
#define UNDEF    0xffffffff     /* never written / no mapping */
#define SPARE    0xfffffffe     /* usable block beyond the per-zone LBA quota */
#define UNUSABLE 0xfffffffd     /* bad or reserved block */

/* If nonzero, sddr09_read_map erases blocks with inconsistent LBA
 * entries instead of marking them UNUSABLE. */
static const int erase_bad_lba_entries = 0;
/*
 * Send a vendor-specific interface command (bmRequestType 0x41 | direction)
 * over the control pipe. Used for requests 0, 1 and 8.
 *
 * Returns 0 on success, -EPIPE on a stall, -EIO on any other failure.
 */
static int
sddr09_send_command(struct us_data *us,
                    unsigned char request,
                    unsigned char direction,
                    unsigned char *xfer_data,
                    unsigned int xfer_len) {
        unsigned char requesttype = (0x41 | direction);
        unsigned int pipe;
        int rc;

        /* The direction bit also selects which control pipe to use. */
        pipe = (direction == USB_DIR_IN) ? us->recv_ctrl_pipe
                                         : us->send_ctrl_pipe;

        rc = usb_stor_ctrl_transfer(us, pipe, request, requesttype,
                                    0, 0, xfer_data, xfer_len);

        if (rc == USB_STOR_XFER_GOOD)
                return 0;
        if (rc == USB_STOR_XFER_STALLED)
                return -EPIPE;
        return -EIO;
}
/*
 * Ship a SCSI-style command block to the device: vendor request 0,
 * direction OUT, payload is the raw command bytes.
 */
static int
sddr09_send_scsi_command(struct us_data *us,
                         unsigned char *command,
                         unsigned int command_len) {
        return sddr09_send_command(us, 0, USB_DIR_OUT, command, command_len);
}
#if 0
/*
 * Test Unit Ready Command: 12 bytes.
 * byte 0: opcode: 00
 *
 * NOTE(review): dead code; as written it builds and sends only 6 bytes,
 * not the 12 the comment above suggests -- confirm before re-enabling.
 */
static int
sddr09_test_unit_ready(struct us_data *us) {
        unsigned char *command = us->iobuf;
        int result;

        memset(command, 0, 6);
        command[1] = LUNBITS;

        result = sddr09_send_scsi_command(us, command, 6);

        usb_stor_dbg(us, "sddr09_test_unit_ready returns %d\n", result);

        return result;
}
#endif

/*
 * Request Sense Command: 12 bytes.
 * byte 0: opcode: 03
 * byte 4: data length
 *
 * Sends the command, then reads back buflen bytes of sense data over
 * the bulk-in pipe. Returns 0 on success or a negative error code.
 */
static int
sddr09_request_sense(struct us_data *us, unsigned char *sensebuf, int buflen) {
        unsigned char *command = us->iobuf;
        int result;

        memset(command, 0, 12);
        command[0] = 0x03;
        command[1] = LUNBITS;
        command[4] = buflen;

        result = sddr09_send_scsi_command(us, command, 12);
        if (result)
                return result;

        result = usb_stor_bulk_transfer_buf(us, us->recv_bulk_pipe,
                        sensebuf, buflen, NULL);
        return (result == USB_STOR_XFER_GOOD ? 0 : -EIO);
}
/*
 * Read Command: 12 bytes.
 * byte 0: opcode: E8
 * byte 1: last two bits: 00: read data, 01: read blockwise control,
 * 10: read both, 11: read pagewise control.
 * It turns out we need values 20, 21, 22, 23 here (LUN 1).
 * bytes 2-5: address (interpretation depends on byte 1, see below)
 * bytes 10-11: count (idem)
 *
 * A page has 512 data bytes and 64 control bytes (16 control and 48 junk).
 * A read data command gets data in 512-byte pages.
 * A read control command gets control in 64-byte chunks.
 * A read both command gets data+control in 576-byte chunks.
 *
 * Blocks are groups of 32 pages, and read blockwise control jumps to the
 * next block, while read pagewise control jumps to the next page after
 * reading a group of 64 control bytes.
 * [Here 512 = 1<<pageshift, 32 = 1<<blockshift, 64 is constant?]
 *
 * (1 MB and 2 MB cards are a bit different, but I have only a 16 MB card.)
 */
static int
sddr09_readX(struct us_data *us, int x, unsigned long fromaddress,
             int nr_of_pages, int bulklen, unsigned char *buf,
             int use_sg) {

        unsigned char *command = us->iobuf;
        int result;

        /* x (0..3) selects the read mode, encoded in the low bits of
         * byte 1 together with the LUN bits. */
        command[0] = 0xE8;
        command[1] = LUNBITS | x;
        /* 32-bit address, big-endian, in bytes 2-5. */
        command[2] = MSB_of(fromaddress>>16);
        command[3] = LSB_of(fromaddress>>16);
        command[4] = MSB_of(fromaddress & 0xFFFF);
        command[5] = LSB_of(fromaddress & 0xFFFF);
        command[6] = 0;
        command[7] = 0;
        command[8] = 0;
        command[9] = 0;
        command[10] = MSB_of(nr_of_pages);
        command[11] = LSB_of(nr_of_pages);

        result = sddr09_send_scsi_command(us, command, 12);
        if (result) {
                usb_stor_dbg(us, "Result for send_control in sddr09_read2%d %d\n",
                             x, result);
                return result;
        }

        /* Data phase: bulklen bytes on the bulk-in pipe, possibly
         * scatter-gathered. */
        result = usb_stor_bulk_transfer_sg(us, us->recv_bulk_pipe,
                                           buf, bulklen, use_sg, NULL);
        if (result != USB_STOR_XFER_GOOD) {
                usb_stor_dbg(us, "Result for bulk_transfer in sddr09_read2%d %d\n",
                             x, result);
                return -EIO;
        }
        return 0;
}
/*
 * Read Data
 *
 * fromaddress counts data shorts:
 * increasing it by 256 shifts the bytestream by 512 bytes;
 * the last 8 bits are ignored.
 *
 * nr_of_pages counts pages of size (1 << pageshift).
 */
static int
sddr09_read20(struct us_data *us, unsigned long fromaddress,
              int nr_of_pages, int pageshift, unsigned char *buf, int use_sg) {
        int bulklen = nr_of_pages << pageshift;

        /* The last 8 bits of fromaddress are ignored. */
        return sddr09_readX(us, 0, fromaddress, nr_of_pages, bulklen,
                            buf, use_sg);
}

/*
 * Read Blockwise Control
 *
 * fromaddress gives the starting position (as in read data;
 * the last 8 bits are ignored); increasing it by 32*256 shifts
 * the output stream by 64 bytes.
 *
 * count counts control groups of size (1 << controlshift).
 * For me, controlshift = 6. Is this constant?
 *
 * After getting one control group, jump to the next block
 * (fromaddress += 8192).
 */
static int
sddr09_read21(struct us_data *us, unsigned long fromaddress,
              int count, int controlshift, unsigned char *buf, int use_sg) {

        int bulklen = (count << controlshift);
        return sddr09_readX(us, 1, fromaddress, count, bulklen,
                            buf, use_sg);
}

/*
 * Read both Data and Control
 *
 * fromaddress counts data shorts, ignoring control:
 * increasing it by 256 shifts the bytestream by 576 = 512+64 bytes;
 * the last 8 bits are ignored.
 *
 * nr_of_pages counts pages of size (1 << pageshift) + (1 << controlshift).
 */
static int
sddr09_read22(struct us_data *us, unsigned long fromaddress,
              int nr_of_pages, int pageshift, unsigned char *buf, int use_sg) {

        /* Each chunk carries pagesize data bytes plus one control group. */
        int bulklen = (nr_of_pages << pageshift) + (nr_of_pages << CONTROL_SHIFT);

        usb_stor_dbg(us, "reading %d pages, %d bytes\n", nr_of_pages, bulklen);

        return sddr09_readX(us, 2, fromaddress, nr_of_pages, bulklen,
                            buf, use_sg);
}

#if 0
/*
 * Read Pagewise Control
 *
 * fromaddress gives the starting position (as in read data;
 * the last 8 bits are ignored); increasing it by 256 shifts
 * the output stream by 64 bytes.
 *
 * count counts control groups of size (1 << controlshift).
 * For me, controlshift = 6. Is this constant?
 *
 * After getting one control group, jump to the next page
 * (fromaddress += 256).
 *
 * NOTE(review): dead code, kept for protocol documentation.
 */
static int
sddr09_read23(struct us_data *us, unsigned long fromaddress,
              int count, int controlshift, unsigned char *buf, int use_sg) {

        int bulklen = (count << controlshift);
        return sddr09_readX(us, 3, fromaddress, count, bulklen,
                            buf, use_sg);
}
#endif
/*
 * Erase Command: 12 bytes.
 * byte 0: opcode: EA
 * bytes 6-9: erase address (big-endian, counting shorts, sector aligned).
 *
 * Always precisely one block is erased; bytes 2-5 and 10-11 are ignored.
 * The byte address being erased is 2*Eaddress.
 * The CIS cannot be erased.
 */
static int
sddr09_erase(struct us_data *us, unsigned long Eaddress) {
        unsigned char *command = us->iobuf;
        int result;

        usb_stor_dbg(us, "erase address %lu\n", Eaddress);

        memset(command, 0, 12);
        command[0] = 0xEA;
        command[1] = LUNBITS;
        /* Erase address, big-endian, in bytes 6-9. */
        command[6] = MSB_of(Eaddress>>16);
        command[7] = LSB_of(Eaddress>>16);
        command[8] = MSB_of(Eaddress & 0xFFFF);
        command[9] = LSB_of(Eaddress & 0xFFFF);

        result = sddr09_send_scsi_command(us, command, 12);

        if (result)
                usb_stor_dbg(us, "Result for send_control in sddr09_erase %d\n",
                             result);

        return result;
}
/*
 * Write CIS Command: 12 bytes.
 * byte 0: opcode: EE
 * bytes 2-5: write address in shorts
 * bytes 10-11: sector count
 *
 * This writes at the indicated address. Don't know how it differs
 * from E9. Maybe it does not erase? However, it will also write to
 * the CIS.
 *
 * When two such commands on the same page follow each other directly,
 * the second one is not done.
 */

/*
 * Write Command: 12 bytes.
 * byte 0: opcode: E9
 * bytes 2-5: write address (big-endian, counting shorts, sector aligned).
 * bytes 6-9: erase address (big-endian, counting shorts, sector aligned).
 * bytes 10-11: sector count (big-endian, in 512-byte sectors).
 *
 * If write address equals erase address, the erase is done first,
 * otherwise the write is done first. When erase address equals zero
 * no erase is done?
 */
static int
sddr09_writeX(struct us_data *us,
              unsigned long Waddress, unsigned long Eaddress,
              int nr_of_pages, int bulklen, unsigned char *buf, int use_sg) {

        unsigned char *command = us->iobuf;
        int result;

        command[0] = 0xE9;
        command[1] = LUNBITS;
        /* Write address, big-endian, bytes 2-5. */
        command[2] = MSB_of(Waddress>>16);
        command[3] = LSB_of(Waddress>>16);
        command[4] = MSB_of(Waddress & 0xFFFF);
        command[5] = LSB_of(Waddress & 0xFFFF);
        /* Erase address, big-endian, bytes 6-9. */
        command[6] = MSB_of(Eaddress>>16);
        command[7] = LSB_of(Eaddress>>16);
        command[8] = MSB_of(Eaddress & 0xFFFF);
        command[9] = LSB_of(Eaddress & 0xFFFF);
        command[10] = MSB_of(nr_of_pages);
        command[11] = LSB_of(nr_of_pages);

        result = sddr09_send_scsi_command(us, command, 12);
        if (result) {
                usb_stor_dbg(us, "Result for send_control in sddr09_writeX %d\n",
                             result);
                return result;
        }

        /* Data phase: ship the page data on the bulk-out pipe. */
        result = usb_stor_bulk_transfer_sg(us, us->send_bulk_pipe,
                                           buf, bulklen, use_sg, NULL);
        if (result != USB_STOR_XFER_GOOD) {
                usb_stor_dbg(us, "Result for bulk_transfer in sddr09_writeX %d\n",
                             result);
                return -EIO;
        }
        return 0;
}

/* erase address, write same address: erase-then-write of one whole block,
 * where buf holds data+control (576 bytes) per page. */
static int
sddr09_write_inplace(struct us_data *us, unsigned long address,
                     int nr_of_pages, int pageshift, unsigned char *buf,
                     int use_sg) {
        int bulklen = (nr_of_pages << pageshift) + (nr_of_pages << CONTROL_SHIFT);
        return sddr09_writeX(us, address, address, nr_of_pages, bulklen,
                             buf, use_sg);
}
#if 0
/*
 * Read Scatter Gather Command: 3+4n bytes.
 * byte 0: opcode E7
 * byte 2: n
 * bytes 4i-1,4i,4i+1: page address
 * byte 4i+2: page count
 * (i=1..n)
 *
 * This reads several pages from the card to a single memory buffer.
 * The last two bits of byte 1 have the same meaning as for E8.
 *
 * NOTE(review): dead experimental code with hard-coded octal addresses;
 * kept only as protocol documentation.
 */
static int
sddr09_read_sg_test_only(struct us_data *us) {
        unsigned char *command = us->iobuf;
        int result, bulklen, nsg, ct;
        unsigned char *buf;
        unsigned long address;

        nsg = bulklen = 0;
        command[0] = 0xE7;
        command[1] = LUNBITS;
        command[2] = 0;

        address = 040000; ct = 1;
        nsg++;
        bulklen += (ct << 9);
        command[4*nsg+2] = ct;
        command[4*nsg+1] = ((address >> 9) & 0xFF);
        command[4*nsg+0] = ((address >> 17) & 0xFF);
        command[4*nsg-1] = ((address >> 25) & 0xFF);

        address = 0340000; ct = 1;
        nsg++;
        bulklen += (ct << 9);
        command[4*nsg+2] = ct;
        command[4*nsg+1] = ((address >> 9) & 0xFF);
        command[4*nsg+0] = ((address >> 17) & 0xFF);
        command[4*nsg-1] = ((address >> 25) & 0xFF);

        address = 01000000; ct = 2;
        nsg++;
        bulklen += (ct << 9);
        command[4*nsg+2] = ct;
        command[4*nsg+1] = ((address >> 9) & 0xFF);
        command[4*nsg+0] = ((address >> 17) & 0xFF);
        command[4*nsg-1] = ((address >> 25) & 0xFF);

        command[2] = nsg;

        result = sddr09_send_scsi_command(us, command, 4*nsg+3);

        if (result) {
                usb_stor_dbg(us, "Result for send_control in sddr09_read_sg %d\n",
                             result);
                return result;
        }

        buf = kmalloc(bulklen, GFP_NOIO);
        if (!buf)
                return -ENOMEM;

        result = usb_stor_bulk_transfer_buf(us, us->recv_bulk_pipe,
                                            buf, bulklen, NULL);
        kfree(buf);
        if (result != USB_STOR_XFER_GOOD) {
                usb_stor_dbg(us, "Result for bulk_transfer in sddr09_read_sg %d\n",
                             result);
                return -EIO;
        }

        return 0;
}
#endif
/*
 * Read Status Command: 12 bytes.
 * byte 0: opcode: EC
 *
 * Returns 64 bytes, all zero except for the first.
 * bit 0: 1: Error
 * bit 5: 1: Suspended
 * bit 6: 1: Ready
 * bit 7: 1: Not write-protected
 */
static int
sddr09_read_status(struct us_data *us, unsigned char *status) {

        /* command and data deliberately alias us->iobuf: the command is
         * fully sent before the reply is read into the same buffer. */
        unsigned char *command = us->iobuf;
        unsigned char *data = us->iobuf;
        int result;

        usb_stor_dbg(us, "Reading status...\n");

        memset(command, 0, 12);
        command[0] = 0xEC;
        command[1] = LUNBITS;

        result = sddr09_send_scsi_command(us, command, 12);
        if (result)
                return result;

        result = usb_stor_bulk_transfer_buf(us, us->recv_bulk_pipe,
                                            data, 64, NULL);
        *status = data[0];
        return (result == USB_STOR_XFER_GOOD ? 0 : -EIO);
}
/*
 * Read `sectors` pages starting at logical page address `address`,
 * translating LBAs to PBAs via the map built by sddr09_read_map and
 * copying the result into the SCSI transfer buffer of us->srb.
 * Unmapped LBAs read back as zeroes. Returns 0 or a negative error.
 */
static int
sddr09_read_data(struct us_data *us,
                 unsigned long address,
                 unsigned int sectors) {

        struct sddr09_card_info *info = (struct sddr09_card_info *) us->extra;
        unsigned char *buffer;
        unsigned int lba, maxlba, pba;
        unsigned int page, pages;
        unsigned int len, offset;
        struct scatterlist *sg;
        int result;

        // Figure out the initial LBA and page
        lba = address >> info->blockshift;
        page = (address & info->blockmask);
        maxlba = info->capacity >> (info->pageshift + info->blockshift);
        if (lba >= maxlba)
                return -EIO;

        // Since we only read in one block at a time, we have to create
        // a bounce buffer and move the data a piece at a time between the
        // bounce buffer and the actual transfer buffer.

        len = min(sectors, (unsigned int) info->blocksize) * info->pagesize;
        buffer = kmalloc(len, GFP_NOIO);
        if (!buffer)
                return -ENOMEM;

        // This could be made much more efficient by checking for
        // contiguous LBA's. Another exercise left to the student.

        result = 0;
        offset = 0;
        sg = NULL;

        while (sectors > 0) {

                /* Find number of pages we can read in this block */
                pages = min(sectors, info->blocksize - page);
                len = pages << info->pageshift;

                /* Not overflowing capacity? */
                if (lba >= maxlba) {
                        usb_stor_dbg(us, "Error: Requested lba %u exceeds maximum %u\n",
                                     lba, maxlba);
                        result = -EIO;
                        break;
                }

                /* Find where this lba lives on disk */
                pba = info->lba_to_pba[lba];

                if (pba == UNDEF) {     /* this lba was never written */
                        usb_stor_dbg(us, "Read %d zero pages (LBA %d) page %d\n",
                                     pages, lba, page);

                        /*
                         * This is not really an error. It just means
                         * that the block has never been written.
                         * Instead of returning an error
                         * it is better to return all zero data.
                         */
                        memset(buffer, 0, len);
                } else {
                        usb_stor_dbg(us, "Read %d pages, from PBA %d (LBA %d) page %d\n",
                                     pages, pba, lba, page);

                        /* Device addresses count 16-bit shorts, hence >>1. */
                        address = ((pba << info->blockshift) + page) <<
                                info->pageshift;

                        result = sddr09_read20(us, address>>1,
                                               pages, info->pageshift, buffer, 0);
                        if (result)
                                break;
                }

                // Store the data in the transfer buffer
                usb_stor_access_xfer_buf(buffer, len, us->srb,
                                         &sg, &offset, TO_XFER_BUF);

                page = 0;
                lba++;
                sectors -= pages;
        }

        kfree(buffer);

        return result;
}
/*
 * Pick an unused physical block in the 1024-PBA zone that holds `lba`
 * (each zone carries at most 1000 logical blocks). Returns the PBA, or
 * 0 when the zone has no free block (PBA 0 is never handed out since
 * it is reserved).
 *
 * NOTE(review): lastpba is static, so the round-robin starting point is
 * shared across zones and across devices -- presumably intended as
 * simple wear leveling; confirm before changing.
 */
static unsigned int
sddr09_find_unused_pba(struct sddr09_card_info *info, unsigned int lba) {
        static unsigned int lastpba = 1;
        int zonestart, end, i;

        zonestart = (lba/1000) << 10;
        end = info->capacity >> (info->blockshift + info->pageshift);
        end -= zonestart;
        if (end > 1024)
                end = 1024;

        /* Scan forward from the last handout, then wrap around. */
        for (i = lastpba+1; i < end; i++) {
                if (info->pba_to_lba[zonestart+i] == UNDEF) {
                        lastpba = i;
                        return zonestart+i;
                }
        }
        for (i = 0; i <= lastpba; i++) {
                if (info->pba_to_lba[zonestart+i] == UNDEF) {
                        lastpba = i;
                        return zonestart+i;
                }
        }
        return 0;
}
/*
 * Rewrite `pages` pages (starting at `page`) of logical block `lba` with
 * the data at `ptr`. The whole physical block is read into `blockbuffer`
 * (data+control per page), patched in memory with fresh ECC and the LBA
 * address field, then erased and rewritten in place.
 * Allocates a new PBA from the zone if the LBA was never written.
 */
static int
sddr09_write_lba(struct us_data *us, unsigned int lba,
                 unsigned int page, unsigned int pages,
                 unsigned char *ptr, unsigned char *blockbuffer) {

        struct sddr09_card_info *info = (struct sddr09_card_info *) us->extra;
        unsigned long address;
        unsigned int pba, lbap;
        unsigned int pagelen;
        unsigned char *bptr, *cptr, *xptr;
        unsigned char ecc[3];
        int i, result;

        /* Encode the zone-relative LBA into the SmartMedia address field:
         * value 0x1000 | (lba%1000)<<1, with the low bit set for even
         * parity of the two address bytes. */
        lbap = ((lba % 1000) << 1) | 0x1000;
        if (parity[MSB_of(lbap) ^ LSB_of(lbap)])
                lbap ^= 1;
        pba = info->lba_to_pba[lba];

        if (pba == UNDEF) {
                pba = sddr09_find_unused_pba(info, lba);
                if (!pba) {
                        printk(KERN_WARNING
                               "sddr09_write_lba: Out of unused blocks\n");
                        return -ENOSPC;
                }
                info->pba_to_lba[pba] = lba;
                info->lba_to_pba[lba] = pba;
        }

        if (pba == 1) {
                /*
                 * Maybe it is impossible to write to PBA 1.
                 * Fake success, but don't do anything.
                 */
                printk(KERN_WARNING "sddr09: avoid writing to pba 1\n");
                return 0;
        }

        pagelen = (1 << info->pageshift) + (1 << CONTROL_SHIFT);

        /* read old contents */
        address = (pba << (info->pageshift + info->blockshift));
        result = sddr09_read22(us, address>>1, info->blocksize,
                               info->pageshift, blockbuffer, 0);
        if (result)
                return result;

        /* check old contents and fill lba */
        for (i = 0; i < info->blocksize; i++) {
                bptr = blockbuffer + i*pagelen;
                cptr = bptr + info->pagesize;
                /* ECC over the first half of the page lives at cptr+13,
                 * over the second half at cptr+8. */
                nand_compute_ecc(bptr, ecc);
                if (!nand_compare_ecc(cptr+13, ecc)) {
                        usb_stor_dbg(us, "Warning: bad ecc in page %d- of pba %d\n",
                                     i, pba);
                        nand_store_ecc(cptr+13, ecc);
                }
                nand_compute_ecc(bptr+(info->pagesize / 2), ecc);
                if (!nand_compare_ecc(cptr+8, ecc)) {
                        usb_stor_dbg(us, "Warning: bad ecc in page %d+ of pba %d\n",
                                     i, pba);
                        nand_store_ecc(cptr+8, ecc);
                }
                /* The LBA address field is stored twice per page. */
                cptr[6] = cptr[11] = MSB_of(lbap);
                cptr[7] = cptr[12] = LSB_of(lbap);
        }

        /* copy in new stuff and compute ECC */
        xptr = ptr;
        for (i = page; i < page+pages; i++) {
                bptr = blockbuffer + i*pagelen;
                cptr = bptr + info->pagesize;
                memcpy(bptr, xptr, info->pagesize);
                xptr += info->pagesize;
                nand_compute_ecc(bptr, ecc);
                nand_store_ecc(cptr+13, ecc);
                nand_compute_ecc(bptr+(info->pagesize / 2), ecc);
                nand_store_ecc(cptr+8, ecc);
        }

        usb_stor_dbg(us, "Rewrite PBA %d (LBA %d)\n", pba, lba);

        result = sddr09_write_inplace(us, address>>1, info->blocksize,
                                      info->pageshift, blockbuffer, 0);

        usb_stor_dbg(us, "sddr09_write_inplace returns %d\n", result);

#if 0
        {
                unsigned char status = 0;
                int result2 = sddr09_read_status(us, &status);
                if (result2)
                        usb_stor_dbg(us, "cannot read status\n");
                else if (status != 0xc0)
                        usb_stor_dbg(us, "status after write: 0x%x\n", status);
        }
#endif

#if 0
        {
                int result2 = sddr09_test_unit_ready(us);
        }
#endif

        return result;
}
/*
 * Write `sectors` pages starting at logical page address `address`.
 * Data is pulled from the SCSI transfer buffer of us->srb one block at
 * a time and handed to sddr09_write_lba. Returns 0 or negative error.
 */
static int
sddr09_write_data(struct us_data *us,
                  unsigned long address,
                  unsigned int sectors) {

        struct sddr09_card_info *info = (struct sddr09_card_info *) us->extra;
        unsigned int lba, maxlba, page, pages;
        unsigned int pagelen, blocklen;
        unsigned char *blockbuffer;
        unsigned char *buffer;
        unsigned int len, offset;
        struct scatterlist *sg;
        int result;

        /* Figure out the initial LBA and page */
        lba = address >> info->blockshift;
        page = (address & info->blockmask);
        maxlba = info->capacity >> (info->pageshift + info->blockshift);
        if (lba >= maxlba)
                return -EIO;

        /*
         * blockbuffer is used for reading in the old data, overwriting
         * with the new data, and performing ECC calculations
         */

        /*
         * TODO: instead of doing kmalloc/kfree for each write,
         * add a bufferpointer to the info structure
         */

        pagelen = (1 << info->pageshift) + (1 << CONTROL_SHIFT);
        blocklen = (pagelen << info->blockshift);
        blockbuffer = kmalloc(blocklen, GFP_NOIO);
        if (!blockbuffer)
                return -ENOMEM;

        /*
         * Since we don't write the user data directly to the device,
         * we have to create a bounce buffer and move the data a piece
         * at a time between the bounce buffer and the actual transfer buffer.
         */

        len = min(sectors, (unsigned int) info->blocksize) * info->pagesize;
        buffer = kmalloc(len, GFP_NOIO);
        if (!buffer) {
                kfree(blockbuffer);
                return -ENOMEM;
        }

        result = 0;
        offset = 0;
        sg = NULL;

        while (sectors > 0) {

                /* Write as many sectors as possible in this block */

                pages = min(sectors, info->blocksize - page);
                len = (pages << info->pageshift);

                /* Not overflowing capacity? */
                if (lba >= maxlba) {
                        usb_stor_dbg(us, "Error: Requested lba %u exceeds maximum %u\n",
                                     lba, maxlba);
                        result = -EIO;
                        break;
                }

                /* Get the data from the transfer buffer */
                usb_stor_access_xfer_buf(buffer, len, us->srb,
                                         &sg, &offset, FROM_XFER_BUF);

                result = sddr09_write_lba(us, lba, page, pages,
                                          buffer, blockbuffer);
                if (result)
                        break;

                page = 0;
                lba++;
                sectors -= pages;
        }

        kfree(buffer);
        kfree(blockbuffer);

        return result;
}
/*
 * Fetch the 64-byte control area of `blocks` consecutive blocks starting
 * at device address `address` (in shorts), via the blockwise-control read.
 */
static int
sddr09_read_control(struct us_data *us,
                    unsigned long address,
                    unsigned int blocks,
                    unsigned char *content,
                    int use_sg) {

        usb_stor_dbg(us, "Read control address %lu, blocks %d\n",
                     address, blocks);
        return sddr09_read21(us, address, blocks,
                             CONTROL_SHIFT, content, use_sg);
}
/*
 * Read Device ID Command: 12 bytes.
 * byte 0: opcode: ED
 *
 * Returns 2 bytes: Manufacturer ID and Device ID.
 * On more recent cards 3 bytes: the third byte is an option code A5
 * signifying that the secret command to read an 128-bit ID is available.
 * On still more recent cards 4 bytes: the fourth byte C0 means that
 * a second read ID cmd is available.
 */
static int
sddr09_read_deviceID(struct us_data *us, unsigned char *deviceID) {
        unsigned char *command = us->iobuf;
        unsigned char *content = us->iobuf;
        int result;

        memset(command, 0, 12);
        command[0] = 0xED;
        command[1] = LUNBITS;

        result = sddr09_send_scsi_command(us, command, 12);
        if (result)
                return result;

        /* The reply is a 64-byte packet of which only the first four
         * bytes carry the ID. */
        result = usb_stor_bulk_transfer_buf(us, us->recv_bulk_pipe,
                                            content, 64, NULL);
        memcpy(deviceID, content, 4);

        return (result == USB_STOR_XFER_GOOD ? 0 : -EIO);
}
/*
 * Query the device status byte and record write protection in
 * info->flags (SDDR09_WP). Status bit 7 clear means write-protected;
 * bit 6 is Ready, bit 5 Suspended, bit 0 Error (see sddr09_read_status).
 */
static int
sddr09_get_wp(struct us_data *us, struct sddr09_card_info *info) {
        int result;
        unsigned char status;
        const char *wp_fmt;

        result = sddr09_read_status(us, &status);
        if (result) {
                usb_stor_dbg(us, "read_status fails\n");
                return result;
        }
        if ((status & 0x80) == 0) {
                info->flags |= SDDR09_WP;       /* write protected */
                wp_fmt = " WP";
        } else {
                wp_fmt = "";
        }
        /* LUNBITS == 0x20 doubles as the mask for the Suspended bit. */
        usb_stor_dbg(us, "status 0x%02X%s%s%s%s\n", status, wp_fmt,
                     status & 0x40 ? " Ready" : "",
                     status & LUNBITS ? " Suspended" : "",
                     status & 0x01 ? " Error" : "");
        return 0;
}
#if 0
/*
 * Reset Command: 12 bytes.
 * byte 0: opcode: EB
 *
 * NOTE(review): dead code, kept as protocol documentation.
 */
static int
sddr09_reset(struct us_data *us) {

        unsigned char *command = us->iobuf;

        memset(command, 0, 12);
        command[0] = 0xEB;
        command[1] = LUNBITS;

        return sddr09_send_scsi_command(us, command, 12);
}
#endif
/*
 * Read the card's device ID and translate it into a geometry entry from
 * nand_flash_ids[], logging a human-readable description. Returns NULL
 * if the ID cannot be read or is not in the table.
 */
static struct nand_flash_dev *
sddr09_get_cardinfo(struct us_data *us, unsigned char flags) {
        struct nand_flash_dev *cardinfo;
        unsigned char deviceID[4];
        char blurbtxt[256];
        int result;

        usb_stor_dbg(us, "Reading capacity...\n");

        result = sddr09_read_deviceID(us, deviceID);

        if (result) {
                usb_stor_dbg(us, "Result of read_deviceID is %d\n", result);
                printk(KERN_WARNING "sddr09: could not read card info\n");
                return NULL;
        }

        sprintf(blurbtxt, "sddr09: Found Flash card, ID = %4ph", deviceID);

        /* Byte 0 is the manufacturer */
        sprintf(blurbtxt + strlen(blurbtxt),
                ": Manuf. %s",
                nand_flash_manufacturer(deviceID[0]));

        /* Byte 1 is the device type */
        cardinfo = nand_find_id(deviceID[1]);
        if (cardinfo) {
                /*
                 * MB or MiB? It is neither. A 16 MB card has
                 * 17301504 raw bytes, of which 16384000 are
                 * usable for user data.
                 */
                sprintf(blurbtxt + strlen(blurbtxt),
                        ", %d MB", 1<<(cardinfo->chipshift - 20));
        } else {
                sprintf(blurbtxt + strlen(blurbtxt),
                        ", type unrecognized");
        }

        /* Byte 2 is code to signal availability of 128-bit ID */
        if (deviceID[2] == 0xa5) {
                sprintf(blurbtxt + strlen(blurbtxt),
                        ", 128-bit ID");
        }

        /* Byte 3 announces the availability of another read ID command */
        if (deviceID[3] == 0xc0) {
                sprintf(blurbtxt + strlen(blurbtxt),
                        ", extra cmd");
        }

        if (flags & SDDR09_WP)
                sprintf(blurbtxt + strlen(blurbtxt),
                        ", WP");

        printk(KERN_WARNING "%s\n", blurbtxt);

        return cardinfo;
}
/*
 * Build the LBA<->PBA translation tables by scanning every block's
 * control area. Each 1024-block zone carries at most 1000 logical
 * blocks; blocks with a bad/missing address field become UNUSABLE,
 * surplus valid blocks become SPARE. On success info->lba_to_pba,
 * info->pba_to_lba and info->lbact are filled in; returns 0, else -1.
 */
static int
sddr09_read_map(struct us_data *us) {

        struct sddr09_card_info *info = (struct sddr09_card_info *) us->extra;
        int numblocks, alloc_len, alloc_blocks;
        int i, j, result;
        unsigned char *buffer, *buffer_end, *ptr;
        unsigned int lba, lbact;

        if (!info->capacity)
                return -1;

        /*
         * size of a block is 1 << (blockshift + pageshift) bytes
         * divide into the total capacity to get the number of blocks
         */
        numblocks = info->capacity >> (info->blockshift + info->pageshift);

        /*
         * read 64 bytes for every block (actually 1 << CONTROL_SHIFT)
         * but only use a 64 KB buffer
         * buffer size used must be a multiple of (1 << CONTROL_SHIFT)
         */
#define SDDR09_READ_MAP_BUFSZ 65536
        alloc_blocks = min(numblocks, SDDR09_READ_MAP_BUFSZ >> CONTROL_SHIFT);
        alloc_len = (alloc_blocks << CONTROL_SHIFT);
        buffer = kmalloc(alloc_len, GFP_NOIO);
        if (!buffer) {
                result = -1;
                goto done;
        }
        buffer_end = buffer + alloc_len;
#undef SDDR09_READ_MAP_BUFSZ

        kfree(info->lba_to_pba);
        kfree(info->pba_to_lba);
        info->lba_to_pba = kmalloc_array(numblocks, sizeof(int), GFP_NOIO);
        info->pba_to_lba = kmalloc_array(numblocks, sizeof(int), GFP_NOIO);

        if (info->lba_to_pba == NULL || info->pba_to_lba == NULL) {
                printk(KERN_WARNING "sddr09_read_map: out of memory\n");
                result = -1;
                goto done;
        }

        for (i = 0; i < numblocks; i++)
                info->lba_to_pba[i] = info->pba_to_lba[i] = UNDEF;

        /*
         * Define lba-pba translation table
         */

        /* ptr starts at buffer_end to force a refill on the first pass. */
        ptr = buffer_end;
        for (i = 0; i < numblocks; i++) {
                ptr += (1 << CONTROL_SHIFT);
                if (ptr >= buffer_end) {
                        unsigned long address;

                        /* Refill the window of control areas starting
                         * at block i (device addresses count shorts). */
                        address = i << (info->pageshift + info->blockshift);
                        result = sddr09_read_control(
                                us, address>>1,
                                min(alloc_blocks, numblocks - i),
                                buffer, 0);
                        if (result) {
                                result = -1;
                                goto done;
                        }
                        ptr = buffer;
                }

                /* PBAs 0 and 1 are reserved (presumably CIS area). */
                if (i == 0 || i == 1) {
                        info->pba_to_lba[i] = UNUSABLE;
                        continue;
                }

                /* special PBAs have control field 0^16 */
                for (j = 0; j < 16; j++)
                        if (ptr[j] != 0)
                                goto nonz;
                info->pba_to_lba[i] = UNUSABLE;
                printk(KERN_WARNING "sddr09: PBA %d has no logical mapping\n",
                       i);
                continue;

        nonz:
                /* unwritten PBAs have control field FF^16 */
                for (j = 0; j < 16; j++)
                        if (ptr[j] != 0xff)
                                goto nonff;
                continue;

        nonff:
                /* normal PBAs start with six FFs */
                if (j < 6) {
                        printk(KERN_WARNING
                               "sddr09: PBA %d has no logical mapping: "
                               "reserved area = %02X%02X%02X%02X "
                               "data status %02X block status %02X\n",
                               i, ptr[0], ptr[1], ptr[2], ptr[3],
                               ptr[4], ptr[5]);
                        info->pba_to_lba[i] = UNUSABLE;
                        continue;
                }

                /* The address field must carry the 0x1 type nibble. */
                if ((ptr[6] >> 4) != 0x01) {
                        printk(KERN_WARNING
                               "sddr09: PBA %d has invalid address field "
                               "%02X%02X/%02X%02X\n",
                               i, ptr[6], ptr[7], ptr[11], ptr[12]);
                        info->pba_to_lba[i] = UNUSABLE;
                        continue;
                }

                /* check even parity */
                if (parity[ptr[6] ^ ptr[7]]) {
                        printk(KERN_WARNING
                               "sddr09: Bad parity in LBA for block %d"
                               " (%02X %02X)\n", i, ptr[6], ptr[7]);
                        info->pba_to_lba[i] = UNUSABLE;
                        continue;
                }

                lba = short_pack(ptr[7], ptr[6]);
                lba = (lba & 0x07FF) >> 1;

                /*
                 * Every 1024 physical blocks ("zone"), the LBA numbers
                 * go back to zero, but are within a higher block of LBA's.
                 * Also, there is a maximum of 1000 LBA's per zone.
                 * In other words, in PBA 1024-2047 you will find LBA 0-999
                 * which are really LBA 1000-1999. This allows for 24 bad
                 * or special physical blocks per zone.
                 */

                if (lba >= 1000) {
                        printk(KERN_WARNING
                               "sddr09: Bad low LBA %d for block %d\n",
                               lba, i);
                        goto possibly_erase;
                }

                lba += 1000*(i/0x400);

                if (info->lba_to_pba[lba] != UNDEF) {
                        printk(KERN_WARNING
                               "sddr09: LBA %d seen for PBA %d and %d\n",
                               lba, info->lba_to_pba[lba], i);
                        goto possibly_erase;
                }

                info->pba_to_lba[i] = lba;
                info->lba_to_pba[lba] = i;
                continue;

        possibly_erase:
                if (erase_bad_lba_entries) {
                        unsigned long address;

                        address = (i << (info->pageshift + info->blockshift));
                        sddr09_erase(us, address>>1);
                        info->pba_to_lba[i] = UNDEF;
                } else
                        info->pba_to_lba[i] = UNUSABLE;
        }

        /*
         * Approximate capacity. This is not entirely correct yet,
         * since a zone with less than 1000 usable pages leads to
         * missing LBAs. Especially if it is the last zone, some
         * LBAs can be past capacity.
         */
        lbact = 0;
        for (i = 0; i < numblocks; i += 1024) {
                int ct = 0;

                for (j = 0; j < 1024 && i+j < numblocks; j++) {
                        if (info->pba_to_lba[i+j] != UNUSABLE) {
                                if (ct >= 1000)
                                        info->pba_to_lba[i+j] = SPARE;
                                else
                                        ct++;
                        }
                }
                lbact += ct;
        }
        info->lbact = lbact;
        usb_stor_dbg(us, "Found %d LBA's\n", lbact);
        result = 0;

 done:
        if (result != 0) {
                kfree(info->lba_to_pba);
                kfree(info->pba_to_lba);
                info->lba_to_pba = NULL;
                info->pba_to_lba = NULL;
        }
        kfree(buffer);
        return result;
}
/* us->extra destructor: release the LBA/PBA translation tables.
 * (kfree(NULL) is a no-op, so unallocated maps are harmless.) */
static void
sddr09_card_info_destructor(void *extra) {
        struct sddr09_card_info *info = (struct sddr09_card_info *) extra;

        if (info) {
                kfree(info->lba_to_pba);
                kfree(info->pba_to_lba);
        }
}
/*
 * Shared initialization for both the plain SDDR-09 and the DPCM combo:
 * sanity-check the USB configuration, allocate the per-device info
 * structure, and build the ECC lookup tables.
 */
static int
sddr09_common_init(struct us_data *us) {
        int result;

        /* set the configuration -- STALL is an acceptable response here */
        if (us->pusb_dev->actconfig->desc.bConfigurationValue != 1) {
                usb_stor_dbg(us, "active config #%d != 1 ??\n",
                             us->pusb_dev->actconfig->desc.bConfigurationValue);
                return -EINVAL;
        }

        result = usb_reset_configuration(us->pusb_dev);
        usb_stor_dbg(us, "Result of usb_reset_configuration is %d\n", result);
        if (result == -EPIPE) {
                usb_stor_dbg(us, "-- stall on control interface\n");
        } else if (result != 0) {
                /* it's not a stall, but another error -- time to bail */
                usb_stor_dbg(us, "-- Unknown error. Rejecting device\n");
                return -EINVAL;
        }

        us->extra = kzalloc(sizeof(struct sddr09_card_info), GFP_NOIO);
        if (!us->extra)
                return -ENOMEM;
        us->extra_destructor = sddr09_card_info_destructor;

        nand_init_ecc();
        return 0;
}
/*
 * This is needed at a very early stage. If this is not listed in the
 * unusual devices list but called from here then LUN 0 of the combo reader
 * is not recognized. But I do not know what precisely these calls do.
 */
static int
usb_stor_sddr09_dpcm_init(struct us_data *us) {
        int result;
        unsigned char *data = us->iobuf;

        result = sddr09_common_init(us);
        if (result)
                return result;

        /* Two vendor requests (1 and 8) reading two bytes each; the
         * expected replies are noted below. */
        result = sddr09_send_command(us, 0x01, USB_DIR_IN, data, 2);
        if (result) {
                usb_stor_dbg(us, "send_command fails\n");
                return result;
        }

        usb_stor_dbg(us, "%02X %02X\n", data[0], data[1]);
        // get 07 02

        result = sddr09_send_command(us, 0x08, USB_DIR_IN, data, 2);
        if (result) {
                usb_stor_dbg(us, "2nd send_command fails\n");
                return result;
        }

        usb_stor_dbg(us, "%02X %02X\n", data[0], data[1]);
        // get 07 00

        result = sddr09_request_sense(us, data, 18);
        if (result == 0 && data[2] != 0) {
                int j;
                for (j=0; j<18; j++)
                        printk(" %02X", data[j]);
                printk("\n");
                // get 70 00 00 00 00 00 00 * 00 00 00 00 00 00
                // 70: current command
                // sense key 0, sense code 0, extd sense code 0
                // additional transfer length * = sizeof(data) - 7
                // Or: 70 00 06 00 00 00 00 0b 00 00 00 00 28 00 00 00 00 00
                // sense key 06, sense code 28: unit attention,
                // not ready to ready transition
        }

        // test unit ready

        /* Sense errors above are informational only. */
        return 0;               /* not result */
}
/*
* Transport for the Microtech DPCM-USB
*/
/*
 * Dispatch a SCSI command to the right half of the Microtech DPCM-USB
 * combo reader: LUN 0 is the CompactFlash slot (Control/Bulk transport),
 * LUN 1 is the SmartMedia slot (SDDR09 transport).
 */
static int dpcm_transport(struct scsi_cmnd *srb, struct us_data *us)
{
	int ret;

	usb_stor_dbg(us, "LUN=%d\n", (u8)srb->device->lun);

	switch (srb->device->lun) {
	case 0:
		/*
		 * LUN 0 corresponds to the CompactFlash card reader.
		 */
		ret = usb_stor_CB_transport(srb, us);
		break;

	case 1:
		/*
		 * LUN 1 corresponds to the SmartMedia card reader.
		 */

		/*
		 * Set the LUN to 0 (just in case).  The sddr09 transport
		 * expects a single-LUN device; restore LUN 1 afterwards.
		 */
		srb->device->lun = 0;
		ret = sddr09_transport(srb, us);
		srb->device->lun = 1;
		break;

	default:
		usb_stor_dbg(us, "Invalid LUN %d\n", (u8)srb->device->lun);
		ret = USB_STOR_TRANSPORT_ERROR;
		break;
	}
	return ret;
}
/*
* Transport for the Sandisk SDDR-09
*/
static int sddr09_transport(struct scsi_cmnd *srb, struct us_data *us)
{
	/*
	 * Fake-sense bookkeeping for the SCSI commands emulated below.
	 * NOTE(review): these are function-level statics, shared by every
	 * SDDR-09 device in the system and not reentrant -- presumably fine
	 * because the SCSI midlayer serializes commands per host, but
	 * confirm before relying on multi-device concurrency.
	 */
	static unsigned char sensekey = 0, sensecode = 0;
	static unsigned char havefakesense = 0;
	int result, i;
	unsigned char *ptr = us->iobuf;
	unsigned long capacity;
	unsigned int page, pages;

	struct sddr09_card_info *info;

	/* Canned INQUIRY data: removable medium, ANSI version 2. */
	static unsigned char inquiry_response[8] = {
		0x00, 0x80, 0x00, 0x02, 0x1F, 0x00, 0x00, 0x00
	};

	/* note: no block descriptor support */
	static unsigned char mode_page_01[19] = {
		0x00, 0x0F, 0x00, 0x0, 0x0, 0x0, 0x00,
		0x01, 0x0A,
		0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
	};

	info = (struct sddr09_card_info *)us->extra;

	if (srb->cmnd[0] == REQUEST_SENSE && havefakesense) {
		/* for a faked command, we have to follow with a faked sense */
		memset(ptr, 0, 18);
		ptr[0] = 0x70;			/* fixed-format sense, current */
		ptr[2] = sensekey;
		ptr[7] = 11;			/* additional sense length */
		ptr[12] = sensecode;
		usb_stor_set_xfer_buf(ptr, 18, srb);
		sensekey = sensecode = havefakesense = 0;
		return USB_STOR_TRANSPORT_GOOD;
	}

	havefakesense = 1;

	/*
	 * Dummy up a response for INQUIRY since SDDR09 doesn't
	 * respond to INQUIRY commands
	 */

	if (srb->cmnd[0] == INQUIRY) {
		memcpy(ptr, inquiry_response, 8);
		fill_inquiry_response(us, ptr, 36);
		return USB_STOR_TRANSPORT_GOOD;
	}

	if (srb->cmnd[0] == READ_CAPACITY) {
		struct nand_flash_dev *cardinfo;

		sddr09_get_wp(us, info);	/* read WP bit */

		cardinfo = sddr09_get_cardinfo(us, info->flags);
		if (!cardinfo) {
			/* probably no media */
		init_error:
			sensekey = 0x02;	/* not ready */
			sensecode = 0x3a;	/* medium not present */
			return USB_STOR_TRANSPORT_FAILED;
		}

		/* Cache the card geometry derived from the chip ID. */
		info->capacity = (1 << cardinfo->chipshift);
		info->pageshift = cardinfo->pageshift;
		info->pagesize = (1 << info->pageshift);
		info->blockshift = cardinfo->blockshift;
		info->blocksize = (1 << info->blockshift);
		info->blockmask = info->blocksize - 1;

		// map initialization, must follow get_cardinfo()
		if (sddr09_read_map(us)) {
			/* probably out of memory */
			goto init_error;
		}

		// Report capacity
		capacity = (info->lbact << info->blockshift) - 1;
		((__be32 *) ptr)[0] = cpu_to_be32(capacity);

		// Report page size
		((__be32 *) ptr)[1] = cpu_to_be32(info->pagesize);
		usb_stor_set_xfer_buf(ptr, 8, srb);

		return USB_STOR_TRANSPORT_GOOD;
	}

	if (srb->cmnd[0] == MODE_SENSE_10) {
		int modepage = (srb->cmnd[2] & 0x3F);

		/*
		 * They ask for the Read/Write error recovery page,
		 * or for all pages.
		 */
		/* %% We should check DBD %% */
		if (modepage == 0x01 || modepage == 0x3F) {
			usb_stor_dbg(us, "Dummy up request for mode page 0x%x\n",
				     modepage);

			memcpy(ptr, mode_page_01, sizeof(mode_page_01));
			((__be16*)ptr)[0] = cpu_to_be16(sizeof(mode_page_01) - 2);
			/* Reflect the write-protect tab in the WP bit. */
			ptr[3] = (info->flags & SDDR09_WP) ? 0x80 : 0;
			usb_stor_set_xfer_buf(ptr, sizeof(mode_page_01), srb);
			return USB_STOR_TRANSPORT_GOOD;
		}

		sensekey = 0x05;	/* illegal request */
		sensecode = 0x24;	/* invalid field in CDB */
		return USB_STOR_TRANSPORT_FAILED;
	}

	if (srb->cmnd[0] == ALLOW_MEDIUM_REMOVAL)
		return USB_STOR_TRANSPORT_GOOD;

	/* From here on the command reaches real hardware: real sense applies. */
	havefakesense = 0;

	if (srb->cmnd[0] == READ_10) {
		/* Assemble the 32-bit LBA and 16-bit count from the CDB. */
		page = short_pack(srb->cmnd[3], srb->cmnd[2]);
		page <<= 16;
		page |= short_pack(srb->cmnd[5], srb->cmnd[4]);
		pages = short_pack(srb->cmnd[8], srb->cmnd[7]);

		usb_stor_dbg(us, "READ_10: read page %d pagect %d\n",
			     page, pages);

		result = sddr09_read_data(us, page, pages);
		return (result == 0 ? USB_STOR_TRANSPORT_GOOD :
			USB_STOR_TRANSPORT_ERROR);
	}

	if (srb->cmnd[0] == WRITE_10) {
		page = short_pack(srb->cmnd[3], srb->cmnd[2]);
		page <<= 16;
		page |= short_pack(srb->cmnd[5], srb->cmnd[4]);
		pages = short_pack(srb->cmnd[8], srb->cmnd[7]);

		usb_stor_dbg(us, "WRITE_10: write page %d pagect %d\n",
			     page, pages);

		result = sddr09_write_data(us, page, pages);
		return (result == 0 ? USB_STOR_TRANSPORT_GOOD :
			USB_STOR_TRANSPORT_ERROR);
	}

	/*
	 * catch-all for all other commands, except
	 * pass TEST_UNIT_READY and REQUEST_SENSE through
	 */
	if (srb->cmnd[0] != TEST_UNIT_READY &&
	    srb->cmnd[0] != REQUEST_SENSE) {
		sensekey = 0x05;	/* illegal request */
		sensecode = 0x20;	/* invalid command */
		havefakesense = 1;
		return USB_STOR_TRANSPORT_FAILED;
	}

	/* Zero-pad the CDB out to the 12 bytes the device expects. */
	for (; srb->cmd_len<12; srb->cmd_len++)
		srb->cmnd[srb->cmd_len] = 0;

	srb->cmnd[1] = LUNBITS;

	/* Build a hex dump of the CDB in iobuf for the debug log below. */
	ptr[0] = 0;
	for (i=0; i<12; i++)
		sprintf(ptr+strlen(ptr), "%02X ", srb->cmnd[i]);

	usb_stor_dbg(us, "Send control for command %s\n", ptr);

	result = sddr09_send_scsi_command(us, srb->cmnd, 12);
	if (result) {
		usb_stor_dbg(us, "sddr09_send_scsi_command returns %d\n",
			     result);
		return USB_STOR_TRANSPORT_ERROR;
	}

	if (scsi_bufflen(srb) == 0)
		return USB_STOR_TRANSPORT_GOOD;

	/* Move the data phase over the bulk pipe matching the direction. */
	if (srb->sc_data_direction == DMA_TO_DEVICE ||
	    srb->sc_data_direction == DMA_FROM_DEVICE) {
		unsigned int pipe = (srb->sc_data_direction == DMA_TO_DEVICE)
				? us->send_bulk_pipe : us->recv_bulk_pipe;

		usb_stor_dbg(us, "%s %d bytes\n",
			     (srb->sc_data_direction == DMA_TO_DEVICE) ?
			     "sending" : "receiving",
			     scsi_bufflen(srb));

		result = usb_stor_bulk_srb(us, pipe, srb);

		return (result == USB_STOR_XFER_GOOD ?
			USB_STOR_TRANSPORT_GOOD : USB_STOR_TRANSPORT_ERROR);
	}

	return USB_STOR_TRANSPORT_GOOD;
}
/*
* Initialization routine for the sddr09 subdriver
*/
static int
usb_stor_sddr09_init(struct us_data *us) {
	/* A plain SDDR-09 needs nothing beyond the common initialization. */
	return sddr09_common_init(us);
}
static struct scsi_host_template sddr09_host_template;

/*
 * Bind a matching interface: let the usb-storage core do the generic probe,
 * then select the transport based on whether this is the DPCM combo reader
 * (two LUNs: CF + SmartMedia) or a plain SDDR-09 (single LUN).
 */
static int sddr09_probe(struct usb_interface *intf,
			const struct usb_device_id *id)
{
	struct us_data *us;
	int result;

	result = usb_stor_probe1(&us, intf, id,
			(id - sddr09_usb_ids) + sddr09_unusual_dev_list,
			&sddr09_host_template);
	if (result)
		return result;

	if (us->protocol == USB_PR_DPCM_USB) {
		us->transport_name = "Control/Bulk-EUSB/SDDR09";
		us->transport = dpcm_transport;
		us->transport_reset = usb_stor_CB_reset;
		us->max_lun = 1;
	} else {
		us->transport_name = "EUSB/SDDR09";
		us->transport = sddr09_transport;
		us->transport_reset = usb_stor_CB_reset;
		us->max_lun = 0;
	}

	result = usb_stor_probe2(us);
	return result;
}
/* USB driver glue; everything except probe is the usb-storage core helpers. */
static struct usb_driver sddr09_driver = {
	.name =		DRV_NAME,
	.probe =	sddr09_probe,
	.disconnect =	usb_stor_disconnect,
	.suspend =	usb_stor_suspend,
	.resume =	usb_stor_resume,
	.reset_resume =	usb_stor_reset_resume,
	.pre_reset =	usb_stor_pre_reset,
	.post_reset =	usb_stor_post_reset,
	.id_table =	sddr09_usb_ids,
	.soft_unbind =	1,
	.no_dynamic_id = 1,
};

module_usb_stor_driver(sddr09_driver, sddr09_host_template, DRV_NAME);
| linux-master | drivers/usb/storage/sddr09.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* Driver for Freecom USB/IDE adaptor
*
* Freecom v0.1:
*
* First release
*
* Current development and maintenance by:
* (C) 2000 David Brown <[email protected]>
*
* This driver was developed with information provided in FREECOM's USB
* Programmers Reference Guide. For further information contact Freecom
* (https://www.freecom.de/)
*/
#include <linux/module.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include "usb.h"
#include "transport.h"
#include "protocol.h"
#include "debug.h"
#include "scsiglue.h"
#define DRV_NAME "ums-freecom"
MODULE_DESCRIPTION("Driver for Freecom USB/IDE adaptor");
MODULE_AUTHOR("David Brown <[email protected]>");
MODULE_LICENSE("GPL");
MODULE_IMPORT_NS(USB_STORAGE);
#ifdef CONFIG_USB_STORAGE_DEBUG
static void pdump(struct us_data *us, void *ibuffer, int length);
#endif
/* Bits of HD_STATUS */
#define ERR_STAT 0x01
#define DRQ_STAT 0x08
/* All of the outgoing packets are 64 bytes long. */
/* ATAPI command packet sent to the bridge over the bulk-out pipe. */
struct freecom_cb_wrap {
	u8    Type;		/* Command type. */
	u8    Timeout;		/* Timeout in seconds. */
	u8    Atapi[12];	/* An ATAPI packet. */
	u8    Filler[50];	/* Padding Data. */
};

/* Data-transfer request: announces how many bytes follow on the bulk pipe. */
struct freecom_xfer_wrap {
	u8    Type;		/* Command type. */
	u8    Timeout;		/* Timeout in seconds. */
	__le32 Count;		/* Number of bytes to transfer. */
	u8    Pad[58];
} __attribute__ ((packed));

/* Write a single IDE register on the bridge. */
struct freecom_ide_out {
	u8    Type;		/* Type + IDE register. */
	u8    Pad;
	__le16 Value;		/* Value to write. */
	u8    Pad2[60];
};

/* Read a single IDE register from the bridge. */
struct freecom_ide_in {
	u8    Type;		/* Type | IDE register. */
	u8    Pad[63];
};

/*
 * Status packet returned by the bridge; only the first 4 bytes
 * (FCM_STATUS_PACKET_LENGTH) carry data.
 */
struct freecom_status {
	u8    Status;
	u8    Reason;
	__le16 Count;
	u8    Pad[60];
};
/*
* Freecom stuffs the interrupt status in the INDEX_STAT bit of the ide
* register.
*/
#define FCM_INT_STATUS 0x02 /* INDEX_STAT */
#define FCM_STATUS_BUSY 0x80
/*
* These are the packet types. The low bit indicates that this command
* should wait for an interrupt.
*/
#define FCM_PACKET_ATAPI 0x21
#define FCM_PACKET_STATUS 0x20
/*
* Receive data from the IDE interface. The ATAPI packet has already
* waited, so the data should be immediately available.
*/
#define FCM_PACKET_INPUT 0x81
/* Send data to the IDE interface. */
#define FCM_PACKET_OUTPUT 0x01
/*
* Write a value to an ide register. Or the ide register to write after
* munging the address a bit.
*/
#define FCM_PACKET_IDE_WRITE 0x40
#define FCM_PACKET_IDE_READ 0xC0
/* All packets (except for status) are 64 bytes long. */
#define FCM_PACKET_LENGTH 64
#define FCM_STATUS_PACKET_LENGTH 4
static int init_freecom(struct us_data *us);
/*
* The table of devices
*/
#define UNUSUAL_DEV(id_vendor, id_product, bcdDeviceMin, bcdDeviceMax, \
vendorName, productName, useProtocol, useTransport, \
initFunction, flags) \
{ USB_DEVICE_VER(id_vendor, id_product, bcdDeviceMin, bcdDeviceMax), \
.driver_info = (flags) }
static struct usb_device_id freecom_usb_ids[] = {
# include "unusual_freecom.h"
{ } /* Terminating entry */
};
MODULE_DEVICE_TABLE(usb, freecom_usb_ids);
#undef UNUSUAL_DEV
/*
* The flags table
*/
#define UNUSUAL_DEV(idVendor, idProduct, bcdDeviceMin, bcdDeviceMax, \
vendor_name, product_name, use_protocol, use_transport, \
init_function, Flags) \
{ \
.vendorName = vendor_name, \
.productName = product_name, \
.useProtocol = use_protocol, \
.useTransport = use_transport, \
.initFunction = init_function, \
}
static struct us_unusual_dev freecom_unusual_dev_list[] = {
# include "unusual_freecom.h"
{ } /* Terminating entry */
};
#undef UNUSUAL_DEV
/*
 * Read 'count' bytes of ATAPI data from the bridge: first announce the
 * transfer length with an INPUT packet on the bulk-out pipe, then pull the
 * data for 'srb' over the bulk-in pipe.
 */
static int
freecom_readdata (struct scsi_cmnd *srb, struct us_data *us,
		unsigned int ipipe, unsigned int opipe, int count)
{
	struct freecom_xfer_wrap *fxfr =
		(struct freecom_xfer_wrap *) us->iobuf;
	int result;

	fxfr->Type = FCM_PACKET_INPUT | 0x00;
	fxfr->Timeout = 0;    /* Short timeout for debugging. */
	fxfr->Count = cpu_to_le32 (count);
	memset (fxfr->Pad, 0, sizeof (fxfr->Pad));

	usb_stor_dbg(us, "Read data Freecom! (c=%d)\n", count);

	/* Issue the transfer command. */
	result = usb_stor_bulk_transfer_buf (us, opipe, fxfr,
			FCM_PACKET_LENGTH, NULL);
	if (result != USB_STOR_XFER_GOOD) {
		usb_stor_dbg(us, "Freecom readdata transport error\n");
		return USB_STOR_TRANSPORT_ERROR;
	}

	/* Now transfer all of our blocks. */
	usb_stor_dbg(us, "Start of read\n");
	result = usb_stor_bulk_srb(us, ipipe, srb);
	usb_stor_dbg(us, "freecom_readdata done!\n");

	/* A short transfer is tolerated; anything worse is an error. */
	if (result > USB_STOR_XFER_SHORT)
		return USB_STOR_TRANSPORT_ERROR;
	return USB_STOR_TRANSPORT_GOOD;
}
/*
 * Write 'count' bytes of ATAPI data to the bridge: announce the length with
 * an OUTPUT packet, then push the data for 'srb' over the bulk-out pipe.
 * Mirror image of freecom_readdata().
 */
static int
freecom_writedata (struct scsi_cmnd *srb, struct us_data *us,
		int unsigned ipipe, unsigned int opipe, int count)
{
	struct freecom_xfer_wrap *fxfr =
		(struct freecom_xfer_wrap *) us->iobuf;
	int result;

	fxfr->Type = FCM_PACKET_OUTPUT | 0x00;
	fxfr->Timeout = 0;    /* Short timeout for debugging. */
	fxfr->Count = cpu_to_le32 (count);
	memset (fxfr->Pad, 0, sizeof (fxfr->Pad));

	usb_stor_dbg(us, "Write data Freecom! (c=%d)\n", count);

	/* Issue the transfer command. */
	result = usb_stor_bulk_transfer_buf (us, opipe, fxfr,
			FCM_PACKET_LENGTH, NULL);
	if (result != USB_STOR_XFER_GOOD) {
		usb_stor_dbg(us, "Freecom writedata transport error\n");
		return USB_STOR_TRANSPORT_ERROR;
	}

	/* Now transfer all of our blocks. */
	usb_stor_dbg(us, "Start of write\n");
	result = usb_stor_bulk_srb(us, opipe, srb);

	usb_stor_dbg(us, "freecom_writedata done!\n");
	/* A short transfer is tolerated; anything worse is an error. */
	if (result > USB_STOR_XFER_SHORT)
		return USB_STOR_TRANSPORT_ERROR;
	return USB_STOR_TRANSPORT_GOOD;
}
/*
* Transport for the Freecom USB/IDE adaptor.
*
*/
static int freecom_transport(struct scsi_cmnd *srb, struct us_data *us)
{
	struct freecom_cb_wrap *fcb;
	struct freecom_status *fst;
	unsigned int ipipe, opipe;		/* We need both pipes. */
	int result;
	unsigned int partial;
	int length;

	/* fcb and fst alias us->iobuf: the command is consumed before the status arrives. */
	fcb = (struct freecom_cb_wrap *) us->iobuf;
	fst = (struct freecom_status *) us->iobuf;

	usb_stor_dbg(us, "Freecom TRANSPORT STARTED\n");

	/* Get handles for both transports. */
	opipe = us->send_bulk_pipe;
	ipipe = us->recv_bulk_pipe;

	/* The ATAPI Command always goes out first. */
	fcb->Type = FCM_PACKET_ATAPI | 0x00;
	fcb->Timeout = 0;
	memcpy (fcb->Atapi, srb->cmnd, 12);
	memset (fcb->Filler, 0, sizeof (fcb->Filler));

	US_DEBUG(pdump(us, srb->cmnd, 12));

	/* Send it out. */
	result = usb_stor_bulk_transfer_buf (us, opipe, fcb,
			FCM_PACKET_LENGTH, NULL);

	/*
	 * The Freecom device will only fail if there is something wrong in
	 * USB land. It returns the status in its own registers, which
	 * come back in the bulk pipe.
	 */
	if (result != USB_STOR_XFER_GOOD) {
		usb_stor_dbg(us, "freecom transport error\n");
		return USB_STOR_TRANSPORT_ERROR;
	}

	/*
	 * There are times we can optimize out this status read, but it
	 * doesn't hurt us to always do it now.
	 */
	result = usb_stor_bulk_transfer_buf (us, ipipe, fst,
			FCM_STATUS_PACKET_LENGTH, &partial);
	usb_stor_dbg(us, "foo Status result %d %u\n", result, partial);
	if (result != USB_STOR_XFER_GOOD)
		return USB_STOR_TRANSPORT_ERROR;

	US_DEBUG(pdump(us, (void *)fst, partial));

	/*
	 * The firmware will time-out commands after 20 seconds. Some commands
	 * can legitimately take longer than this, so we use a different
	 * command that only waits for the interrupt and then sends status,
	 * without having to send a new ATAPI command to the device.
	 *
	 * NOTE: There is some indication that a data transfer after a timeout
	 * may not work, but that is a condition that should never happen.
	 */
	while (fst->Status & FCM_STATUS_BUSY) {
		usb_stor_dbg(us, "20 second USB/ATAPI bridge TIMEOUT occurred!\n");
		usb_stor_dbg(us, "fst->Status is %x\n", fst->Status);

		/* Get the status again */
		fcb->Type = FCM_PACKET_STATUS;
		fcb->Timeout = 0;
		memset (fcb->Atapi, 0, sizeof(fcb->Atapi));
		memset (fcb->Filler, 0, sizeof (fcb->Filler));

		/* Send it out. */
		result = usb_stor_bulk_transfer_buf (us, opipe, fcb,
				FCM_PACKET_LENGTH, NULL);

		/*
		 * The Freecom device will only fail if there is something
		 * wrong in USB land. It returns the status in its own
		 * registers, which come back in the bulk pipe.
		 */
		if (result != USB_STOR_XFER_GOOD) {
			usb_stor_dbg(us, "freecom transport error\n");
			return USB_STOR_TRANSPORT_ERROR;
		}

		/* get the data */
		result = usb_stor_bulk_transfer_buf (us, ipipe, fst,
				FCM_STATUS_PACKET_LENGTH, &partial);
		usb_stor_dbg(us, "bar Status result %d %u\n", result, partial);
		if (result != USB_STOR_XFER_GOOD)
			return USB_STOR_TRANSPORT_ERROR;

		US_DEBUG(pdump(us, (void *)fst, partial));
	}

	/* A valid status packet is exactly 4 bytes. */
	if (partial != 4)
		return USB_STOR_TRANSPORT_ERROR;
	if ((fst->Status & 1) != 0) {
		usb_stor_dbg(us, "operation failed\n");
		return USB_STOR_TRANSPORT_FAILED;
	}

	/*
	 * The device might not have as much data available as we
	 * requested. If you ask for more than the device has, this reads
	 * and such will hang.
	 */
	usb_stor_dbg(us, "Device indicates that it has %d bytes available\n",
		     le16_to_cpu(fst->Count));
	usb_stor_dbg(us, "SCSI requested %d\n", scsi_bufflen(srb));

	/* Find the length we desire to read. */
	switch (srb->cmnd[0]) {
	case INQUIRY:
	case REQUEST_SENSE:	/* 16 or 18 bytes? spec says 18, lots of devices only have 16 */
	case MODE_SENSE:
	case MODE_SENSE_10:
		/* Trust the device's count for variable-length responses. */
		length = le16_to_cpu(fst->Count);
		break;
	default:
		length = scsi_bufflen(srb);
	}

	/* verify that this amount is legal */
	if (length > scsi_bufflen(srb)) {
		length = scsi_bufflen(srb);
		usb_stor_dbg(us, "Truncating request to match buffer length: %d\n",
			     length);
	}

	/*
	 * What we do now depends on what direction the data is supposed to
	 * move in.
	 */

	switch (us->srb->sc_data_direction) {
	case DMA_FROM_DEVICE:
		/* catch bogus "read 0 length" case */
		if (!length)
			break;
		/*
		 * Make sure that the status indicates that the device
		 * wants data as well.
		 */
		if ((fst->Status & DRQ_STAT) == 0 || (fst->Reason & 3) != 2) {
			usb_stor_dbg(us, "SCSI wants data, drive doesn't have any\n");
			return USB_STOR_TRANSPORT_FAILED;
		}
		result = freecom_readdata (srb, us, ipipe, opipe, length);
		if (result != USB_STOR_TRANSPORT_GOOD)
			return result;

		/*
		 * NOTE(review): a full 64-byte read is requested here even
		 * though the 'partial != 4' check below expects a 4-byte
		 * status -- presumably the device short-reads; confirm.
		 */
		usb_stor_dbg(us, "Waiting for status\n");
		result = usb_stor_bulk_transfer_buf (us, ipipe, fst,
				FCM_PACKET_LENGTH, &partial);
		US_DEBUG(pdump(us, (void *)fst, partial));

		if (partial != 4 || result > USB_STOR_XFER_SHORT)
			return USB_STOR_TRANSPORT_ERROR;
		if ((fst->Status & ERR_STAT) != 0) {
			usb_stor_dbg(us, "operation failed\n");
			return USB_STOR_TRANSPORT_FAILED;
		}
		if ((fst->Reason & 3) != 3) {
			usb_stor_dbg(us, "Drive seems still hungry\n");
			return USB_STOR_TRANSPORT_FAILED;
		}
		usb_stor_dbg(us, "Transfer happy\n");
		break;

	case DMA_TO_DEVICE:
		/* catch bogus "write 0 length" case */
		if (!length)
			break;
		/*
		 * Make sure the status indicates that the device wants to
		 * send us data.
		 */
		/* !!IMPLEMENT!! */
		result = freecom_writedata (srb, us, ipipe, opipe, length);
		if (result != USB_STOR_TRANSPORT_GOOD)
			return result;

		usb_stor_dbg(us, "Waiting for status\n");
		result = usb_stor_bulk_transfer_buf (us, ipipe, fst,
				FCM_PACKET_LENGTH, &partial);

		if (partial != 4 || result > USB_STOR_XFER_SHORT)
			return USB_STOR_TRANSPORT_ERROR;
		if ((fst->Status & ERR_STAT) != 0) {
			usb_stor_dbg(us, "operation failed\n");
			return USB_STOR_TRANSPORT_FAILED;
		}
		if ((fst->Reason & 3) != 3) {
			usb_stor_dbg(us, "Drive seems still hungry\n");
			return USB_STOR_TRANSPORT_FAILED;
		}

		usb_stor_dbg(us, "Transfer happy\n");
		break;

	case DMA_NONE:
		/* Easy, do nothing. */
		break;

	default:
		/* should never hit here -- filtered in usb.c */
		usb_stor_dbg(us, "freecom unimplemented direction: %d\n",
			     us->srb->sc_data_direction);
		/* Return fail, SCSI seems to handle this better. */
		return USB_STOR_TRANSPORT_FAILED;
	}

	return USB_STOR_TRANSPORT_GOOD;
}
/*
 * One-time device initialization: fetch the bridge's ID string, then run the
 * vendor "magic" reset sequence.  The reset is best-effort -- all control
 * message results are logged but never cause the init to fail.
 */
static int init_freecom(struct us_data *us)
{
	int result;
	char *buffer = us->iobuf;

	/*
	 * The DMA-mapped I/O buffer is 64 bytes long, just right for
	 * all our packets. No need to allocate any extra buffer space.
	 */
	result = usb_stor_control_msg(us, us->recv_ctrl_pipe,
			0x4c, 0xc0, 0x4346, 0x0, buffer, 0x20, 3*HZ);
	if (result < 0) {
		/* Don't print an uninitialized buffer when the request failed. */
		usb_stor_dbg(us, "FC init string request failed: %d\n", result);
	} else {
		/*
		 * usb_stor_control_msg() returns the number of bytes actually
		 * received (<= 0x20); terminate the string there rather than
		 * at a fixed offset so no stale buffer bytes are printed.
		 */
		buffer[result] = '\0';
		usb_stor_dbg(us, "String returned from FC init is: %s\n", buffer);
	}

	/*
	 * Special thanks to the people at Freecom for providing me with
	 * this "magic sequence", which they use in their Windows and MacOS
	 * drivers to make sure that all the attached perhiperals are
	 * properly reset.
	 */

	/* send reset */
	result = usb_stor_control_msg(us, us->send_ctrl_pipe,
			0x4d, 0x40, 0x24d8, 0x0, NULL, 0x0, 3*HZ);
	usb_stor_dbg(us, "result from activate reset is %d\n", result);

	/* wait 250ms */
	msleep(250);

	/* clear reset */
	result = usb_stor_control_msg(us, us->send_ctrl_pipe,
			0x4d, 0x40, 0x24f8, 0x0, NULL, 0x0, 3*HZ);
	usb_stor_dbg(us, "result from clear reset is %d\n", result);

	/* wait 3 seconds */
	msleep(3 * 1000);

	/* USB_STOR_TRANSPORT_GOOD is 0, which is what init callbacks return. */
	return USB_STOR_TRANSPORT_GOOD;
}
/* Reset callback stub: the Freecom bridge has no usable reset operation. */
static int usb_stor_freecom_reset(struct us_data *us)
{
	printk (KERN_CRIT "freecom reset called\n");

	/* We don't really have this feature. */
	return FAILED;
}
#ifdef CONFIG_USB_STORAGE_DEBUG
/*
 * Debug hex/ASCII dump of 'length' bytes at 'ibuffer', 16 bytes per line.
 * NOTE: 'line' is static, so this helper is not reentrant; it is only
 * reached from CONFIG_USB_STORAGE_DEBUG paths.
 */
static void pdump(struct us_data *us, void *ibuffer, int length)
{
	static char line[80];
	int offset = 0;
	unsigned char *buffer = (unsigned char *) ibuffer;
	int i, j;
	int from, base;

	/*
	 * Nothing to dump.  This also guards the trailing-chunk code below,
	 * whose (length - 1) arithmetic would go negative for length == 0
	 * and emit a meaningless line (callers pass 'partial', which can
	 * legitimately be 0 after a failed transfer).
	 */
	if (length <= 0)
		return;

	offset = 0;
	for (i = 0; i < length; i++) {
		/* At the start of each 16-byte row, flush the previous row. */
		if ((i & 15) == 0) {
			if (i > 0) {
				/* Append the ASCII rendering of the finished row. */
				offset += sprintf (line+offset, " - ");
				for (j = i - 16; j < i; j++) {
					if (buffer[j] >= 32 && buffer[j] <= 126)
						line[offset++] = buffer[j];
					else
						line[offset++] = '.';
				}
				line[offset] = 0;
				usb_stor_dbg(us, "%s\n", line);
				offset = 0;
			}
			offset += sprintf (line+offset, "%08x:", i);
		} else if ((i & 7) == 0) {
			/* Visual separator between the two 8-byte halves. */
			offset += sprintf (line+offset, " -");
		}
		offset += sprintf (line+offset, " %02x", buffer[i] & 0xff);
	}

	/* Add the last "chunk" of data. */
	from = (length - 1) % 16;
	base = ((length - 1) / 16) * 16;

	/* Pad the hex area so the trailing ASCII column lines up. */
	for (i = from + 1; i < 16; i++)
		offset += sprintf (line+offset, "   ");
	if (from < 8)
		offset += sprintf (line+offset, "  ");
	offset += sprintf (line+offset, " - ");

	for (i = 0; i <= from; i++) {
		if (buffer[base+i] >= 32 && buffer[base+i] <= 126)
			line[offset++] = buffer[base+i];
		else
			line[offset++] = '.';
	}
	line[offset] = 0;
	usb_stor_dbg(us, "%s\n", line);
	offset = 0;
}
#endif
static struct scsi_host_template freecom_host_template;

/*
 * Bind a matching interface: generic usb-storage probe, then install the
 * Freecom-specific transport and reset callbacks.  The adaptor exposes a
 * single IDE device, hence max_lun = 0.
 */
static int freecom_probe(struct usb_interface *intf,
			 const struct usb_device_id *id)
{
	struct us_data *us;
	int result;

	result = usb_stor_probe1(&us, intf, id,
			(id - freecom_usb_ids) + freecom_unusual_dev_list,
			&freecom_host_template);
	if (result)
		return result;

	us->transport_name = "Freecom";
	us->transport = freecom_transport;
	us->transport_reset = usb_stor_freecom_reset;
	us->max_lun = 0;

	result = usb_stor_probe2(us);
	return result;
}
/* USB driver glue; everything except probe is the usb-storage core helpers. */
static struct usb_driver freecom_driver = {
	.name =		DRV_NAME,
	.probe =	freecom_probe,
	.disconnect =	usb_stor_disconnect,
	.suspend =	usb_stor_suspend,
	.resume =	usb_stor_resume,
	.reset_resume =	usb_stor_reset_resume,
	.pre_reset =	usb_stor_pre_reset,
	.post_reset =	usb_stor_post_reset,
	.id_table =	freecom_usb_ids,
	.soft_unbind =	1,
	.no_dynamic_id = 1,
};

module_usb_stor_driver(freecom_driver, freecom_host_template, DRV_NAME);
| linux-master | drivers/usb/storage/freecom.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* Support for the Maxtor OneTouch USB hard drive's button
*
* Current development and maintenance by:
* Copyright (c) 2005 Nick Sillik <[email protected]>
*
* Initial work by:
* Copyright (c) 2003 Erik Thyren <[email protected]>
*
* Based on usbmouse.c (Vojtech Pavlik) and xpad.c (Marko Friedemann)
*
*/
#include <linux/kernel.h>
#include <linux/input.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/usb/input.h>
#include "usb.h"
#include "debug.h"
#include "scsiglue.h"
#define DRV_NAME "ums-onetouch"
MODULE_DESCRIPTION("Maxtor USB OneTouch hard drive button driver");
MODULE_AUTHOR("Nick Sillik <[email protected]>");
MODULE_LICENSE("GPL");
MODULE_IMPORT_NS(USB_STORAGE);
#define ONETOUCH_PKT_LEN 0x02
#define ONETOUCH_BUTTON KEY_PROG1
static int onetouch_connect_input(struct us_data *ss);
static void onetouch_release_input(void *onetouch_);
/* Per-device state for the OneTouch button's input interface. */
struct usb_onetouch {
	char name[128];			/* display name reported to the input core */
	char phys[64];			/* physical path reported to the input core */
	struct input_dev *dev;	/* input device interface */
	struct usb_device *udev;	/* usb device */

	struct urb *irq;		/* urb for interrupt in report */
	unsigned char *data;	/* input data */
	dma_addr_t data_dma;
	unsigned int is_open:1;	/* set while the input device is opened */
};
/*
* The table of devices
*/
#define UNUSUAL_DEV(id_vendor, id_product, bcdDeviceMin, bcdDeviceMax, \
vendorName, productName, useProtocol, useTransport, \
initFunction, flags) \
{ USB_DEVICE_VER(id_vendor, id_product, bcdDeviceMin, bcdDeviceMax), \
.driver_info = (flags) }
static struct usb_device_id onetouch_usb_ids[] = {
# include "unusual_onetouch.h"
{ } /* Terminating entry */
};
MODULE_DEVICE_TABLE(usb, onetouch_usb_ids);
#undef UNUSUAL_DEV
/*
* The flags table
*/
#define UNUSUAL_DEV(idVendor, idProduct, bcdDeviceMin, bcdDeviceMax, \
vendor_name, product_name, use_protocol, use_transport, \
init_function, Flags) \
{ \
.vendorName = vendor_name, \
.productName = product_name, \
.useProtocol = use_protocol, \
.useTransport = use_transport, \
.initFunction = init_function, \
}
static struct us_unusual_dev onetouch_unusual_dev_list[] = {
# include "unusual_onetouch.h"
{ } /* Terminating entry */
};
#undef UNUSUAL_DEV
/*
 * Interrupt-URB completion handler: report the button state to the input
 * core and resubmit the URB for the next poll.
 */
static void usb_onetouch_irq(struct urb *urb)
{
	struct usb_onetouch *onetouch = urb->context;
	signed char *data = onetouch->data;
	struct input_dev *dev = onetouch->dev;
	int status = urb->status;
	int retval;

	switch (status) {
	case 0:			/* success */
		break;
	case -ECONNRESET:	/* unlink */
	case -ENOENT:
	case -ESHUTDOWN:
		/* URB was cancelled or the device is gone -- do not resubmit. */
		return;
	/* -EPIPE: should clear the halt */
	default:		/* error */
		goto resubmit;
	}

	/* Bit 0x02 of the first report byte carries the button state. */
	input_report_key(dev, ONETOUCH_BUTTON, data[0] & 0x02);
	input_sync(dev);

resubmit:
	retval = usb_submit_urb (urb, GFP_ATOMIC);
	if (retval)
		dev_err(&dev->dev, "can't resubmit intr, %s-%s/input0, "
			"retval %d\n", onetouch->udev->bus->bus_name,
			onetouch->udev->devpath, retval);
}
/*
 * input_dev open callback: start polling the interrupt endpoint.
 * Returns 0 on success, -EIO if the URB could not be submitted.
 */
static int usb_onetouch_open(struct input_dev *dev)
{
	struct usb_onetouch *onetouch = input_get_drvdata(dev);

	onetouch->is_open = 1;
	onetouch->irq->dev = onetouch->udev;
	if (usb_submit_urb(onetouch->irq, GFP_KERNEL)) {
		/*
		 * Submission failed, so the device is not really open; clear
		 * the flag again or the PM hook would try to resubmit the
		 * URB on resume for a device that never started polling.
		 */
		onetouch->is_open = 0;
		dev_err(&dev->dev, "usb_submit_urb failed\n");
		return -EIO;
	}

	return 0;
}
/* input_dev close callback: stop polling the interrupt endpoint. */
static void usb_onetouch_close(struct input_dev *dev)
{
	struct usb_onetouch *onetouch = input_get_drvdata(dev);

	/* Cancel the URB synchronously before marking the device closed. */
	usb_kill_urb(onetouch->irq);
	onetouch->is_open = 0;
}
#ifdef CONFIG_PM
/*
 * Suspend/resume hook installed into the usb-storage core: the interrupt
 * URB must be stopped across suspend and restarted on resume, but only if
 * the input device is currently open.
 */
static void usb_onetouch_pm_hook(struct us_data *us, int action)
{
	struct usb_onetouch *onetouch = (struct usb_onetouch *) us->extra;

	if (onetouch->is_open) {
		switch (action) {
		case US_SUSPEND:
			usb_kill_urb(onetouch->irq);
			break;
		case US_RESUME:
			/* GFP_NOIO: resume path must not recurse into block I/O. */
			if (usb_submit_urb(onetouch->irq, GFP_NOIO) != 0)
				dev_err(&onetouch->irq->dev->dev,
					"usb_submit_urb failed\n");
			break;
		default:
			break;
		}
	}
}
#endif /* CONFIG_PM */
/*
 * Set up the button's input interface: validate the expected third
 * (interrupt-in) endpoint, allocate device state and a coherent transfer
 * buffer, register an input device, and hook the state into us->extra so
 * onetouch_release_input() tears it all down again.
 * Returns 0 on success or a negative errno.
 */
static int onetouch_connect_input(struct us_data *ss)
{
	struct usb_device *udev = ss->pusb_dev;
	struct usb_host_interface *interface;
	struct usb_endpoint_descriptor *endpoint;
	struct usb_onetouch *onetouch;
	struct input_dev *input_dev;
	int pipe, maxp;
	int error = -ENOMEM;

	interface = ss->pusb_intf->cur_altsetting;

	/* The OneTouch exposes exactly three endpoints; the third is the button. */
	if (interface->desc.bNumEndpoints != 3)
		return -ENODEV;

	endpoint = &interface->endpoint[2].desc;
	if (!usb_endpoint_is_int_in(endpoint))
		return -ENODEV;

	pipe = usb_rcvintpipe(udev, endpoint->bEndpointAddress);
	maxp = usb_maxpacket(udev, pipe);
	/* Never read more than the 2-byte report buffer we allocate. */
	maxp = min(maxp, ONETOUCH_PKT_LEN);

	onetouch = kzalloc(sizeof(struct usb_onetouch), GFP_KERNEL);
	input_dev = input_allocate_device();
	if (!onetouch || !input_dev)
		goto fail1;

	/* DMA-coherent buffer for the interrupt transfers (no mapping per URB). */
	onetouch->data = usb_alloc_coherent(udev, ONETOUCH_PKT_LEN,
					    GFP_KERNEL, &onetouch->data_dma);
	if (!onetouch->data)
		goto fail1;

	onetouch->irq = usb_alloc_urb(0, GFP_KERNEL);
	if (!onetouch->irq)
		goto fail2;

	onetouch->udev = udev;
	onetouch->dev = input_dev;

	/* Build "<manufacturer> <product>" as the input device name... */
	if (udev->manufacturer)
		strscpy(onetouch->name, udev->manufacturer,
			sizeof(onetouch->name));
	if (udev->product) {
		if (udev->manufacturer)
			strlcat(onetouch->name, " ", sizeof(onetouch->name));
		strlcat(onetouch->name, udev->product, sizeof(onetouch->name));
	}

	/* ...falling back to the vendor:product IDs if both strings are absent. */
	if (!strlen(onetouch->name))
		snprintf(onetouch->name, sizeof(onetouch->name),
			 "Maxtor Onetouch %04x:%04x",
			 le16_to_cpu(udev->descriptor.idVendor),
			 le16_to_cpu(udev->descriptor.idProduct));

	usb_make_path(udev, onetouch->phys, sizeof(onetouch->phys));
	strlcat(onetouch->phys, "/input0", sizeof(onetouch->phys));

	input_dev->name = onetouch->name;
	input_dev->phys = onetouch->phys;
	usb_to_input_id(udev, &input_dev->id);
	input_dev->dev.parent = &udev->dev;

	/* The device emits a single key event. */
	set_bit(EV_KEY, input_dev->evbit);
	set_bit(ONETOUCH_BUTTON, input_dev->keybit);
	clear_bit(0, input_dev->keybit);

	input_set_drvdata(input_dev, onetouch);

	input_dev->open = usb_onetouch_open;
	input_dev->close = usb_onetouch_close;

	usb_fill_int_urb(onetouch->irq, udev, pipe, onetouch->data, maxp,
			 usb_onetouch_irq, onetouch, endpoint->bInterval);
	onetouch->irq->transfer_dma = onetouch->data_dma;
	onetouch->irq->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;

	/* Hand ownership to the usb-storage core for teardown. */
	ss->extra_destructor = onetouch_release_input;
	ss->extra = onetouch;
#ifdef CONFIG_PM
	ss->suspend_resume_hook = usb_onetouch_pm_hook;
#endif

	error = input_register_device(onetouch->dev);
	if (error)
		goto fail3;

	return 0;

	/* Unwind in reverse allocation order; kfree/input_free handle the rest. */
 fail3:	usb_free_urb(onetouch->irq);
 fail2:	usb_free_coherent(udev, ONETOUCH_PKT_LEN,
			  onetouch->data, onetouch->data_dma);
 fail1:	kfree(onetouch);
	input_free_device(input_dev);
	return error;
}
/*
 * us->extra destructor: undo everything onetouch_connect_input() set up.
 */
static void onetouch_release_input(void *onetouch_)
{
	struct usb_onetouch *onetouch = onetouch_;

	/* The input interface was never hooked up -- nothing to tear down. */
	if (!onetouch)
		return;

	/* Stop interrupt polling before the input device disappears. */
	usb_kill_urb(onetouch->irq);
	input_unregister_device(onetouch->dev);
	usb_free_urb(onetouch->irq);
	usb_free_coherent(onetouch->udev, ONETOUCH_PKT_LEN,
			  onetouch->data, onetouch->data_dma);
}
static struct scsi_host_template onetouch_host_template;

/*
 * Bind a matching interface.  The storage side uses the default transport
 * and protocol; the button input interface is attached via the unusual-dev
 * init function (onetouch_connect_input) during probe.
 */
static int onetouch_probe(struct usb_interface *intf,
			 const struct usb_device_id *id)
{
	struct us_data *us;
	int result;

	result = usb_stor_probe1(&us, intf, id,
			(id - onetouch_usb_ids) + onetouch_unusual_dev_list,
			&onetouch_host_template);
	if (result)
		return result;

	/* Use default transport and protocol */

	result = usb_stor_probe2(us);
	return result;
}
/* USB driver glue; everything except probe is the usb-storage core helpers. */
static struct usb_driver onetouch_driver = {
	.name =		DRV_NAME,
	.probe =	onetouch_probe,
	.disconnect =	usb_stor_disconnect,
	.suspend =	usb_stor_suspend,
	.resume =	usb_stor_resume,
	.reset_resume =	usb_stor_reset_resume,
	.pre_reset =	usb_stor_pre_reset,
	.post_reset =	usb_stor_post_reset,
	.id_table =	onetouch_usb_ids,
	.soft_unbind =	1,
	.no_dynamic_id = 1,
};

module_usb_stor_driver(onetouch_driver, onetouch_host_template, DRV_NAME);
| linux-master | drivers/usb/storage/onetouch.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* Driver for USB Mass Storage compliant devices
* Debugging Functions Source Code File
*
* Current development and maintenance by:
* (c) 1999-2002 Matthew Dharm ([email protected])
*
* Developed with the assistance of:
* (c) 2002 Alan Stern <[email protected]>
*
* Initial work by:
* (c) 1999 Michael Gee ([email protected])
*
* This driver is based on the 'USB Mass Storage Class' document. This
* describes in detail the protocol used to communicate with such
* devices. Clearly, the designers had SCSI and ATAPI commands in
* mind when they created this document. The commands are all very
* similar to commands in the SCSI-II and ATAPI specifications.
*
* It is important to note that in a number of cases this class
* exhibits class-specific exemptions from the USB specification.
* Notably the usage of NAK, STALL and ACK differs from the norm, in
* that they are used to communicate wait, failed and OK on commands.
*
* Also, for certain devices, the interrupt endpoint is used to convey
* status of a command.
*/
#include <linux/device.h>
#include <linux/cdrom.h>
#include <linux/export.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_dbg.h>
#include "usb.h"
#include "debug.h"
/*
 * Log a human-readable name for the SCSI command in @srb, followed by a
 * hex dump of (at most the first 16 bytes of) the CDB.  Debug aid only;
 * unrecognized opcodes are reported as "(unknown command)".
 */
void usb_stor_show_command(const struct us_data *us, struct scsi_cmnd *srb)
{
	char *what = NULL;

	/* map the CDB opcode to its SCSI/MMC mnemonic */
	switch (srb->cmnd[0]) {
	case TEST_UNIT_READY: what = "TEST_UNIT_READY"; break;
	case REZERO_UNIT: what = "REZERO_UNIT"; break;
	case REQUEST_SENSE: what = "REQUEST_SENSE"; break;
	case FORMAT_UNIT: what = "FORMAT_UNIT"; break;
	case READ_BLOCK_LIMITS: what = "READ_BLOCK_LIMITS"; break;
	case REASSIGN_BLOCKS: what = "REASSIGN_BLOCKS"; break;
	case READ_6: what = "READ_6"; break;
	case WRITE_6: what = "WRITE_6"; break;
	case SEEK_6: what = "SEEK_6"; break;
	case READ_REVERSE: what = "READ_REVERSE"; break;
	case WRITE_FILEMARKS: what = "WRITE_FILEMARKS"; break;
	case SPACE: what = "SPACE"; break;
	case INQUIRY: what = "INQUIRY"; break;
	case RECOVER_BUFFERED_DATA: what = "RECOVER_BUFFERED_DATA"; break;
	case MODE_SELECT: what = "MODE_SELECT"; break;
	case RESERVE: what = "RESERVE"; break;
	case RELEASE: what = "RELEASE"; break;
	case COPY: what = "COPY"; break;
	case ERASE: what = "ERASE"; break;
	case MODE_SENSE: what = "MODE_SENSE"; break;
	case START_STOP: what = "START_STOP"; break;
	case RECEIVE_DIAGNOSTIC: what = "RECEIVE_DIAGNOSTIC"; break;
	case SEND_DIAGNOSTIC: what = "SEND_DIAGNOSTIC"; break;
	case ALLOW_MEDIUM_REMOVAL: what = "ALLOW_MEDIUM_REMOVAL"; break;
	case SET_WINDOW: what = "SET_WINDOW"; break;
	case READ_CAPACITY: what = "READ_CAPACITY"; break;
	case READ_10: what = "READ_10"; break;
	case WRITE_10: what = "WRITE_10"; break;
	case SEEK_10: what = "SEEK_10"; break;
	case WRITE_VERIFY: what = "WRITE_VERIFY"; break;
	case VERIFY: what = "VERIFY"; break;
	case SEARCH_HIGH: what = "SEARCH_HIGH"; break;
	case SEARCH_EQUAL: what = "SEARCH_EQUAL"; break;
	case SEARCH_LOW: what = "SEARCH_LOW"; break;
	case SET_LIMITS: what = "SET_LIMITS"; break;
	case READ_POSITION: what = "READ_POSITION"; break;
	case SYNCHRONIZE_CACHE: what = "SYNCHRONIZE_CACHE"; break;
	case LOCK_UNLOCK_CACHE: what = "LOCK_UNLOCK_CACHE"; break;
	case READ_DEFECT_DATA: what = "READ_DEFECT_DATA"; break;
	case MEDIUM_SCAN: what = "MEDIUM_SCAN"; break;
	case COMPARE: what = "COMPARE"; break;
	case COPY_VERIFY: what = "COPY_VERIFY"; break;
	case WRITE_BUFFER: what = "WRITE_BUFFER"; break;
	case READ_BUFFER: what = "READ_BUFFER"; break;
	case UPDATE_BLOCK: what = "UPDATE_BLOCK"; break;
	case READ_LONG: what = "READ_LONG"; break;
	case WRITE_LONG: what = "WRITE_LONG"; break;
	case CHANGE_DEFINITION: what = "CHANGE_DEFINITION"; break;
	case WRITE_SAME: what = "WRITE_SAME"; break;
	case GPCMD_READ_SUBCHANNEL: what = "READ SUBCHANNEL"; break;
	case READ_TOC: what = "READ_TOC"; break;
	case GPCMD_READ_HEADER: what = "READ HEADER"; break;
	case GPCMD_PLAY_AUDIO_10: what = "PLAY AUDIO (10)"; break;
	case GPCMD_PLAY_AUDIO_MSF: what = "PLAY AUDIO MSF"; break;
	case GPCMD_GET_EVENT_STATUS_NOTIFICATION:
		what = "GET EVENT/STATUS NOTIFICATION"; break;
	case GPCMD_PAUSE_RESUME: what = "PAUSE/RESUME"; break;
	case LOG_SELECT: what = "LOG_SELECT"; break;
	case LOG_SENSE: what = "LOG_SENSE"; break;
	case GPCMD_STOP_PLAY_SCAN: what = "STOP PLAY/SCAN"; break;
	case GPCMD_READ_DISC_INFO: what = "READ DISC INFORMATION"; break;
	case GPCMD_READ_TRACK_RZONE_INFO:
		what = "READ TRACK INFORMATION"; break;
	case GPCMD_RESERVE_RZONE_TRACK: what = "RESERVE TRACK"; break;
	case GPCMD_SEND_OPC: what = "SEND OPC"; break;
	case MODE_SELECT_10: what = "MODE_SELECT_10"; break;
	case GPCMD_REPAIR_RZONE_TRACK: what = "REPAIR TRACK"; break;
	/* 0x59, 0x5C, 0x5D: MMC opcodes with no kernel-wide symbol */
	case 0x59: what = "READ MASTER CUE"; break;
	case MODE_SENSE_10: what = "MODE_SENSE_10"; break;
	case GPCMD_CLOSE_TRACK: what = "CLOSE TRACK/SESSION"; break;
	case 0x5C: what = "READ BUFFER CAPACITY"; break;
	case 0x5D: what = "SEND CUE SHEET"; break;
	case GPCMD_BLANK: what = "BLANK"; break;
	case REPORT_LUNS: what = "REPORT LUNS"; break;
	case MOVE_MEDIUM: what = "MOVE_MEDIUM or PLAY AUDIO (12)"; break;
	case READ_12: what = "READ_12"; break;
	case WRITE_12: what = "WRITE_12"; break;
	case WRITE_VERIFY_12: what = "WRITE_VERIFY_12"; break;
	case SEARCH_HIGH_12: what = "SEARCH_HIGH_12"; break;
	case SEARCH_EQUAL_12: what = "SEARCH_EQUAL_12"; break;
	case SEARCH_LOW_12: what = "SEARCH_LOW_12"; break;
	case SEND_VOLUME_TAG: what = "SEND_VOLUME_TAG"; break;
	case READ_ELEMENT_STATUS: what = "READ_ELEMENT_STATUS"; break;
	case GPCMD_READ_CD_MSF: what = "READ CD MSF"; break;
	case GPCMD_SCAN: what = "SCAN"; break;
	case GPCMD_SET_SPEED: what = "SET CD SPEED"; break;
	case GPCMD_MECHANISM_STATUS: what = "MECHANISM STATUS"; break;
	case GPCMD_READ_CD: what = "READ CD"; break;
	case 0xE1: what = "WRITE CONTINUE"; break;
	case WRITE_LONG_2: what = "WRITE_LONG_2"; break;
	default: what = "(unknown command)"; break;
	}
	usb_stor_dbg(us, "Command %s (%d bytes)\n", what, srb->cmd_len);
	/* %*ph prints a bounded hex dump; cap it at 16 bytes of CDB */
	usb_stor_dbg(us, "bytes: %*ph\n", min_t(int, srb->cmd_len, 16),
		     (const unsigned char *)srb->cmnd);
}
/*
 * Log a decoded SCSI sense triple (key / ASC / ASCQ) for debugging.
 * Unknown keys and code pairs fall back to placeholder strings; when
 * the mid-layer supplies a format string for the ASCQ, it is applied.
 */
void usb_stor_show_sense(const struct us_data *us,
			 unsigned char key,
			 unsigned char asc,
			 unsigned char ascq)
{
	const char *fmt;
	const char *keystr = scsi_sense_key_string(key);
	const char *what = scsi_extd_sense_format(asc, ascq, &fmt);

	if (!keystr)
		keystr = "(Unknown Key)";
	if (!what)
		what = "(unknown ASC/ASCQ)";

	if (fmt)
		usb_stor_dbg(us, "%s: %s (%s%x)\n", keystr, what, fmt, ascq);
	else
		usb_stor_dbg(us, "%s: %s\n", keystr, what);
}
/*
 * Emit a debug-level message attributed to the USB device behind @us.
 *
 * The varargs are forwarded to dev_vprintk_emit() at LOGLEVEL_DEBUG
 * against us->pusb_dev->dev, so the output carries the usual dev_*()
 * device-name prefix.
 */
void usb_stor_dbg(const struct us_data *us, const char *fmt, ...)
{
	va_list args;

	va_start(args, fmt);
	dev_vprintk_emit(LOGLEVEL_DEBUG, &us->pusb_dev->dev, fmt, args);
	va_end(args);
}
EXPORT_SYMBOL_GPL(usb_stor_dbg);
| linux-master | drivers/usb/storage/debug.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* Driver for USB Mass Storage devices
* Usual Tables File for usb-storage and libusual
*
* Copyright (C) 2009 Alan Stern ([email protected])
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/usb.h>
#include <linux/usb_usual.h>
/*
* The table of devices
*/
/*
 * Expand each UNUSUAL_DEV() entry from unusual_devs.h into a
 * struct usb_device_id matching on VID/PID plus a bcdDevice range;
 * only the flags survive into .driver_info.
 */
#define UNUSUAL_DEV(id_vendor, id_product, bcdDeviceMin, bcdDeviceMax, \
		    vendorName, productName, useProtocol, useTransport, \
		    initFunction, flags) \
{ USB_DEVICE_VER(id_vendor, id_product, bcdDeviceMin, bcdDeviceMax), \
  .driver_info = (flags) }

/* Compliant devices use exactly the same matching rule */
#define COMPLIANT_DEV	UNUSUAL_DEV

/* Generic class/subclass/protocol match for well-behaved devices */
#define USUAL_DEV(useProto, useTrans) \
{ USB_INTERFACE_INFO(USB_CLASS_MASS_STORAGE, useProto, useTrans) }

/* Define the device is matched with Vendor ID and interface descriptors */
#define UNUSUAL_VENDOR_INTF(id_vendor, cl, sc, pr, \
			vendorName, productName, useProtocol, useTransport, \
			initFunction, flags) \
{ \
	.match_flags = USB_DEVICE_ID_MATCH_INT_INFO \
		| USB_DEVICE_ID_MATCH_VENDOR, \
	.idVendor = (id_vendor), \
	.bInterfaceClass = (cl), \
	.bInterfaceSubClass = (sc), \
	.bInterfaceProtocol = (pr), \
	.driver_info = (flags) \
}

/* Device-match table for usb-storage, generated from unusual_devs.h */
const struct usb_device_id usb_storage_usb_ids[] = {
#	include "unusual_devs.h"
	{ }		/* Terminating entry */
};
MODULE_DEVICE_TABLE(usb, usb_storage_usb_ids);

/* the expansion macros are single-use; drop them again */
#undef UNUSUAL_DEV
#undef COMPLIANT_DEV
#undef USUAL_DEV
#undef UNUSUAL_VENDOR_INTF
/*
* The table of devices to ignore
*/
/* One device that usb-storage must not bind (a sub-driver owns it) */
struct ignore_entry {
	u16	vid, pid, bcdmin, bcdmax;
};

/*
 * Re-expand UNUSUAL_DEV() — this time keeping only the VID/PID and
 * bcdDevice range — to build the ignore list from the sub-driver
 * device headers included below.
 */
#define UNUSUAL_DEV(id_vendor, id_product, bcdDeviceMin, bcdDeviceMax, \
		    vendorName, productName, useProtocol, useTransport, \
		    initFunction, flags) \
{ \
	.vid	= id_vendor, \
	.pid 	= id_product, \
	.bcdmin	= bcdDeviceMin, \
	.bcdmax	= bcdDeviceMax, \
}

static const struct ignore_entry ignore_ids[] = {
#	include "unusual_alauda.h"
#	include "unusual_cypress.h"
#	include "unusual_datafab.h"
#	include "unusual_ene_ub6250.h"
#	include "unusual_freecom.h"
#	include "unusual_isd200.h"
#	include "unusual_jumpshot.h"
#	include "unusual_karma.h"
#	include "unusual_onetouch.h"
#	include "unusual_realtek.h"
#	include "unusual_sddr09.h"
#	include "unusual_sddr55.h"
#	include "unusual_usbat.h"
	{ }		/* Terminating entry */
};

#undef UNUSUAL_DEV
#undef UNUSUAL_DEV
/* Return an error if a device is in the ignore_ids list */
/*
 * Return -ENXIO if the device behind @intf appears in ignore_ids
 * (i.e. a dedicated sub-driver should handle it), 0 otherwise.
 * The table is terminated by an entry with a zero vendor ID.
 */
int usb_usual_ignore_device(struct usb_interface *intf)
{
	const struct ignore_entry *e;
	struct usb_device *udev = interface_to_usbdev(intf);
	unsigned int vendor = le16_to_cpu(udev->descriptor.idVendor);
	unsigned int product = le16_to_cpu(udev->descriptor.idProduct);
	unsigned int bcd = le16_to_cpu(udev->descriptor.bcdDevice);

	for (e = ignore_ids; e->vid; ++e) {
		if (e->vid != vendor || e->pid != product)
			continue;
		if (bcd >= e->bcdmin && bcd <= e->bcdmax)
			return -ENXIO;
	}
	return 0;
}
| linux-master | drivers/usb/storage/usual-tables.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* Driver for USB Mass Storage compliant devices
*
* Current development and maintenance by:
* (c) 1999-2002 Matthew Dharm ([email protected])
*
* Developed with the assistance of:
* (c) 2000 David L. Brown, Jr. ([email protected])
* (c) 2002 Alan Stern ([email protected])
*
* Initial work by:
* (c) 1999 Michael Gee ([email protected])
*
* This driver is based on the 'USB Mass Storage Class' document. This
* describes in detail the protocol used to communicate with such
* devices. Clearly, the designers had SCSI and ATAPI commands in
* mind when they created this document. The commands are all very
* similar to commands in the SCSI-II and ATAPI specifications.
*
* It is important to note that in a number of cases this class
* exhibits class-specific exemptions from the USB specification.
* Notably the usage of NAK, STALL and ACK differs from the norm, in
* that they are used to communicate wait, failed and OK on commands.
*
* Also, for certain devices, the interrupt endpoint is used to convey
* status of a command.
*/
#include <linux/highmem.h>
#include <linux/export.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include "usb.h"
#include "protocol.h"
#include "debug.h"
#include "scsiglue.h"
#include "transport.h"
/***********************************************************************
* Protocol routines
***********************************************************************/
/*
 * Zero-pad the CDB out to 12 bytes and hand it to the transport layer.
 * Commands that are already 12 bytes or longer pass through untouched.
 *
 * Safe because scsi_cmnd provides a 16-byte cmnd[] array, so writing
 * up to index 11 never overruns.
 */
void usb_stor_pad12_command(struct scsi_cmnd *srb, struct us_data *us)
{
	while (srb->cmd_len < 12)
		srb->cmnd[srb->cmd_len++] = 0;

	/* hand the (possibly padded) command to the transport layer */
	usb_stor_invoke_transport(srb, us);
}
/*
 * Translate a SCSI command for a UFI device, then invoke the transport.
 *
 * UFI devices only accept 12-byte commands, so the CDB is zero-padded
 * and the length forced to 12.  A few commands additionally need their
 * allocation length pinned to the only value UFI devices ever return.
 *
 * Relies on scsi_cmnd's 16-byte cmnd[] array for the padding space.
 */
void usb_stor_ufi_command(struct scsi_cmnd *srb, struct us_data *us)
{
	/* zero-fill up to the UFI command size ... */
	while (srb->cmd_len < 12)
		srb->cmnd[srb->cmd_len++] = 0;
	/* ... and make the transport layer see exactly 12 bytes */
	srb->cmd_len = 12;

	/* XXX We should be constantly re-evaluating the need for these */
	switch (srb->cmnd[0]) {
	case INQUIRY:
		/* UFI devices only ever return 36 bytes of INQUIRY data */
		srb->cmnd[4] = 36;
		break;
	case MODE_SENSE_10:
		/* likewise, MODE_SENSE_10 yields only the 8-byte minimum */
		srb->cmnd[7] = 0;
		srb->cmnd[8] = 8;
		break;
	case REQUEST_SENSE:
		/* UFI devices only ever return 18 bytes of sense data */
		srb->cmnd[4] = 18;
		break;
	}

	/* hand the translated command to the transport layer */
	usb_stor_invoke_transport(srb, us);
}
/*
 * Protocol handler for transparent-SCSI devices: the command needs no
 * translation, so it is passed to the transport layer unmodified.
 */
void usb_stor_transparent_scsi_command(struct scsi_cmnd *srb,
				       struct us_data *us)
{
	/* send the command to the transport layer */
	usb_stor_invoke_transport(srb, us);
}
EXPORT_SYMBOL_GPL(usb_stor_transparent_scsi_command);
/***********************************************************************
* Scatter-gather transfer buffer access routines
***********************************************************************/
/*
* Copy a buffer of length buflen to/from the srb's transfer buffer.
* Update the **sgptr and *offset variables so that the next copy will
* pick up from where this one left off.
*/
/*
 * Copy up to @buflen bytes between @buffer and the srb's scatter-gather
 * transfer buffer, in the direction given by @dir.  Returns the number
 * of bytes actually copied (may be short if the s/g list runs out).
 *
 * *sgptr/*offset form a resumable cursor: on entry they say where the
 * previous call stopped (*sgptr == NULL means "start of the srb's
 * s/g list"), and on exit they are updated so the next call picks up
 * exactly where this one left off.
 */
unsigned int usb_stor_access_xfer_buf(unsigned char *buffer,
	unsigned int buflen, struct scsi_cmnd *srb, struct scatterlist **sgptr,
	unsigned int *offset, enum xfer_buf_dir dir)
{
	unsigned int cnt = 0;
	struct scatterlist *sg = *sgptr;
	struct sg_mapping_iter miter;
	unsigned int nents = scsi_sg_count(srb);

	/* resuming mid-list: count only the remaining entries */
	if (sg)
		nents = sg_nents(sg);
	else
		sg = scsi_sglist(srb);

	sg_miter_start(&miter, sg, nents, dir == FROM_XFER_BUF ?
		SG_MITER_FROM_SG: SG_MITER_TO_SG);

	/* skip the part of the first entry consumed by earlier calls */
	if (!sg_miter_skip(&miter, *offset))
		return cnt;

	while (sg_miter_next(&miter) && cnt < buflen) {
		unsigned int len = min_t(unsigned int, miter.length,
				buflen - cnt);

		if (dir == FROM_XFER_BUF)
			memcpy(buffer + cnt, miter.addr, len);
		else
			memcpy(miter.addr, buffer + cnt, len);

		/* advance the resume cursor past the bytes just copied */
		if (*offset + len < miter.piter.sg->length) {
			*offset += len;
			*sgptr = miter.piter.sg;
		} else {
			*offset = 0;
			*sgptr = sg_next(miter.piter.sg);
		}
		cnt += len;
	}
	sg_miter_stop(&miter);

	return cnt;
}
EXPORT_SYMBOL_GPL(usb_stor_access_xfer_buf);
/*
* Store the contents of buffer into srb's transfer buffer and set the
* SCSI residue.
*/
/*
 * Copy @buffer into the srb's transfer buffer (truncating to the srb's
 * capacity) and record any shortfall as the SCSI residue.
 */
void usb_stor_set_xfer_buf(unsigned char *buffer,
	unsigned int buflen, struct scsi_cmnd *srb)
{
	struct scatterlist *sg = NULL;
	unsigned int offset = 0;
	unsigned int copied;

	/* never offer more than the srb can accept */
	copied = usb_stor_access_xfer_buf(buffer,
			min(buflen, scsi_bufflen(srb)), srb, &sg, &offset,
			TO_XFER_BUF);
	if (copied < scsi_bufflen(srb))
		scsi_set_resid(srb, scsi_bufflen(srb) - copied);
}
EXPORT_SYMBOL_GPL(usb_stor_set_xfer_buf);
| linux-master | drivers/usb/storage/protocol.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* Driver for Lexar "Jumpshot" Compact Flash reader
*
* jumpshot driver v0.1:
*
* First release
*
* Current development and maintenance by:
* (c) 2000 Jimmie Mayfield ([email protected])
*
* Many thanks to Robert Baruch for the SanDisk SmartMedia reader driver
* which I used as a template for this driver.
*
* Some bugfixes and scatter-gather code by Gregory P. Smith
* ([email protected])
*
* Fix for media change by Joerg Schneider ([email protected])
*
* Developed with the assistance of:
*
* (C) 2002 Alan Stern <[email protected]>
*/
/*
* This driver attempts to support the Lexar Jumpshot USB CompactFlash
* reader. Like many other USB CompactFlash readers, the Jumpshot contains
* a USB-to-ATA chip.
*
* This driver supports reading and writing. If you're truly paranoid,
* however, you can force the driver into a write-protected state by setting
* the WP enable bits in jumpshot_handle_mode_sense. See the comments
* in that routine.
*/
#include <linux/errno.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include "usb.h"
#include "transport.h"
#include "protocol.h"
#include "debug.h"
#include "scsiglue.h"
#define DRV_NAME "ums-jumpshot"
MODULE_DESCRIPTION("Driver for Lexar \"Jumpshot\" Compact Flash reader");
MODULE_AUTHOR("Jimmie Mayfield <[email protected]>");
MODULE_LICENSE("GPL");
MODULE_IMPORT_NS(USB_STORAGE);
/*
* The table of devices
*/
/*
 * First expansion of unusual_jumpshot.h: build the usb_device_id match
 * table (VID/PID + bcdDevice range; flags go into .driver_info).
 */
#define UNUSUAL_DEV(id_vendor, id_product, bcdDeviceMin, bcdDeviceMax, \
		    vendorName, productName, useProtocol, useTransport, \
		    initFunction, flags) \
{ USB_DEVICE_VER(id_vendor, id_product, bcdDeviceMin, bcdDeviceMax), \
  .driver_info = (flags) }

static struct usb_device_id jumpshot_usb_ids[] = {
#	include "unusual_jumpshot.h"
	{ }		/* Terminating entry */
};
MODULE_DEVICE_TABLE(usb, jumpshot_usb_ids);

#undef UNUSUAL_DEV

/*
 * Second expansion of the same header: a parallel table of per-device
 * name/protocol/transport/init data, indexed in lockstep with
 * jumpshot_usb_ids (see the probe routine's pointer arithmetic).
 */
#define UNUSUAL_DEV(idVendor, idProduct, bcdDeviceMin, bcdDeviceMax, \
		    vendor_name, product_name, use_protocol, use_transport, \
		    init_function, Flags) \
{ \
	.vendorName = vendor_name,	\
	.productName = product_name,	\
	.useProtocol = use_protocol,	\
	.useTransport = use_transport,	\
	.initFunction = init_function,	\
}

static struct us_unusual_dev jumpshot_unusual_dev_list[] = {
#	include "unusual_jumpshot.h"
	{ }		/* Terminating entry */
};

#undef UNUSUAL_DEV

/* Per-device state, hung off us->extra */
struct jumpshot_info {
	unsigned long	sectors;	/* total sector count */
	unsigned long	ssize;		/* sector size in bytes */

	/* the following aren't used yet */
	unsigned char	sense_key;
	unsigned long	sense_asc;	/* additional sense code */
	unsigned long	sense_ascq;	/* additional sense code qualifier */
};
/*
 * Read @len bytes from the device's bulk-in pipe into @data.
 * A zero-length request succeeds immediately without touching USB.
 */
static inline int jumpshot_bulk_read(struct us_data *us,
				     unsigned char *data,
				     unsigned int len)
{
	if (!len)
		return USB_STOR_XFER_GOOD;

	usb_stor_dbg(us, "len = %d\n", len);
	return usb_stor_bulk_transfer_buf(us, us->recv_bulk_pipe,
					  data, len, NULL);
}
/*
 * Write @len bytes from @data to the device's bulk-out pipe.
 * A zero-length request succeeds immediately without touching USB.
 */
static inline int jumpshot_bulk_write(struct us_data *us,
				      unsigned char *data,
				      unsigned int len)
{
	if (!len)
		return USB_STOR_XFER_GOOD;

	usb_stor_dbg(us, "len = %d\n", len);
	return usb_stor_bulk_transfer_buf(us, us->send_bulk_pipe,
					  data, len, NULL);
}
/*
 * Poll the reader's status over the control pipe.
 *
 * Returns USB_STOR_TRANSPORT_GOOD when the device reports 0x50
 * (presumably the ATA "ready" status value — matches the register-style
 * protocol used elsewhere in this driver), or an error otherwise.
 *
 * Fix: the failure path used "0x%2x", which space-pads single-digit
 * status bytes ("0x 5"); "0x%02x" zero-pads as clearly intended.
 */
static int jumpshot_get_status(struct us_data *us)
{
	int rc;

	if (!us)
		return USB_STOR_TRANSPORT_ERROR;

	// send the setup
	rc = usb_stor_ctrl_transfer(us, us->recv_ctrl_pipe,
				    0, 0xA0, 0, 7, us->iobuf, 1);
	if (rc != USB_STOR_XFER_GOOD)
		return USB_STOR_TRANSPORT_ERROR;

	if (us->iobuf[0] != 0x50) {
		usb_stor_dbg(us, "0x%02x\n", us->iobuf[0]);
		return USB_STOR_TRANSPORT_ERROR;
	}

	return USB_STOR_TRANSPORT_GOOD;
}
/*
 * Read @sectors sectors starting at LBA @sector from the CF card and
 * store them into the current srb's transfer buffer.
 *
 * Data is staged through a bounce buffer of at most 64 KB per round
 * trip; sg/sg_offset act as the resumable cursor into the srb's
 * scatter-gather list across iterations.
 *
 * Returns USB_STOR_TRANSPORT_GOOD or USB_STOR_TRANSPORT_ERROR.
 */
static int jumpshot_read_data(struct us_data *us,
			      struct jumpshot_info *info,
			      u32 sector,
			      u32 sectors)
{
	unsigned char *command = us->iobuf;
	unsigned char *buffer;
	unsigned char  thistime;
	unsigned int totallen, alloclen;
	int len, result;
	unsigned int sg_offset = 0;
	struct scatterlist *sg = NULL;

	// we're working in LBA mode.  according to the ATA spec,
	// we can support up to 28-bit addressing.  I don't know if Jumpshot
	// supports beyond 24-bit addressing.  It's kind of hard to test
	// since it requires > 8GB CF card.
	if (sector > 0x0FFFFFFF)
		return USB_STOR_TRANSPORT_ERROR;

	totallen = sectors * info->ssize;

	// Since we don't read more than 64 KB at a time, we have to create
	// a bounce buffer and move the data a piece at a time between the
	// bounce buffer and the actual transfer buffer.

	alloclen = min(totallen, 65536u);
	buffer = kmalloc(alloclen, GFP_NOIO);
	if (buffer == NULL)
		return USB_STOR_TRANSPORT_ERROR;

	do {
		// loop, never allocate or transfer more than 64k at once
		// (min(128k, 255*info->ssize) is the real limit)
		len = min(totallen, alloclen);
		thistime = (len / info->ssize) & 0xff;

		/* ATA-style register block: sector count, 28-bit LBA
		 * (0xE0 selects LBA mode), command byte 0x20 (read) */
		command[0] = 0;
		command[1] = thistime;
		command[2] = sector & 0xFF;
		command[3] = (sector >> 8) & 0xFF;
		command[4] = (sector >> 16) & 0xFF;
		command[5] = 0xE0 | ((sector >> 24) & 0x0F);
		command[6] = 0x20;

		// send the setup + command
		result = usb_stor_ctrl_transfer(us, us->send_ctrl_pipe,
					       0, 0x20, 0, 1, command, 7);
		if (result != USB_STOR_XFER_GOOD)
			goto leave;

		// read the result
		result = jumpshot_bulk_read(us, buffer, len);
		if (result != USB_STOR_XFER_GOOD)
			goto leave;

		usb_stor_dbg(us, "%d bytes\n", len);

		// Store the data in the transfer buffer
		usb_stor_access_xfer_buf(buffer, len, us->srb,
				&sg, &sg_offset, TO_XFER_BUF);

		sector += thistime;
		totallen -= len;
	} while (totallen > 0);

	kfree(buffer);
	return USB_STOR_TRANSPORT_GOOD;

 leave:
	kfree(buffer);
	return USB_STOR_TRANSPORT_ERROR;
}
/*
 * Write @sectors sectors starting at LBA @sector to the CF card,
 * pulling the data from the current srb's transfer buffer through a
 * bounce buffer of at most 64 KB per round trip.
 *
 * After each bulk write the drive's status is polled, since the bulk
 * transfer can complete before the drive finishes committing the data.
 *
 * Fix: the status-poll loop compared waitcount against 10 but never
 * incremented it, so the bound could never fire and a persistently
 * failing card would spin here forever.  waitcount is now bumped each
 * iteration, giving up after 10 attempts (~500 ms) as the original
 * comment and the "Waitcount = 10" message intended.
 *
 * Returns the last status result or USB_STOR_TRANSPORT_ERROR.
 */
static int jumpshot_write_data(struct us_data *us,
			       struct jumpshot_info *info,
			       u32 sector,
			       u32 sectors)
{
	unsigned char *command = us->iobuf;
	unsigned char *buffer;
	unsigned char  thistime;
	unsigned int totallen, alloclen;
	int len, result, waitcount;
	unsigned int sg_offset = 0;
	struct scatterlist *sg = NULL;

	// we're working in LBA mode.  according to the ATA spec,
	// we can support up to 28-bit addressing.  I don't know if Jumpshot
	// supports beyond 24-bit addressing.  It's kind of hard to test
	// since it requires > 8GB CF card.
	//
	if (sector > 0x0FFFFFFF)
		return USB_STOR_TRANSPORT_ERROR;

	totallen = sectors * info->ssize;

	// Since we don't write more than 64 KB at a time, we have to create
	// a bounce buffer and move the data a piece at a time between the
	// bounce buffer and the actual transfer buffer.

	alloclen = min(totallen, 65536u);
	buffer = kmalloc(alloclen, GFP_NOIO);
	if (buffer == NULL)
		return USB_STOR_TRANSPORT_ERROR;

	do {
		// loop, never allocate or transfer more than 64k at once
		// (min(128k, 255*info->ssize) is the real limit)
		len = min(totallen, alloclen);
		thistime = (len / info->ssize) & 0xff;

		// Get the data from the transfer buffer
		usb_stor_access_xfer_buf(buffer, len, us->srb,
				&sg, &sg_offset, FROM_XFER_BUF);

		/* ATA-style register block: sector count, 28-bit LBA
		 * (0xE0 selects LBA mode), command byte 0x30 (write) */
		command[0] = 0;
		command[1] = thistime;
		command[2] = sector & 0xFF;
		command[3] = (sector >> 8) & 0xFF;
		command[4] = (sector >> 16) & 0xFF;
		command[5] = 0xE0 | ((sector >> 24) & 0x0F);
		command[6] = 0x30;

		// send the setup + command
		result = usb_stor_ctrl_transfer(us, us->send_ctrl_pipe,
					       0, 0x20, 0, 1, command, 7);
		if (result != USB_STOR_XFER_GOOD)
			goto leave;

		// send the data
		result = jumpshot_bulk_write(us, buffer, len);
		if (result != USB_STOR_XFER_GOOD)
			goto leave;

		// read the result.  apparently the bulk write can complete
		// before the jumpshot drive is finished writing.  so we loop
		// here until we get a good return code
		waitcount = 0;
		do {
			result = jumpshot_get_status(us);
			if (result != USB_STOR_TRANSPORT_GOOD) {
				// I have not experimented to find the smallest value.
				//
				msleep(50);
			}
		} while ((result != USB_STOR_TRANSPORT_GOOD) &&
			 (++waitcount < 10));

		if (result != USB_STOR_TRANSPORT_GOOD)
			usb_stor_dbg(us, "Gah! Waitcount = 10. Bad write!?\n");

		sector += thistime;
		totallen -= len;
	} while (totallen > 0);

	kfree(buffer);
	return result;

 leave:
	kfree(buffer);
	return USB_STOR_TRANSPORT_ERROR;
}
/*
 * Issue an identify-style command (0xE0/0xEC over the control pipe,
 * reply read via bulk-in) and fill info->sectors from bytes 114-117
 * of the 512-byte reply.
 *
 * NOTE(review): the failure message below says "read_capacity" but this
 * routine performs device identification; the string predates the
 * current structure — consider updating it.
 */
static int jumpshot_id_device(struct us_data *us,
			      struct jumpshot_info *info)
{
	unsigned char *command = us->iobuf;
	unsigned char *reply;
	int 	 rc;

	if (!info)
		return USB_STOR_TRANSPORT_ERROR;

	command[0] = 0xE0;
	command[1] = 0xEC;
	reply = kmalloc(512, GFP_NOIO);
	if (!reply)
		return USB_STOR_TRANSPORT_ERROR;

	// send the setup
	rc = usb_stor_ctrl_transfer(us, us->send_ctrl_pipe,
				   0, 0x20, 0, 6, command, 2);
	if (rc != USB_STOR_XFER_GOOD) {
		usb_stor_dbg(us, "Gah! send_control for read_capacity failed\n");
		rc = USB_STOR_TRANSPORT_ERROR;
		goto leave;
	}

	// read the reply
	rc = jumpshot_bulk_read(us, reply, 512);
	if (rc != USB_STOR_XFER_GOOD) {
		rc = USB_STOR_TRANSPORT_ERROR;
		goto leave;
	}

	/* little-endian 32-bit sector count at reply[114..117] */
	info->sectors = ((u32)(reply[117]) << 24) |
			((u32)(reply[116]) << 16) |
			((u32)(reply[115]) <<  8) |
			((u32)(reply[114])      );

	rc = USB_STOR_TRANSPORT_GOOD;

 leave:
	kfree(reply);
	return rc;
}
/*
 * Synthesize a MODE SENSE (6 or 10, selected by @sense_6) reply from
 * canned mode pages, since the reader has no real mode-page support.
 *
 * Page 0x00 is rejected as invalid (sense 5/24/00); 0x3F returns all
 * pages concatenated.  The WP bit in the header is hard-wired to 0 —
 * set it to 0x80 to present the card as write-protected.
 */
static int jumpshot_handle_mode_sense(struct us_data *us,
				      struct scsi_cmnd * srb,
				      int sense_6)
{
	/* read/write error recovery page (0x01) */
	static unsigned char rw_err_page[12] = {
		0x1, 0xA, 0x21, 1, 0, 0, 0, 0, 1, 0, 0, 0
	};
	/* caching page (0x08) */
	static unsigned char cache_page[12] = {
		0x8, 0xA, 0x1, 0, 0, 0, 0, 0, 0, 0, 0, 0
	};
	/* removable block access capabilities page (0x1B) */
	static unsigned char rbac_page[12] = {
		0x1B, 0xA, 0, 0x81, 0, 0, 0, 0, 0, 0, 0, 0
	};
	/* timer & protect page (0x1C); declared [8], the two unlisted
	 * trailing bytes are implicitly zero */
	static unsigned char timer_page[8] = {
		0x1C, 0x6, 0, 0, 0, 0
	};
	unsigned char pc, page_code;
	unsigned int i = 0;
	struct jumpshot_info *info = (struct jumpshot_info *) (us->extra);
	unsigned char *ptr = us->iobuf;

	/* CDB byte 2: top two bits = page control, rest = page code */
	pc = srb->cmnd[2] >> 6;
	page_code = srb->cmnd[2] & 0x3F;

	switch (pc) {
	   case 0x0:
		usb_stor_dbg(us, "Current values\n");
		break;
	   case 0x1:
		usb_stor_dbg(us, "Changeable values\n");
		break;
	   case 0x2:
		usb_stor_dbg(us, "Default values\n");
		break;
	   case 0x3:
		usb_stor_dbg(us, "Saves values\n");
		break;
	}

	/* mode parameter header: 4 bytes for MODE SENSE(6), 8 for (10) */
	memset(ptr, 0, 8);
	if (sense_6) {
		ptr[2] = 0x00;		// WP enable: 0x80
		i = 4;
	} else {
		ptr[3] = 0x00;		// WP enable: 0x80
		i = 8;
	}

	switch (page_code) {
	   case 0x0:
		// vendor-specific mode
		info->sense_key = 0x05;
		info->sense_asc = 0x24;
		info->sense_ascq = 0x00;
		return USB_STOR_TRANSPORT_FAILED;

	   case 0x1:
		memcpy(ptr + i, rw_err_page, sizeof(rw_err_page));
		i += sizeof(rw_err_page);
		break;

	   case 0x8:
		memcpy(ptr + i, cache_page, sizeof(cache_page));
		i += sizeof(cache_page);
		break;

	   case 0x1B:
		memcpy(ptr + i, rbac_page, sizeof(rbac_page));
		i += sizeof(rbac_page);
		break;

	   case 0x1C:
		memcpy(ptr + i, timer_page, sizeof(timer_page));
		i += sizeof(timer_page);
		break;

	   case 0x3F:
		/* "all pages": concatenate every canned page */
		memcpy(ptr + i, timer_page, sizeof(timer_page));
		i += sizeof(timer_page);
		memcpy(ptr + i, rbac_page, sizeof(rbac_page));
		i += sizeof(rbac_page);
		memcpy(ptr + i, cache_page, sizeof(cache_page));
		i += sizeof(cache_page);
		memcpy(ptr + i, rw_err_page, sizeof(rw_err_page));
		i += sizeof(rw_err_page);
		break;
	}

	/* fill in the mode-data-length field of the header */
	if (sense_6)
		ptr[0] = i - 1;
	else
		((__be16 *) ptr)[0] = cpu_to_be16(i - 2);
	usb_stor_set_xfer_buf(ptr, i, srb);

	return USB_STOR_TRANSPORT_GOOD;
}
/*
 * us->extra destructor.  jumpshot_info holds no nested allocations
 * (the struct itself is freed by the usb-storage core), so there is
 * nothing to do here.
 */
static void jumpshot_info_destructor(void *extra)
{
	// this routine is a placeholder...
	// currently, we don't allocate any extra blocks so we're okay
}
// Transport for the Lexar 'Jumpshot'
//
/*
 * Transport entry point: translate the SCSI command in @srb into the
 * Jumpshot's control/bulk protocol.
 *
 * Recognized commands are handled individually; anything else fails
 * with ILLEGAL REQUEST / invalid-opcode sense data (5/20/00), which a
 * following REQUEST_SENSE will report.
 */
static int jumpshot_transport(struct scsi_cmnd *srb, struct us_data *us)
{
	struct jumpshot_info *info;
	int rc;
	unsigned long block, blocks;
	unsigned char *ptr = us->iobuf;
	/* first 8 bytes of a standard INQUIRY reply (removable disk);
	 * fill_inquiry_response() completes the remaining fields */
	static unsigned char inquiry_response[8] = {
		0x00, 0x80, 0x00, 0x01, 0x1F, 0x00, 0x00, 0x00
	};

	/* lazily allocate per-device state on the first command */
	if (!us->extra) {
		us->extra = kzalloc(sizeof(struct jumpshot_info), GFP_NOIO);
		if (!us->extra)
			return USB_STOR_TRANSPORT_ERROR;
		us->extra_destructor = jumpshot_info_destructor;
	}

	info = (struct jumpshot_info *) (us->extra);

	if (srb->cmnd[0] == INQUIRY) {
		usb_stor_dbg(us, "INQUIRY - Returning bogus response\n");
		memcpy(ptr, inquiry_response, sizeof(inquiry_response));
		fill_inquiry_response(us, ptr, 36);
		return USB_STOR_TRANSPORT_GOOD;
	}

	if (srb->cmnd[0] == READ_CAPACITY) {
		info->ssize = 0x200;  // hard coded 512 byte sectors as per ATA spec

		rc = jumpshot_get_status(us);
		if (rc != USB_STOR_TRANSPORT_GOOD)
			return rc;

		rc = jumpshot_id_device(us, info);
		if (rc != USB_STOR_TRANSPORT_GOOD)
			return rc;

		usb_stor_dbg(us, "READ_CAPACITY:  %ld sectors, %ld bytes per sector\n",
			     info->sectors, info->ssize);

		// build the reply
		// note: last LBA, then block size, both big-endian
		((__be32 *) ptr)[0] = cpu_to_be32(info->sectors - 1);
		((__be32 *) ptr)[1] = cpu_to_be32(info->ssize);
		usb_stor_set_xfer_buf(ptr, 8, srb);

		return USB_STOR_TRANSPORT_GOOD;
	}

	if (srb->cmnd[0] == MODE_SELECT_10) {
		usb_stor_dbg(us, "Gah! MODE_SELECT_10\n");
		return USB_STOR_TRANSPORT_ERROR;
	}

	if (srb->cmnd[0] == READ_10) {
		/* 4-byte big-endian LBA, 2-byte transfer length */
		block = ((u32)(srb->cmnd[2]) << 24) | ((u32)(srb->cmnd[3]) << 16) |
			((u32)(srb->cmnd[4]) <<  8) | ((u32)(srb->cmnd[5]));

		blocks = ((u32)(srb->cmnd[7]) << 8) | ((u32)(srb->cmnd[8]));

		usb_stor_dbg(us, "READ_10: read block 0x%04lx  count %ld\n",
			     block, blocks);
		return jumpshot_read_data(us, info, block, blocks);
	}

	if (srb->cmnd[0] == READ_12) {
		// I don't think we'll ever see a READ_12 but support it anyway...
		// (4-byte LBA, 4-byte transfer length)
		block = ((u32)(srb->cmnd[2]) << 24) | ((u32)(srb->cmnd[3]) << 16) |
			((u32)(srb->cmnd[4]) <<  8) | ((u32)(srb->cmnd[5]));

		blocks = ((u32)(srb->cmnd[6]) << 24) | ((u32)(srb->cmnd[7]) << 16) |
			 ((u32)(srb->cmnd[8]) <<  8) | ((u32)(srb->cmnd[9]));

		usb_stor_dbg(us, "READ_12: read block 0x%04lx  count %ld\n",
			     block, blocks);
		return jumpshot_read_data(us, info, block, blocks);
	}

	if (srb->cmnd[0] == WRITE_10) {
		block = ((u32)(srb->cmnd[2]) << 24) | ((u32)(srb->cmnd[3]) << 16) |
			((u32)(srb->cmnd[4]) <<  8) | ((u32)(srb->cmnd[5]));

		blocks = ((u32)(srb->cmnd[7]) << 8) | ((u32)(srb->cmnd[8]));

		usb_stor_dbg(us, "WRITE_10: write block 0x%04lx  count %ld\n",
			     block, blocks);
		return jumpshot_write_data(us, info, block, blocks);
	}

	if (srb->cmnd[0] == WRITE_12) {
		// I don't think we'll ever see a WRITE_12 but support it anyway...
		//
		block = ((u32)(srb->cmnd[2]) << 24) | ((u32)(srb->cmnd[3]) << 16) |
			((u32)(srb->cmnd[4]) <<  8) | ((u32)(srb->cmnd[5]));

		blocks = ((u32)(srb->cmnd[6]) << 24) | ((u32)(srb->cmnd[7]) << 16) |
			 ((u32)(srb->cmnd[8]) <<  8) | ((u32)(srb->cmnd[9]));

		usb_stor_dbg(us, "WRITE_12: write block 0x%04lx  count %ld\n",
			     block, blocks);
		return jumpshot_write_data(us, info, block, blocks);
	}

	if (srb->cmnd[0] == TEST_UNIT_READY) {
		usb_stor_dbg(us, "TEST_UNIT_READY\n");
		return jumpshot_get_status(us);
	}

	if (srb->cmnd[0] == REQUEST_SENSE) {
		usb_stor_dbg(us, "REQUEST_SENSE\n");

		/* fixed-format sense data from the stashed key/asc/ascq */
		memset(ptr, 0, 18);
		ptr[0] = 0xF0;
		ptr[2] = info->sense_key;
		ptr[7] = 11;
		ptr[12] = info->sense_asc;
		ptr[13] = info->sense_ascq;
		usb_stor_set_xfer_buf(ptr, 18, srb);

		return USB_STOR_TRANSPORT_GOOD;
	}

	if (srb->cmnd[0] == MODE_SENSE) {
		usb_stor_dbg(us, "MODE_SENSE_6 detected\n");
		return jumpshot_handle_mode_sense(us, srb, 1);
	}

	if (srb->cmnd[0] == MODE_SENSE_10) {
		usb_stor_dbg(us, "MODE_SENSE_10 detected\n");
		return jumpshot_handle_mode_sense(us, srb, 0);
	}

	if (srb->cmnd[0] == ALLOW_MEDIUM_REMOVAL) {
		/*
		 * sure.  whatever.  not like we can stop the user from popping
		 * the media out of the device (no locking doors, etc)
		 */
		return USB_STOR_TRANSPORT_GOOD;
	}

	if (srb->cmnd[0] == START_STOP) {
		/*
		 * this is used by sd.c'check_scsidisk_media_change to detect
		 * media change
		 */
		usb_stor_dbg(us, "START_STOP\n");
		/*
		 * the first jumpshot_id_device after a media change returns
		 * an error (determined experimentally)
		 */
		rc = jumpshot_id_device(us, info);
		if (rc == USB_STOR_TRANSPORT_GOOD) {
			info->sense_key = NO_SENSE;
			srb->result = SUCCESS;
		} else {
			info->sense_key = UNIT_ATTENTION;
			srb->result = SAM_STAT_CHECK_CONDITION;
		}
		return rc;
	}

	usb_stor_dbg(us, "Gah! Unknown command: %d (0x%x)\n",
		     srb->cmnd[0], srb->cmnd[0]);
	/* illegal request / invalid command operation code */
	info->sense_key = 0x05;
	info->sense_asc = 0x20;
	info->sense_ascq = 0x00;
	return USB_STOR_TRANSPORT_FAILED;
}
/* filled in by usb_stor_host_template_init() via module_usb_stor_driver */
static struct scsi_host_template jumpshot_host_template;

/*
 * Bind a matched interface: run the common usb-storage first-stage
 * probe, install the Jumpshot-specific transport, then finish with
 * the common second-stage probe.
 */
static int jumpshot_probe(struct usb_interface *intf,
			 const struct usb_device_id *id)
{
	struct us_data *us;
	int result;

	/* the id/dev-list tables are index-parallel, hence the pointer math */
	result = usb_stor_probe1(&us, intf, id,
			(id - jumpshot_usb_ids) + jumpshot_unusual_dev_list,
			&jumpshot_host_template);
	if (result)
		return result;

	us->transport_name  = "Lexar Jumpshot Control/Bulk";
	us->transport = jumpshot_transport;
	us->transport_reset = usb_stor_Bulk_reset;
	us->max_lun = 1;

	result = usb_stor_probe2(us);
	return result;
}
/* USB driver glue for the Jumpshot variant of usb-storage */
static struct usb_driver jumpshot_driver = {
	.name =		DRV_NAME,
	.probe =	jumpshot_probe,
	.disconnect =	usb_stor_disconnect,
	.suspend =	usb_stor_suspend,
	.resume =	usb_stor_resume,
	.reset_resume =	usb_stor_reset_resume,
	.pre_reset =	usb_stor_pre_reset,
	.post_reset =	usb_stor_post_reset,
	.id_table =	jumpshot_usb_ids,
	/* keep the interface bound across disconnect processing */
	.soft_unbind =	1,
	/* device list is fixed; don't accept dynamic IDs via sysfs */
	.no_dynamic_id = 1,
};

module_usb_stor_driver(jumpshot_driver, jumpshot_host_template, DRV_NAME);
| linux-master | drivers/usb/storage/jumpshot.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* Driver for SanDisk SDDR-55 SmartMedia reader
*
* SDDR55 driver v0.1:
*
* First release
*
* Current development and maintenance by:
* (c) 2002 Simon Munton
*/
#include <linux/jiffies.h>
#include <linux/errno.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include "usb.h"
#include "transport.h"
#include "protocol.h"
#include "debug.h"
#include "scsiglue.h"
#define DRV_NAME "ums-sddr55"
MODULE_DESCRIPTION("Driver for SanDisk SDDR-55 SmartMedia reader");
MODULE_AUTHOR("Simon Munton");
MODULE_LICENSE("GPL");
MODULE_IMPORT_NS(USB_STORAGE);
/*
* The table of devices
*/
/*
 * First expansion of unusual_sddr55.h: build the usb_device_id match
 * table (VID/PID + bcdDevice range; flags go into .driver_info).
 */
#define UNUSUAL_DEV(id_vendor, id_product, bcdDeviceMin, bcdDeviceMax, \
		    vendorName, productName, useProtocol, useTransport, \
		    initFunction, flags) \
{ USB_DEVICE_VER(id_vendor, id_product, bcdDeviceMin, bcdDeviceMax), \
  .driver_info = (flags) }

static struct usb_device_id sddr55_usb_ids[] = {
#	include "unusual_sddr55.h"
	{ }		/* Terminating entry */
};
MODULE_DEVICE_TABLE(usb, sddr55_usb_ids);

#undef UNUSUAL_DEV

/*
 * Second expansion of the same header: a table of per-device
 * name/protocol/transport/init data, index-parallel with
 * sddr55_usb_ids.
 */
#define UNUSUAL_DEV(idVendor, idProduct, bcdDeviceMin, bcdDeviceMax, \
		    vendor_name, product_name, use_protocol, use_transport, \
		    init_function, Flags) \
{ \
	.vendorName = vendor_name,	\
	.productName = product_name,	\
	.useProtocol = use_protocol,	\
	.useTransport = use_transport,	\
	.initFunction = init_function,	\
}

static struct us_unusual_dev sddr55_unusual_dev_list[] = {
#	include "unusual_sddr55.h"
	{ }		/* Terminating entry */
};

#undef UNUSUAL_DEV

/* assemble a u16 from two little-endian bytes, and split one again */
#define short_pack(lsb,msb) ( ((u16)(lsb)) | ( ((u16)(msb))<<8 ) )
#define LSB_of(s) ((s)&0xFF)
#define MSB_of(s) ((s)>>8)
#define PAGESIZE  512

/*
 * Record key/ASC/ASCQ in the card's sense buffer; expects a local
 * variable named `info` (struct sddr55_card_info *) in scope.
 */
#define set_sense_info(sk, asc, ascq)	\
    do {				\
	info->sense_data[2] = sk;	\
	info->sense_data[12] = asc;	\
	info->sense_data[13] = ascq;	\
	} while (0)
/* Per-card state, hung off us->extra */
struct sddr55_card_info {
	unsigned long	capacity;	/* Size of card in bytes */
	int		max_log_blks;	/* maximum number of logical blocks */
	int		pageshift;	/* log2 of pagesize */
	int		smallpageshift;	/* 1 if pagesize == 256 */
	int		blocksize;	/* Size of block in pages */
	int		blockshift;	/* log2 of blocksize */
	int		blockmask;	/* 2^blockshift - 1 */
	int		read_only;	/* non zero if card is write protected */
	int		force_read_only;	/* non zero if we find a map error*/
	int		*lba_to_pba;	/* logical to physical map */
	int		*pba_to_lba;	/* physical to logical map */
	int		fatal_error;	/* set if we detect something nasty */
	unsigned long 	last_access;	/* number of jiffies since we last talked to device */
	unsigned char   sense_data[18];	/* canned REQUEST SENSE reply */
};

/* sentinel values used in the lba/pba maps and on-media block markers */
#define NOT_ALLOCATED		0xffffffff
#define BAD_BLOCK		0xffff
#define CIS_BLOCK		0x400
#define UNUSED_BLOCK		0x3ff
/*
 * Move a buffer to or from the device over the bulk pipes.
 * A zero-length transfer succeeds trivially; a real transfer also
 * refreshes last_access, which sddr55_transport() uses to decide
 * when the card status must be re-polled.
 */
static int
sddr55_bulk_transport(struct us_data *us, int direction,
		      unsigned char *data, unsigned int len) {
	struct sddr55_card_info *info = (struct sddr55_card_info *)us->extra;
	unsigned int pipe;

	if (len == 0)
		return USB_STOR_XFER_GOOD;

	if (direction == DMA_FROM_DEVICE)
		pipe = us->recv_bulk_pipe;
	else
		pipe = us->send_bulk_pipe;

	/* remember when we last talked to the device */
	info->last_access = jiffies;

	return usb_stor_bulk_transfer_buf(us, pipe, data, len, NULL);
}
/*
 * check if card inserted, if there is, update read_only status
 * return non zero if no card
 */
static int sddr55_status(struct us_data *us)
{
	int result;
	unsigned char *command = us->iobuf;
	unsigned char *status = us->iobuf;	/* same 8-byte buffer reused for the reply */
	struct sddr55_card_info *info = (struct sddr55_card_info *)us->extra;

	/* send command */
	memset(command, 0, 8);
	command[5] = 0xB0;
	command[7] = 0x80;	/* vendor opcode: get status */
	result = sddr55_bulk_transport(us,
		DMA_TO_DEVICE, command, 8);

	usb_stor_dbg(us, "Result for send_command in status %d\n", result);

	if (result != USB_STOR_XFER_GOOD) {
		set_sense_info (4, 0, 0);	/* hardware error */
		return USB_STOR_TRANSPORT_ERROR;
	}

	result = sddr55_bulk_transport(us,
		DMA_FROM_DEVICE, status, 4);

	/* expect to get short transfer if no card fitted */
	if (result == USB_STOR_XFER_SHORT || result == USB_STOR_XFER_STALLED) {
		/* had a short transfer, no card inserted, free map memory */
		kfree(info->lba_to_pba);
		kfree(info->pba_to_lba);
		info->lba_to_pba = NULL;
		info->pba_to_lba = NULL;

		info->fatal_error = 0;
		info->force_read_only = 0;

		set_sense_info (2, 0x3a, 0);	/* not ready, medium not present */
		return USB_STOR_TRANSPORT_FAILED;
	}

	if (result != USB_STOR_XFER_GOOD) {
		set_sense_info (4, 0, 0);	/* hardware error */
		return USB_STOR_TRANSPORT_FAILED;
	}

	/* check write protect status */
	info->read_only = (status[0] & 0x20);

	/* now read status -- drain the trailing 2-byte status reply */
	result = sddr55_bulk_transport(us,
		DMA_FROM_DEVICE, status, 2);

	if (result != USB_STOR_XFER_GOOD) {
		set_sense_info (4, 0, 0);	/* hardware error */
	}

	return (result == USB_STOR_XFER_GOOD ?
		USB_STOR_TRANSPORT_GOOD : USB_STOR_TRANSPORT_FAILED);
}
/*
 * Read 'sectors' sectors into the srb transfer buffer, starting at
 * logical block 'lba', page 'page' within that block.  Works one
 * block at a time through a bounce buffer; logical blocks with no
 * physical block assigned read back as zeroes.
 * Returns a USB_STOR_TRANSPORT_* code.
 */
static int sddr55_read_data(struct us_data *us,
		unsigned int lba,
		unsigned int page,
		unsigned short sectors) {

	int result = USB_STOR_TRANSPORT_GOOD;
	unsigned char *command = us->iobuf;
	unsigned char *status = us->iobuf;	/* buffer reused for the 2-byte status */
	struct sddr55_card_info *info = (struct sddr55_card_info *)us->extra;
	unsigned char *buffer;

	unsigned int pba;
	unsigned long address;

	unsigned short pages;
	unsigned int len, offset;
	struct scatterlist *sg;

	// Since we only read in one block at a time, we have to create
	// a bounce buffer and move the data a piece at a time between the
	// bounce buffer and the actual transfer buffer.

	len = min((unsigned int) sectors, (unsigned int) info->blocksize >>
			info->smallpageshift) * PAGESIZE;
	buffer = kmalloc(len, GFP_NOIO);
	if (buffer == NULL)
		return USB_STOR_TRANSPORT_ERROR; /* out of memory */
	offset = 0;
	sg = NULL;

	while (sectors>0) {

		/* have we got to end? */
		if (lba >= info->max_log_blks)
			break;

		pba = info->lba_to_pba[lba];

		// Read as many sectors as possible in this block

		pages = min((unsigned int) sectors << info->smallpageshift,
				info->blocksize - page);
		len = pages << info->pageshift;

		usb_stor_dbg(us, "Read %02X pages, from PBA %04X (LBA %04X) page %02X\n",
			     pages, pba, lba, page);

		if (pba == NOT_ALLOCATED) {
			/* no pba for this lba, fill with zeroes */
			memset (buffer, 0, len);
		} else {

			address = (pba << info->blockshift) + page;

			/* vendor READ command: 24-bit page address in
			 * bytes 1-3, page count in byte 6, opcode 0x85 */
			command[0] = 0;
			command[1] = LSB_of(address>>16);
			command[2] = LSB_of(address>>8);
			command[3] = LSB_of(address);

			command[4] = 0;
			command[5] = 0xB0;
			command[6] = LSB_of(pages << (1 - info->smallpageshift));
			command[7] = 0x85;

			/* send command */
			result = sddr55_bulk_transport(us,
				DMA_TO_DEVICE, command, 8);

			usb_stor_dbg(us, "Result for send_command in read_data %d\n",
				     result);

			if (result != USB_STOR_XFER_GOOD) {
				result = USB_STOR_TRANSPORT_ERROR;
				goto leave;
			}

			/* read data */
			result = sddr55_bulk_transport(us,
				DMA_FROM_DEVICE, buffer, len);

			if (result != USB_STOR_XFER_GOOD) {
				result = USB_STOR_TRANSPORT_ERROR;
				goto leave;
			}

			/* now read status */
			result = sddr55_bulk_transport(us,
				DMA_FROM_DEVICE, status, 2);

			if (result != USB_STOR_XFER_GOOD) {
				result = USB_STOR_TRANSPORT_ERROR;
				goto leave;
			}

			/* check status for error */
			if (status[0] == 0xff && status[1] == 0x4) {
				set_sense_info (3, 0x11, 0);	/* medium error, unrecovered read */
				result = USB_STOR_TRANSPORT_FAILED;
				goto leave;
			}
		}

		// Store the data in the transfer buffer
		usb_stor_access_xfer_buf(buffer, len, us->srb,
				&sg, &offset, TO_XFER_BUF);

		page = 0;
		lba++;
		sectors -= pages >> info->smallpageshift;
	}

	result = USB_STOR_TRANSPORT_GOOD;

leave:
	kfree(buffer);

	return result;
}
/*
 * Write 'sectors' sectors from the srb transfer buffer to logical
 * block 'lba' starting at page 'page'.  The device relocates each
 * written block to a fresh physical block and reports the new PBA in
 * the 6-byte status reply, so the lba<->pba maps are updated after
 * every block.  Returns a USB_STOR_TRANSPORT_* code.
 */
static int sddr55_write_data(struct us_data *us,
		unsigned int lba,
		unsigned int page,
		unsigned short sectors) {

	int result = USB_STOR_TRANSPORT_GOOD;
	unsigned char *command = us->iobuf;
	unsigned char *status = us->iobuf;	/* buffer reused for the status reply */
	struct sddr55_card_info *info = (struct sddr55_card_info *)us->extra;
	unsigned char *buffer;

	unsigned int pba;
	unsigned int new_pba;
	unsigned long address;

	unsigned short pages;
	int i;
	unsigned int len, offset;
	struct scatterlist *sg;

	/* check if we are allowed to write */
	if (info->read_only || info->force_read_only) {
		set_sense_info (7, 0x27, 0);	/* read only */
		return USB_STOR_TRANSPORT_FAILED;
	}

	// Since we only write one block at a time, we have to create
	// a bounce buffer and move the data a piece at a time between the
	// bounce buffer and the actual transfer buffer.

	len = min((unsigned int) sectors, (unsigned int) info->blocksize >>
			info->smallpageshift) * PAGESIZE;
	buffer = kmalloc(len, GFP_NOIO);
	if (buffer == NULL)
		return USB_STOR_TRANSPORT_ERROR;
	offset = 0;
	sg = NULL;

	while (sectors > 0) {

		/* have we got to end? */
		if (lba >= info->max_log_blks)
			break;

		pba = info->lba_to_pba[lba];

		// Write as many sectors as possible in this block

		pages = min((unsigned int) sectors << info->smallpageshift,
				info->blocksize - page);
		len = pages << info->pageshift;

		// Get the data from the transfer buffer
		usb_stor_access_xfer_buf(buffer, len, us->srb,
				&sg, &offset, FROM_XFER_BUF);

		usb_stor_dbg(us, "Write %02X pages, to PBA %04X (LBA %04X) page %02X\n",
			     pages, pba, lba, page);

		command[4] = 0;

		if (pba == NOT_ALLOCATED) {
			/* no pba allocated for this lba, find a free pba to use */

			int max_pba = (info->max_log_blks / 250 ) * 256;
			int found_count = 0;
			int found_pba = -1;

			/* set pba to first block in zone lba is in */
			pba = (lba / 1000) * 1024;

			usb_stor_dbg(us, "No PBA for LBA %04X\n", lba);

			if (max_pba > 1024)
				max_pba = 1024;

			/*
			 * Scan through the map looking for an unused block
			 * leave 16 unused blocks at start (or as many as
			 * possible) since the sddr55 seems to reuse a used
			 * block when it shouldn't if we don't leave space.
			 */
			for (i = 0; i < max_pba; i++, pba++) {
				if (info->pba_to_lba[pba] == UNUSED_BLOCK) {
					found_pba = pba;
					if (found_count++ > 16)
						break;
				}
			}

			pba = found_pba;

			if (pba == -1) {
				/* oh dear */
				usb_stor_dbg(us, "Couldn't find unallocated block\n");

				set_sense_info (3, 0x31, 0);	/* medium error */
				result = USB_STOR_TRANSPORT_FAILED;
				goto leave;
			}

			usb_stor_dbg(us, "Allocating PBA %04X for LBA %04X\n",
				     pba, lba);

			/* set writing to unallocated block flag */
			command[4] = 0x40;
		}

		address = (pba << info->blockshift) + page;

		/* vendor WRITE command: 24-bit page address in bytes 1-3,
		 * lba (mod 1000) in bytes 0/6, page count in byte 4,
		 * opcode 0x86 */
		command[1] = LSB_of(address>>16);
		command[2] = LSB_of(address>>8);
		command[3] = LSB_of(address);

		/* set the lba into the command, modulo 1000 */
		command[0] = LSB_of(lba % 1000);
		command[6] = MSB_of(lba % 1000);

		command[4] |= LSB_of(pages >> info->smallpageshift);
		command[5] = 0xB0;
		command[7] = 0x86;

		/* send command */
		result = sddr55_bulk_transport(us,
			DMA_TO_DEVICE, command, 8);

		if (result != USB_STOR_XFER_GOOD) {
			usb_stor_dbg(us, "Result for send_command in write_data %d\n",
				     result);

			/* set_sense_info is superfluous here? */
			set_sense_info (3, 0x3, 0);/* peripheral write error */
			result = USB_STOR_TRANSPORT_FAILED;
			goto leave;
		}

		/* send the data */
		result = sddr55_bulk_transport(us,
			DMA_TO_DEVICE, buffer, len);

		if (result != USB_STOR_XFER_GOOD) {
			usb_stor_dbg(us, "Result for send_data in write_data %d\n",
				     result);

			/* set_sense_info is superfluous here? */
			set_sense_info (3, 0x3, 0);/* peripheral write error */
			result = USB_STOR_TRANSPORT_FAILED;
			goto leave;
		}

		/* now read status */
		result = sddr55_bulk_transport(us, DMA_FROM_DEVICE, status, 6);

		if (result != USB_STOR_XFER_GOOD) {
			usb_stor_dbg(us, "Result for get_status in write_data %d\n",
				     result);

			/* set_sense_info is superfluous here? */
			set_sense_info (3, 0x3, 0);/* peripheral write error */
			result = USB_STOR_TRANSPORT_FAILED;
			goto leave;
		}

		/* device reports (little-endian, bytes 3-5) where it actually
		 * put the block */
		new_pba = (status[3] + (status[4] << 8) + (status[5] << 16))
						  >> info->blockshift;

		/* check status for error */
		if (status[0] == 0xff && status[1] == 0x4) {
			info->pba_to_lba[new_pba] = BAD_BLOCK;

			set_sense_info (3, 0x0c, 0);	/* write error */
			result = USB_STOR_TRANSPORT_FAILED;
			goto leave;
		}

		usb_stor_dbg(us, "Updating maps for LBA %04X: old PBA %04X, new PBA %04X\n",
			     lba, pba, new_pba);

		/* update the lba<->pba maps, note new_pba might be the same as pba */
		info->lba_to_pba[lba] = new_pba;
		info->pba_to_lba[pba] = UNUSED_BLOCK;

		/* check that new_pba wasn't already being used */
		if (info->pba_to_lba[new_pba] != UNUSED_BLOCK) {
			printk(KERN_ERR "sddr55 error: new PBA %04X already in use for LBA %04X\n",
			       new_pba, info->pba_to_lba[new_pba]);
			info->fatal_error = 1;
			set_sense_info (3, 0x31, 0);
			result = USB_STOR_TRANSPORT_FAILED;
			goto leave;
		}

		/* update the pba<->lba maps for new_pba */
		info->pba_to_lba[new_pba] = lba % 1000;

		page = 0;
		lba++;
		sectors -= pages >> info->smallpageshift;
	}

	result = USB_STOR_TRANSPORT_GOOD;

leave:
	kfree(buffer);
	return result;
}
/*
 * Query the card's manufacturer and device ID bytes.
 * Returns a USB_STOR_TRANSPORT_* code; *manufacturerID and *deviceID
 * are only valid on USB_STOR_TRANSPORT_GOOD.
 */
static int sddr55_read_deviceID(struct us_data *us,
		unsigned char *manufacturerID,
		unsigned char *deviceID) {

	int result;
	unsigned char *command = us->iobuf;
	unsigned char *content = us->iobuf;	/* buffer reused for the reply */

	memset(command, 0, 8);
	command[5] = 0xB0;
	command[7] = 0x84;	/* vendor opcode: read device ID */
	result = sddr55_bulk_transport(us, DMA_TO_DEVICE, command, 8);

	usb_stor_dbg(us, "Result of send_control for device ID is %d\n",
		     result);

	if (result != USB_STOR_XFER_GOOD)
		return USB_STOR_TRANSPORT_ERROR;

	result = sddr55_bulk_transport(us,
		DMA_FROM_DEVICE, content, 4);

	if (result != USB_STOR_XFER_GOOD)
		return USB_STOR_TRANSPORT_ERROR;

	*manufacturerID = content[0];
	*deviceID = content[1];

	if (content[0] != 0xff)	{
		/*
		 * Drain the trailing 2-byte status; its result is
		 * deliberately ignored.  NOTE(review): presumably the
		 * device sends this only when a card answered (ID !=
		 * 0xff) -- confirm against the protocol.
		 */
		result = sddr55_bulk_transport(us,
			DMA_FROM_DEVICE, content, 2);
	}

	return USB_STOR_TRANSPORT_GOOD;
}
/*
 * Transport-reset callback.  The SDDR-55 needs no device-specific
 * reset handling, so this always reports success.
 */
static int sddr55_reset(struct us_data *us)
{
	return 0;	/* nothing to do */
}
static unsigned long sddr55_get_capacity(struct us_data *us) {
unsigned char manufacturerID;
unsigned char deviceID;
int result;
struct sddr55_card_info *info = (struct sddr55_card_info *)us->extra;
usb_stor_dbg(us, "Reading capacity...\n");
result = sddr55_read_deviceID(us,
&manufacturerID,
&deviceID);
usb_stor_dbg(us, "Result of read_deviceID is %d\n", result);
if (result != USB_STOR_XFER_GOOD)
return 0;
usb_stor_dbg(us, "Device ID = %02X\n", deviceID);
usb_stor_dbg(us, "Manuf ID = %02X\n", manufacturerID);
info->pageshift = 9;
info->smallpageshift = 0;
info->blocksize = 16;
info->blockshift = 4;
info->blockmask = 15;
switch (deviceID) {
case 0x6e: // 1MB
case 0xe8:
case 0xec:
info->pageshift = 8;
info->smallpageshift = 1;
return 0x00100000;
case 0xea: // 2MB
case 0x64:
info->pageshift = 8;
info->smallpageshift = 1;
fallthrough;
case 0x5d: // 5d is a ROM card with pagesize 512.
return 0x00200000;
case 0xe3: // 4MB
case 0xe5:
case 0x6b:
case 0xd5:
return 0x00400000;
case 0xe6: // 8MB
case 0xd6:
return 0x00800000;
case 0x73: // 16MB
info->blocksize = 32;
info->blockshift = 5;
info->blockmask = 31;
return 0x01000000;
case 0x75: // 32MB
info->blocksize = 32;
info->blockshift = 5;
info->blockmask = 31;
return 0x02000000;
case 0x76: // 64MB
info->blocksize = 32;
info->blockshift = 5;
info->blockmask = 31;
return 0x04000000;
case 0x79: // 128MB
info->blocksize = 32;
info->blockshift = 5;
info->blockmask = 31;
return 0x08000000;
default: // unknown
return 0;
}
}
/*
 * Read the media's block map from the device and rebuild the
 * in-memory lba_to_pba / pba_to_lba translation tables.
 * Requires info->capacity to be set (by sddr55_get_capacity).
 * Returns 0 on success, -1 on any failure.
 */
static int sddr55_read_map(struct us_data *us) {

	struct sddr55_card_info *info = (struct sddr55_card_info *)(us->extra);
	int numblocks;
	unsigned char *buffer;
	unsigned char *command = us->iobuf;
	int i;
	unsigned short lba;
	unsigned short max_lba;
	int result;

	if (!info->capacity)
		return -1;

	/* device sends 2 map bytes per physical block */
	numblocks = info->capacity >> (info->blockshift + info->pageshift);

	buffer = kmalloc_array(numblocks, 2, GFP_NOIO );

	if (!buffer)
		return -1;

	memset(command, 0, 8);
	command[5] = 0xB0;
	command[6] = numblocks * 2 / 256;
	command[7] = 0x8A;	/* vendor opcode: read block map */

	result = sddr55_bulk_transport(us, DMA_TO_DEVICE, command, 8);

	if ( result != USB_STOR_XFER_GOOD) {
		kfree (buffer);
		return -1;
	}

	result = sddr55_bulk_transport(us, DMA_FROM_DEVICE, buffer, numblocks * 2);

	if ( result != USB_STOR_XFER_GOOD) {
		kfree (buffer);
		return -1;
	}

	/* drain the trailing 2-byte status reply */
	result = sddr55_bulk_transport(us, DMA_FROM_DEVICE, command, 2);

	if ( result != USB_STOR_XFER_GOOD) {
		kfree (buffer);
		return -1;
	}

	kfree(info->lba_to_pba);
	kfree(info->pba_to_lba);
	info->lba_to_pba = kmalloc_array(numblocks, sizeof(int), GFP_NOIO);
	info->pba_to_lba = kmalloc_array(numblocks, sizeof(int), GFP_NOIO);

	if (info->lba_to_pba == NULL || info->pba_to_lba == NULL) {
		kfree(info->lba_to_pba);
		kfree(info->pba_to_lba);
		info->lba_to_pba = NULL;
		info->pba_to_lba = NULL;
		kfree(buffer);
		return -1;
	}

	/* 0xff fill makes every lba_to_pba entry NOT_ALLOCATED */
	memset(info->lba_to_pba, 0xff, numblocks*sizeof(int));
	memset(info->pba_to_lba, 0xff, numblocks*sizeof(int));

	/* set maximum lba */
	max_lba = info->max_log_blks;
	if (max_lba > 1000)
		max_lba = 1000;

	/*
	 * Each block is 64 bytes of control data, so block i is located in
	 * scatterlist block i*64/128k = i*(2^6)*(2^-17) = i*(2^-11)
	 */

	for (i=0; i<numblocks; i++) {
		int zone = i / 1024;

		lba = short_pack(buffer[i * 2], buffer[i * 2 + 1]);

		/*
		 * Every 1024 physical blocks ("zone"), the LBA numbers
		 * go back to zero, but are within a higher
		 * block of LBA's. Also, there is a maximum of
		 * 1000 LBA's per zone. In other words, in PBA
		 * 1024-2047 you will find LBA 0-999 which are
		 * really LBA 1000-1999. Yes, this wastes 24
		 * physical blocks per zone. Go figure.
		 * These devices can have blocks go bad, so there
		 * are 24 spare blocks to use when blocks do go bad.
		 */

		/*
		 * SDDR55 returns 0xffff for a bad block, and 0x400 for the
		 * CIS block. (Is this true for cards 8MB or less??)
		 * Record these in the physical to logical map
		 */

		info->pba_to_lba[i] = lba;

		/* BAD_BLOCK/CIS_BLOCK values also fall out here */
		if (lba >= max_lba) {
			continue;
		}

		/* two physical blocks claiming the same LBA means the
		 * map is corrupt -- force read-only operation */
		if (info->lba_to_pba[lba + zone * 1000] != NOT_ALLOCATED &&
		    !info->force_read_only) {
			printk(KERN_WARNING
			       "sddr55: map inconsistency at LBA %04X\n",
			       lba + zone * 1000);
			info->force_read_only = 1;
		}

		if (lba<0x10 || (lba>=0x3E0 && lba<0x3EF))
			usb_stor_dbg(us, "LBA %04X <-> PBA %04X\n", lba, i);

		info->lba_to_pba[lba + zone * 1000] = i;
	}

	kfree(buffer);
	return 0;
}
static void sddr55_card_info_destructor(void *extra) {
struct sddr55_card_info *info = (struct sddr55_card_info *)extra;
if (!extra)
return;
kfree(info->lba_to_pba);
kfree(info->pba_to_lba);
}
/*
 * Transport for the Sandisk SDDR-55
 *
 * SCSI command dispatcher: REQUEST SENSE, INQUIRY, READ CAPACITY and
 * MODE SENSE are emulated locally; READ(10)/WRITE(10) are translated
 * into the vendor block commands.  Returns USB_STOR_TRANSPORT_* codes.
 */
static int sddr55_transport(struct scsi_cmnd *srb, struct us_data *us)
{
	int result;
	static unsigned char inquiry_response[8] = {
		0x00, 0x80, 0x00, 0x02, 0x1F, 0x00, 0x00, 0x00
	};
	// write-protected for now, no block descriptor support
	static unsigned char mode_page_01[20] = {
		0x0, 0x12, 0x00, 0x80, 0x0, 0x0, 0x0, 0x0,
		0x01, 0x0A,
		0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
	};

	unsigned char *ptr = us->iobuf;
	unsigned long capacity;
	unsigned int lba;
	unsigned int pba;
	unsigned int page;
	unsigned short pages;
	struct sddr55_card_info *info;

	/* lazily allocate the per-card state on first command */
	if (!us->extra) {
		us->extra = kzalloc(
			sizeof(struct sddr55_card_info), GFP_NOIO);
		if (!us->extra)
			return USB_STOR_TRANSPORT_ERROR;
		us->extra_destructor = sddr55_card_info_destructor;
	}

	info = (struct sddr55_card_info *)(us->extra);

	if (srb->cmnd[0] == REQUEST_SENSE) {
		usb_stor_dbg(us, "request sense %02x/%02x/%02x\n",
			     info->sense_data[2],
			     info->sense_data[12],
			     info->sense_data[13]);

		/* hand back the cached sense data, then clear it */
		memcpy (ptr, info->sense_data, sizeof info->sense_data);
		ptr[0] = 0x70;	/* current error, fixed format */
		ptr[7] = 11;	/* additional sense length */
		usb_stor_set_xfer_buf (ptr, sizeof info->sense_data, srb);
		memset (info->sense_data, 0, sizeof info->sense_data);

		return USB_STOR_TRANSPORT_GOOD;
	}

	memset (info->sense_data, 0, sizeof info->sense_data);

	/*
	 * Dummy up a response for INQUIRY since SDDR55 doesn't
	 * respond to INQUIRY commands
	 */

	if (srb->cmnd[0] == INQUIRY) {
		memcpy(ptr, inquiry_response, 8);
		fill_inquiry_response(us, ptr, 36);
		return USB_STOR_TRANSPORT_GOOD;
	}

	/*
	 * only check card status if the map isn't allocated, ie no card seen yet
	 * or if it's been over half a second since we last accessed it
	 */
	if (info->lba_to_pba == NULL || time_after(jiffies, info->last_access + HZ/2)) {

		/* check to see if a card is fitted */
		result = sddr55_status (us);
		if (result) {
			/* retry once: failure followed by success means
			 * the medium has just been changed */
			result = sddr55_status (us);
			if (!result) {
				set_sense_info (6, 0x28, 0);	/* new media, set unit attention, not ready to ready */
			}
			return USB_STOR_TRANSPORT_FAILED;
		}
	}

	/*
	 * if we detected a problem with the map when writing,
	 * don't allow any more access
	 */
	if (info->fatal_error) {
		set_sense_info (3, 0x31, 0);
		return USB_STOR_TRANSPORT_FAILED;
	}

	if (srb->cmnd[0] == READ_CAPACITY) {

		capacity = sddr55_get_capacity(us);

		if (!capacity) {
			set_sense_info (3, 0x30, 0); /* incompatible medium */
			return USB_STOR_TRANSPORT_FAILED;
		}

		info->capacity = capacity;

		/*
		 * figure out the maximum logical block number, allowing for
		 * the fact that only 250 out of every 256 are used
		 */
		info->max_log_blks = ((info->capacity >> (info->pageshift + info->blockshift)) / 256) * 250;

		/*
		 * Last page in the card, adjust as we only use 250 out of
		 * every 256 pages
		 */
		capacity = (capacity / 256) * 250;

		capacity /= PAGESIZE;
		capacity--;

		/* READ CAPACITY reply: last LBA + block size, big-endian */
		((__be32 *) ptr)[0] = cpu_to_be32(capacity);
		((__be32 *) ptr)[1] = cpu_to_be32(PAGESIZE);
		usb_stor_set_xfer_buf(ptr, 8, srb);

		sddr55_read_map(us);

		return USB_STOR_TRANSPORT_GOOD;
	}

	if (srb->cmnd[0] == MODE_SENSE_10) {

		memcpy(ptr, mode_page_01, sizeof mode_page_01);
		/* report the current write-protect state */
		ptr[3] = (info->read_only || info->force_read_only) ? 0x80 : 0;
		usb_stor_set_xfer_buf(ptr, sizeof(mode_page_01), srb);

		if ( (srb->cmnd[2] & 0x3F) == 0x01 ) {
			usb_stor_dbg(us, "Dummy up request for mode page 1\n");
			return USB_STOR_TRANSPORT_GOOD;

		} else if ( (srb->cmnd[2] & 0x3F) == 0x3F ) {
			usb_stor_dbg(us, "Dummy up request for all mode pages\n");
			return USB_STOR_TRANSPORT_GOOD;
		}

		set_sense_info (5, 0x24, 0);	/* invalid field in command */
		return USB_STOR_TRANSPORT_FAILED;
	}

	if (srb->cmnd[0] == ALLOW_MEDIUM_REMOVAL) {

		usb_stor_dbg(us, "%s medium removal. Not that I can do anything about it...\n",
			     (srb->cmnd[4]&0x03) ? "Prevent" : "Allow");
		return USB_STOR_TRANSPORT_GOOD;
	}

	if (srb->cmnd[0] == READ_10 || srb->cmnd[0] == WRITE_10) {

		/* 32-bit big-endian LBA from CDB bytes 2-5,
		 * transfer length from bytes 7-8 */
		page = short_pack(srb->cmnd[3], srb->cmnd[2]);
		page <<= 16;
		page |= short_pack(srb->cmnd[5], srb->cmnd[4]);
		pages = short_pack(srb->cmnd[8], srb->cmnd[7]);

		page <<= info->smallpageshift;

		// convert page to block and page-within-block
		lba = page >> info->blockshift;
		page = page & info->blockmask;

		// locate physical block corresponding to logical block
		if (lba >= info->max_log_blks) {

			usb_stor_dbg(us, "Error: Requested LBA %04X exceeds maximum block %04X\n",
				     lba, info->max_log_blks - 1);

			set_sense_info (5, 0x24, 0);	/* invalid field in command */
			return USB_STOR_TRANSPORT_FAILED;
		}

		pba = info->lba_to_pba[lba];

		if (srb->cmnd[0] == WRITE_10) {
			usb_stor_dbg(us, "WRITE_10: write block %04X (LBA %04X) page %01X pages %d\n",
				     pba, lba, page, pages);

			return sddr55_write_data(us, lba, page, pages);
		} else {
			usb_stor_dbg(us, "READ_10: read block %04X (LBA %04X) page %01X pages %d\n",
				     pba, lba, page, pages);

			return sddr55_read_data(us, lba, page, pages);
		}
	}

	if (srb->cmnd[0] == TEST_UNIT_READY) {
		return USB_STOR_TRANSPORT_GOOD;
	}

	if (srb->cmnd[0] == START_STOP) {
		return USB_STOR_TRANSPORT_GOOD;
	}

	set_sense_info (5, 0x20, 0);	/* illegal command */

	return USB_STOR_TRANSPORT_FAILED;	// FIXME: sense buffer?
}
static struct scsi_host_template sddr55_host_template;
/*
 * USB probe: let the usb-storage core do the generic setup, then
 * install the SDDR-55-specific transport callbacks.
 */
static int sddr55_probe(struct usb_interface *intf,
			const struct usb_device_id *id)
{
	struct us_data *us;
	int rc;

	rc = usb_stor_probe1(&us, intf, id,
			     (id - sddr55_usb_ids) + sddr55_unusual_dev_list,
			     &sddr55_host_template);
	if (rc)
		return rc;

	/* wire up the device-specific bits */
	us->transport_name = "SDDR55";
	us->transport = sddr55_transport;
	us->transport_reset = sddr55_reset;
	us->max_lun = 0;	/* single card slot */

	return usb_stor_probe2(us);
}
/*
 * USB driver glue: only probe is device-specific; disconnect,
 * suspend/resume and reset handling are the generic usb-storage
 * helpers.
 */
static struct usb_driver sddr55_driver = {
	.name =		DRV_NAME,
	.probe =	sddr55_probe,
	.disconnect =	usb_stor_disconnect,
	.suspend =	usb_stor_suspend,
	.resume =	usb_stor_resume,
	.reset_resume =	usb_stor_reset_resume,
	.pre_reset =	usb_stor_pre_reset,
	.post_reset =	usb_stor_post_reset,
	.id_table =	sddr55_usb_ids,
	.soft_unbind =	1,
	.no_dynamic_id = 1,
};

module_usb_stor_driver(sddr55_driver, sddr55_host_template, DRV_NAME);
/* ==== end of drivers/usb/storage/sddr55.c -- drivers/usb/storage/isd200.c follows ==== */
// SPDX-License-Identifier: GPL-2.0+
/*
* Transport & Protocol Driver for In-System Design, Inc. ISD200 ASIC
*
* Current development and maintenance:
* (C) 2001-2002 Björn Stenberg ([email protected])
*
* Developed with the assistance of:
* (C) 2002 Alan Stern <[email protected]>
*
* Initial work:
* (C) 2000 In-System Design, Inc. ([email protected])
*
* The ISD200 ASIC does not natively support ATA devices. The chip
* does implement an interface, the ATA Command Block (ATACB) which provides
* a means of passing ATA commands and ATA register accesses to a device.
*
* History:
*
* 2002-10-19: Removed the specialized transfer routines.
* (Alan Stern <[email protected]>)
* 2001-02-24: Removed lots of duplicate code and simplified the structure.
* ([email protected])
* 2002-01-16: Fixed endianness bug so it works on the ppc arch.
* (Luc Saillard <[email protected]>)
* 2002-01-17: All bitfields removed.
* ([email protected])
*/
/* Include files */
#include <linux/jiffies.h>
#include <linux/errno.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/ata.h>
#include <linux/hdreg.h>
#include <linux/scatterlist.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include "usb.h"
#include "transport.h"
#include "protocol.h"
#include "debug.h"
#include "scsiglue.h"
#define DRV_NAME "ums-isd200"
MODULE_DESCRIPTION("Driver for In-System Design, Inc. ISD200 ASIC");
MODULE_AUTHOR("Björn Stenberg <[email protected]>");
MODULE_LICENSE("GPL");
MODULE_IMPORT_NS(USB_STORAGE);
static int isd200_Initialization(struct us_data *us);
/*
 * The table of devices
 *
 * Each UNUSUAL_DEV() entry in unusual_isd200.h expands here to a
 * usb_device_id match record carrying only the device flags.
 */
#define UNUSUAL_DEV(id_vendor, id_product, bcdDeviceMin, bcdDeviceMax, \
		    vendorName, productName, useProtocol, useTransport, \
		    initFunction, flags) \
{ USB_DEVICE_VER(id_vendor, id_product, bcdDeviceMin, bcdDeviceMax), \
  .driver_info = (flags) }

static struct usb_device_id isd200_usb_ids[] = {
#	include "unusual_isd200.h"
	{ }		/* Terminating entry */
};
MODULE_DEVICE_TABLE(usb, isd200_usb_ids);

#undef UNUSUAL_DEV
/*
 * The flags table
 *
 * Second expansion of unusual_isd200.h: per-device naming/protocol
 * info, kept parallel (index-for-index) with isd200_usb_ids above.
 */
#define UNUSUAL_DEV(idVendor, idProduct, bcdDeviceMin, bcdDeviceMax, \
		    vendor_name, product_name, use_protocol, use_transport, \
		    init_function, Flags) \
{ \
	.vendorName = vendor_name,	\
	.productName = product_name,	\
	.useProtocol = use_protocol,	\
	.useTransport = use_transport,	\
	.initFunction = init_function,	\
}

static struct us_unusual_dev isd200_unusual_dev_list[] = {
#	include "unusual_isd200.h"
	{ }		/* Terminating entry */
};

#undef UNUSUAL_DEV
/* Timeout defines (in Seconds) */
#define ISD200_ENUM_BSY_TIMEOUT 35
#define ISD200_ENUM_DETECT_TIMEOUT 30
#define ISD200_DEFAULT_TIMEOUT 30
/* device flags */
#define DF_ATA_DEVICE 0x0001
#define DF_MEDIA_STATUS_ENABLED 0x0002
#define DF_REMOVABLE_MEDIA 0x0004
/* capability bit definitions */
#define CAPABILITY_DMA 0x01
#define CAPABILITY_LBA 0x02
/* command_setX bit definitions */
#define COMMANDSET_REMOVABLE 0x02
#define COMMANDSET_MEDIA_STATUS 0x10
/* ATA Vendor Specific defines */
#define ATA_ADDRESS_DEVHEAD_STD 0xa0
#define ATA_ADDRESS_DEVHEAD_LBA_MODE 0x40
#define ATA_ADDRESS_DEVHEAD_SLAVE 0x10
/* Action Select bits */
#define ACTION_SELECT_0 0x01
#define ACTION_SELECT_1 0x02
#define ACTION_SELECT_2 0x04
#define ACTION_SELECT_3 0x08
#define ACTION_SELECT_4 0x10
#define ACTION_SELECT_5 0x20
#define ACTION_SELECT_6 0x40
#define ACTION_SELECT_7 0x80
/* Register Select bits */
#define REG_ALTERNATE_STATUS 0x01
#define REG_DEVICE_CONTROL 0x01
#define REG_ERROR 0x02
#define REG_FEATURES 0x02
#define REG_SECTOR_COUNT 0x04
#define REG_SECTOR_NUMBER 0x08
#define REG_CYLINDER_LOW 0x10
#define REG_CYLINDER_HIGH 0x20
#define REG_DEVICE_HEAD 0x40
#define REG_STATUS 0x80
#define REG_COMMAND 0x80
/* ATA registers offset definitions */
#define ATA_REG_ERROR_OFFSET 1
#define ATA_REG_LCYL_OFFSET 4
#define ATA_REG_HCYL_OFFSET 5
#define ATA_REG_STATUS_OFFSET 7
/* ATA error definitions not in <linux/hdreg.h> */
#define ATA_ERROR_MEDIA_CHANGE 0x20
/* ATA command definitions not in <linux/hdreg.h> */
#define ATA_COMMAND_GET_MEDIA_STATUS 0xDA
#define ATA_COMMAND_MEDIA_EJECT 0xED
/* ATA drive control definitions */
#define ATA_DC_DISABLE_INTERRUPTS 0x02
#define ATA_DC_RESET_CONTROLLER 0x04
#define ATA_DC_REENABLE_CONTROLLER 0x00
/*
* General purpose return codes
*/
#define ISD200_ERROR -1
#define ISD200_GOOD 0
/*
* Transport return codes
*/
#define ISD200_TRANSPORT_GOOD 0 /* Transport good, command good */
#define ISD200_TRANSPORT_FAILED 1 /* Transport good, command failed */
#define ISD200_TRANSPORT_ERROR 2 /* Transport bad (i.e. device dead) */
/* driver action codes */
#define ACTION_READ_STATUS 0
#define ACTION_RESET 1
#define ACTION_REENABLE 2
#define ACTION_SOFT_RESET 3
#define ACTION_ENUM 4
#define ACTION_IDENTIFY 5
/*
 * ata_cdb struct
 *
 * 16-byte vendor CDB wrapping an ATA task-file access.  All three
 * views alias the same bytes: 'generic' names the raw write-data
 * slots, 'read' names them as ATA input (status) registers, and
 * 'write' as output (command) registers.
 */
union ata_cdb {
	struct {
		unsigned char SignatureByte0;	/* ConfigData.ATAMajorCommand */
		unsigned char SignatureByte1;	/* ConfigData.ATAMinorCommand */
		unsigned char ActionSelect;	/* ACTION_SELECT_* bits */
		unsigned char RegisterSelect;	/* REG_* bits */
		unsigned char TransferBlockSize;
		unsigned char WriteData3F6;
		unsigned char WriteData1F1;
		unsigned char WriteData1F2;
		unsigned char WriteData1F3;
		unsigned char WriteData1F4;
		unsigned char WriteData1F5;
		unsigned char WriteData1F6;
		unsigned char WriteData1F7;
		unsigned char Reserved[3];
	} generic;

	/* register names when reading the task file from the device */
	struct {
		unsigned char SignatureByte0;
		unsigned char SignatureByte1;
		unsigned char ActionSelect;
		unsigned char RegisterSelect;
		unsigned char TransferBlockSize;
		unsigned char AlternateStatusByte;
		unsigned char ErrorByte;
		unsigned char SectorCountByte;
		unsigned char SectorNumberByte;
		unsigned char CylinderLowByte;
		unsigned char CylinderHighByte;
		unsigned char DeviceHeadByte;
		unsigned char StatusByte;
		unsigned char Reserved[3];
	} read;

	/* register names when writing the task file to the device */
	struct {
		unsigned char SignatureByte0;
		unsigned char SignatureByte1;
		unsigned char ActionSelect;
		unsigned char RegisterSelect;
		unsigned char TransferBlockSize;
		unsigned char DeviceControlByte;
		unsigned char FeaturesByte;
		unsigned char SectorCountByte;
		unsigned char SectorNumberByte;
		unsigned char CylinderLowByte;
		unsigned char CylinderHighByte;
		unsigned char DeviceHeadByte;
		unsigned char CommandByte;
		unsigned char Reserved[3];
	} write;
};
/*
 * Inquiry data structure. This is the data returned from the target
 * after it receives an inquiry.
 *
 * This structure may be extended by the number of bytes specified
 * in the field AdditionalLength. The defined size constant only
 * includes fields through ProductRevisionLevel.
 */

/*
 * DeviceType field
 */
#define DIRECT_ACCESS_DEVICE		0x00	/* disks */
#define DEVICE_REMOVABLE		0x80	/* DeviceTypeModifier: removable media */

/* SCSI INQUIRY reply, built by the driver on behalf of the ATA device */
struct inquiry_data {
	unsigned char DeviceType;
	unsigned char DeviceTypeModifier;
	unsigned char Versions;
	unsigned char Format;
	unsigned char AdditionalLength;
	unsigned char Reserved[2];
	unsigned char Capability;
	unsigned char VendorId[8];
	unsigned char ProductId[16];
	unsigned char ProductRevisionLevel[4];
	unsigned char VendorSpecific[20];
	unsigned char Reserved3[40];
} __attribute__ ((packed));
/*
* INQUIRY data buffer size
*/
#define INQUIRYDATABUFFERSIZE 36
/*
 * ISD200 CONFIG data struct
 *
 * Configuration bytes read from the ASIC; the ATACFG*/
/* bit masks below decode ATAConfig and ATAExtraConfig. */
#define ATACFG_TIMING		0x0f
#define ATACFG_ATAPI_RESET	0x10
#define ATACFG_MASTER		0x20
#define ATACFG_BLOCKSIZE	0xa0

#define ATACFGE_LAST_LUN	0x07
#define ATACFGE_DESC_OVERRIDE	0x08
#define ATACFGE_STATE_SUSPEND	0x10
#define ATACFGE_SKIP_BOOT	0x20
#define ATACFGE_CONF_DESC2	0x40
#define ATACFGE_INIT_STATUS	0x80

#define CFG_CAPABILITY_SRST	0x01

struct isd200_config {
	unsigned char EventNotification;
	unsigned char ExternalClock;
	unsigned char ATAInitTimeout;
	unsigned char ATAConfig;	/* decoded with ATACFG_* */
	unsigned char ATAMajorCommand;	/* CDB signature byte 0 */
	unsigned char ATAMinorCommand;	/* CDB signature byte 1 */
	unsigned char ATAExtraConfig;	/* decoded with ATACFGE_* */
	unsigned char Capability;	/* decoded with CFG_CAPABILITY_* */
}__attribute__ ((packed));
/*
 * ISD200 driver information struct
 *
 * Per-device state hung off us->extra.
 */
struct isd200_info {
	struct inquiry_data InquiryData;	/* emulated INQUIRY reply */
	u16 *id;				/* ATA IDENTIFY data */
	struct isd200_config ConfigData;	/* ASIC configuration */
	unsigned char *RegsBuf;			/* DMA-safe register buffer */
	unsigned char ATARegs[8];		/* last task-file snapshot */
	unsigned char DeviceHead;		/* master/slave select byte */
	unsigned char DeviceFlags;		/* DF_* bits */

	/* maximum number of LUNs supported */
	unsigned char MaxLUNs;
	unsigned char cmnd[MAX_COMMAND_SIZE];	/* scratch CDB */
	struct scsi_cmnd srb;			/* internal command block */
	struct scatterlist sg;			/* single-entry sg for srb */
};
/*
 * Read Capacity Data - returned in Big Endian format
 */
struct read_capacity_data {
	__be32 LogicalBlockAddress;	/* last addressable LBA */
	__be32 BytesPerBlock;
};
/*
 * Read Block Limits Data - returned in Big Endian format
 * This structure returns the maximum and minimum block
 * size for a TAPE device.
 */
struct read_block_limits {
	unsigned char Reserved;
	unsigned char BlockMaximumSize[3];	/* 24-bit big-endian */
	unsigned char BlockMinimumSize[2];	/* 16-bit big-endian */
};
/*
 * Sense Data Format
 *
 * SCSI fixed-format sense data; the SENSE_* masks decode ErrorCode
 * and Flags.
 */
#define SENSE_ERRCODE		0x7f
#define SENSE_ERRCODE_VALID	0x80
#define SENSE_FLAG_SENSE_KEY	0x0f
#define SENSE_FLAG_BAD_LENGTH	0x20
#define SENSE_FLAG_END_OF_MEDIA	0x40
#define SENSE_FLAG_FILE_MARK	0x80

struct sense_data {
	unsigned char ErrorCode;
	unsigned char SegmentNumber;
	unsigned char Flags;		/* low nibble = sense key */
	unsigned char Information[4];
	unsigned char AdditionalSenseLength;
	unsigned char CommandSpecificInformation[4];
	unsigned char AdditionalSenseCode;
	unsigned char AdditionalSenseCodeQualifier;
	unsigned char FieldReplaceableUnitCode;
	unsigned char SenseKeySpecific[3];
} __attribute__ ((packed));
/*
* Default request sense buffer size
*/
#define SENSE_BUFFER_SIZE 18
/***********************************************************************
* Helper routines
***********************************************************************/
/**************************************************************************
* isd200_build_sense
*
* Builds an artificial sense buffer to report the results of a
* failed command.
*
* RETURNS:
* void
*/
static void isd200_build_sense(struct us_data *us, struct scsi_cmnd *srb)
{
struct isd200_info *info = (struct isd200_info *)us->extra;
struct sense_data *buf = (struct sense_data *) &srb->sense_buffer[0];
unsigned char error = info->ATARegs[ATA_REG_ERROR_OFFSET];
if(error & ATA_ERROR_MEDIA_CHANGE) {
buf->ErrorCode = 0x70 | SENSE_ERRCODE_VALID;
buf->AdditionalSenseLength = 0xb;
buf->Flags = UNIT_ATTENTION;
buf->AdditionalSenseCode = 0;
buf->AdditionalSenseCodeQualifier = 0;
} else if (error & ATA_MCR) {
buf->ErrorCode = 0x70 | SENSE_ERRCODE_VALID;
buf->AdditionalSenseLength = 0xb;
buf->Flags = UNIT_ATTENTION;
buf->AdditionalSenseCode = 0;
buf->AdditionalSenseCodeQualifier = 0;
} else if (error & ATA_TRK0NF) {
buf->ErrorCode = 0x70 | SENSE_ERRCODE_VALID;
buf->AdditionalSenseLength = 0xb;
buf->Flags = NOT_READY;
buf->AdditionalSenseCode = 0;
buf->AdditionalSenseCodeQualifier = 0;
} else if (error & ATA_UNC) {
buf->ErrorCode = 0x70 | SENSE_ERRCODE_VALID;
buf->AdditionalSenseLength = 0xb;
buf->Flags = DATA_PROTECT;
buf->AdditionalSenseCode = 0;
buf->AdditionalSenseCodeQualifier = 0;
} else {
buf->ErrorCode = 0;
buf->AdditionalSenseLength = 0;
buf->Flags = 0;
buf->AdditionalSenseCode = 0;
buf->AdditionalSenseCodeQualifier = 0;
}
}
/***********************************************************************
* Transport routines
***********************************************************************/
/**************************************************************************
 * isd200_set_srb(), isd200_srb_set_bufflen()
 *
 * Two helpers to facilitate in initialization of scsi_cmnd structure
 * Will need to change when struct scsi_cmnd changes
 */
static void isd200_set_srb(struct isd200_info *info,
	enum dma_data_direction dir, void* buff, unsigned bufflen)
{
	struct scsi_cmnd *srb = &info->srb;

	/* wrap the buffer (if any) in the single-entry scatterlist */
	if (buff)
		sg_init_one(&info->sg, buff, bufflen);

	srb->sc_data_direction = dir;
	/* no buffer means a data-less command: empty sg table */
	srb->sdb.table.sgl = buff ? &info->sg : NULL;
	srb->sdb.length = bufflen;
	srb->sdb.table.nents = buff ? 1 : 0;
}
/* Override the data-transfer length recorded in @srb's data buffer */
static void isd200_srb_set_bufflen(struct scsi_cmnd *srb, unsigned bufflen)
{
	srb->sdb.length = bufflen;
}
/**************************************************************************
* isd200_action
*
* Routine for sending commands to the isd200
*
* RETURNS:
* ISD status code
*/
static int isd200_action( struct us_data *us, int action,
			  void* pointer, int value )
{
	union ata_cdb ata;
	/* static to prevent this large struct being placed on the valuable stack */
	static struct scsi_device srb_dev;
	struct isd200_info *info = (struct isd200_info *)us->extra;
	struct scsi_cmnd *srb = &info->srb;
	int status;

	memset(&ata, 0, sizeof(ata));
	/* start from the cached template command block */
	memcpy(srb->cmnd, info->cmnd, MAX_COMMAND_SIZE);
	/*
	 * NOTE(review): srb_dev is shared by every ISD200 instance; this
	 * presumably assumes commands are never built concurrently — confirm.
	 */
	srb->device = &srb_dev;

	/* every pass-through CDB carries the vendor signature bytes */
	ata.generic.SignatureByte0 = info->ConfigData.ATAMajorCommand;
	ata.generic.SignatureByte1 = info->ConfigData.ATAMinorCommand;
	ata.generic.TransferBlockSize = 1;

	switch ( action ) {
	case ACTION_READ_STATUS:
		/* read error/status plus the two cylinder registers */
		usb_stor_dbg(us, " isd200_action(READ_STATUS)\n");
		ata.generic.ActionSelect = ACTION_SELECT_0|ACTION_SELECT_2;
		ata.generic.RegisterSelect =
			REG_CYLINDER_LOW | REG_CYLINDER_HIGH |
			REG_STATUS | REG_ERROR;
		isd200_set_srb(info, DMA_FROM_DEVICE, pointer, value);
		break;

	case ACTION_ENUM:
		/* select master/slave by writing the device-head register */
		usb_stor_dbg(us, " isd200_action(ENUM,0x%02x)\n", value);
		ata.generic.ActionSelect = ACTION_SELECT_1|ACTION_SELECT_2|
					   ACTION_SELECT_3|ACTION_SELECT_4|
					   ACTION_SELECT_5;
		ata.generic.RegisterSelect = REG_DEVICE_HEAD;
		ata.write.DeviceHeadByte = value;
		isd200_set_srb(info, DMA_NONE, NULL, 0);
		break;

	case ACTION_RESET:
		/* assert controller reset via the device-control register */
		usb_stor_dbg(us, " isd200_action(RESET)\n");
		ata.generic.ActionSelect = ACTION_SELECT_1|ACTION_SELECT_2|
					   ACTION_SELECT_3|ACTION_SELECT_4;
		ata.generic.RegisterSelect = REG_DEVICE_CONTROL;
		ata.write.DeviceControlByte = ATA_DC_RESET_CONTROLLER;
		isd200_set_srb(info, DMA_NONE, NULL, 0);
		break;

	case ACTION_REENABLE:
		/* de-assert the controller reset again */
		usb_stor_dbg(us, " isd200_action(REENABLE)\n");
		ata.generic.ActionSelect = ACTION_SELECT_1|ACTION_SELECT_2|
					   ACTION_SELECT_3|ACTION_SELECT_4;
		ata.generic.RegisterSelect = REG_DEVICE_CONTROL;
		ata.write.DeviceControlByte = ATA_DC_REENABLE_CONTROLLER;
		isd200_set_srb(info, DMA_NONE, NULL, 0);
		break;

	case ACTION_SOFT_RESET:
		/* issue DEVICE RESET to the currently selected device */
		usb_stor_dbg(us, " isd200_action(SOFT_RESET)\n");
		ata.generic.ActionSelect = ACTION_SELECT_1|ACTION_SELECT_5;
		ata.generic.RegisterSelect = REG_DEVICE_HEAD | REG_COMMAND;
		ata.write.DeviceHeadByte = info->DeviceHead;
		ata.write.CommandByte = ATA_CMD_DEV_RESET;
		isd200_set_srb(info, DMA_NONE, NULL, 0);
		break;

	case ACTION_IDENTIFY:
		/* IDENTIFY DEVICE; the data lands in info->id */
		usb_stor_dbg(us, " isd200_action(IDENTIFY)\n");
		ata.generic.RegisterSelect = REG_COMMAND;
		ata.write.CommandByte = ATA_CMD_ID_ATA;
		isd200_set_srb(info, DMA_FROM_DEVICE, info->id,
			       ATA_ID_WORDS * 2);
		break;

	default:
		usb_stor_dbg(us, "Error: Undefined action %d\n", action);
		return ISD200_ERROR;
	}

	/* overlay the built ATA CDB onto the command block and send it */
	memcpy(srb->cmnd, &ata, sizeof(ata.generic));
	srb->cmd_len = sizeof(ata.generic);
	status = usb_stor_Bulk_transport(srb, us);
	if (status == USB_STOR_TRANSPORT_GOOD)
		status = ISD200_GOOD;
	else {
		usb_stor_dbg(us, " isd200_action(0x%02x) error: %d\n",
			     action, status);
		status = ISD200_ERROR;
		/* need to reset device here */
	}

	return status;
}
/**************************************************************************
* isd200_read_regs
*
* Read ATA Registers
*
* RETURNS:
* ISD status code
*/
/*
 * Fetch the ATA task-file registers from the device into info->ATARegs.
 * Returns ISD200_GOOD on success, ISD200_ERROR on transport failure.
 */
static int isd200_read_regs( struct us_data *us )
{
	struct isd200_info *info = (struct isd200_info *)us->extra;
	int transferStatus;

	usb_stor_dbg(us, "Entering isd200_IssueATAReadRegs\n");

	transferStatus = isd200_action( us, ACTION_READ_STATUS,
					info->RegsBuf, sizeof(info->ATARegs) );
	if (transferStatus != ISD200_TRANSPORT_GOOD) {
		usb_stor_dbg(us, " Error reading ATA registers\n");
		return ISD200_ERROR;
	}

	/* copy out of the DMA-safe bounce buffer into the cached registers */
	memcpy(info->ATARegs, info->RegsBuf, sizeof(info->ATARegs));
	usb_stor_dbg(us, " Got ATA Register[ATA_REG_ERROR_OFFSET] = 0x%x\n",
		     info->ATARegs[ATA_REG_ERROR_OFFSET]);

	return ISD200_GOOD;
}
/**************************************************************************
* Invoke the transport and basic error-handling/recovery methods
*
* This is used by the protocol layers to actually send the message to
* the device and receive the response.
*/
static void isd200_invoke_transport( struct us_data *us,
				     struct scsi_cmnd *srb,
				     union ata_cdb *ataCdb )
{
	int need_auto_sense = 0;
	int transferStatus;
	int result;

	/* send the command to the transport layer */
	memcpy(srb->cmnd, ataCdb, sizeof(ataCdb->generic));
	srb->cmd_len = sizeof(ataCdb->generic);
	transferStatus = usb_stor_Bulk_transport(srb, us);

	/*
	 * if the command gets aborted by the higher layers, we need to
	 * short-circuit all other processing
	 */
	if (test_bit(US_FLIDX_TIMED_OUT, &us->dflags)) {
		usb_stor_dbg(us, "-- command was aborted\n");
		goto Handle_Abort;
	}

	switch (transferStatus) {
	case USB_STOR_TRANSPORT_GOOD:
		/* Indicate a good result */
		srb->result = SAM_STAT_GOOD;
		break;

	case USB_STOR_TRANSPORT_NO_SENSE:
		usb_stor_dbg(us, "-- transport indicates protocol failure\n");
		srb->result = SAM_STAT_CHECK_CONDITION;
		return;

	case USB_STOR_TRANSPORT_FAILED:
		/* command failed: fabricate sense data from the ATA regs below */
		usb_stor_dbg(us, "-- transport indicates command failure\n");
		need_auto_sense = 1;
		break;

	case USB_STOR_TRANSPORT_ERROR:
		usb_stor_dbg(us, "-- transport indicates transport error\n");
		srb->result = DID_ERROR << 16;
		/* Need reset here */
		return;

	default:
		usb_stor_dbg(us, "-- transport indicates unknown error\n");
		srb->result = DID_ERROR << 16;
		/* Need reset here */
		return;
	}

	/*
	 * A residue is only suspicious for commands that are not allowed
	 * to legitimately return less data than was requested.
	 */
	if ((scsi_get_resid(srb) > 0) &&
	    !((srb->cmnd[0] == REQUEST_SENSE) ||
	      (srb->cmnd[0] == INQUIRY) ||
	      (srb->cmnd[0] == MODE_SENSE) ||
	      (srb->cmnd[0] == LOG_SENSE) ||
	      (srb->cmnd[0] == MODE_SENSE_10))) {
		usb_stor_dbg(us, "-- unexpectedly short transfer\n");
		need_auto_sense = 1;
	}

	if (need_auto_sense) {
		/* read the ATA error register and convert it to sense data */
		result = isd200_read_regs(us);
		if (test_bit(US_FLIDX_TIMED_OUT, &us->dflags)) {
			usb_stor_dbg(us, "-- auto-sense aborted\n");
			goto Handle_Abort;
		}
		if (result == ISD200_GOOD) {
			isd200_build_sense(us, srb);
			srb->result = SAM_STAT_CHECK_CONDITION;

			/* If things are really okay, then let's show that */
			if ((srb->sense_buffer[2] & 0xf) == 0x0)
				srb->result = SAM_STAT_GOOD;
		} else {
			srb->result = DID_ERROR << 16;
			/* Need reset here */
		}
	}

	/*
	 * Regardless of auto-sense, if we _know_ we have an error
	 * condition, show that in the result code
	 */
	if (transferStatus == USB_STOR_TRANSPORT_FAILED)
		srb->result = SAM_STAT_CHECK_CONDITION;
	return;

	/*
	 * abort processing: the bulk-only transport requires a reset
	 * following an abort
	 */
Handle_Abort:
	srb->result = DID_ABORT << 16;

	/* permit the reset transfer to take place */
	clear_bit(US_FLIDX_ABORTING, &us->dflags);
	/* Need reset here */
}
#ifdef CONFIG_USB_STORAGE_DEBUG
/* Dump every field of the cached ISD200 configuration data to the debug log */
static void isd200_log_config(struct us_data *us, struct isd200_info *info)
{
	usb_stor_dbg(us, " Event Notification: 0x%x\n",
		     info->ConfigData.EventNotification);
	usb_stor_dbg(us, " External Clock: 0x%x\n",
		     info->ConfigData.ExternalClock);
	usb_stor_dbg(us, " ATA Init Timeout: 0x%x\n",
		     info->ConfigData.ATAInitTimeout);
	usb_stor_dbg(us, " ATAPI Command Block Size: 0x%x\n",
		     (info->ConfigData.ATAConfig & ATACFG_BLOCKSIZE) >> 6);
	usb_stor_dbg(us, " Master/Slave Selection: 0x%x\n",
		     info->ConfigData.ATAConfig & ATACFG_MASTER);
	usb_stor_dbg(us, " ATAPI Reset: 0x%x\n",
		     info->ConfigData.ATAConfig & ATACFG_ATAPI_RESET);
	usb_stor_dbg(us, " ATA Timing: 0x%x\n",
		     info->ConfigData.ATAConfig & ATACFG_TIMING);
	usb_stor_dbg(us, " ATA Major Command: 0x%x\n",
		     info->ConfigData.ATAMajorCommand);
	usb_stor_dbg(us, " ATA Minor Command: 0x%x\n",
		     info->ConfigData.ATAMinorCommand);
	usb_stor_dbg(us, " Init Status: 0x%x\n",
		     info->ConfigData.ATAExtraConfig & ATACFGE_INIT_STATUS);
	usb_stor_dbg(us, " Config Descriptor 2: 0x%x\n",
		     info->ConfigData.ATAExtraConfig & ATACFGE_CONF_DESC2);
	usb_stor_dbg(us, " Skip Device Boot: 0x%x\n",
		     info->ConfigData.ATAExtraConfig & ATACFGE_SKIP_BOOT);
	usb_stor_dbg(us, " ATA 3 State Suspend: 0x%x\n",
		     info->ConfigData.ATAExtraConfig & ATACFGE_STATE_SUSPEND);
	usb_stor_dbg(us, " Descriptor Override: 0x%x\n",
		     info->ConfigData.ATAExtraConfig & ATACFGE_DESC_OVERRIDE);
	usb_stor_dbg(us, " Last LUN Identifier: 0x%x\n",
		     info->ConfigData.ATAExtraConfig & ATACFGE_LAST_LUN);
	usb_stor_dbg(us, " SRST Enable: 0x%x\n",
		     info->ConfigData.ATAExtraConfig & CFG_CAPABILITY_SRST);
}
#endif
/**************************************************************************
* isd200_write_config
*
* Write the ISD200 Configuration data
*
* RETURNS:
* ISD status code
*/
static int isd200_write_config( struct us_data *us )
{
	struct isd200_info *info = (struct isd200_info *)us->extra;
	int retStatus = ISD200_GOOD;
	int result;

#ifdef CONFIG_USB_STORAGE_DEBUG
	usb_stor_dbg(us, "Entering isd200_write_config\n");
	usb_stor_dbg(us, " Writing the following ISD200 Config Data:\n");
	isd200_log_config(us, info);
#endif

	/* let's send the command via the control pipe */
	result = usb_stor_ctrl_transfer(
		us,
		us->send_ctrl_pipe,
		0x01,	/* vendor-specific "write config" request */
		USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_OUT,
		0x0000,
		0x0002,
		(void *) &info->ConfigData,
		sizeof(info->ConfigData));

	if (result >= 0) {
		usb_stor_dbg(us, " ISD200 Config Data was written successfully\n");
	} else {
		usb_stor_dbg(us, " Request to write ISD200 Config Data failed!\n");
		retStatus = ISD200_ERROR;
	}

	usb_stor_dbg(us, "Leaving isd200_write_config %08X\n", retStatus);
	return retStatus;
}
/**************************************************************************
* isd200_read_config
*
* Reads the ISD200 Configuration data
*
* RETURNS:
* ISD status code
*/
static int isd200_read_config( struct us_data *us )
{
	struct isd200_info *info = (struct isd200_info *)us->extra;
	int retStatus = ISD200_GOOD;
	int result;

	usb_stor_dbg(us, "Entering isd200_read_config\n");

	/* read the configuration information from ISD200. Use this to */
	/* determine what the special ATA CDB bytes are. */
	result = usb_stor_ctrl_transfer(
		us,
		us->recv_ctrl_pipe,
		0x02,	/* vendor-specific "read config" request */
		USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_IN,
		0x0000,
		0x0002,
		(void *) &info->ConfigData,
		sizeof(info->ConfigData));

	if (result >= 0) {
		usb_stor_dbg(us, " Retrieved the following ISD200 Config Data:\n");
#ifdef CONFIG_USB_STORAGE_DEBUG
		isd200_log_config(us, info);
#endif
	} else {
		usb_stor_dbg(us, " Request to get ISD200 Config Data failed!\n");
		retStatus = ISD200_ERROR;
	}

	usb_stor_dbg(us, "Leaving isd200_read_config %08X\n", retStatus);
	return retStatus;
}
/**************************************************************************
* isd200_atapi_soft_reset
*
* Perform an Atapi Soft Reset on the device
*
* RETURNS:
* NT status code
*/
/*
 * Issue an ATAPI DEVICE RESET to the selected device.
 * Returns ISD200_GOOD on success, ISD200_ERROR on failure.
 */
static int isd200_atapi_soft_reset( struct us_data *us )
{
	int status = ISD200_GOOD;

	usb_stor_dbg(us, "Entering isd200_atapi_soft_reset\n");

	if (isd200_action(us, ACTION_SOFT_RESET, NULL, 0) !=
			ISD200_TRANSPORT_GOOD) {
		usb_stor_dbg(us, " Error issuing Atapi Soft Reset\n");
		status = ISD200_ERROR;
	}

	usb_stor_dbg(us, "Leaving isd200_atapi_soft_reset %08X\n", status);
	return status;
}
/**************************************************************************
* isd200_srst
*
* Perform an SRST on the device
*
* RETURNS:
* ISD status code
*/
/*
 * Perform a soft reset (SRST): assert the controller reset, wait, then
 * re-enable the controller and give the drive time to recover.
 */
static int isd200_srst( struct us_data *us )
{
	int status = ISD200_GOOD;

	usb_stor_dbg(us, "Entering isd200_SRST\n");

	/* assert the reset line */
	if (isd200_action(us, ACTION_RESET, NULL, 0) !=
			ISD200_TRANSPORT_GOOD) {
		usb_stor_dbg(us, " Error issuing SRST\n");
		status = ISD200_ERROR;
		goto done;
	}

	/* delay 10ms to give the drive a chance to see it */
	msleep(10);

	/* de-assert reset again */
	if (isd200_action(us, ACTION_REENABLE, NULL, 0) !=
			ISD200_TRANSPORT_GOOD) {
		usb_stor_dbg(us, " Error taking drive out of reset\n");
		status = ISD200_ERROR;
	} else {
		/* delay 50ms to give the drive a chance to recover after SRST */
		msleep(50);
	}

done:
	usb_stor_dbg(us, "Leaving isd200_srst %08X\n", status);
	return status;
}
/**************************************************************************
* isd200_try_enum
*
* Helper function for isd200_manual_enum(). Does ENUM and READ_STATUS
* and tries to analyze the status registers
*
* RETURNS:
* ISD status code
*/
static int isd200_try_enum(struct us_data *us, unsigned char master_slave,
			   int detect )
{
	int status = ISD200_GOOD;
	unsigned long endTime;
	struct isd200_info *info = (struct isd200_info *)us->extra;
	unsigned char *regs = info->RegsBuf;
	int recheckAsMaster = 0;

	/* device detection gets the long timeout, a plain BSY wait the short one */
	if ( detect )
		endTime = jiffies + ISD200_ENUM_DETECT_TIMEOUT * HZ;
	else
		endTime = jiffies + ISD200_ENUM_BSY_TIMEOUT * HZ;

	/* loop until we detect !BSY or timeout */
	while(1) {
		/* select the requested device, then sample its registers */
		status = isd200_action( us, ACTION_ENUM, NULL, master_slave );
		if ( status != ISD200_GOOD )
			break;

		status = isd200_action( us, ACTION_READ_STATUS,
					regs, 8 );
		if ( status != ISD200_GOOD )
			break;

		if (!detect) {
			/* !detect: only waiting for BSY to clear */
			if (regs[ATA_REG_STATUS_OFFSET] & ATA_BUSY) {
				usb_stor_dbg(us, " %s status is still BSY, try again...\n",
					     master_slave == ATA_ADDRESS_DEVHEAD_STD ?
					     "Master" : "Slave");
			} else {
				usb_stor_dbg(us, " %s status !BSY, continue with next operation\n",
					     master_slave == ATA_ADDRESS_DEVHEAD_STD ?
					     "Master" : "Slave");
				break;
			}
		}
		/* check for ATA_BUSY and */
		/* ATA_DF (workaround ATA Zip drive) and */
		/* ATA_ERR (workaround for Archos CD-ROM) */
		else if (regs[ATA_REG_STATUS_OFFSET] &
			 (ATA_BUSY | ATA_DF | ATA_ERR)) {
			usb_stor_dbg(us, " Status indicates it is not ready, try again...\n");
		}
		/* check for DRDY, ATA devices set DRDY after SRST */
		else if (regs[ATA_REG_STATUS_OFFSET] & ATA_DRDY) {
			usb_stor_dbg(us, " Identified ATA device\n");
			info->DeviceFlags |= DF_ATA_DEVICE;
			info->DeviceHead = master_slave;
			break;
		}
		/*
		 * check Cylinder High/Low to
		 * determine if it is an ATAPI device
		 */
		else if (regs[ATA_REG_HCYL_OFFSET] == 0xEB &&
			 regs[ATA_REG_LCYL_OFFSET] == 0x14) {
			/*
			 * It seems that the RICOH
			 * MP6200A CD/RW drive will
			 * report itself okay as a
			 * slave when it is really a
			 * master. So this check again
			 * as a master device just to
			 * make sure it doesn't report
			 * itself okay as a master also
			 */
			if ((master_slave & ATA_ADDRESS_DEVHEAD_SLAVE) &&
			    !recheckAsMaster) {
				usb_stor_dbg(us, " Identified ATAPI device as slave. Rechecking again as master\n");
				recheckAsMaster = 1;
				master_slave = ATA_ADDRESS_DEVHEAD_STD;
			} else {
				usb_stor_dbg(us, " Identified ATAPI device\n");
				info->DeviceHead = master_slave;

				status = isd200_atapi_soft_reset(us);
				break;
			}
		} else {
			usb_stor_dbg(us, " Not ATA, not ATAPI - Weird\n");
			break;
		}

		/* check for timeout on this request */
		if (time_after_eq(jiffies, endTime)) {
			if (!detect)
				usb_stor_dbg(us, " BSY check timeout, just continue with next operation...\n");
			else
				usb_stor_dbg(us, " Device detect timeout!\n");
			break;
		}
	}

	return status;
}
/**************************************************************************
* isd200_manual_enum
*
* Determines if the drive attached is an ATA or ATAPI and if it is a
* master or slave.
*
* RETURNS:
* ISD status code
*/
static int isd200_manual_enum(struct us_data *us)
{
	struct isd200_info *info = (struct isd200_info *)us->extra;
	int retStatus = ISD200_GOOD;

	usb_stor_dbg(us, "Entering isd200_manual_enum\n");

	retStatus = isd200_read_config(us);
	if (retStatus == ISD200_GOOD) {
		int isslave;
		/* master or slave? */
		retStatus = isd200_try_enum( us, ATA_ADDRESS_DEVHEAD_STD, 0);
		if (retStatus == ISD200_GOOD)
			retStatus = isd200_try_enum( us, ATA_ADDRESS_DEVHEAD_SLAVE, 0);

		if (retStatus == ISD200_GOOD) {
			retStatus = isd200_srst(us);
			if (retStatus == ISD200_GOOD)
				/* ata or atapi? */
				retStatus = isd200_try_enum( us, ATA_ADDRESS_DEVHEAD_STD, 1);
		}

		isslave = (info->DeviceHead & ATA_ADDRESS_DEVHEAD_SLAVE) ? 1 : 0;
		if (!(info->ConfigData.ATAConfig & ATACFG_MASTER)) {
			usb_stor_dbg(us, " Setting Master/Slave selection to %d\n",
				     isslave);
			/* bits 7:6 of ATAConfig hold the master/slave selection */
			info->ConfigData.ATAConfig &= 0x3f;
			info->ConfigData.ATAConfig |= (isslave<<6);
			retStatus = isd200_write_config(us);
		}
	}

	usb_stor_dbg(us, "Leaving isd200_manual_enum %08X\n", retStatus);
	return(retStatus);
}
/*
 * Convert the IDENTIFY DEVICE data words to CPU byte order in place.
 * On little-endian hosts this is a no-op and compiles away entirely.
 */
static void isd200_fix_driveid(u16 *id)
{
#ifndef __LITTLE_ENDIAN
# ifdef __BIG_ENDIAN
	int i;

	for (i = 0; i < ATA_ID_WORDS; i++)
		id[i] = __le16_to_cpu(id[i]);
# else
# error "Please fix <asm/byteorder.h>"
# endif
#endif
}
/* Log selected words of the IDENTIFY DEVICE data for debugging */
static void isd200_dump_driveid(struct us_data *us, u16 *id)
{
	usb_stor_dbg(us, " Identify Data Structure:\n");
	usb_stor_dbg(us, " config = 0x%x\n", id[ATA_ID_CONFIG]);
	usb_stor_dbg(us, " cyls = 0x%x\n", id[ATA_ID_CYLS]);
	usb_stor_dbg(us, " heads = 0x%x\n", id[ATA_ID_HEADS]);
	usb_stor_dbg(us, " track_bytes = 0x%x\n", id[4]);
	usb_stor_dbg(us, " sector_bytes = 0x%x\n", id[5]);
	usb_stor_dbg(us, " sectors = 0x%x\n", id[ATA_ID_SECTORS]);
	usb_stor_dbg(us, " serial_no[0] = 0x%x\n", *(char *)&id[ATA_ID_SERNO]);
	usb_stor_dbg(us, " buf_type = 0x%x\n", id[20]);
	usb_stor_dbg(us, " buf_size = 0x%x\n", id[ATA_ID_BUF_SIZE]);
	usb_stor_dbg(us, " ecc_bytes = 0x%x\n", id[22]);
	usb_stor_dbg(us, " fw_rev[0] = 0x%x\n", *(char *)&id[ATA_ID_FW_REV]);
	usb_stor_dbg(us, " model[0] = 0x%x\n", *(char *)&id[ATA_ID_PROD]);
	usb_stor_dbg(us, " max_multsect = 0x%x\n", id[ATA_ID_MAX_MULTSECT] & 0xff);
	usb_stor_dbg(us, " dword_io = 0x%x\n", id[ATA_ID_DWORD_IO]);
	usb_stor_dbg(us, " capability = 0x%x\n", id[ATA_ID_CAPABILITY] >> 8);
	usb_stor_dbg(us, " tPIO = 0x%x\n", id[ATA_ID_OLD_PIO_MODES] >> 8);
	usb_stor_dbg(us, " tDMA = 0x%x\n", id[ATA_ID_OLD_DMA_MODES] >> 8);
	usb_stor_dbg(us, " field_valid = 0x%x\n", id[ATA_ID_FIELD_VALID]);
	usb_stor_dbg(us, " cur_cyls = 0x%x\n", id[ATA_ID_CUR_CYLS]);
	usb_stor_dbg(us, " cur_heads = 0x%x\n", id[ATA_ID_CUR_HEADS]);
	usb_stor_dbg(us, " cur_sectors = 0x%x\n", id[ATA_ID_CUR_SECTORS]);
	usb_stor_dbg(us, " cur_capacity = 0x%x\n", ata_id_u32(id, 57));
	usb_stor_dbg(us, " multsect = 0x%x\n", id[ATA_ID_MULTSECT] & 0xff);
	usb_stor_dbg(us, " lba_capacity = 0x%x\n", ata_id_u32(id, ATA_ID_LBA_CAPACITY));
	usb_stor_dbg(us, " command_set_1 = 0x%x\n", id[ATA_ID_COMMAND_SET_1]);
	usb_stor_dbg(us, " command_set_2 = 0x%x\n", id[ATA_ID_COMMAND_SET_2]);
}
/**************************************************************************
* isd200_get_inquiry_data
*
* Get inquiry data
*
* RETURNS:
* ISD status code
*/
static int isd200_get_inquiry_data( struct us_data *us )
{
	struct isd200_info *info = (struct isd200_info *)us->extra;
	int retStatus = ISD200_GOOD;
	u16 *id = info->id;

	usb_stor_dbg(us, "Entering isd200_get_inquiry_data\n");

	/* set default to Master */
	info->DeviceHead = ATA_ADDRESS_DEVHEAD_STD;

	/* attempt to manually enumerate this device */
	retStatus = isd200_manual_enum(us);
	if (retStatus == ISD200_GOOD) {
		int transferStatus;

		/* check for an ATA device */
		if (info->DeviceFlags & DF_ATA_DEVICE) {
			/* this must be an ATA device */
			/* perform an ATA Command Identify */
			transferStatus = isd200_action( us, ACTION_IDENTIFY,
							id, ATA_ID_WORDS * 2);
			if (transferStatus != ISD200_TRANSPORT_GOOD) {
				/* Error issuing ATA Command Identify */
				usb_stor_dbg(us, " Error issuing ATA Command Identify\n");
				retStatus = ISD200_ERROR;
			} else {
				/* ATA Command Identify successful */
				int i;
				__be16 *src;
				__u16 *dest;

				isd200_fix_driveid(id);
				isd200_dump_driveid(us, id);

				/* build a SCSI INQUIRY reply from the IDENTIFY data */
				memset(&info->InquiryData, 0, sizeof(info->InquiryData));

				/* Standard IDE interface only supports disks */
				info->InquiryData.DeviceType = DIRECT_ACCESS_DEVICE;

				/* The length must be at least 36 (5 + 31) */
				info->InquiryData.AdditionalLength = 0x1F;

				if (id[ATA_ID_COMMAND_SET_1] & COMMANDSET_MEDIA_STATUS) {
					/* set the removable bit */
					info->InquiryData.DeviceTypeModifier = DEVICE_REMOVABLE;
					info->DeviceFlags |= DF_REMOVABLE_MEDIA;
				}

				/* Fill in vendor identification fields */
				src = (__be16 *)&id[ATA_ID_PROD];
				dest = (__u16*)info->InquiryData.VendorId;
				for (i = 0; i < 4; i++)
					dest[i] = be16_to_cpu(src[i]);

				src = (__be16 *)&id[ATA_ID_PROD + 8/2];
				dest = (__u16*)info->InquiryData.ProductId;
				for (i=0;i<8;i++)
					dest[i] = be16_to_cpu(src[i]);

				src = (__be16 *)&id[ATA_ID_FW_REV];
				dest = (__u16*)info->InquiryData.ProductRevisionLevel;
				for (i=0;i<2;i++)
					dest[i] = be16_to_cpu(src[i]);

				/* determine if it supports Media Status Notification */
				if (id[ATA_ID_COMMAND_SET_2] & COMMANDSET_MEDIA_STATUS) {
					usb_stor_dbg(us, " Device supports Media Status Notification\n");

					/*
					 * Indicate that it is enabled, even
					 * though it is not.
					 * This allows the lock/unlock of the
					 * media to work correctly.
					 */
					info->DeviceFlags |= DF_MEDIA_STATUS_ENABLED;
				}
				else
					info->DeviceFlags &= ~DF_MEDIA_STATUS_ENABLED;
			}
		} else {
			/*
			 * this must be an ATAPI device
			 * use an ATAPI protocol (Transparent SCSI)
			 */
			us->protocol_name = "Transparent SCSI";
			us->proto_handler = usb_stor_transparent_scsi_command;

			usb_stor_dbg(us, "Protocol changed to: %s\n",
				     us->protocol_name);

			/* Free driver structure */
			us->extra_destructor(info);
			kfree(info);
			us->extra = NULL;
			us->extra_destructor = NULL;
		}
	}

	usb_stor_dbg(us, "Leaving isd200_get_inquiry_data %08X\n", retStatus);

	return(retStatus);
}
/**************************************************************************
* isd200_scsi_to_ata
*
* Translate SCSI commands to ATA commands.
*
* RETURNS:
* 1 if the command needs to be sent to the transport layer
* 0 otherwise
*/
/*
 * Helper for isd200_scsi_to_ata(): build an ATA CDB that issues a single
 * command-register write (signature bytes + REG_COMMAND + the opcode).
 * @blocksize goes into TransferBlockSize (0 for media eject, 1 otherwise).
 */
static void isd200_build_simple_cdb(struct isd200_info *info,
				    union ata_cdb *ataCdb,
				    unsigned char command,
				    unsigned char blocksize)
{
	ataCdb->generic.SignatureByte0 = info->ConfigData.ATAMajorCommand;
	ataCdb->generic.SignatureByte1 = info->ConfigData.ATAMinorCommand;
	ataCdb->generic.TransferBlockSize = blocksize;
	ataCdb->generic.RegisterSelect = REG_COMMAND;
	ataCdb->write.CommandByte = command;
}

/*
 * Helper for isd200_scsi_to_ata(): translate the LBA and transfer length of
 * a READ(10)/WRITE(10) into ATA task-file registers — LBA28 when the drive
 * supports it, CHS otherwise — and fill in the full read/write CDB.
 * This logic was previously duplicated in the READ_10 and WRITE_10 cases.
 */
static void isd200_build_rw_cdb(struct isd200_info *info,
				union ata_cdb *ataCdb,
				struct scsi_cmnd *srb, u16 *id,
				unsigned char ata_command)
{
	unsigned char sectnum, head;
	unsigned short cylinder;
	unsigned long lba;
	unsigned long blockCount;

	lba = be32_to_cpu(*(__be32 *)&srb->cmnd[2]);
	blockCount = (unsigned long)srb->cmnd[7]<<8 |
		     (unsigned long)srb->cmnd[8];

	if (ata_id_has_lba(id)) {
		/* LBA28: low 24 bits in sector/cylinder regs, top nibble in head */
		sectnum = (unsigned char)(lba);
		cylinder = (unsigned short)(lba>>8);
		head = ATA_ADDRESS_DEVHEAD_LBA_MODE |
		       (unsigned char)(lba>>24 & 0x0F);
	} else {
		/* CHS addressing for drives without LBA support */
		sectnum = (u8)((lba % id[ATA_ID_SECTORS]) + 1);
		cylinder = (u16)(lba / (id[ATA_ID_SECTORS] *
				id[ATA_ID_HEADS]));
		head = (u8)((lba / id[ATA_ID_SECTORS]) %
				id[ATA_ID_HEADS]);
	}

	ataCdb->generic.SignatureByte0 = info->ConfigData.ATAMajorCommand;
	ataCdb->generic.SignatureByte1 = info->ConfigData.ATAMinorCommand;
	ataCdb->generic.TransferBlockSize = 1;
	ataCdb->generic.RegisterSelect =
		REG_SECTOR_COUNT | REG_SECTOR_NUMBER |
		REG_CYLINDER_LOW | REG_CYLINDER_HIGH |
		REG_DEVICE_HEAD | REG_COMMAND;
	ataCdb->write.SectorCountByte = (unsigned char)blockCount;
	ataCdb->write.SectorNumberByte = sectnum;
	ataCdb->write.CylinderHighByte = (unsigned char)(cylinder>>8);
	ataCdb->write.CylinderLowByte = (unsigned char)cylinder;
	ataCdb->write.DeviceHeadByte = (head | ATA_ADDRESS_DEVHEAD_STD);
	ataCdb->write.CommandByte = ata_command;
}

/*
 * Translate a SCSI command into an ATA pass-through CDB.
 *
 * Returns 1 if the built CDB must be sent to the transport layer, or 0 if
 * the command was fully handled here (srb->result already set).
 */
static int isd200_scsi_to_ata(struct scsi_cmnd *srb, struct us_data *us,
			      union ata_cdb * ataCdb)
{
	struct isd200_info *info = (struct isd200_info *)us->extra;
	u16 *id = info->id;
	int sendToTransport = 1;
	unsigned char senseData[8] = { 0, 0, 0, 0, 0, 0, 0, 0 };

	memset(ataCdb, 0, sizeof(union ata_cdb));

	/* SCSI Command */
	switch (srb->cmnd[0]) {
	case INQUIRY:
		usb_stor_dbg(us, " ATA OUT - INQUIRY\n");

		/* copy InquiryData */
		usb_stor_set_xfer_buf((unsigned char *) &info->InquiryData,
				      sizeof(info->InquiryData), srb);
		srb->result = SAM_STAT_GOOD;
		sendToTransport = 0;
		break;

	case MODE_SENSE:
		usb_stor_dbg(us, " ATA OUT - SCSIOP_MODE_SENSE\n");

		/* Initialize the return buffer */
		usb_stor_set_xfer_buf(senseData, sizeof(senseData), srb);

		if (info->DeviceFlags & DF_MEDIA_STATUS_ENABLED) {
			isd200_build_simple_cdb(info, ataCdb,
					ATA_COMMAND_GET_MEDIA_STATUS, 1);
			isd200_srb_set_bufflen(srb, 0);
		} else {
			usb_stor_dbg(us, " Media Status not supported, just report okay\n");
			srb->result = SAM_STAT_GOOD;
			sendToTransport = 0;
		}
		break;

	case TEST_UNIT_READY:
		usb_stor_dbg(us, " ATA OUT - SCSIOP_TEST_UNIT_READY\n");

		if (info->DeviceFlags & DF_MEDIA_STATUS_ENABLED) {
			isd200_build_simple_cdb(info, ataCdb,
					ATA_COMMAND_GET_MEDIA_STATUS, 1);
			isd200_srb_set_bufflen(srb, 0);
		} else {
			usb_stor_dbg(us, " Media Status not supported, just report okay\n");
			srb->result = SAM_STAT_GOOD;
			sendToTransport = 0;
		}
		break;

	case READ_CAPACITY:
	{
		unsigned long capacity;
		struct read_capacity_data readCapacityData;

		usb_stor_dbg(us, " ATA OUT - SCSIOP_READ_CAPACITY\n");

		/* answer from the cached IDENTIFY data; no device round-trip */
		if (ata_id_has_lba(id))
			capacity = ata_id_u32(id, ATA_ID_LBA_CAPACITY) - 1;
		else
			capacity = (id[ATA_ID_HEADS] * id[ATA_ID_CYLS] *
				    id[ATA_ID_SECTORS]) - 1;

		readCapacityData.LogicalBlockAddress = cpu_to_be32(capacity);
		readCapacityData.BytesPerBlock = cpu_to_be32(0x200);

		usb_stor_set_xfer_buf((unsigned char *) &readCapacityData,
				      sizeof(readCapacityData), srb);
		srb->result = SAM_STAT_GOOD;
		sendToTransport = 0;
	}
	break;

	case READ_10:
		usb_stor_dbg(us, " ATA OUT - SCSIOP_READ\n");
		isd200_build_rw_cdb(info, ataCdb, srb, id, ATA_CMD_PIO_READ);
		break;

	case WRITE_10:
		usb_stor_dbg(us, " ATA OUT - SCSIOP_WRITE\n");
		isd200_build_rw_cdb(info, ataCdb, srb, id, ATA_CMD_PIO_WRITE);
		break;

	case ALLOW_MEDIUM_REMOVAL:
		usb_stor_dbg(us, " ATA OUT - SCSIOP_MEDIUM_REMOVAL\n");

		if (info->DeviceFlags & DF_REMOVABLE_MEDIA) {
			usb_stor_dbg(us, " srb->cmnd[4] = 0x%X\n",
				     srb->cmnd[4]);
			/* bit 0 of cmnd[4] selects lock vs unlock */
			isd200_build_simple_cdb(info, ataCdb,
					(srb->cmnd[4] & 0x1) ?
					ATA_CMD_MEDIA_LOCK :
					ATA_CMD_MEDIA_UNLOCK, 1);
			isd200_srb_set_bufflen(srb, 0);
		} else {
			usb_stor_dbg(us, " Not removable media, just report okay\n");
			srb->result = SAM_STAT_GOOD;
			sendToTransport = 0;
		}
		break;

	case START_STOP:
		usb_stor_dbg(us, " ATA OUT - SCSIOP_START_STOP_UNIT\n");
		usb_stor_dbg(us, " srb->cmnd[4] = 0x%X\n", srb->cmnd[4]);

		if ((srb->cmnd[4] & 0x3) == 0x2) {
			usb_stor_dbg(us, " Media Eject\n");
			/* eject uses TransferBlockSize 0 */
			isd200_build_simple_cdb(info, ataCdb,
					ATA_COMMAND_MEDIA_EJECT, 0);
		} else if ((srb->cmnd[4] & 0x3) == 0x1) {
			usb_stor_dbg(us, " Get Media Status\n");
			isd200_build_simple_cdb(info, ataCdb,
					ATA_COMMAND_GET_MEDIA_STATUS, 1);
			isd200_srb_set_bufflen(srb, 0);
		} else {
			usb_stor_dbg(us, " Nothing to do, just report okay\n");
			srb->result = SAM_STAT_GOOD;
			sendToTransport = 0;
		}
		break;

	default:
		usb_stor_dbg(us, "Unsupported SCSI command - 0x%X\n",
			     srb->cmnd[0]);
		srb->result = DID_ERROR << 16;
		sendToTransport = 0;
		break;
	}

	return(sendToTransport);
}
/**************************************************************************
* isd200_free_info
*
* Frees the driver structure.
*/
/*
 * Destructor for the per-device isd200_info: release the buffers hanging
 * off the structure (the structure itself is freed by the caller/core).
 */
static void isd200_free_info_ptrs(void *info_)
{
	struct isd200_info *info = (struct isd200_info *) info_;

	if (!info)
		return;

	kfree(info->id);
	kfree(info->RegsBuf);
	kfree(info->srb.sense_buffer);
}
/**************************************************************************
* isd200_init_info
*
* Allocates (if necessary) and initializes the driver structure.
*
* RETURNS:
* error status code
*/
/*
 * Allocate and wire up the per-device isd200_info and its buffers.
 * Returns 0 on success or -ENOMEM; on failure nothing is left allocated.
 */
static int isd200_init_info(struct us_data *us)
{
	struct isd200_info *info;

	info = kzalloc(sizeof(*info), GFP_KERNEL);
	if (!info)
		return -ENOMEM;

	info->id = kzalloc(ATA_ID_WORDS * 2, GFP_KERNEL);
	info->RegsBuf = kmalloc(sizeof(info->ATARegs), GFP_KERNEL);
	info->srb.sense_buffer = kmalloc(SCSI_SENSE_BUFFERSIZE, GFP_KERNEL);

	/* if any sub-allocation failed, tear everything down again */
	if (!info->id || !info->RegsBuf || !info->srb.sense_buffer) {
		isd200_free_info_ptrs(info);
		kfree(info);
		return -ENOMEM;
	}

	us->extra = info;
	us->extra_destructor = isd200_free_info_ptrs;

	return 0;
}
/**************************************************************************
* Initialization for the ISD200
*/
static int isd200_Initialization(struct us_data *us)
{
usb_stor_dbg(us, "ISD200 Initialization...\n");
/* Initialize ISD200 info struct */
if (isd200_init_info(us) == ISD200_ERROR) {
usb_stor_dbg(us, "ERROR Initializing ISD200 Info struct\n");
} else {
/* Get device specific data */
if (isd200_get_inquiry_data(us) != ISD200_GOOD)
usb_stor_dbg(us, "ISD200 Initialization Failure\n");
else
usb_stor_dbg(us, "ISD200 Initialization complete\n");
}
return 0;
}
/**************************************************************************
* Protocol and Transport for the ISD200 ASIC
*
* This protocol and transport are for ATA devices connected to an ISD200
* ASIC. An ATAPI device that is connected as a slave device will be
* detected in the driver initialization function and the protocol will
* be changed to an ATAPI protocol (Transparent SCSI).
*
*/
/*
 * Protocol handler: translate the SCSI command to an ATA CDB and, when
 * required, hand it to the transport; restore the original buffer length
 * afterwards since the translation may shrink it.
 */
static void isd200_ata_command(struct scsi_cmnd *srb, struct us_data *us)
{
	union ata_cdb ataCdb;
	int orig_bufflen;

	/* Bail out if the driver state was never set up */
	if (!us->extra) {
		usb_stor_dbg(us, "ERROR Driver not initialized\n");
		srb->result = DID_ERROR << 16;
		return;
	}

	scsi_set_resid(srb, 0);
	/* scsi_bufflen might change in protocol translation to ata */
	orig_bufflen = scsi_bufflen(srb);

	if (isd200_scsi_to_ata(srb, us, &ataCdb))
		isd200_invoke_transport(us, srb, &ataCdb);

	isd200_srb_set_bufflen(srb, orig_bufflen);
}
static struct scsi_host_template isd200_host_template;
/* Bind a matching interface: run the generic probe, install our handler */
static int isd200_probe(struct usb_interface *intf,
			 const struct usb_device_id *id)
{
	struct us_data *us;
	int result;

	result = usb_stor_probe1(&us, intf, id,
			(id - isd200_usb_ids) + isd200_unusual_dev_list,
			&isd200_host_template);
	if (result)
		return result;

	us->protocol_name = "ISD200 ATA/ATAPI";
	us->proto_handler = isd200_ata_command;

	return usb_stor_probe2(us);
}
/* USB driver glue: standard usb-storage callbacks plus the ISD200 id table */
static struct usb_driver isd200_driver = {
	.name = DRV_NAME,
	.probe = isd200_probe,
	.disconnect = usb_stor_disconnect,
	.suspend = usb_stor_suspend,
	.resume = usb_stor_resume,
	.reset_resume = usb_stor_reset_resume,
	.pre_reset = usb_stor_pre_reset,
	.post_reset = usb_stor_post_reset,
	.id_table = isd200_usb_ids,
	.soft_unbind = 1,
	.no_dynamic_id = 1,
};
module_usb_stor_driver(isd200_driver, isd200_host_template, DRV_NAME);
| linux-master | drivers/usb/storage/isd200.c |
// SPDX-License-Identifier: GPL-2.0
/*
* LED Triggers for USB Activity
*
* Copyright 2014 Michal Sojka <[email protected]>
*/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/leds.h>
#include <linux/usb.h>
#include "common.h"
#define BLINK_DELAY 30
DEFINE_LED_TRIGGER(ledtrig_usb_gadget);
DEFINE_LED_TRIGGER(ledtrig_usb_host);
/*
 * Blink the LED trigger matching the given USB activity event.
 * Unknown events fall through with a NULL trigger, which the LED core
 * accepts as a no-op.
 */
void usb_led_activity(enum usb_led_event ev)
{
	struct led_trigger *trig;

	if (ev == USB_LED_EVENT_GADGET)
		trig = ledtrig_usb_gadget;
	else if (ev == USB_LED_EVENT_HOST)
		trig = ledtrig_usb_host;
	else
		trig = NULL;

	/* led_trigger_blink_oneshot() handles trig == NULL gracefully */
	led_trigger_blink_oneshot(trig, BLINK_DELAY, BLINK_DELAY, 0);
}
EXPORT_SYMBOL_GPL(usb_led_activity);
/* Register the two simple LED triggers; called from usb_common_init() */
void __init ledtrig_usb_init(void)
{
	led_trigger_register_simple("usb-gadget", &ledtrig_usb_gadget);
	led_trigger_register_simple("usb-host", &ledtrig_usb_host);
}

/* Tear down the triggers registered above; called from usb_common_exit() */
void __exit ledtrig_usb_exit(void)
{
	led_trigger_unregister_simple(ledtrig_usb_gadget);
	led_trigger_unregister_simple(ledtrig_usb_host);
}
| linux-master | drivers/usb/common/led.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Provides code common for host and device side USB.
*
* If either host side (ie. CONFIG_USB=y) or device side USB stack
* (ie. CONFIG_USB_GADGET=y) is compiled in the kernel, this module is
* compiled-in as well. Otherwise, if either of the two stacks is
* compiled as module, this file is compiled as module as well.
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/usb/ch9.h>
#include <linux/usb/of.h>
#include <linux/usb/otg.h>
#include <linux/of_platform.h>
#include <linux/debugfs.h>
#include "common.h"
static const char *const ep_type_names[] = {
	[USB_ENDPOINT_XFER_CONTROL] = "ctrl",
	[USB_ENDPOINT_XFER_ISOC] = "isoc",
	[USB_ENDPOINT_XFER_BULK] = "bulk",
	[USB_ENDPOINT_XFER_INT] = "intr",
};

/**
 * usb_ep_type_string() - Returns human readable-name of the endpoint type.
 * @ep_type: The endpoint type to return human-readable name for. If it's not
 *   any of the types: USB_ENDPOINT_XFER_{CONTROL, ISOC, BULK, INT},
 *   usually got by usb_endpoint_type(), the string 'unknown' will be returned.
 */
const char *usb_ep_type_string(int ep_type)
{
	if (ep_type >= 0 && ep_type < ARRAY_SIZE(ep_type_names))
		return ep_type_names[ep_type];

	return "unknown";
}
EXPORT_SYMBOL_GPL(usb_ep_type_string);
/*
 * usb_otg_state_string - map an OTG state machine state to its name.
 * States outside the table (including OTG_STATE_UNDEFINED, which has no
 * entry) yield "UNDEFINED".
 */
const char *usb_otg_state_string(enum usb_otg_state state)
{
	static const char *const names[] = {
		[OTG_STATE_A_IDLE] = "a_idle",
		[OTG_STATE_A_WAIT_VRISE] = "a_wait_vrise",
		[OTG_STATE_A_WAIT_BCON] = "a_wait_bcon",
		[OTG_STATE_A_HOST] = "a_host",
		[OTG_STATE_A_SUSPEND] = "a_suspend",
		[OTG_STATE_A_PERIPHERAL] = "a_peripheral",
		[OTG_STATE_A_WAIT_VFALL] = "a_wait_vfall",
		[OTG_STATE_A_VBUS_ERR] = "a_vbus_err",
		[OTG_STATE_B_IDLE] = "b_idle",
		[OTG_STATE_B_SRP_INIT] = "b_srp_init",
		[OTG_STATE_B_PERIPHERAL] = "b_peripheral",
		[OTG_STATE_B_WAIT_ACON] = "b_wait_acon",
		[OTG_STATE_B_HOST] = "b_host",
	};

	if (state < 0 || state >= ARRAY_SIZE(names))
		return "UNDEFINED";

	return names[state];
}
EXPORT_SYMBOL_GPL(usb_otg_state_string);
/* Names indexed by enum usb_device_speed */
static const char *const speed_names[] = {
	[USB_SPEED_UNKNOWN] = "UNKNOWN",
	[USB_SPEED_LOW] = "low-speed",
	[USB_SPEED_FULL] = "full-speed",
	[USB_SPEED_HIGH] = "high-speed",
	[USB_SPEED_WIRELESS] = "wireless",
	[USB_SPEED_SUPER] = "super-speed",
	[USB_SPEED_SUPER_PLUS] = "super-speed-plus",
};

/* Names indexed by enum usb_ssp_rate (gen X x Y lanes) */
static const char *const ssp_rate[] = {
	[USB_SSP_GEN_UNKNOWN] = "UNKNOWN",
	[USB_SSP_GEN_2x1] = "super-speed-plus-gen2x1",
	[USB_SSP_GEN_1x2] = "super-speed-plus-gen1x2",
	[USB_SSP_GEN_2x2] = "super-speed-plus-gen2x2",
};
/**
 * usb_speed_string() - Returns human readable-name of the speed.
 * @speed: The speed to return human-readable name for. If it's not
 * any of the speeds defined in usb_device_speed enum, string for
 * USB_SPEED_UNKNOWN will be returned.
 */
const char *usb_speed_string(enum usb_device_speed speed)
{
	if (speed < 0 || speed >= ARRAY_SIZE(speed_names))
		return speed_names[USB_SPEED_UNKNOWN];

	return speed_names[speed];
}
EXPORT_SYMBOL_GPL(usb_speed_string);
/**
 * usb_get_maximum_speed - Get maximum requested speed for a given USB
 * controller.
 * @dev: Pointer to the given USB controller device
 *
 * The function gets the maximum speed string from property "maximum-speed",
 * and returns the corresponding enum usb_device_speed.
 */
enum usb_device_speed usb_get_maximum_speed(struct device *dev)
{
	const char *maximum_speed;
	int ret;

	ret = device_property_read_string(dev, "maximum-speed", &maximum_speed);
	if (ret < 0)
		return USB_SPEED_UNKNOWN;

	/*
	 * Any ssp_rate entry beyond index 0 ("UNKNOWN") is a flavour of
	 * SuperSpeed Plus; collapse them all to USB_SPEED_SUPER_PLUS.
	 */
	ret = match_string(ssp_rate, ARRAY_SIZE(ssp_rate), maximum_speed);
	if (ret > 0)
		return USB_SPEED_SUPER_PLUS;

	/* match_string() index doubles as the enum usb_device_speed value */
	ret = match_string(speed_names, ARRAY_SIZE(speed_names), maximum_speed);
	return (ret < 0) ? USB_SPEED_UNKNOWN : ret;
}
EXPORT_SYMBOL_GPL(usb_get_maximum_speed);
/**
 * usb_get_maximum_ssp_rate - Get the signaling rate generation and lane count
 *	of a SuperSpeed Plus capable device.
 * @dev: Pointer to the given USB controller device
 *
 * If the string from "maximum-speed" property is super-speed-plus-genXxY where
 * 'X' is the generation number and 'Y' is the number of lanes, then this
 * function returns the corresponding enum usb_ssp_rate.
 */
enum usb_ssp_rate usb_get_maximum_ssp_rate(struct device *dev)
{
	const char *maximum_speed;
	int ret;

	ret = device_property_read_string(dev, "maximum-speed", &maximum_speed);
	if (ret < 0)
		return USB_SSP_GEN_UNKNOWN;

	/* match_string() index doubles as the enum usb_ssp_rate value */
	ret = match_string(ssp_rate, ARRAY_SIZE(ssp_rate), maximum_speed);
	return (ret < 0) ? USB_SSP_GEN_UNKNOWN : ret;
}
EXPORT_SYMBOL_GPL(usb_get_maximum_ssp_rate);
/**
* usb_state_string - Returns human readable name for the state.
* @state: The state to return a human-readable name for. If it's not
* any of the states devices in usb_device_state_string enum,
* the string UNKNOWN will be returned.
*/
const char *usb_state_string(enum usb_device_state state)
{
static const char *const names[] = {
[USB_STATE_NOTATTACHED] = "not attached",
[USB_STATE_ATTACHED] = "attached",
[USB_STATE_POWERED] = "powered",
[USB_STATE_RECONNECTING] = "reconnecting",
[USB_STATE_UNAUTHENTICATED] = "unauthenticated",
[USB_STATE_DEFAULT] = "default",
[USB_STATE_ADDRESS] = "addressed",
[USB_STATE_CONFIGURED] = "configured",
[USB_STATE_SUSPENDED] = "suspended",
};
if (state < 0 || state >= ARRAY_SIZE(names))
return "UNKNOWN";
return names[state];
}
EXPORT_SYMBOL_GPL(usb_state_string);
/* Names indexed by enum usb_dr_mode; UNKNOWN maps to the empty string */
static const char *const usb_dr_modes[] = {
	[USB_DR_MODE_UNKNOWN]		= "",
	[USB_DR_MODE_HOST]		= "host",
	[USB_DR_MODE_PERIPHERAL]	= "peripheral",
	[USB_DR_MODE_OTG]		= "otg",
};

/* Convert a dr_mode property string to enum usb_dr_mode */
static enum usb_dr_mode usb_get_dr_mode_from_string(const char *str)
{
	int idx;

	idx = match_string(usb_dr_modes, ARRAY_SIZE(usb_dr_modes), str);
	if (idx < 0)
		return USB_DR_MODE_UNKNOWN;

	/* the table index doubles as the enum value */
	return idx;
}
/*
 * usb_get_dr_mode - read the "dr_mode" device property and translate it to
 * enum usb_dr_mode; USB_DR_MODE_UNKNOWN when the property is absent or the
 * string does not match any known mode.
 */
enum usb_dr_mode usb_get_dr_mode(struct device *dev)
{
	const char *dr_mode;
	int err;

	err = device_property_read_string(dev, "dr_mode", &dr_mode);
	if (err < 0)
		return USB_DR_MODE_UNKNOWN;

	return usb_get_dr_mode_from_string(dr_mode);
}
EXPORT_SYMBOL_GPL(usb_get_dr_mode);
/**
 * usb_get_role_switch_default_mode - Get default mode for given device
 * @dev: Pointer to the given device
 *
 * The function gets string from property 'role-switch-default-mode',
 * and returns the corresponding enum usb_dr_mode.
 */
enum usb_dr_mode usb_get_role_switch_default_mode(struct device *dev)
{
	const char *str;
	int ret;

	ret = device_property_read_string(dev, "role-switch-default-mode", &str);
	if (ret < 0)
		return USB_DR_MODE_UNKNOWN;

	/* the property shares its value set with "dr_mode" */
	return usb_get_dr_mode_from_string(str);
}
EXPORT_SYMBOL_GPL(usb_get_role_switch_default_mode);
/**
 * usb_decode_interval - Decode bInterval into the time expressed in 1us unit
 * @epd: The descriptor of the endpoint
 * @speed: The speed that the endpoint works as
 *
 * Function returns the interval expressed in 1us unit for servicing
 * endpoint for data transfers.
 */
unsigned int usb_decode_interval(const struct usb_endpoint_descriptor *epd,
				 enum usb_device_speed speed)
{
	unsigned int interval = 0;

	switch (usb_endpoint_type(epd)) {
	case USB_ENDPOINT_XFER_CONTROL:
		/* uframes per NAK */
		if (speed == USB_SPEED_HIGH)
			interval = epd->bInterval;
		break;
	case USB_ENDPOINT_XFER_ISOC:
		/* isoc bInterval is an exponent: period = 2^(bInterval-1) */
		interval = 1 << (epd->bInterval - 1);
		break;
	case USB_ENDPOINT_XFER_BULK:
		/* uframes per NAK */
		if (speed == USB_SPEED_HIGH && usb_endpoint_dir_out(epd))
			interval = epd->bInterval;
		break;
	case USB_ENDPOINT_XFER_INT:
		/* HS and above use the exponent encoding, FS/LS a plain count */
		if (speed >= USB_SPEED_HIGH)
			interval = 1 << (epd->bInterval - 1);
		else
			interval = epd->bInterval;
		break;
	}

	/* HS+ counts microframes (125us), FS/LS counts frames (1ms) */
	interval *= (speed >= USB_SPEED_HIGH) ? 125 : 1000;

	return interval;
}
EXPORT_SYMBOL_GPL(usb_decode_interval);
#ifdef CONFIG_OF
/**
 * of_usb_get_dr_mode_by_phy - Get dual role mode for the controller device
 * which is associated with the given phy device_node
 * @np:	Pointer to the given phy device_node
 * @arg0: phandle args[0] for phy's with #phy-cells >= 1, or -1 for
 *	phys which do not have phy-cells
 *
 * In dts a usb controller associates with phy devices.  The function gets
 * the string from property 'dr_mode' of the controller associated with the
 * given phy device node, and returns the correspondig enum usb_dr_mode.
 */
enum usb_dr_mode of_usb_get_dr_mode_by_phy(struct device_node *np, int arg0)
{
	struct device_node *controller = NULL;
	struct of_phandle_args args;
	const char *dr_mode;
	int index;
	int err;

	/* Scan every node carrying a "phys" property ... */
	do {
		controller = of_find_node_with_property(controller, "phys");
		if (!of_device_is_available(controller))
			continue;
		index = 0;
		/* ... and every phandle inside that property ... */
		do {
			if (arg0 == -1) {
				args.np = of_parse_phandle(controller, "phys",
							index);
				args.args_count = 0;
			} else {
				err = of_parse_phandle_with_args(controller,
							"phys", "#phy-cells",
							index, &args);
				if (err)
					break;
			}

			of_node_put(args.np);
			/* ... until one resolves to @np (and @arg0 if given) */
			if (args.np == np && (args.args_count == 0 ||
					      args.args[0] == arg0))
				goto finish;
			index++;
		} while (args.np);
	} while (controller);

finish:
	/*
	 * NOTE(review): controller may be NULL if no match was found;
	 * presumably of_property_read_string() then fails and we fall
	 * through to USB_DR_MODE_UNKNOWN - confirm against the OF API.
	 */
	err = of_property_read_string(controller, "dr_mode", &dr_mode);
	of_node_put(controller);

	if (err < 0)
		return USB_DR_MODE_UNKNOWN;

	return usb_get_dr_mode_from_string(dr_mode);
}
EXPORT_SYMBOL_GPL(of_usb_get_dr_mode_by_phy);
/**
 * of_usb_host_tpl_support - to get if Targeted Peripheral List is supported
 * for given targeted hosts (non-PC hosts)
 * @np: Pointer to the given device_node
 *
 * The function gets if the targeted hosts support TPL or not
 */
bool of_usb_host_tpl_support(struct device_node *np)
{
	/* boolean DT property: presence alone means "supported" */
	return of_property_read_bool(np, "tpl-support");
}
EXPORT_SYMBOL_GPL(of_usb_host_tpl_support);
/**
 * of_usb_update_otg_caps - to update usb otg capabilities according to
 * the passed properties in DT.
 * @np: Pointer to the given device_node
 * @otg_caps: Pointer to the target usb_otg_caps to be set
 *
 * The function updates the otg capabilities
 */
int of_usb_update_otg_caps(struct device_node *np,
			struct usb_otg_caps *otg_caps)
{
	u32 otg_rev;

	if (!otg_caps)
		return -EINVAL;

	if (!of_property_read_u32(np, "otg-rev", &otg_rev)) {
		/* only the OTG spec revisions below are accepted */
		switch (otg_rev) {
		case 0x0100:
		case 0x0120:
		case 0x0130:
		case 0x0200:
			/* Choose the lesser one if it's already been set */
			if (otg_caps->otg_rev)
				otg_caps->otg_rev = min_t(u16, otg_rev,
							otg_caps->otg_rev);
			else
				otg_caps->otg_rev = otg_rev;
			break;
		default:
			pr_err("%pOF: unsupported otg-rev: 0x%x\n",
						np, otg_rev);
			return -EINVAL;
		}
	} else {
		/*
		 * otg-rev is mandatory for otg properties, if not passed
		 * we set it to be 0 and assume it's a legacy otg device.
		 * Non-dt platform can set it afterwards.
		 */
		otg_caps->otg_rev = 0;
	}

	/* the *-disable properties can only clear capabilities, never set */
	if (of_property_read_bool(np, "hnp-disable"))
		otg_caps->hnp_support = false;
	if (of_property_read_bool(np, "srp-disable"))
		otg_caps->srp_support = false;
	if (of_property_read_bool(np, "adp-disable") ||
				(otg_caps->otg_rev < 0x0200))
		otg_caps->adp_support = false;

	return 0;
}
EXPORT_SYMBOL_GPL(of_usb_update_otg_caps);
/**
 * usb_of_get_companion_dev - Find the companion device
 * @dev: the device pointer to find a companion
 *
 * Find the companion device from platform bus.
 *
 * Takes a reference to the returned struct device which needs to be dropped
 * after use.
 *
 * Return: On success, a pointer to the companion device, %NULL on failure.
 */
struct device *usb_of_get_companion_dev(struct device *dev)
{
	struct device_node *node;
	struct platform_device *pdev = NULL;

	/* "companion" phandle points at the partner controller's node */
	node = of_parse_phandle(dev->of_node, "companion", 0);
	if (node)
		pdev = of_find_device_by_node(node);

	of_node_put(node);

	return pdev ? &pdev->dev : NULL;
}
EXPORT_SYMBOL_GPL(usb_of_get_companion_dev);
#endif
/* Root of the shared "usb" debugfs directory used by host and gadget code */
struct dentry *usb_debug_root;
EXPORT_SYMBOL_GPL(usb_debug_root);

/* Create the shared debugfs root and register the LED activity triggers */
static int __init usb_common_init(void)
{
	usb_debug_root = debugfs_create_dir("usb", NULL);
	ledtrig_usb_init();
	return 0;
}

/* Reverse of usb_common_init(), in the opposite order */
static void __exit usb_common_exit(void)
{
	ledtrig_usb_exit();
	debugfs_remove_recursive(usb_debug_root);
}

subsys_initcall(usb_common_init);
module_exit(usb_common_exit);
MODULE_LICENSE("GPL");
| linux-master | drivers/usb/common/common.c |
// SPDX-License-Identifier: GPL-2.0
/*
* ulpi.c - USB ULPI PHY bus
*
* Copyright (C) 2015 Intel Corporation
*
* Author: Heikki Krogerus <[email protected]>
*/
#include <linux/ulpi/interface.h>
#include <linux/ulpi/driver.h>
#include <linux/ulpi/regs.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/acpi.h>
#include <linux/debugfs.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/clk/clk-conf.h>
/* -------------------------------------------------------------------------- */
/* Read one ULPI register via the controller-provided accessor */
int ulpi_read(struct ulpi *ulpi, u8 addr)
{
	/* dev.parent is the controller that registered the ops */
	return ulpi->ops->read(ulpi->dev.parent, addr);
}
EXPORT_SYMBOL_GPL(ulpi_read);

/* Write one ULPI register via the controller-provided accessor */
int ulpi_write(struct ulpi *ulpi, u8 addr, u8 val)
{
	return ulpi->ops->write(ulpi->dev.parent, addr, val);
}
EXPORT_SYMBOL_GPL(ulpi_write);
/* -------------------------------------------------------------------------- */
/*
 * ulpi_match - bus match callback: pair a ULPI device with a driver either
 * via the driver's vendor/product id_table or, failing that, via OF.
 */
static int ulpi_match(struct device *dev, struct device_driver *driver)
{
	struct ulpi_driver *drv = to_ulpi_driver(driver);
	struct ulpi *ulpi = to_ulpi_dev(dev);
	const struct ulpi_device_id *id;

	/*
	 * Some ULPI devices don't have a vendor id
	 * or provide an id_table so rely on OF match.
	 */
	if (ulpi->id.vendor == 0 || !drv->id_table)
		return of_driver_match_device(dev, driver);

	/* id_table is terminated by an entry with vendor == 0 */
	for (id = drv->id_table; id->vendor; id++)
		if (id->vendor == ulpi->id.vendor &&
		    id->product == ulpi->id.product)
			return 1;

	return 0;
}
/*
 * ulpi_uevent - emit a MODALIAS for hotplug: prefer the OF modalias,
 * fall back to the ulpi:vVVVVpPPPP form built from the probed IDs.
 */
static int ulpi_uevent(const struct device *dev, struct kobj_uevent_env *env)
{
	const struct ulpi *ulpi = to_ulpi_dev(dev);
	int ret;

	/* -ENODEV means "no OF node"; anything else is the final answer */
	ret = of_device_uevent_modalias(dev, env);
	if (ret != -ENODEV)
		return ret;

	if (add_uevent_var(env, "MODALIAS=ulpi:v%04xp%04x",
			   ulpi->id.vendor, ulpi->id.product))
		return -ENOMEM;
	return 0;
}
/*
 * ulpi_probe - bus probe: apply DT-declared clock defaults, then hand
 * over to the matched driver's probe.
 */
static int ulpi_probe(struct device *dev)
{
	struct ulpi_driver *drv = to_ulpi_driver(dev->driver);
	int ret;

	ret = of_clk_set_defaults(dev->of_node, false);
	if (ret < 0)
		return ret;

	return drv->probe(to_ulpi_dev(dev));
}

/* Bus remove: driver's remove callback is optional */
static void ulpi_remove(struct device *dev)
{
	struct ulpi_driver *drv = to_ulpi_driver(dev->driver);

	if (drv->remove)
		drv->remove(to_ulpi_dev(dev));
}

static const struct bus_type ulpi_bus = {
	.name = "ulpi",
	.match = ulpi_match,
	.uevent = ulpi_uevent,
	.probe = ulpi_probe,
	.remove = ulpi_remove,
};
/* -------------------------------------------------------------------------- */
/*
 * modalias sysfs attribute: mirror the uevent logic - OF modalias first,
 * then the ulpi:vVVVVpPPPP fallback.
 */
static ssize_t modalias_show(struct device *dev, struct device_attribute *attr,
			     char *buf)
{
	int len;
	struct ulpi *ulpi = to_ulpi_dev(dev);

	len = of_device_modalias(dev, buf, PAGE_SIZE);
	if (len != -ENODEV)
		return len;

	return sprintf(buf, "ulpi:v%04xp%04x\n",
		       ulpi->id.vendor, ulpi->id.product);
}
static DEVICE_ATTR_RO(modalias);

static struct attribute *ulpi_dev_attrs[] = {
	&dev_attr_modalias.attr,
	NULL
};

static const struct attribute_group ulpi_dev_attr_group = {
	.attrs = ulpi_dev_attrs,
};

static const struct attribute_group *ulpi_dev_attr_groups[] = {
	&ulpi_dev_attr_group,
	NULL
};

/* Device release: drop the OF node reference taken in ulpi_of_register() */
static void ulpi_dev_release(struct device *dev)
{
	of_node_put(dev->of_node);
	kfree(to_ulpi_dev(dev));
}

static const struct device_type ulpi_dev_type = {
	.name = "ulpi_device",
	.groups = ulpi_dev_attr_groups,
	.release = ulpi_dev_release,
};
/* -------------------------------------------------------------------------- */
/**
 * __ulpi_register_driver - register a driver with the ULPI bus
 * @drv: driver being registered
 * @module: ends up being THIS_MODULE
 *
 * Registers a driver with the ULPI bus.
 */
int __ulpi_register_driver(struct ulpi_driver *drv, struct module *module)
{
	/* a probe callback is mandatory (see ulpi_probe()) */
	if (!drv->probe)
		return -EINVAL;

	drv->driver.owner = module;
	drv->driver.bus = &ulpi_bus;

	return driver_register(&drv->driver);
}
EXPORT_SYMBOL_GPL(__ulpi_register_driver);

/**
 * ulpi_unregister_driver - unregister a driver with the ULPI bus
 * @drv: driver to unregister
 *
 * Unregisters a driver with the ULPI bus.
 */
void ulpi_unregister_driver(struct ulpi_driver *drv)
{
	driver_unregister(&drv->driver);
}
EXPORT_SYMBOL_GPL(ulpi_unregister_driver);
/* -------------------------------------------------------------------------- */
/*
 * ulpi_of_register - attach the DT node for this PHY, if there is one.
 *
 * Looks for a "ulpi" child node under the parent (or grandparent)
 * controller node and adopts its first available child as the PHY's
 * of_node.  No node at all is fine (return 0); a "ulpi" node with no
 * usable child is an error.
 */
static int ulpi_of_register(struct ulpi *ulpi)
{
	struct device_node *np = NULL, *child;
	struct device *parent;

	/* Find a ulpi bus underneath the parent or the grandparent */
	parent = ulpi->dev.parent;
	if (parent->of_node)
		np = of_get_child_by_name(parent->of_node, "ulpi");
	else if (parent->parent && parent->parent->of_node)
		np = of_get_child_by_name(parent->parent->of_node, "ulpi");
	if (!np)
		return 0;

	child = of_get_next_available_child(np, NULL);
	of_node_put(np);
	if (!child)
		return -EINVAL;

	/* reference on child is kept; dropped in ulpi_dev_release() */
	ulpi->dev.of_node = child;

	return 0;
}
/*
 * ulpi_read_id - probe the PHY's vendor/product IDs.
 *
 * First validates the register interface with a scratch write/readback.
 * Interface failures (and a vendor id of 0) fall back to requesting a
 * module by OF modalias; only a failing scratch *read* propagates its
 * error code to the caller.
 */
static int ulpi_read_id(struct ulpi *ulpi)
{
	int ret;

	/* Test the interface */
	ret = ulpi_write(ulpi, ULPI_SCRATCH, 0xaa);
	if (ret < 0)
		goto err;

	ret = ulpi_read(ulpi, ULPI_SCRATCH);
	if (ret < 0)
		return ret;

	/* readback mismatch: interface unusable, rely on OF matching */
	if (ret != 0xaa)
		goto err;

	ulpi->id.vendor = ulpi_read(ulpi, ULPI_VENDOR_ID_LOW);
	ulpi->id.vendor |= ulpi_read(ulpi, ULPI_VENDOR_ID_HIGH) << 8;

	ulpi->id.product = ulpi_read(ulpi, ULPI_PRODUCT_ID_LOW);
	ulpi->id.product |= ulpi_read(ulpi, ULPI_PRODUCT_ID_HIGH) << 8;

	/* Some ULPI devices don't have a vendor id so rely on OF match */
	if (ulpi->id.vendor == 0)
		goto err;

	request_module("ulpi:v%04xp%04x", ulpi->id.vendor, ulpi->id.product);
	return 0;
err:
	of_request_module(ulpi->dev.of_node);
	return 0;
}
/*
 * ulpi_regs_show - debugfs "regs" file: dump every standard ULPI
 * register.  A failing register read aborts the dump with its error.
 */
static int ulpi_regs_show(struct seq_file *seq, void *data)
{
	struct ulpi *ulpi = seq->private;

	/* read one register and print it, or bail out on read error */
#define ulpi_print(name, reg) do { \
	int ret = ulpi_read(ulpi, reg); \
	if (ret < 0) \
		return ret; \
	seq_printf(seq, name " %.02x\n", ret); \
} while (0)

	ulpi_print("Vendor ID Low               ", ULPI_VENDOR_ID_LOW);
	ulpi_print("Vendor ID High              ", ULPI_VENDOR_ID_HIGH);
	ulpi_print("Product ID Low              ", ULPI_PRODUCT_ID_LOW);
	ulpi_print("Product ID High             ", ULPI_PRODUCT_ID_HIGH);
	ulpi_print("Function Control            ", ULPI_FUNC_CTRL);
	ulpi_print("Interface Control           ", ULPI_IFC_CTRL);
	ulpi_print("OTG Control                 ", ULPI_OTG_CTRL);
	ulpi_print("USB Interrupt Enable Rising ", ULPI_USB_INT_EN_RISE);
	ulpi_print("USB Interrupt Enable Falling", ULPI_USB_INT_EN_FALL);
	ulpi_print("USB Interrupt Status        ", ULPI_USB_INT_STS);
	ulpi_print("USB Interrupt Latch         ", ULPI_USB_INT_LATCH);
	ulpi_print("Debug                       ", ULPI_DEBUG);
	ulpi_print("Scratch Register            ", ULPI_SCRATCH);
	ulpi_print("Carkit Control              ", ULPI_CARKIT_CTRL);
	ulpi_print("Carkit Interrupt Delay      ", ULPI_CARKIT_INT_DELAY);
	ulpi_print("Carkit Interrupt Enable     ", ULPI_CARKIT_INT_EN);
	ulpi_print("Carkit Interrupt Status     ", ULPI_CARKIT_INT_STS);
	ulpi_print("Carkit Interrupt Latch      ", ULPI_CARKIT_INT_LATCH);
	ulpi_print("Carkit Pulse Control        ", ULPI_CARKIT_PLS_CTRL);
	ulpi_print("Transmit Positive Width     ", ULPI_TX_POS_WIDTH);
	ulpi_print("Transmit Negative Width     ", ULPI_TX_NEG_WIDTH);
	ulpi_print("Receive Polarity Recovery   ", ULPI_POLARITY_RECOVERY);

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(ulpi_regs);

/* per-module debugfs directory, created in ulpi_init() */
static struct dentry *ulpi_root;
/*
 * ulpi_register - initialize and register one ULPI device.
 *
 * Ordering matters: dev.parent must be set before ulpi_read_id() because
 * the register accessors go through it; the OF node must be attached
 * before the ID probe so the modalias fallback works.
 */
static int ulpi_register(struct device *dev, struct ulpi *ulpi)
{
	int ret;
	struct dentry *root;

	ulpi->dev.parent = dev; /* needed early for ops */
	ulpi->dev.bus = &ulpi_bus;
	ulpi->dev.type = &ulpi_dev_type;
	dev_set_name(&ulpi->dev, "%s.ulpi", dev_name(dev));

	ACPI_COMPANION_SET(&ulpi->dev, ACPI_COMPANION(dev));

	ret = ulpi_of_register(ulpi);
	if (ret)
		return ret;

	ret = ulpi_read_id(ulpi);
	if (ret) {
		/* of_node reference was taken by ulpi_of_register() */
		of_node_put(ulpi->dev.of_node);
		return ret;
	}

	ret = device_register(&ulpi->dev);
	if (ret) {
		/* release() via put_device() frees ulpi and drops of_node */
		put_device(&ulpi->dev);
		return ret;
	}

	root = debugfs_create_dir(dev_name(dev), ulpi_root);
	debugfs_create_file("regs", 0444, root, ulpi, &ulpi_regs_fops);

	dev_dbg(&ulpi->dev, "registered ULPI PHY: vendor %04x, product %04x\n",
		ulpi->id.vendor, ulpi->id.product);

	return 0;
}
/**
 * ulpi_register_interface - instantiate new ULPI device
 * @dev: USB controller's device interface
 * @ops: ULPI register access
 *
 * Allocates and registers a ULPI device and an interface for it. Called from
 * the USB controller that provides the ULPI interface.
 */
struct ulpi *ulpi_register_interface(struct device *dev,
				     const struct ulpi_ops *ops)
{
	struct ulpi *ulpi;
	int ret;

	ulpi = kzalloc(sizeof(*ulpi), GFP_KERNEL);
	if (!ulpi)
		return ERR_PTR(-ENOMEM);

	ulpi->ops = ops;

	ret = ulpi_register(dev, ulpi);
	if (ret) {
		/*
		 * NOTE(review): on the device_register() failure path
		 * ulpi_register() already put_device()s the embedded
		 * device; this kfree() applies to the earlier failure
		 * paths that never reached device initialization.
		 */
		kfree(ulpi);
		return ERR_PTR(ret);
	}

	return ulpi;
}
EXPORT_SYMBOL_GPL(ulpi_register_interface);

/**
 * ulpi_unregister_interface - unregister ULPI interface
 * @ulpi: struct ulpi_interface
 *
 * Unregisters a ULPI device and it's interface that was created with
 * ulpi_create_interface().
 */
void ulpi_unregister_interface(struct ulpi *ulpi)
{
	/* remove the per-device debugfs dir created in ulpi_register() */
	debugfs_lookup_and_remove(dev_name(&ulpi->dev), ulpi_root);
	device_unregister(&ulpi->dev);
}
EXPORT_SYMBOL_GPL(ulpi_unregister_interface);
/* -------------------------------------------------------------------------- */
/* Create the debugfs root and register the bus; undo debugfs on failure */
static int __init ulpi_init(void)
{
	int ret;

	ulpi_root = debugfs_create_dir(KBUILD_MODNAME, NULL);
	ret = bus_register(&ulpi_bus);
	if (ret)
		debugfs_remove(ulpi_root);
	return ret;
}
subsys_initcall(ulpi_init);

/* Reverse of ulpi_init() */
static void __exit ulpi_exit(void)
{
	bus_unregister(&ulpi_bus);
	debugfs_remove(ulpi_root);
}
module_exit(ulpi_exit);

MODULE_AUTHOR("Intel Corporation");
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("USB ULPI PHY bus");
| linux-master | drivers/usb/common/ulpi.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Common USB debugging functions
*
* Copyright (C) 2010-2011 Texas Instruments Incorporated - https://www.ti.com
*
* Authors: Felipe Balbi <[email protected]>,
* Sebastian Andrzej Siewior <[email protected]>
*/
#include <linux/kernel.h>
#include <linux/usb/ch9.h>
/*
 * Format a GET_STATUS request into @str, dispatching on the recipient
 * encoded in bmRequestType.  Unknown recipients leave @str untouched.
 */
static void usb_decode_get_status(__u8 bRequestType, __u16 wIndex,
				  __u16 wLength, char *str, size_t size)
{
	switch (bRequestType & USB_RECIP_MASK) {
	case USB_RECIP_DEVICE:
		snprintf(str, size, "Get Device Status(Length = %d)", wLength);
		break;
	case USB_RECIP_INTERFACE:
		snprintf(str, size,
			 "Get Interface Status(Intf = %d, Length = %d)",
			 wIndex, wLength);
		break;
	case USB_RECIP_ENDPOINT:
		/* wIndex carries ep number plus the direction bit */
		snprintf(str, size, "Get Endpoint Status(ep%d%s)",
			 wIndex & ~USB_DIR_IN,
			 wIndex & USB_DIR_IN ? "in" : "out");
		break;
	}
}
/* Map a device-recipient feature selector (wValue) to its name */
static const char *usb_decode_device_feature(u16 wValue)
{
	if (wValue == USB_DEVICE_SELF_POWERED)
		return "Self Powered";
	if (wValue == USB_DEVICE_REMOTE_WAKEUP)
		return "Remote Wakeup";
	if (wValue == USB_DEVICE_TEST_MODE)
		return "Test Mode";
	if (wValue == USB_DEVICE_U1_ENABLE)
		return "U1 Enable";
	if (wValue == USB_DEVICE_U2_ENABLE)
		return "U2 Enable";
	if (wValue == USB_DEVICE_LTM_ENABLE)
		return "LTM Enable";
	return "UNKNOWN";
}
/* Map a TEST_MODE selector (wIndex) to its ": TEST_*" suffix */
static const char *usb_decode_test_mode(u16 wIndex)
{
	if (wIndex == USB_TEST_J)
		return ": TEST_J";
	if (wIndex == USB_TEST_K)
		return ": TEST_K";
	if (wIndex == USB_TEST_SE0_NAK)
		return ": TEST_SE0_NAK";
	if (wIndex == USB_TEST_PACKET)
		return ": TEST_PACKET";
	if (wIndex == USB_TEST_FORCE_ENABLE)
		return ": TEST_FORCE_EN";
	return ": UNKNOWN";
}
/*
 * Format a SET_FEATURE/CLEAR_FEATURE request into @str; the feature
 * namespace depends on the recipient, so dispatch on bmRequestType.
 */
static void usb_decode_set_clear_feature(__u8 bRequestType,
					 __u8 bRequest, __u16 wValue,
					 __u16 wIndex, char *str, size_t size)
{
	switch (bRequestType & USB_RECIP_MASK) {
	case USB_RECIP_DEVICE:
		snprintf(str, size, "%s Device Feature(%s%s)",
			 bRequest == USB_REQ_CLEAR_FEATURE ? "Clear" : "Set",
			 usb_decode_device_feature(wValue),
			 /* TEST_MODE additionally encodes the test in wIndex */
			 wValue == USB_DEVICE_TEST_MODE ?
			 usb_decode_test_mode(wIndex) : "");
		break;
	case USB_RECIP_INTERFACE:
		snprintf(str, size, "%s Interface Feature(%s)",
			 bRequest == USB_REQ_CLEAR_FEATURE ? "Clear" : "Set",
			 wValue == USB_INTRF_FUNC_SUSPEND ?
			 "Function Suspend" : "UNKNOWN");
		break;
	case USB_RECIP_ENDPOINT:
		snprintf(str, size, "%s Endpoint Feature(%s ep%d%s)",
			 bRequest == USB_REQ_CLEAR_FEATURE ? "Clear" : "Set",
			 wValue == USB_ENDPOINT_HALT ? "Halt" : "UNKNOWN",
			 wIndex & ~USB_DIR_IN,
			 wIndex & USB_DIR_IN ? "in" : "out");
		break;
	}
}
/* Format a SET_ADDRESS request (address lives in wValue) */
static void usb_decode_set_address(__u16 wValue, char *str, size_t size)
{
	snprintf(str, size, "Set Address(Addr = %02x)", wValue);
}
/*
 * Format a GET_DESCRIPTOR/SET_DESCRIPTOR request.  wValue packs the
 * descriptor type in the high byte and the descriptor index in the
 * low byte.
 */
static void usb_decode_get_set_descriptor(__u8 bRequestType, __u8 bRequest,
					  __u16 wValue, __u16 wIndex,
					  __u16 wLength, char *str, size_t size)
{
	char *s;

	switch (wValue >> 8) {
	case USB_DT_DEVICE:
		s = "Device";
		break;
	case USB_DT_CONFIG:
		s = "Configuration";
		break;
	case USB_DT_STRING:
		s = "String";
		break;
	case USB_DT_INTERFACE:
		s = "Interface";
		break;
	case USB_DT_ENDPOINT:
		s = "Endpoint";
		break;
	case USB_DT_DEVICE_QUALIFIER:
		s = "Device Qualifier";
		break;
	case USB_DT_OTHER_SPEED_CONFIG:
		s = "Other Speed Config";
		break;
	case USB_DT_INTERFACE_POWER:
		s = "Interface Power";
		break;
	case USB_DT_OTG:
		s = "OTG";
		break;
	case USB_DT_DEBUG:
		s = "Debug";
		break;
	case USB_DT_INTERFACE_ASSOCIATION:
		s = "Interface Association";
		break;
	case USB_DT_BOS:
		s = "BOS";
		break;
	case USB_DT_DEVICE_CAPABILITY:
		s = "Device Capability";
		break;
	case USB_DT_PIPE_USAGE:
		s = "Pipe Usage";
		break;
	case USB_DT_SS_ENDPOINT_COMP:
		s = "SS Endpoint Companion";
		break;
	case USB_DT_SSP_ISOC_ENDPOINT_COMP:
		s = "SSP Isochronous Endpoint Companion";
		break;
	default:
		s = "UNKNOWN";
		break;
	}

	snprintf(str, size, "%s %s Descriptor(Index = %d, Length = %d)",
		 bRequest == USB_REQ_GET_DESCRIPTOR ? "Get" : "Set",
		 s, wValue & 0xff, wLength);
}
/* Format a GET_CONFIGURATION request */
static void usb_decode_get_configuration(__u16 wLength, char *str, size_t size)
{
	snprintf(str, size, "Get Configuration(Length = %d)", wLength);
}

/* Format a SET_CONFIGURATION request (config value in wValue) */
static void usb_decode_set_configuration(__u8 wValue, char *str, size_t size)
{
	snprintf(str, size, "Set Configuration(Config = %d)", wValue);
}

/* Format a GET_INTERFACE request (interface number in wIndex) */
static void usb_decode_get_intf(__u16 wIndex, __u16 wLength, char *str,
				size_t size)
{
	snprintf(str, size, "Get Interface(Intf = %d, Length = %d)",
		 wIndex, wLength);
}

/* Format a SET_INTERFACE request (interface in wIndex, altsetting in wValue) */
static void usb_decode_set_intf(__u8 wValue, __u16 wIndex, char *str,
				size_t size)
{
	snprintf(str, size, "Set Interface(Intf = %d, Alt.Setting = %d)",
		 wIndex, wValue);
}

/* Format a SYNCH_FRAME request (endpoint number in wIndex) */
static void usb_decode_synch_frame(__u16 wIndex, __u16 wLength,
				   char *str, size_t size)
{
	snprintf(str, size, "Synch Frame(Endpoint = %d, Length = %d)",
		 wIndex, wLength);
}

/* Format a SET_SEL request */
static void usb_decode_set_sel(__u16 wLength, char *str, size_t size)
{
	snprintf(str, size, "Set SEL(Length = %d)", wLength);
}

/* Format a SET_ISOCH_DELAY request (delay in ns in wValue) */
static void usb_decode_set_isoch_delay(__u8 wValue, char *str, size_t size)
{
	snprintf(str, size, "Set Isochronous Delay(Delay = %d ns)", wValue);
}
/*
 * Fallback formatter: print the raw setup-packet fields symbolically.
 * Used for vendor/class requests and unrecognized standard requests.
 */
static void usb_decode_ctrl_generic(char *str, size_t size, __u8 bRequestType,
				    __u8 bRequest, __u16 wValue, __u16 wIndex,
				    __u16 wLength)
{
	u8 recip = bRequestType & USB_RECIP_MASK;
	u8 type = bRequestType & USB_TYPE_MASK;

	snprintf(str, size,
		 "Type=%s Recipient=%s Dir=%s bRequest=%u wValue=%u wIndex=%u wLength=%u",
		 (type == USB_TYPE_STANDARD)    ? "Standard" :
		 (type == USB_TYPE_VENDOR)      ? "Vendor" :
		 (type == USB_TYPE_CLASS)       ? "Class" : "Unknown",
		 (recip == USB_RECIP_DEVICE)    ? "Device" :
		 (recip == USB_RECIP_INTERFACE) ? "Interface" :
		 (recip == USB_RECIP_ENDPOINT)  ? "Endpoint" : "Unknown",
		 (bRequestType & USB_DIR_IN)    ? "IN" : "OUT",
		 bRequest, wValue, wIndex, wLength);
}
/*
 * Dispatch a standard control request to its per-request formatter;
 * anything we do not know falls through to the generic dump.
 */
static void usb_decode_ctrl_standard(char *str, size_t size, __u8 bRequestType,
				     __u8 bRequest, __u16 wValue, __u16 wIndex,
				     __u16 wLength)
{
	switch (bRequest) {
	case USB_REQ_GET_STATUS:
		usb_decode_get_status(bRequestType, wIndex, wLength, str, size);
		break;
	case USB_REQ_CLEAR_FEATURE:
	case USB_REQ_SET_FEATURE:
		usb_decode_set_clear_feature(bRequestType, bRequest, wValue,
					     wIndex, str, size);
		break;
	case USB_REQ_SET_ADDRESS:
		usb_decode_set_address(wValue, str, size);
		break;
	case USB_REQ_GET_DESCRIPTOR:
	case USB_REQ_SET_DESCRIPTOR:
		usb_decode_get_set_descriptor(bRequestType, bRequest, wValue,
					      wIndex, wLength, str, size);
		break;
	case USB_REQ_GET_CONFIGURATION:
		usb_decode_get_configuration(wLength, str, size);
		break;
	case USB_REQ_SET_CONFIGURATION:
		usb_decode_set_configuration(wValue, str, size);
		break;
	case USB_REQ_GET_INTERFACE:
		usb_decode_get_intf(wIndex, wLength, str, size);
		break;
	case USB_REQ_SET_INTERFACE:
		usb_decode_set_intf(wValue, wIndex, str, size);
		break;
	case USB_REQ_SYNCH_FRAME:
		usb_decode_synch_frame(wIndex, wLength, str, size);
		break;
	case USB_REQ_SET_SEL:
		usb_decode_set_sel(wLength, str, size);
		break;
	case USB_REQ_SET_ISOCH_DELAY:
		usb_decode_set_isoch_delay(wValue, str, size);
		break;
	default:
		usb_decode_ctrl_generic(str, size, bRequestType, bRequest,
					wValue, wIndex, wLength);
		break;
	}
}
/**
 * usb_decode_ctrl - Returns human readable representation of control request.
 * @str: buffer to return a human-readable representation of control request.
 *       This buffer should have about 200 bytes.
 * @size: size of str buffer.
 * @bRequestType: matches the USB bmRequestType field
 * @bRequest: matches the USB bRequest field
 * @wValue: matches the USB wValue field (CPU byte order)
 * @wIndex: matches the USB wIndex field (CPU byte order)
 * @wLength: matches the USB wLength field (CPU byte order)
 *
 * Function returns decoded, formatted and human-readable description of
 * control request packet.
 *
 * The usage scenario for this is for tracepoints, so function as a return
 * use the same value as in parameters. This approach allows to use this
 * function in TP_printk
 *
 * Important: wValue, wIndex, wLength parameters before invoking this function
 * should be processed by le16_to_cpu macro.
 */
const char *usb_decode_ctrl(char *str, size_t size, __u8 bRequestType,
			    __u8 bRequest, __u16 wValue, __u16 wIndex,
			    __u16 wLength)
{
	switch (bRequestType & USB_TYPE_MASK) {
	case USB_TYPE_STANDARD:
		usb_decode_ctrl_standard(str, size, bRequestType, bRequest,
					 wValue, wIndex, wLength);
		break;
	case USB_TYPE_VENDOR:
	case USB_TYPE_CLASS:
	default:
		/* vendor/class semantics are opaque; dump raw fields */
		usb_decode_ctrl_generic(str, size, bRequestType, bRequest,
					wValue, wIndex, wLength);
		break;
	}

	return str;
}
EXPORT_SYMBOL_GPL(usb_decode_ctrl);
| linux-master | drivers/usb/common/debug.c |
// SPDX-License-Identifier: GPL-2.0
/*
* USB GPIO Based Connection Detection Driver
*
* Copyright (C) 2019 MediaTek Inc.
*
* Author: Chunfeng Yun <[email protected]>
*
* Some code borrowed from drivers/extcon/extcon-usb-gpio.c
*/
#include <linux/device.h>
#include <linux/gpio/consumer.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/pinctrl/consumer.h>
#include <linux/platform_device.h>
#include <linux/power_supply.h>
#include <linux/regulator/consumer.h>
#include <linux/usb/role.h>
#define USB_GPIO_DEB_MS 20 /* ms */
#define USB_GPIO_DEB_US ((USB_GPIO_DEB_MS) * 1000) /* us */
#define USB_CONN_IRQF \
(IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING | IRQF_ONESHOT)
/* Per-device state for the GPIO-based USB connector driver */
struct usb_conn_info {
	struct device *dev;
	struct usb_role_switch *role_sw;	/* consumer of detected role */
	enum usb_role last_role;		/* role currently applied */
	struct regulator *vbus;			/* optional VBUS supply */
	struct delayed_work dw_det;		/* debounced detection work */
	unsigned long debounce_jiffies;

	struct gpio_desc *id_gpiod;		/* optional ID pin */
	struct gpio_desc *vbus_gpiod;		/* optional VBUS sense pin */
	int id_irq;
	int vbus_irq;

	struct power_supply_desc desc;
	struct power_supply *charger;		/* reports "online" state */
	bool initial_detection;			/* force first role update */
};
/*
* "DEVICE" = VBUS and "HOST" = !ID, so we have:
* Both "DEVICE" and "HOST" can't be set as active at the same time
* so if "HOST" is active (i.e. ID is 0) we keep "DEVICE" inactive
* even if VBUS is on.
*
* Role | ID | VBUS
* ------------------------------------
* [1] DEVICE | H | H
* [2] NONE | H | L
* [3] HOST | L | H
* [4] HOST | L | L
*
* In case we have only one of these signals:
* - VBUS only - we want to distinguish between [1] and [2], so ID is always 1
* - ID only - we want to distinguish between [1] and [4], so VBUS = ID
*/
/*
 * usb_conn_detect_cable - debounced detection work.
 *
 * Samples the ID/VBUS GPIOs (applying the single-GPIO fallbacks described
 * in the table above), computes the new role, and applies it: VBUS supply
 * is dropped before leaving host mode and enabled after entering it, with
 * the role switch updated in between.
 */
static void usb_conn_detect_cable(struct work_struct *work)
{
	struct usb_conn_info *info;
	enum usb_role role;
	int id, vbus, ret;

	info = container_of(to_delayed_work(work),
			    struct usb_conn_info, dw_det);

	/* check ID and VBUS */
	id = info->id_gpiod ?
		gpiod_get_value_cansleep(info->id_gpiod) : 1;
	vbus = info->vbus_gpiod ?
		gpiod_get_value_cansleep(info->vbus_gpiod) : id;

	/* ID low wins over VBUS: host even if VBUS is present */
	if (!id)
		role = USB_ROLE_HOST;
	else if (vbus)
		role = USB_ROLE_DEVICE;
	else
		role = USB_ROLE_NONE;

	dev_dbg(info->dev, "role %s -> %s, gpios: id %d, vbus %d\n",
		usb_role_string(info->last_role), usb_role_string(role), id, vbus);

	/* skip no-op updates, except for the forced initial detection */
	if (!info->initial_detection && info->last_role == role) {
		dev_warn(info->dev, "repeated role: %s\n", usb_role_string(role));
		return;
	}

	info->initial_detection = false;

	/* drop VBUS before leaving host mode */
	if (info->last_role == USB_ROLE_HOST && info->vbus)
		regulator_disable(info->vbus);

	ret = usb_role_switch_set_role(info->role_sw, role);
	if (ret)
		dev_err(info->dev, "failed to set role: %d\n", ret);

	/* supply VBUS after entering host mode */
	if (role == USB_ROLE_HOST && info->vbus) {
		ret = regulator_enable(info->vbus);
		if (ret)
			dev_err(info->dev, "enable vbus regulator failed\n");
	}

	info->last_role = role;

	if (info->vbus)
		dev_dbg(info->dev, "vbus regulator is %s\n",
			regulator_is_enabled(info->vbus) ? "enabled" : "disabled");

	/* notify userspace that the charger "online" state may have changed */
	power_supply_changed(info->charger);
}
/* Schedule the detection work on the power-efficient workqueue after @delay jiffies. */
static void usb_conn_queue_dwork(struct usb_conn_info *info,
				 unsigned long delay)
{
	queue_delayed_work(system_power_efficient_wq, &info->dw_det, delay);
}
/* Shared ID/VBUS IRQ handler: defer the actual work past the debounce delay. */
static irqreturn_t usb_conn_isr(int irq, void *dev_id)
{
	struct usb_conn_info *info = dev_id;
	usb_conn_queue_dwork(info, info->debounce_jiffies);
	return IRQ_HANDLED;
}
/* Properties exposed by the "usb-charger" power supply. */
static enum power_supply_property usb_charger_properties[] = {
	POWER_SUPPLY_PROP_ONLINE,
};
/*
 * power_supply getter: ONLINE is reported as 1 while we are acting as a
 * USB device (i.e. an upstream host supplies VBUS), 0 otherwise.
 */
static int usb_charger_get_property(struct power_supply *psy,
				    enum power_supply_property psp,
				    union power_supply_propval *val)
{
	struct usb_conn_info *info = power_supply_get_drvdata(psy);

	if (psp != POWER_SUPPLY_PROP_ONLINE)
		return -EINVAL;

	val->intval = (info->last_role == USB_ROLE_DEVICE);
	return 0;
}
static int usb_conn_psy_register(struct usb_conn_info *info)
{
struct device *dev = info->dev;
struct power_supply_desc *desc = &info->desc;
struct power_supply_config cfg = {
.of_node = dev->of_node,
};
desc->name = "usb-charger";
desc->properties = usb_charger_properties;
desc->num_properties = ARRAY_SIZE(usb_charger_properties);
desc->get_property = usb_charger_get_property;
desc->type = POWER_SUPPLY_TYPE_USB;
cfg.drv_data = info;
info->charger = devm_power_supply_register(dev, desc, &cfg);
if (IS_ERR(info->charger))
dev_err(dev, "Unable to register charger\n");
return PTR_ERR_OR_ZERO(info->charger);
}
/*
 * Probe: acquire ID/VBUS GPIOs (at least one is required), the optional
 * VBUS regulator and the role switch, register the charger supply,
 * request the GPIO IRQs and kick off an initial detection pass.
 */
static int usb_conn_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct usb_conn_info *info;
	int ret = 0;
	info = devm_kzalloc(dev, sizeof(*info), GFP_KERNEL);
	if (!info)
		return -ENOMEM;
	info->dev = dev;
	info->id_gpiod = devm_gpiod_get_optional(dev, "id", GPIOD_IN);
	if (IS_ERR(info->id_gpiod))
		return PTR_ERR(info->id_gpiod);
	info->vbus_gpiod = devm_gpiod_get_optional(dev, "vbus", GPIOD_IN);
	if (IS_ERR(info->vbus_gpiod))
		return PTR_ERR(info->vbus_gpiod);
	/* Both pins are individually optional, but we need at least one. */
	if (!info->id_gpiod && !info->vbus_gpiod) {
		dev_err(dev, "failed to get gpios\n");
		return -ENODEV;
	}
	/* Try hardware debounce first; fall back to a software delay. */
	if (info->id_gpiod)
		ret = gpiod_set_debounce(info->id_gpiod, USB_GPIO_DEB_US);
	if (!ret && info->vbus_gpiod)
		ret = gpiod_set_debounce(info->vbus_gpiod, USB_GPIO_DEB_US);
	if (ret < 0)
		info->debounce_jiffies = msecs_to_jiffies(USB_GPIO_DEB_MS);
	INIT_DELAYED_WORK(&info->dw_det, usb_conn_detect_cable);
	/* VBUS regulator is optional: -ENODEV simply means "not wired up". */
	info->vbus = devm_regulator_get_optional(dev, "vbus");
	if (PTR_ERR(info->vbus) == -ENODEV)
		info->vbus = NULL;
	if (IS_ERR(info->vbus))
		return dev_err_probe(dev, PTR_ERR(info->vbus), "failed to get vbus\n");
	info->role_sw = usb_role_switch_get(dev);
	if (IS_ERR(info->role_sw))
		return dev_err_probe(dev, PTR_ERR(info->role_sw),
				     "failed to get role switch\n");
	ret = usb_conn_psy_register(info);
	if (ret)
		goto put_role_sw;
	if (info->id_gpiod) {
		info->id_irq = gpiod_to_irq(info->id_gpiod);
		if (info->id_irq < 0) {
			dev_err(dev, "failed to get ID IRQ\n");
			ret = info->id_irq;
			goto put_role_sw;
		}
		ret = devm_request_threaded_irq(dev, info->id_irq, NULL,
						usb_conn_isr, USB_CONN_IRQF,
						pdev->name, info);
		if (ret < 0) {
			dev_err(dev, "failed to request ID IRQ\n");
			goto put_role_sw;
		}
	}
	if (info->vbus_gpiod) {
		info->vbus_irq = gpiod_to_irq(info->vbus_gpiod);
		if (info->vbus_irq < 0) {
			dev_err(dev, "failed to get VBUS IRQ\n");
			ret = info->vbus_irq;
			goto put_role_sw;
		}
		ret = devm_request_threaded_irq(dev, info->vbus_irq, NULL,
						usb_conn_isr, USB_CONN_IRQF,
						pdev->name, info);
		if (ret < 0) {
			dev_err(dev, "failed to request VBUS IRQ\n");
			goto put_role_sw;
		}
	}
	platform_set_drvdata(pdev, info);
	device_set_wakeup_capable(&pdev->dev, true);
	/* Perform initial detection */
	info->initial_detection = true;
	usb_conn_queue_dwork(info, 0);
	return 0;
put_role_sw:
	/* role switch is the only non-devm resource acquired above */
	usb_role_switch_put(info->role_sw);
	return ret;
}
/*
 * Remove: stop detection, drop VBUS if we were host, release role switch.
 * GPIOs, IRQs and the power supply are devm-managed and freed by the core.
 */
static void usb_conn_remove(struct platform_device *pdev)
{
	struct usb_conn_info *info = platform_get_drvdata(pdev);
	cancel_delayed_work_sync(&info->dw_det);
	if (info->last_role == USB_ROLE_HOST && info->vbus)
		regulator_disable(info->vbus);
	usb_role_switch_put(info->role_sw);
}
/*
 * Suspend: if the device may wake the system, arm the GPIO IRQs as wake
 * sources; otherwise mask them and switch pinctrl to the sleep state.
 */
static int __maybe_unused usb_conn_suspend(struct device *dev)
{
	struct usb_conn_info *info = dev_get_drvdata(dev);
	if (device_may_wakeup(dev)) {
		if (info->id_gpiod)
			enable_irq_wake(info->id_irq);
		if (info->vbus_gpiod)
			enable_irq_wake(info->vbus_irq);
		return 0;
	}
	if (info->id_gpiod)
		disable_irq(info->id_irq);
	if (info->vbus_gpiod)
		disable_irq(info->vbus_irq);
	pinctrl_pm_select_sleep_state(dev);
	return 0;
}
/*
 * Resume: mirror of usb_conn_suspend(). In the non-wakeup path also
 * re-run detection immediately, since a plug event may have been missed
 * while the IRQs were masked.
 */
static int __maybe_unused usb_conn_resume(struct device *dev)
{
	struct usb_conn_info *info = dev_get_drvdata(dev);
	if (device_may_wakeup(dev)) {
		if (info->id_gpiod)
			disable_irq_wake(info->id_irq);
		if (info->vbus_gpiod)
			disable_irq_wake(info->vbus_irq);
		return 0;
	}
	pinctrl_pm_select_default_state(dev);
	if (info->id_gpiod)
		enable_irq(info->id_irq);
	if (info->vbus_gpiod)
		enable_irq(info->vbus_irq);
	usb_conn_queue_dwork(info, 0);
	return 0;
}
static SIMPLE_DEV_PM_OPS(usb_conn_pm_ops,
usb_conn_suspend, usb_conn_resume);
static const struct of_device_id usb_conn_dt_match[] = {
{ .compatible = "gpio-usb-b-connector", },
{ }
};
MODULE_DEVICE_TABLE(of, usb_conn_dt_match);
static struct platform_driver usb_conn_driver = {
.probe = usb_conn_probe,
.remove_new = usb_conn_remove,
.driver = {
.name = "usb-conn-gpio",
.pm = &usb_conn_pm_ops,
.of_match_table = usb_conn_dt_match,
},
};
module_platform_driver(usb_conn_driver);
MODULE_AUTHOR("Chunfeng Yun <[email protected]>");
MODULE_DESCRIPTION("USB GPIO based connection detection driver");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/usb/common/usb-conn-gpio.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* OTG Finite State Machine from OTG spec
*
* Copyright (C) 2007,2008 Freescale Semiconductor, Inc.
*
* Author: Li Yang <[email protected]>
* Jerry Huang <[email protected]>
*/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/mutex.h>
#include <linux/delay.h>
#include <linux/usb.h>
#include <linux/usb/gadget.h>
#include <linux/usb/otg.h>
#include <linux/usb/otg-fsm.h>
#ifdef VERBOSE
#define VDBG(fmt, args...) pr_debug("[%s] " fmt, \
__func__, ## args)
#else
#define VDBG(stuff...) do {} while (0)
#endif
/* Change USB protocol when there is a protocol change */
/*
 * Switch the controller between host/gadget/undefined protocol.
 * The currently running protocol is stopped before the new one starts;
 * this is a no-op when the requested protocol is already active.
 * Returns 0 on success or the error from the failing start/stop hook.
 */
static int otg_set_protocol(struct otg_fsm *fsm, int protocol)
{
	int ret = 0;

	if (fsm->protocol == protocol)
		return 0;

	VDBG("Changing role fsm->protocol= %d; new protocol= %d\n",
	     fsm->protocol, protocol);

	/* Tear down whatever is currently running. */
	if (fsm->protocol == PROTO_HOST)
		ret = otg_start_host(fsm, 0);
	else if (fsm->protocol == PROTO_GADGET)
		ret = otg_start_gadget(fsm, 0);
	if (ret)
		return ret;

	/* Bring up the requested protocol. */
	if (protocol == PROTO_HOST)
		ret = otg_start_host(fsm, 1);
	else if (protocol == PROTO_GADGET)
		ret = otg_start_gadget(fsm, 1);
	if (ret)
		return ret;

	fsm->protocol = protocol;
	return 0;
}
/* Called when leaving a state. Do state clean up jobs here */
static void otg_leave_state(struct otg_fsm *fsm, enum usb_otg_state old_state)
{
switch (old_state) {
case OTG_STATE_B_IDLE:
otg_del_timer(fsm, B_SE0_SRP);
fsm->b_se0_srp = 0;
fsm->adp_sns = 0;
fsm->adp_prb = 0;
break;
case OTG_STATE_B_SRP_INIT:
fsm->data_pulse = 0;
fsm->b_srp_done = 0;
break;
case OTG_STATE_B_PERIPHERAL:
if (fsm->otg->gadget)
fsm->otg->gadget->host_request_flag = 0;
break;
case OTG_STATE_B_WAIT_ACON:
otg_del_timer(fsm, B_ASE0_BRST);
fsm->b_ase0_brst_tmout = 0;
break;
case OTG_STATE_B_HOST:
break;
case OTG_STATE_A_IDLE:
fsm->adp_prb = 0;
break;
case OTG_STATE_A_WAIT_VRISE:
otg_del_timer(fsm, A_WAIT_VRISE);
fsm->a_wait_vrise_tmout = 0;
break;
case OTG_STATE_A_WAIT_BCON:
otg_del_timer(fsm, A_WAIT_BCON);
fsm->a_wait_bcon_tmout = 0;
break;
case OTG_STATE_A_HOST:
otg_del_timer(fsm, A_WAIT_ENUM);
break;
case OTG_STATE_A_SUSPEND:
otg_del_timer(fsm, A_AIDL_BDIS);
fsm->a_aidl_bdis_tmout = 0;
fsm->a_suspend_req_inf = 0;
break;
case OTG_STATE_A_PERIPHERAL:
otg_del_timer(fsm, A_BIDL_ADIS);
fsm->a_bidl_adis_tmout = 0;
if (fsm->otg->gadget)
fsm->otg->gadget->host_request_flag = 0;
break;
case OTG_STATE_A_WAIT_VFALL:
otg_del_timer(fsm, A_WAIT_VFALL);
fsm->a_wait_vfall_tmout = 0;
otg_del_timer(fsm, A_WAIT_VRISE);
break;
case OTG_STATE_A_VBUS_ERR:
break;
default:
break;
}
}
/*
 * HNP polling work: while acting as host (a_host or b_host), periodically
 * read the OTG host-request flag from the connected peripheral via
 * GET_STATUS(OTG_STS_SELECTOR). When the flag is set, hand the host role
 * over by dropping the bus request and re-running the state machine.
 */
static void otg_hnp_polling_work(struct work_struct *work)
{
	struct otg_fsm *fsm = container_of(to_delayed_work(work),
				struct otg_fsm, hnp_polling_work);
	struct usb_device *udev;
	enum usb_otg_state state = fsm->otg->state;
	u8 flag;
	int retval;
	if (state != OTG_STATE_A_HOST && state != OTG_STATE_B_HOST)
		return;
	/* the OTG peripheral is always the single device on port 1 */
	udev = usb_hub_find_child(fsm->otg->host->root_hub, 1);
	if (!udev) {
		dev_err(fsm->otg->host->controller,
			"no usb dev connected, can't start HNP polling\n");
		return;
	}
	*fsm->host_req_flag = 0;
	/* Get host request flag from connected USB device */
	retval = usb_control_msg(udev,
				 usb_rcvctrlpipe(udev, 0),
				 USB_REQ_GET_STATUS,
				 USB_DIR_IN | USB_RECIP_DEVICE,
				 0,
				 OTG_STS_SELECTOR,
				 fsm->host_req_flag,
				 1,
				 USB_CTRL_GET_TIMEOUT);
	if (retval != 1) {
		dev_err(&udev->dev, "Get one byte OTG status failed\n");
		return;
	}
	flag = *fsm->host_req_flag;
	if (flag == 0) {
		/* Continue HNP polling */
		schedule_delayed_work(&fsm->hnp_polling_work,
					msecs_to_jiffies(T_HOST_REQ_POLL));
		return;
	} else if (flag != HOST_REQUEST_FLAG) {
		dev_err(&udev->dev, "host request flag %d is invalid\n", flag);
		return;
	}
	/* Host request flag is set */
	if (state == OTG_STATE_A_HOST) {
		/* Set b_hnp_enable */
		if (!fsm->otg->host->b_hnp_enable) {
			retval = usb_control_msg(udev,
						 usb_sndctrlpipe(udev, 0),
						 USB_REQ_SET_FEATURE, 0,
						 USB_DEVICE_B_HNP_ENABLE,
						 0, NULL, 0,
						 USB_CTRL_SET_TIMEOUT);
			if (retval >= 0)
				fsm->otg->host->b_hnp_enable = 1;
		}
		/* dropping a_bus_req lets the fsm move to a_suspend (HNP) */
		fsm->a_bus_req = 0;
	} else if (state == OTG_STATE_B_HOST) {
		fsm->b_bus_req = 0;
	}
	otg_statemachine(fsm);
}
/* Arm (or re-arm) the periodic HNP host-request poll. */
static void otg_start_hnp_polling(struct otg_fsm *fsm)
{
	/*
	 * The memory of host_req_flag should be allocated by
	 * controller driver, otherwise, hnp polling is not started.
	 */
	if (!fsm->host_req_flag)
		return;

	/* One-time lazy initialization of the polling work item. */
	if (!fsm->hnp_work_inited) {
		INIT_DELAYED_WORK(&fsm->hnp_polling_work, otg_hnp_polling_work);
		fsm->hnp_work_inited = true;
	}

	schedule_delayed_work(&fsm->hnp_polling_work,
			      msecs_to_jiffies(T_HOST_REQ_POLL));
}
/* Called when entering a state */
/*
 * Called when entering a state: run otg_leave_state() for the old state,
 * then configure VBUS drive/charge, local connect, SOF generation and the
 * protocol for @new_state, and arm that state's timers. Always returns 0.
 */
static int otg_set_state(struct otg_fsm *fsm, enum usb_otg_state new_state)
{
	if (fsm->otg->state == new_state)
		return 0;
	VDBG("Set state: %s\n", usb_otg_state_string(new_state));
	otg_leave_state(fsm, fsm->otg->state);
	switch (new_state) {
	case OTG_STATE_B_IDLE:
		otg_drv_vbus(fsm, 0);
		otg_chrg_vbus(fsm, 0);
		otg_loc_conn(fsm, 0);
		otg_loc_sof(fsm, 0);
		/*
		 * Driver is responsible for starting ADP probing
		 * if ADP sensing times out.
		 */
		otg_start_adp_sns(fsm);
		otg_set_protocol(fsm, PROTO_UNDEF);
		otg_add_timer(fsm, B_SE0_SRP);
		break;
	case OTG_STATE_B_SRP_INIT:
		otg_start_pulse(fsm);
		otg_loc_sof(fsm, 0);
		otg_set_protocol(fsm, PROTO_UNDEF);
		otg_add_timer(fsm, B_SRP_FAIL);
		break;
	case OTG_STATE_B_PERIPHERAL:
		otg_chrg_vbus(fsm, 0);
		otg_loc_sof(fsm, 0);
		otg_set_protocol(fsm, PROTO_GADGET);
		otg_loc_conn(fsm, 1);
		break;
	case OTG_STATE_B_WAIT_ACON:
		otg_chrg_vbus(fsm, 0);
		otg_loc_conn(fsm, 0);
		otg_loc_sof(fsm, 0);
		otg_set_protocol(fsm, PROTO_HOST);
		otg_add_timer(fsm, B_ASE0_BRST);
		fsm->a_bus_suspend = 0;
		break;
	case OTG_STATE_B_HOST:
		otg_chrg_vbus(fsm, 0);
		otg_loc_conn(fsm, 0);
		otg_loc_sof(fsm, 1);
		otg_set_protocol(fsm, PROTO_HOST);
		/* enumerate the A-device that just became a peripheral */
		usb_bus_start_enum(fsm->otg->host,
				fsm->otg->host->otg_port);
		otg_start_hnp_polling(fsm);
		break;
	case OTG_STATE_A_IDLE:
		otg_drv_vbus(fsm, 0);
		otg_chrg_vbus(fsm, 0);
		otg_loc_conn(fsm, 0);
		otg_loc_sof(fsm, 0);
		otg_start_adp_prb(fsm);
		otg_set_protocol(fsm, PROTO_HOST);
		break;
	case OTG_STATE_A_WAIT_VRISE:
		otg_drv_vbus(fsm, 1);
		otg_loc_conn(fsm, 0);
		otg_loc_sof(fsm, 0);
		otg_set_protocol(fsm, PROTO_HOST);
		otg_add_timer(fsm, A_WAIT_VRISE);
		break;
	case OTG_STATE_A_WAIT_BCON:
		otg_drv_vbus(fsm, 1);
		otg_loc_conn(fsm, 0);
		otg_loc_sof(fsm, 0);
		otg_set_protocol(fsm, PROTO_HOST);
		otg_add_timer(fsm, A_WAIT_BCON);
		break;
	case OTG_STATE_A_HOST:
		otg_drv_vbus(fsm, 1);
		otg_loc_conn(fsm, 0);
		otg_loc_sof(fsm, 1);
		otg_set_protocol(fsm, PROTO_HOST);
		/*
		 * When HNP is triggered while a_bus_req = 0, a_host will
		 * suspend too fast to complete a_set_b_hnp_en
		 */
		if (!fsm->a_bus_req || fsm->a_suspend_req_inf)
			otg_add_timer(fsm, A_WAIT_ENUM);
		otg_start_hnp_polling(fsm);
		break;
	case OTG_STATE_A_SUSPEND:
		otg_drv_vbus(fsm, 1);
		otg_loc_conn(fsm, 0);
		otg_loc_sof(fsm, 0);
		otg_set_protocol(fsm, PROTO_HOST);
		otg_add_timer(fsm, A_AIDL_BDIS);
		break;
	case OTG_STATE_A_PERIPHERAL:
		otg_loc_sof(fsm, 0);
		otg_set_protocol(fsm, PROTO_GADGET);
		otg_drv_vbus(fsm, 1);
		otg_loc_conn(fsm, 1);
		otg_add_timer(fsm, A_BIDL_ADIS);
		break;
	case OTG_STATE_A_WAIT_VFALL:
		otg_drv_vbus(fsm, 0);
		otg_loc_conn(fsm, 0);
		otg_loc_sof(fsm, 0);
		otg_set_protocol(fsm, PROTO_HOST);
		otg_add_timer(fsm, A_WAIT_VFALL);
		break;
	case OTG_STATE_A_VBUS_ERR:
		otg_drv_vbus(fsm, 0);
		otg_loc_conn(fsm, 0);
		otg_loc_sof(fsm, 0);
		otg_set_protocol(fsm, PROTO_UNDEF);
		break;
	default:
		break;
	}
	fsm->otg->state = new_state;
	/* let otg_statemachine() report that a transition happened */
	fsm->state_changed = 1;
	return 0;
}
/* State change judgement */
/*
 * State change judgement: evaluate the OTG spec transition conditions for
 * the current state under fsm->lock and perform at most one transition
 * via otg_set_state(). Returns non-zero if the state changed.
 */
int otg_statemachine(struct otg_fsm *fsm)
{
	enum usb_otg_state state;
	mutex_lock(&fsm->lock);
	state = fsm->otg->state;
	fsm->state_changed = 0;
	/* State machine state change judgement */
	switch (state) {
	case OTG_STATE_UNDEFINED:
		/* initial role is decided solely by the ID pin */
		VDBG("fsm->id = %d\n", fsm->id);
		if (fsm->id)
			otg_set_state(fsm, OTG_STATE_B_IDLE);
		else
			otg_set_state(fsm, OTG_STATE_A_IDLE);
		break;
	case OTG_STATE_B_IDLE:
		if (!fsm->id)
			otg_set_state(fsm, OTG_STATE_A_IDLE);
		else if (fsm->b_sess_vld && fsm->otg->gadget)
			otg_set_state(fsm, OTG_STATE_B_PERIPHERAL);
		else if ((fsm->b_bus_req || fsm->adp_change || fsm->power_up) &&
				fsm->b_ssend_srp && fsm->b_se0_srp)
			otg_set_state(fsm, OTG_STATE_B_SRP_INIT);
		break;
	case OTG_STATE_B_SRP_INIT:
		if (!fsm->id || fsm->b_srp_done)
			otg_set_state(fsm, OTG_STATE_B_IDLE);
		break;
	case OTG_STATE_B_PERIPHERAL:
		if (!fsm->id || !fsm->b_sess_vld)
			otg_set_state(fsm, OTG_STATE_B_IDLE);
		else if (fsm->b_bus_req && fsm->otg->
				gadget->b_hnp_enable && fsm->a_bus_suspend)
			otg_set_state(fsm, OTG_STATE_B_WAIT_ACON);
		break;
	case OTG_STATE_B_WAIT_ACON:
		if (fsm->a_conn)
			otg_set_state(fsm, OTG_STATE_B_HOST);
		else if (!fsm->id || !fsm->b_sess_vld)
			otg_set_state(fsm, OTG_STATE_B_IDLE);
		else if (fsm->a_bus_resume || fsm->b_ase0_brst_tmout) {
			fsm->b_ase0_brst_tmout = 0;
			otg_set_state(fsm, OTG_STATE_B_PERIPHERAL);
		}
		break;
	case OTG_STATE_B_HOST:
		if (!fsm->id || !fsm->b_sess_vld)
			otg_set_state(fsm, OTG_STATE_B_IDLE);
		else if (!fsm->b_bus_req || !fsm->a_conn || fsm->test_device)
			otg_set_state(fsm, OTG_STATE_B_PERIPHERAL);
		break;
	case OTG_STATE_A_IDLE:
		if (fsm->id)
			otg_set_state(fsm, OTG_STATE_B_IDLE);
		else if (!fsm->a_bus_drop && (fsm->a_bus_req ||
			  fsm->a_srp_det || fsm->adp_change || fsm->power_up))
			otg_set_state(fsm, OTG_STATE_A_WAIT_VRISE);
		break;
	case OTG_STATE_A_WAIT_VRISE:
		if (fsm->a_vbus_vld)
			otg_set_state(fsm, OTG_STATE_A_WAIT_BCON);
		else if (fsm->id || fsm->a_bus_drop ||
				fsm->a_wait_vrise_tmout)
			otg_set_state(fsm, OTG_STATE_A_WAIT_VFALL);
		break;
	case OTG_STATE_A_WAIT_BCON:
		if (!fsm->a_vbus_vld)
			otg_set_state(fsm, OTG_STATE_A_VBUS_ERR);
		else if (fsm->b_conn)
			otg_set_state(fsm, OTG_STATE_A_HOST);
		else if (fsm->id || fsm->a_bus_drop || fsm->a_wait_bcon_tmout)
			otg_set_state(fsm, OTG_STATE_A_WAIT_VFALL);
		break;
	case OTG_STATE_A_HOST:
		if (fsm->id || fsm->a_bus_drop)
			otg_set_state(fsm, OTG_STATE_A_WAIT_VFALL);
		else if ((!fsm->a_bus_req || fsm->a_suspend_req_inf) &&
				fsm->otg->host->b_hnp_enable)
			otg_set_state(fsm, OTG_STATE_A_SUSPEND);
		else if (!fsm->b_conn)
			otg_set_state(fsm, OTG_STATE_A_WAIT_BCON);
		else if (!fsm->a_vbus_vld)
			otg_set_state(fsm, OTG_STATE_A_VBUS_ERR);
		break;
	case OTG_STATE_A_SUSPEND:
		if (!fsm->b_conn && fsm->otg->host->b_hnp_enable)
			otg_set_state(fsm, OTG_STATE_A_PERIPHERAL);
		else if (!fsm->b_conn && !fsm->otg->host->b_hnp_enable)
			otg_set_state(fsm, OTG_STATE_A_WAIT_BCON);
		else if (fsm->a_bus_req || fsm->b_bus_resume)
			otg_set_state(fsm, OTG_STATE_A_HOST);
		else if (fsm->id || fsm->a_bus_drop || fsm->a_aidl_bdis_tmout)
			otg_set_state(fsm, OTG_STATE_A_WAIT_VFALL);
		else if (!fsm->a_vbus_vld)
			otg_set_state(fsm, OTG_STATE_A_VBUS_ERR);
		break;
	case OTG_STATE_A_PERIPHERAL:
		if (fsm->id || fsm->a_bus_drop)
			otg_set_state(fsm, OTG_STATE_A_WAIT_VFALL);
		else if (fsm->a_bidl_adis_tmout || fsm->b_bus_suspend)
			otg_set_state(fsm, OTG_STATE_A_WAIT_BCON);
		else if (!fsm->a_vbus_vld)
			otg_set_state(fsm, OTG_STATE_A_VBUS_ERR);
		break;
	case OTG_STATE_A_WAIT_VFALL:
		if (fsm->a_wait_vfall_tmout)
			otg_set_state(fsm, OTG_STATE_A_IDLE);
		break;
	case OTG_STATE_A_VBUS_ERR:
		if (fsm->id || fsm->a_bus_drop || fsm->a_clr_err)
			otg_set_state(fsm, OTG_STATE_A_WAIT_VFALL);
		break;
	default:
		break;
	}
	mutex_unlock(&fsm->lock);
	VDBG("quit statemachine, changed = %d\n", fsm->state_changed);
	return fsm->state_changed;
}
MODULE_LICENSE("GPL");
| linux-master | drivers/usb/common/usb-otg-fsm.c |
// SPDX-License-Identifier: GPL-2.0
/*
* otg_fsm.c - ChipIdea USB IP core OTG FSM driver
*
* Copyright (C) 2014 Freescale Semiconductor, Inc.
*
* Author: Jun Li
*/
/*
* This file mainly handles OTG fsm, it includes OTG fsm operations
* for HNP and SRP.
*
* TODO List
* - ADP
* - OTG test device
*/
#include <linux/usb/otg.h>
#include <linux/usb/gadget.h>
#include <linux/usb/hcd.h>
#include <linux/usb/chipidea.h>
#include <linux/regulator/consumer.h>
#include "ci.h"
#include "bits.h"
#include "otg.h"
#include "otg_fsm.h"
/* Add for otg: interact with user space app */
/* Show the current a_bus_req fsm input ("0" or "1"). */
static ssize_t
a_bus_req_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct ci_hdrc *ci = dev_get_drvdata(dev);

	/* scnprintf() returns the number of bytes written, which is
	 * exactly what the old next/size bookkeeping computed. */
	return scnprintf(buf, PAGE_SIZE, "%d\n", ci->fsm.a_bus_req);
}
/*
 * Set/clear the a_bus_req fsm input from user space. "1" requests the
 * A-device host role (or flags an HNP host request while in
 * a_peripheral); "0" drops the request.
 *
 * Fix: reject oversized input with -EINVAL instead of the bare -1,
 * which user space would see as the misleading -EPERM.
 */
static ssize_t
a_bus_req_store(struct device *dev, struct device_attribute *attr,
					const char *buf, size_t count)
{
	struct ci_hdrc *ci = dev_get_drvdata(dev);

	/* expect a single '0'/'1', optionally followed by a newline */
	if (count > 2)
		return -EINVAL;

	mutex_lock(&ci->fsm.lock);
	if (buf[0] == '0') {
		ci->fsm.a_bus_req = 0;
	} else if (buf[0] == '1') {
		/* If a_bus_drop is TRUE, a_bus_req can't be set */
		if (ci->fsm.a_bus_drop) {
			mutex_unlock(&ci->fsm.lock);
			return count;
		}
		ci->fsm.a_bus_req = 1;
		if (ci->fsm.otg->state == OTG_STATE_A_PERIPHERAL) {
			/* notify the gadget; no fsm work needed here */
			ci->gadget.host_request_flag = 1;
			mutex_unlock(&ci->fsm.lock);
			return count;
		}
	}

	ci_otg_queue_work(ci);
	mutex_unlock(&ci->fsm.lock);

	return count;
}
static DEVICE_ATTR_RW(a_bus_req);
/* Show the current a_bus_drop fsm input ("0" or "1"). */
static ssize_t
a_bus_drop_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct ci_hdrc *ci = dev_get_drvdata(dev);

	/* single formatted value; equivalent to the old next/size dance */
	return scnprintf(buf, PAGE_SIZE, "%d\n", ci->fsm.a_bus_drop);
}
/*
 * Set/clear the a_bus_drop fsm input. Setting it also clears a_bus_req,
 * since a dropped bus cannot be requested at the same time.
 *
 * Fix: reject oversized input with -EINVAL instead of the bare -1
 * (-EPERM) the original returned.
 */
static ssize_t
a_bus_drop_store(struct device *dev, struct device_attribute *attr,
					const char *buf, size_t count)
{
	struct ci_hdrc *ci = dev_get_drvdata(dev);

	/* expect a single '0'/'1', optionally followed by a newline */
	if (count > 2)
		return -EINVAL;

	mutex_lock(&ci->fsm.lock);
	if (buf[0] == '0') {
		ci->fsm.a_bus_drop = 0;
	} else if (buf[0] == '1') {
		ci->fsm.a_bus_drop = 1;
		ci->fsm.a_bus_req = 0;
	}

	ci_otg_queue_work(ci);
	mutex_unlock(&ci->fsm.lock);

	return count;
}
static DEVICE_ATTR_RW(a_bus_drop);
/* Show the current b_bus_req fsm input ("0" or "1"). */
static ssize_t
b_bus_req_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct ci_hdrc *ci = dev_get_drvdata(dev);

	/* single formatted value; equivalent to the old next/size dance */
	return scnprintf(buf, PAGE_SIZE, "%d\n", ci->fsm.b_bus_req);
}
/*
 * Set/clear the b_bus_req fsm input. "1" requests the B-device host role
 * (or flags an HNP host request while in b_peripheral).
 *
 * Fixes: return -EINVAL instead of the bare -1 (-EPERM) for oversized
 * input, and brace both branches consistently.
 */
static ssize_t
b_bus_req_store(struct device *dev, struct device_attribute *attr,
					const char *buf, size_t count)
{
	struct ci_hdrc *ci = dev_get_drvdata(dev);

	/* expect a single '0'/'1', optionally followed by a newline */
	if (count > 2)
		return -EINVAL;

	mutex_lock(&ci->fsm.lock);
	if (buf[0] == '0') {
		ci->fsm.b_bus_req = 0;
	} else if (buf[0] == '1') {
		ci->fsm.b_bus_req = 1;
		if (ci->fsm.otg->state == OTG_STATE_B_PERIPHERAL) {
			/* notify the gadget; no fsm work needed here */
			ci->gadget.host_request_flag = 1;
			mutex_unlock(&ci->fsm.lock);
			return count;
		}
	}

	ci_otg_queue_work(ci);
	mutex_unlock(&ci->fsm.lock);

	return count;
}
static DEVICE_ATTR_RW(b_bus_req);
/*
 * Write "1" to clear an a_vbus_err condition (sets the a_clr_err fsm
 * input and kicks the state machine).
 *
 * Fix: return -EINVAL instead of the bare -1 (-EPERM) for oversized
 * input.
 */
static ssize_t
a_clr_err_store(struct device *dev, struct device_attribute *attr,
					const char *buf, size_t count)
{
	struct ci_hdrc *ci = dev_get_drvdata(dev);

	/* expect a single '1', optionally followed by a newline */
	if (count > 2)
		return -EINVAL;

	mutex_lock(&ci->fsm.lock);
	if (buf[0] == '1')
		ci->fsm.a_clr_err = 1;

	ci_otg_queue_work(ci);
	mutex_unlock(&ci->fsm.lock);

	return count;
}
static DEVICE_ATTR_WO(a_clr_err);
static struct attribute *inputs_attrs[] = {
&dev_attr_a_bus_req.attr,
&dev_attr_a_bus_drop.attr,
&dev_attr_b_bus_req.attr,
&dev_attr_a_clr_err.attr,
NULL,
};
static const struct attribute_group inputs_attr_group = {
.name = "inputs",
.attrs = inputs_attrs,
};
/*
* Keep this list in the same order as timers indexed
* by enum otg_fsm_timer in include/linux/usb/otg-fsm.h
*/
static unsigned otg_timer_ms[] = {
TA_WAIT_VRISE,
TA_WAIT_VFALL,
TA_WAIT_BCON,
TA_AIDL_BDIS,
TB_ASE0_BRST,
TA_BIDL_ADIS,
TB_AIDL_BDIS,
TB_SE0_SRP,
TB_SRP_FAIL,
0,
TB_DATA_PLS,
TB_SSEND_SRP,
};
/*
* Add timer to active timer list
*/
/*
 * Add timer to active timer list: record its absolute deadline and,
 * if it expires before the currently programmed one (or none is
 * programmed), re-arm the shared hrtimer for it.
 */
static void ci_otg_add_timer(struct ci_hdrc *ci, enum otg_fsm_timer t)
{
	unsigned long flags, timer_sec, timer_nsec;
	if (t >= NUM_OTG_FSM_TIMERS)
		return;
	spin_lock_irqsave(&ci->lock, flags);
	timer_sec = otg_timer_ms[t] / MSEC_PER_SEC;
	timer_nsec = (otg_timer_ms[t] % MSEC_PER_SEC) * NSEC_PER_MSEC;
	ci->hr_timeouts[t] = ktime_add(ktime_get(),
				ktime_set(timer_sec, timer_nsec));
	ci->enabled_otg_timer_bits |= (1 << t);
	/* program the hrtimer if this timer is now the nearest deadline */
	if ((ci->next_otg_timer == NUM_OTG_FSM_TIMERS) ||
			ktime_after(ci->hr_timeouts[ci->next_otg_timer],
						ci->hr_timeouts[t])) {
		ci->next_otg_timer = t;
		hrtimer_start_range_ns(&ci->otg_fsm_hrtimer,
					ci->hr_timeouts[t], NSEC_PER_MSEC,
					HRTIMER_MODE_ABS);
	}
	spin_unlock_irqrestore(&ci->lock, flags);
}
/*
* Remove timer from active timer list
*/
/*
 * Remove timer from active timer list. If the removed timer was the one
 * the hrtimer is armed for, either cancel the hrtimer (no timers left)
 * or re-arm it for the next nearest deadline.
 */
static void ci_otg_del_timer(struct ci_hdrc *ci, enum otg_fsm_timer t)
{
	unsigned long flags, enabled_timer_bits;
	enum otg_fsm_timer cur_timer, next_timer = NUM_OTG_FSM_TIMERS;
	if ((t >= NUM_OTG_FSM_TIMERS) ||
			!(ci->enabled_otg_timer_bits & (1 << t)))
		return;
	spin_lock_irqsave(&ci->lock, flags);
	ci->enabled_otg_timer_bits &= ~(1 << t);
	if (ci->next_otg_timer == t) {
		if (ci->enabled_otg_timer_bits == 0) {
			spin_unlock_irqrestore(&ci->lock, flags);
			/* No enabled timers after delete it */
			/* hrtimer_cancel() may sleep-wait for the callback,
			 * so it must run without ci->lock held */
			hrtimer_cancel(&ci->otg_fsm_hrtimer);
			spin_lock_irqsave(&ci->lock, flags);
			ci->next_otg_timer = NUM_OTG_FSM_TIMERS;
		} else {
			/* Find the next timer */
			enabled_timer_bits = ci->enabled_otg_timer_bits;
			for_each_set_bit(cur_timer, &enabled_timer_bits,
							NUM_OTG_FSM_TIMERS) {
				if ((next_timer == NUM_OTG_FSM_TIMERS) ||
					ktime_before(ci->hr_timeouts[next_timer],
						ci->hr_timeouts[cur_timer]))
					next_timer = cur_timer;
			}
		}
	}
	/* only non-NUM when the branch above picked a replacement timer */
	if (next_timer != NUM_OTG_FSM_TIMERS) {
		ci->next_otg_timer = next_timer;
		hrtimer_start_range_ns(&ci->otg_fsm_hrtimer,
			ci->hr_timeouts[next_timer], NSEC_PER_MSEC,
			HRTIMER_MODE_ABS);
	}
	spin_unlock_irqrestore(&ci->lock, flags);
}
/* OTG FSM timer handlers */
/*
 * Timer expiry handlers. Each sets the matching fsm timeout flag.
 * A zero return makes ci_otg_hrtimer_func() queue the OTG work to run
 * the state machine; non-zero suppresses that.
 */
static int a_wait_vrise_tmout(struct ci_hdrc *ci)
{
	ci->fsm.a_wait_vrise_tmout = 1;
	return 0;
}
static int a_wait_vfall_tmout(struct ci_hdrc *ci)
{
	ci->fsm.a_wait_vfall_tmout = 1;
	return 0;
}
static int a_wait_bcon_tmout(struct ci_hdrc *ci)
{
	ci->fsm.a_wait_bcon_tmout = 1;
	return 0;
}
static int a_aidl_bdis_tmout(struct ci_hdrc *ci)
{
	ci->fsm.a_aidl_bdis_tmout = 1;
	return 0;
}
static int b_ase0_brst_tmout(struct ci_hdrc *ci)
{
	ci->fsm.b_ase0_brst_tmout = 1;
	return 0;
}
static int a_bidl_adis_tmout(struct ci_hdrc *ci)
{
	ci->fsm.a_bidl_adis_tmout = 1;
	return 0;
}
static int b_aidl_bdis_tmout(struct ci_hdrc *ci)
{
	ci->fsm.a_bus_suspend = 1;
	return 0;
}
static int b_se0_srp_tmout(struct ci_hdrc *ci)
{
	ci->fsm.b_se0_srp = 1;
	return 0;
}
/* SRP failed: mark done but don't run the state machine from the timer */
static int b_srp_fail_tmout(struct ci_hdrc *ci)
{
	ci->fsm.b_srp_done = 1;
	return 1;
}
/* Data pulse finished: stop HW-assisted SRP and drop the PM reference
 * taken in ci_otg_start_pulse() */
static int b_data_pls_tmout(struct ci_hdrc *ci)
{
	ci->fsm.b_srp_done = 1;
	ci->fsm.b_bus_req = 0;
	if (ci->fsm.power_up)
		ci->fsm.power_up = 0;
	hw_write_otgsc(ci, OTGSC_HABA, 0);
	pm_runtime_put(ci->dev);
	return 0;
}
static int b_ssend_srp_tmout(struct ci_hdrc *ci)
{
	ci->fsm.b_ssend_srp = 1;
	/* only vbus fall below B_sess_vld in b_idle state */
	if (ci->fsm.otg->state == OTG_STATE_B_IDLE)
		return 0;
	else
		return 1;
}
/*
* Keep this list in the same order as timers indexed
* by enum otg_fsm_timer in include/linux/usb/otg-fsm.h
*/
static int (*otg_timer_handlers[])(struct ci_hdrc *) = {
a_wait_vrise_tmout, /* A_WAIT_VRISE */
a_wait_vfall_tmout, /* A_WAIT_VFALL */
a_wait_bcon_tmout, /* A_WAIT_BCON */
a_aidl_bdis_tmout, /* A_AIDL_BDIS */
b_ase0_brst_tmout, /* B_ASE0_BRST */
a_bidl_adis_tmout, /* A_BIDL_ADIS */
b_aidl_bdis_tmout, /* B_AIDL_BDIS */
b_se0_srp_tmout, /* B_SE0_SRP */
b_srp_fail_tmout, /* B_SRP_FAIL */
NULL, /* A_WAIT_ENUM */
b_data_pls_tmout, /* B_DATA_PLS */
b_ssend_srp_tmout, /* B_SSEND_SRP */
};
/*
* Enable the next nearest enabled timer if have
*/
/*
 * Shared hrtimer callback: fire every timer whose deadline has passed,
 * then re-arm the hrtimer for the next nearest pending timer, if any.
 * If any fired handler returned 0, queue the OTG work afterwards.
 */
static enum hrtimer_restart ci_otg_hrtimer_func(struct hrtimer *t)
{
	struct ci_hdrc *ci = container_of(t, struct ci_hdrc, otg_fsm_hrtimer);
	ktime_t now, *timeout;
	unsigned long   enabled_timer_bits;
	unsigned long   flags;
	enum otg_fsm_timer cur_timer, next_timer = NUM_OTG_FSM_TIMERS;
	int ret = -EINVAL;
	spin_lock_irqsave(&ci->lock, flags);
	enabled_timer_bits = ci->enabled_otg_timer_bits;
	ci->next_otg_timer = NUM_OTG_FSM_TIMERS;
	now = ktime_get();
	for_each_set_bit(cur_timer, &enabled_timer_bits, NUM_OTG_FSM_TIMERS) {
		if (ktime_compare(now, ci->hr_timeouts[cur_timer]) >= 0) {
			/* expired: disable and run its handler */
			ci->enabled_otg_timer_bits &= ~(1 << cur_timer);
			if (otg_timer_handlers[cur_timer])
				ret = otg_timer_handlers[cur_timer](ci);
		} else {
			/* still pending: track the nearest deadline */
			if ((next_timer == NUM_OTG_FSM_TIMERS) ||
				ktime_before(ci->hr_timeouts[cur_timer],
					ci->hr_timeouts[next_timer]))
				next_timer = cur_timer;
		}
	}
	/* Enable the next nearest timer */
	if (next_timer < NUM_OTG_FSM_TIMERS) {
		timeout = &ci->hr_timeouts[next_timer];
		hrtimer_start_range_ns(&ci->otg_fsm_hrtimer, *timeout,
					NSEC_PER_MSEC, HRTIMER_MODE_ABS);
		ci->next_otg_timer = next_timer;
	}
	spin_unlock_irqrestore(&ci->lock, flags);
	if (!ret)
		ci_otg_queue_work(ci);
	return HRTIMER_NORESTART;
}
/* Initialize timers */
static int ci_otg_init_timers(struct ci_hdrc *ci)
{
hrtimer_init(&ci->otg_fsm_hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
ci->otg_fsm_hrtimer.function = ci_otg_hrtimer_func;
return 0;
}
/* -------------------------------------------------------------*/
/* Operations that will be called from OTG Finite State Machine */
/* -------------------------------------------------------------*/
/* otg_fsm_ops hook: arm timer @t; out-of-range ids are ignored. */
static void ci_otg_fsm_add_timer(struct otg_fsm *fsm, enum otg_fsm_timer t)
{
	struct ci_hdrc *ci = container_of(fsm, struct ci_hdrc, fsm);

	if (t >= NUM_OTG_FSM_TIMERS)
		return;

	ci_otg_add_timer(ci, t);
}
/* otg_fsm_ops hook: cancel timer @t; out-of-range ids are ignored. */
static void ci_otg_fsm_del_timer(struct otg_fsm *fsm, enum otg_fsm_timer t)
{
	struct ci_hdrc *ci = container_of(fsm, struct ci_hdrc, fsm);

	if (t >= NUM_OTG_FSM_TIMERS)
		return;

	ci_otg_del_timer(ci, t);
}
/*
* A-device drive vbus: turn on vbus regulator and enable port power
* Data pulse irq should be disabled while vbus is on.
*/
/*
 * A-device drive vbus: turn on vbus regulator and enable port power
 * Data pulse irq should be disabled while vbus is on.
 */
static void ci_otg_drv_vbus(struct otg_fsm *fsm, int on)
{
	int ret;
	struct ci_hdrc *ci = container_of(fsm, struct ci_hdrc, fsm);
	if (on) {
		/* Enable power */
		hw_write(ci, OP_PORTSC, PORTSC_W1C_BITS | PORTSC_PP,
							PORTSC_PP);
		if (ci->platdata->reg_vbus) {
			ret = regulator_enable(ci->platdata->reg_vbus);
			if (ret) {
				dev_err(ci->dev,
				"Failed to enable vbus regulator, ret=%d\n",
				ret);
				return;
			}
		}
		/* some PHYs gate VBUS themselves instead of a regulator */
		if (ci->platdata->flags & CI_HDRC_PHY_VBUS_CONTROL)
			usb_phy_vbus_on(ci->usb_phy);
		/* Disable data pulse irq */
		hw_write_otgsc(ci, OTGSC_DPIE, 0);
		fsm->a_srp_det = 0;
		fsm->power_up = 0;
	} else {
		if (ci->platdata->reg_vbus)
			regulator_disable(ci->platdata->reg_vbus);
		if (ci->platdata->flags & CI_HDRC_PHY_VBUS_CONTROL)
			usb_phy_vbus_off(ci->usb_phy);
		/* make sure the fsm won't immediately re-raise the session */
		fsm->a_bus_drop = 1;
		fsm->a_bus_req = 0;
	}
}
/*
* Control data line by Run Stop bit.
*/
/* Connect/disconnect the data line by toggling the Run/Stop bit. */
static void ci_otg_loc_conn(struct otg_fsm *fsm, int on)
{
	struct ci_hdrc *ci = container_of(fsm, struct ci_hdrc, fsm);

	hw_write(ci, OP_USBCMD, USBCMD_RS, on ? USBCMD_RS : 0);
}
/*
* Generate SOF by host.
* In host mode, controller will automatically send SOF.
* Suspend will block the data on the port.
*
* This is controlled through usbcore by usb autosuspend,
* so the usb device class driver need support autosuspend,
* otherwise the bus suspend will not happen.
*/
/*
 * Generate SOF by host.
 * In host mode, controller will automatically send SOF.
 * Suspend will block the data on the port.
 *
 * This is controlled through usbcore by usb autosuspend,
 * so the usb device class driver need support autosuspend,
 * otherwise the bus suspend will not happen.
 */
static void ci_otg_loc_sof(struct otg_fsm *fsm, int on)
{
	struct usb_device *udev;
	if (!fsm->otg->host)
		return;
	/* the OTG peripheral is the single device on port 1 */
	udev = usb_hub_find_child(fsm->otg->host->root_hub, 1);
	if (!udev)
		return;
	if (on) {
		usb_disable_autosuspend(udev);
	} else {
		/* zero delay => suspend the port as soon as it goes idle */
		pm_runtime_set_autosuspend_delay(&udev->dev, 0);
		usb_enable_autosuspend(udev);
	}
}
/*
* Start SRP pulsing by data-line pulsing,
* no v-bus pulsing followed
*/
/*
 * Start SRP pulsing by data-line pulsing,
 * no v-bus pulsing followed
 */
static void ci_otg_start_pulse(struct otg_fsm *fsm)
{
	struct ci_hdrc *ci = container_of(fsm, struct ci_hdrc, fsm);
	/* Hardware Assistant Data pulse */
	hw_write_otgsc(ci, OTGSC_HADP, OTGSC_HADP);
	/* keep the controller awake until the pulse finishes; the matching
	 * pm_runtime_put() is in b_data_pls_tmout() */
	pm_runtime_get(ci->dev);
	ci_otg_add_timer(ci, B_DATA_PLS);
}
/*
 * otg_fsm_ops start_host hook: restart the controller in the host role
 * when @on, otherwise fall back to the gadget role. Always returns 0.
 */
static int ci_otg_start_host(struct otg_fsm *fsm, int on)
{
	struct ci_hdrc *ci = container_of(fsm, struct ci_hdrc, fsm);

	/* both directions stop the current role first */
	ci_role_stop(ci);
	ci_role_start(ci, on ? CI_ROLE_HOST : CI_ROLE_GADGET);

	return 0;
}
/*
 * otg_fsm_ops start_gadget hook: report VBUS (dis)connect to the gadget
 * core so it starts/stops the peripheral session. Always returns 0.
 */
static int ci_otg_start_gadget(struct otg_fsm *fsm, int on)
{
	struct ci_hdrc *ci = container_of(fsm, struct ci_hdrc, fsm);
	if (on)
		usb_gadget_vbus_connect(&ci->gadget);
	else
		usb_gadget_vbus_disconnect(&ci->gadget);
	return 0;
}
static struct otg_fsm_ops ci_otg_ops = {
.drv_vbus = ci_otg_drv_vbus,
.loc_conn = ci_otg_loc_conn,
.loc_sof = ci_otg_loc_sof,
.start_pulse = ci_otg_start_pulse,
.add_timer = ci_otg_fsm_add_timer,
.del_timer = ci_otg_fsm_del_timer,
.start_host = ci_otg_start_host,
.start_gadget = ci_otg_start_gadget,
};
/*
 * Run one OTG state-machine pass from the otg work item, then apply
 * chipidea-specific follow-up actions for the state just entered
 * (chained transitions, data-pulse irq enabling, runtime PM handling).
 */
int ci_otg_fsm_work(struct ci_hdrc *ci)
{
	/*
	 * Don't do fsm transition for B device
	 * when there is no gadget class driver
	 */
	if (ci->fsm.id && !(ci->driver) &&
		ci->fsm.otg->state < OTG_STATE_A_IDLE)
		return 0;
	pm_runtime_get_sync(ci->dev);
	if (otg_statemachine(&ci->fsm)) {
		if (ci->fsm.otg->state == OTG_STATE_A_IDLE) {
			/*
			 * Further state change for cases:
			 * a_idle to b_idle; or
			 * a_idle to a_wait_vrise due to ID change(1->0), so
			 * B-dev becomes A-dev can try to start new session
			 * consequently; or
			 * a_idle to a_wait_vrise when power up
			 */
			if ((ci->fsm.id) || (ci->id_event) ||
						(ci->fsm.power_up)) {
				ci_otg_queue_work(ci);
			} else {
				/* Enable data pulse irq */
				hw_write(ci, OP_PORTSC, PORTSC_W1C_BITS |
						PORTSC_PP, 0);
				hw_write_otgsc(ci, OTGSC_DPIS, OTGSC_DPIS);
				hw_write_otgsc(ci, OTGSC_DPIE, OTGSC_DPIE);
			}
			if (ci->id_event)
				ci->id_event = false;
		} else if (ci->fsm.otg->state == OTG_STATE_B_IDLE) {
			if (ci->fsm.b_sess_vld) {
				ci->fsm.power_up = 0;
				/*
				 * Further transite to b_periphearl state
				 * when register gadget driver with vbus on
				 */
				ci_otg_queue_work(ci);
			}
		} else if (ci->fsm.otg->state == OTG_STATE_A_HOST) {
			/* in a_host, let runtime PM idle the controller
			 * instead of the synchronous put below */
			pm_runtime_mark_last_busy(ci->dev);
			pm_runtime_put_autosuspend(ci->dev);
			return 0;
		}
	}
	pm_runtime_put_sync(ci->dev);
	return 0;
}
/*
 * Update fsm variables in each state if catching expected interrupts,
 * called by otg fsm isr.
 */
static void ci_otg_fsm_event(struct ci_hdrc *ci)
{
	u32 intr_sts, otg_bsess_vld, port_conn;
	struct otg_fsm *fsm = &ci->fsm;

	intr_sts = hw_read_intr_status(ci);
	otg_bsess_vld = hw_read_otgsc(ci, OTGSC_BSV);
	port_conn = hw_read(ci, OP_PORTSC, PORTSC_CCS);

	switch (ci->fsm.otg->state) {
	case OTG_STATE_A_WAIT_BCON:
		/* B-device connected: request the bus and continue as host */
		if (port_conn) {
			fsm->b_conn = 1;
			fsm->a_bus_req = 1;
			ci_otg_queue_work(ci);
		}
		break;
	case OTG_STATE_B_IDLE:
		/* port change with VBUS valid and a connection: session valid */
		if (otg_bsess_vld && (intr_sts & USBi_PCI) && port_conn) {
			fsm->b_sess_vld = 1;
			ci_otg_queue_work(ci);
		}
		break;
	case OTG_STATE_B_PERIPHERAL:
		/* bus suspended by A: arm the timer; resumed: cancel it */
		if ((intr_sts & USBi_SLI) && port_conn && otg_bsess_vld) {
			ci_otg_add_timer(ci, B_AIDL_BDIS);
		} else if (intr_sts & USBi_PCI) {
			ci_otg_del_timer(ci, B_AIDL_BDIS);
			if (fsm->a_bus_suspend == 1)
				fsm->a_bus_suspend = 0;
		}
		break;
	case OTG_STATE_B_HOST:
		/* A-peripheral disconnected: give up the host role */
		if ((intr_sts & USBi_PCI) && !port_conn) {
			fsm->a_conn = 0;
			fsm->b_bus_req = 0;
			ci_otg_queue_work(ci);
		}
		break;
	case OTG_STATE_A_PERIPHERAL:
		if (intr_sts & USBi_SLI) {
			fsm->b_bus_suspend = 1;
			/*
			 * Init a timer to know how long this suspend
			 * will continue, if time out, indicates B no longer
			 * wants to be host role
			 */
			ci_otg_add_timer(ci, A_BIDL_ADIS);
		}
		if (intr_sts & USBi_URI)
			ci_otg_del_timer(ci, A_BIDL_ADIS);
		if (intr_sts & USBi_PCI) {
			if (fsm->b_bus_suspend == 1) {
				ci_otg_del_timer(ci, A_BIDL_ADIS);
				fsm->b_bus_suspend = 0;
			}
		}
		break;
	case OTG_STATE_A_SUSPEND:
		if ((intr_sts & USBi_PCI) && !port_conn) {
			fsm->b_conn = 0;
			/* if gadget driver is bound */
			if (ci->driver) {
				/* A device to be peripheral mode */
				ci->gadget.is_a_peripheral = 1;
			}
			ci_otg_queue_work(ci);
		}
		break;
	case OTG_STATE_A_HOST:
		/* B-device disconnected */
		if ((intr_sts & USBi_PCI) && !port_conn) {
			fsm->b_conn = 0;
			ci_otg_queue_work(ci);
		}
		break;
	case OTG_STATE_B_WAIT_ACON:
		/* A-device showed up as a peripheral on our port */
		if ((intr_sts & USBi_PCI) && port_conn) {
			fsm->a_conn = 1;
			ci_otg_queue_work(ci);
		}
		break;
	default:
		break;
	}
}
/*
 * ci_otg_fsm_irq - otg fsm related irq handling
 * and also update otg fsm variable by monitoring usb host and udc
 * state change interrupts.
 * @ci: ci_hdrc
 *
 * Returns IRQ_HANDLED when an OTGSC interrupt was consumed here,
 * IRQ_NONE otherwise (the caller continues regular irq handling).
 */
irqreturn_t ci_otg_fsm_irq(struct ci_hdrc *ci)
{
	irqreturn_t retval = IRQ_NONE;
	u32 otgsc, otg_int_src = 0;
	struct otg_fsm *fsm = &ci->fsm;

	otgsc = hw_read_otgsc(ci, ~0);
	/* sources that are both pending (low bits) and enabled (bits >> 8) */
	otg_int_src = otgsc & OTGSC_INT_STATUS_BITS & (otgsc >> 8);
	fsm->id = (otgsc & OTGSC_ID) ? 1 : 0;

	if (otg_int_src) {
		if (otg_int_src & OTGSC_DPIS) {
			/* data pulse detected: B-device started SRP */
			hw_write_otgsc(ci, OTGSC_DPIS, OTGSC_DPIS);
			fsm->a_srp_det = 1;
			fsm->a_bus_drop = 0;
		} else if (otg_int_src & OTGSC_IDIS) {
			/* ID pin changed */
			hw_write_otgsc(ci, OTGSC_IDIS, OTGSC_IDIS);
			if (fsm->id == 0) {
				/* became A-device: request the bus */
				fsm->a_bus_drop = 0;
				fsm->a_bus_req = 1;
				ci->id_event = true;
			}
		} else if (otg_int_src & OTGSC_BSVIS) {
			/* B-session valid changed */
			hw_write_otgsc(ci, OTGSC_BSVIS, OTGSC_BSVIS);
			if (otgsc & OTGSC_BSV) {
				fsm->b_sess_vld = 1;
				ci_otg_del_timer(ci, B_SSEND_SRP);
				ci_otg_del_timer(ci, B_SRP_FAIL);
				fsm->b_ssend_srp = 0;
			} else {
				fsm->b_sess_vld = 0;
				if (fsm->id)
					ci_otg_add_timer(ci, B_SSEND_SRP);
			}
		} else if (otg_int_src & OTGSC_AVVIS) {
			/* A-VBUS valid changed */
			hw_write_otgsc(ci, OTGSC_AVVIS, OTGSC_AVVIS);
			if (otgsc & OTGSC_AVV) {
				fsm->a_vbus_vld = 1;
			} else {
				fsm->a_vbus_vld = 0;
				fsm->b_conn = 0;
			}
		}
		ci_otg_queue_work(ci);
		return IRQ_HANDLED;
	}

	/* no OTGSC interrupt: still track host/udc state changes */
	ci_otg_fsm_event(ci);

	return retval;
}
/* Kick the OTG state machine once, e.g. when the controller starts */
void ci_hdrc_otg_fsm_start(struct ci_hdrc *ci)
{
	ci_otg_queue_work(ci);
}
/*
 * ci_hdrc_otg_fsm_init - initialize the OTG FSM for this controller
 * @ci: the controller
 *
 * Sets up the fsm state, ops and timers, registers the sysfs inputs
 * group and enables the VBUS-related interrupts.  Returns 0 on success
 * or a negative error code.
 */
int ci_hdrc_otg_fsm_init(struct ci_hdrc *ci)
{
	int retval = 0;

	if (ci->phy)
		ci->otg.phy = ci->phy;
	else
		ci->otg.usb_phy = ci->usb_phy;

	ci->otg.gadget = &ci->gadget;
	ci->fsm.otg = &ci->otg;
	ci->fsm.power_up = 1;
	/* initial role from the ID pin (1 => B-device) */
	ci->fsm.id = hw_read_otgsc(ci, OTGSC_ID) ? 1 : 0;
	ci->fsm.otg->state = OTG_STATE_UNDEFINED;
	ci->fsm.ops = &ci_otg_ops;
	ci->gadget.hnp_polling_support = 1;
	/* one byte, presumably polled for HNP host requests — see otg-fsm */
	ci->fsm.host_req_flag = devm_kzalloc(ci->dev, 1, GFP_KERNEL);
	if (!ci->fsm.host_req_flag)
		return -ENOMEM;

	mutex_init(&ci->fsm.lock);

	retval = ci_otg_init_timers(ci);
	if (retval) {
		dev_err(ci->dev, "Couldn't init OTG timers\n");
		return retval;
	}
	ci->enabled_otg_timer_bits = 0;
	ci->next_otg_timer = NUM_OTG_FSM_TIMERS;

	retval = sysfs_create_group(&ci->dev->kobj, &inputs_attr_group);
	if (retval < 0) {
		dev_dbg(ci->dev,
			"Can't register sysfs attr group: %d\n", retval);
		return retval;
	}

	/* Enable A vbus valid irq */
	hw_write_otgsc(ci, OTGSC_AVVIE, OTGSC_AVVIE);

	if (ci->fsm.id) {
		/* B-device: seed session state from the current BSV level */
		ci->fsm.b_ssend_srp =
			hw_read_otgsc(ci, OTGSC_BSV) ? 0 : 1;
		ci->fsm.b_sess_vld =
			hw_read_otgsc(ci, OTGSC_BSV) ? 1 : 0;
		/* Enable BSV irq */
		hw_write_otgsc(ci, OTGSC_BSVIE, OTGSC_BSVIE);
	}

	return 0;
}
/* Tear down what ci_hdrc_otg_fsm_init() registered in sysfs */
void ci_hdrc_otg_fsm_remove(struct ci_hdrc *ci)
{
	sysfs_remove_group(&ci->dev->kobj, &inputs_attr_group);
}
| linux-master | drivers/usb/chipidea/otg_fsm.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Chipidea Device Mode Trace Support
*
* Copyright (C) 2020 NXP
*
* Author: Peter Chen <[email protected]>
*/
#define CREATE_TRACE_POINTS
#include "trace.h"
/*
 * ci_log - emit a printf-style message to the ci_log trace event
 * @ci: the controller
 * @fmt: printf format string
 *
 * Packs the variadic arguments into a struct va_format so the trace
 * event can render the message.
 */
void ci_log(struct ci_hdrc *ci, const char *fmt, ...)
{
	struct va_format vaf;
	va_list args;

	va_start(args, fmt);
	vaf.fmt = fmt;
	vaf.va = &args;
	trace_ci_log(ci, &vaf);
	va_end(args);
}
| linux-master | drivers/usb/chipidea/trace.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (c) 2016 Linaro Ltd.
*/
#include <linux/device.h>
#include <linux/usb/chipidea.h>
#include <linux/ulpi/interface.h>
#include "ci.h"
#define ULPI_WAKEUP BIT(31)
#define ULPI_RUN BIT(30)
#define ULPI_WRITE BIT(29)
#define ULPI_SYNC_STATE BIT(27)
#define ULPI_ADDR(n) ((n) << 16)
#define ULPI_DATA(n) (n)
/*
 * Poll the ULPI viewport until the bits in @mask clear, for up to
 * ~10 ms.  Returns 0 on success, -ETIMEDOUT otherwise.
 */
static int ci_ulpi_wait(struct ci_hdrc *ci, u32 mask)
{
	unsigned long tries;

	for (tries = 10000; tries; tries--) {
		if (!hw_read(ci, OP_ULPI_VIEWPORT, mask))
			return 0;
		udelay(1);
	}

	return -ETIMEDOUT;
}
/*
 * ci_ulpi_read - read a PHY register through the ULPI viewport
 * @dev: the controller device (drvdata is the ci_hdrc)
 * @addr: ULPI register address
 *
 * Returns the 8-bit register value or a negative error code.
 */
static int ci_ulpi_read(struct device *dev, u8 addr)
{
	struct ci_hdrc *ci = dev_get_drvdata(dev);
	int ret;

	/* wake the ULPI link before touching the viewport */
	hw_write(ci, OP_ULPI_VIEWPORT, 0xffffffff, ULPI_WRITE | ULPI_WAKEUP);
	ret = ci_ulpi_wait(ci, ULPI_WAKEUP);
	if (ret)
		return ret;

	/* start the read transaction and wait for completion */
	hw_write(ci, OP_ULPI_VIEWPORT, 0xffffffff, ULPI_RUN | ULPI_ADDR(addr));
	ret = ci_ulpi_wait(ci, ULPI_RUN);
	if (ret)
		return ret;

	/* read data is returned in bits 15:8 of the viewport */
	return hw_read(ci, OP_ULPI_VIEWPORT, GENMASK(15, 8)) >> 8;
}
/*
 * ci_ulpi_write - write a PHY register through the ULPI viewport
 * @dev: the controller device (drvdata is the ci_hdrc)
 * @addr: ULPI register address
 * @val: value to write
 *
 * Returns 0 on success or a negative error code.
 */
static int ci_ulpi_write(struct device *dev, u8 addr, u8 val)
{
	struct ci_hdrc *ci = dev_get_drvdata(dev);
	int ret;

	/* wake the ULPI link before touching the viewport */
	hw_write(ci, OP_ULPI_VIEWPORT, 0xffffffff, ULPI_WRITE | ULPI_WAKEUP);
	ret = ci_ulpi_wait(ci, ULPI_WAKEUP);
	if (ret)
		return ret;

	/* start the write transaction and wait for completion */
	hw_write(ci, OP_ULPI_VIEWPORT, 0xffffffff,
		 ULPI_RUN | ULPI_WRITE | ULPI_ADDR(addr) | val);
	return ci_ulpi_wait(ci, ULPI_RUN);
}
/*
 * ci_ulpi_init - register the ULPI interface used for PHY identification
 * @ci: the controller
 *
 * No-op unless the PHY interface mode is ULPI.  Returns 0 on success
 * or a negative error code.
 */
int ci_ulpi_init(struct ci_hdrc *ci)
{
	if (ci->platdata->phy_mode != USBPHY_INTERFACE_MODE_ULPI)
		return 0;

	/*
	 * Set PORTSC correctly so we can read/write ULPI registers for
	 * identification purposes
	 */
	hw_phymode_configure(ci);

	ci->ulpi_ops.read = ci_ulpi_read;
	ci->ulpi_ops.write = ci_ulpi_write;
	ci->ulpi = ulpi_register_interface(ci->dev, &ci->ulpi_ops);
	if (IS_ERR(ci->ulpi))
		dev_err(ci->dev, "failed to register ULPI interface");

	return PTR_ERR_OR_ZERO(ci->ulpi);
}
/* Unregister the ULPI interface, if one was registered */
void ci_ulpi_exit(struct ci_hdrc *ci)
{
	if (!ci->ulpi)
		return;

	ulpi_unregister_interface(ci->ulpi);
	ci->ulpi = NULL;
}
/*
 * Wait for the ULPI link to report sync state after resume.
 * Returns 0 on success, -ETIMEDOUT otherwise; no-op for non-ULPI PHYs.
 */
int ci_ulpi_resume(struct ci_hdrc *ci)
{
	int tries;

	if (ci->platdata->phy_mode != USBPHY_INTERFACE_MODE_ULPI)
		return 0;

	for (tries = 100000; tries > 0; tries--) {
		if (hw_read(ci, OP_ULPI_VIEWPORT, ULPI_SYNC_STATE))
			return 0;
		udelay(1);
	}

	return -ETIMEDOUT;
}
| linux-master | drivers/usb/chipidea/ulpi.c |
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2010, Code Aurora Forum. All rights reserved. */
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/usb/chipidea.h>
#include <linux/clk.h>
#include <linux/reset.h>
#include <linux/mfd/syscon.h>
#include <linux/regmap.h>
#include <linux/io.h>
#include <linux/reset-controller.h>
#include <linux/extcon.h>
#include <linux/of.h>
#include "ci.h"
#define HS_PHY_AHB_MODE 0x0098
#define HS_PHY_GENCONFIG 0x009c
#define HS_PHY_TXFIFO_IDLE_FORCE_DIS BIT(4)
#define HS_PHY_GENCONFIG_2 0x00a0
#define HS_PHY_SESS_VLD_CTRL_EN BIT(7)
#define HS_PHY_ULPI_TX_PKT_EN_CLR_FIX BIT(19)
#define HSPHY_SESS_VLD_CTRL BIT(25)
/* Vendor base starts at 0x200 beyond CI base */
#define HS_PHY_CTRL 0x0040
#define HS_PHY_SEC_CTRL 0x0078
#define HS_PHY_DIG_CLAMP_N BIT(16)
#define HS_PHY_POR_ASSERT BIT(0)
/* Per-device glue state for the Qualcomm MSM chipidea wrapper */
struct ci_hdrc_msm {
	struct platform_device *ci;	/* registered chipidea core device */
	struct clk *core_clk;
	struct clk *iface_clk;
	struct clk *fs_clk;		/* optional, only used around reset */
	struct ci_hdrc_platform_data pdata;
	struct reset_controller_dev rcdev; /* exposes the two PHY POR resets */
	bool secondary_phy;	/* secondary HS PHY selected via "phy-select" */
	bool hsic;		/* ULPI child PHY is qcom,usb-hsic-phy */
	void __iomem *base;	/* vendor register region (2nd mem resource) */
};
/*
 * ci_hdrc_msm_por_reset - pulse a HS PHY power-on-reset bit
 * @r: reset controller embedded in struct ci_hdrc_msm
 * @id: 0 => primary PHY (HS_PHY_CTRL), non-zero => secondary (HS_PHY_SEC_CTRL)
 */
static int
ci_hdrc_msm_por_reset(struct reset_controller_dev *r, unsigned long id)
{
	struct ci_hdrc_msm *ci_msm = container_of(r, struct ci_hdrc_msm, rcdev);
	void __iomem *addr = ci_msm->base;
	u32 val;

	if (id)
		addr += HS_PHY_SEC_CTRL;
	else
		addr += HS_PHY_CTRL;

	val = readl_relaxed(addr);
	val |= HS_PHY_POR_ASSERT;
	writel(val, addr);
	/*
	 * wait for minimum 10 microseconds as suggested by manual.
	 * Use a slightly larger value since the exact value didn't
	 * work 100% of the time.
	 */
	udelay(12);
	val &= ~HS_PHY_POR_ASSERT;
	writel(val, addr);

	return 0;
}

static const struct reset_control_ops ci_hdrc_msm_reset_ops = {
	.reset = ci_hdrc_msm_por_reset,
};
/*
 * ci_hdrc_msm_notify_event - glue-layer hook for core controller events
 * @ci: the controller
 * @event: one of the CI_HDRC_CONTROLLER_* events
 *
 * On reset: reconfigures the PHY mode, powers the PHY up and applies
 * the MSM-specific register workarounds.  On stop: powers the PHY down.
 */
static int ci_hdrc_msm_notify_event(struct ci_hdrc *ci, unsigned event)
{
	struct device *dev = ci->dev->parent;
	struct ci_hdrc_msm *msm_ci = dev_get_drvdata(dev);
	int ret;

	switch (event) {
	case CI_HDRC_CONTROLLER_RESET_EVENT:
		dev_dbg(dev, "CI_HDRC_CONTROLLER_RESET_EVENT received\n");
		hw_phymode_configure(ci);
		if (msm_ci->secondary_phy) {
			/* keep the digital clamp set on the secondary PHY */
			u32 val = readl_relaxed(msm_ci->base + HS_PHY_SEC_CTRL);
			val |= HS_PHY_DIG_CLAMP_N;
			writel_relaxed(val, msm_ci->base + HS_PHY_SEC_CTRL);
		}
		ret = phy_init(ci->phy);
		if (ret)
			return ret;
		ret = phy_power_on(ci->phy);
		if (ret) {
			phy_exit(ci->phy);
			return ret;
		}
		/* use AHB transactor, allow posted data writes */
		hw_write_id_reg(ci, HS_PHY_AHB_MODE, 0xffffffff, 0x8);
		/* workaround for rx buffer collision issue */
		hw_write_id_reg(ci, HS_PHY_GENCONFIG,
				HS_PHY_TXFIFO_IDLE_FORCE_DIS, 0);
		if (!msm_ci->hsic)
			hw_write_id_reg(ci, HS_PHY_GENCONFIG_2,
					HS_PHY_ULPI_TX_PKT_EN_CLR_FIX, 0);
		if (!IS_ERR(ci->platdata->vbus_extcon.edev) || ci->role_switch) {
			/* let software control the session-valid signal */
			hw_write_id_reg(ci, HS_PHY_GENCONFIG_2,
					HS_PHY_SESS_VLD_CTRL_EN,
					HS_PHY_SESS_VLD_CTRL_EN);
			hw_write(ci, OP_USBCMD, HSPHY_SESS_VLD_CTRL,
				 HSPHY_SESS_VLD_CTRL);
		}
		break;
	case CI_HDRC_CONTROLLER_STOPPED_EVENT:
		dev_dbg(dev, "CI_HDRC_CONTROLLER_STOPPED_EVENT received\n");
		phy_power_off(ci->phy);
		phy_exit(ci->phy);
		break;
	default:
		dev_dbg(dev, "unknown ci_hdrc event\n");
		break;
	}

	return 0;
}
/*
 * ci_hdrc_msm_mux_phy - select primary/secondary HS PHY via syscon
 * @ci: glue-layer state
 * @pdev: platform device carrying the optional "phy-select" property
 *
 * "phy-select" is <&syscon reg value>; a non-zero value selects the
 * secondary PHY.  Returns 0 if the property is absent or the mux was
 * programmed, otherwise a negative error code.
 */
static int ci_hdrc_msm_mux_phy(struct ci_hdrc_msm *ci,
			       struct platform_device *pdev)
{
	struct regmap *regmap;
	struct device *dev = &pdev->dev;
	struct of_phandle_args args;
	u32 val;
	int ret;

	ret = of_parse_phandle_with_fixed_args(dev->of_node, "phy-select", 2, 0,
					       &args);
	if (ret)
		return 0;	/* the property is optional */

	regmap = syscon_node_to_regmap(args.np);
	of_node_put(args.np);
	if (IS_ERR(regmap))
		return PTR_ERR(regmap);

	ret = regmap_write(regmap, args.args[0], args.args[1]);
	if (ret)
		return ret;

	ci->secondary_phy = !!args.args[1];
	if (ci->secondary_phy) {
		/* set the digital clamp for the secondary PHY */
		val = readl_relaxed(ci->base + HS_PHY_SEC_CTRL);
		val |= HS_PHY_DIG_CLAMP_N;
		writel_relaxed(val, ci->base + HS_PHY_SEC_CTRL);
	}

	return 0;
}
/*
 * ci_hdrc_msm_probe - acquire clocks/resets, reset the block and
 * register the chipidea core device.
 */
static int ci_hdrc_msm_probe(struct platform_device *pdev)
{
	struct ci_hdrc_msm *ci;
	struct platform_device *plat_ci;
	struct clk *clk;
	struct reset_control *reset;
	int ret;
	struct device_node *ulpi_node, *phy_node;

	dev_dbg(&pdev->dev, "ci_hdrc_msm_probe\n");

	ci = devm_kzalloc(&pdev->dev, sizeof(*ci), GFP_KERNEL);
	if (!ci)
		return -ENOMEM;
	platform_set_drvdata(pdev, ci);

	ci->pdata.name = "ci_hdrc_msm";
	ci->pdata.capoffset = DEF_CAPOFFSET;
	ci->pdata.flags = CI_HDRC_REGS_SHARED | CI_HDRC_DISABLE_STREAMING |
			  CI_HDRC_OVERRIDE_AHB_BURST |
			  CI_HDRC_OVERRIDE_PHY_CONTROL;
	ci->pdata.notify_event = ci_hdrc_msm_notify_event;

	reset = devm_reset_control_get(&pdev->dev, "core");
	if (IS_ERR(reset))
		return PTR_ERR(reset);

	ci->core_clk = clk = devm_clk_get(&pdev->dev, "core");
	if (IS_ERR(clk))
		return PTR_ERR(clk);

	ci->iface_clk = clk = devm_clk_get(&pdev->dev, "iface");
	if (IS_ERR(clk))
		return PTR_ERR(clk);

	ci->fs_clk = clk = devm_clk_get_optional(&pdev->dev, "fs");
	if (IS_ERR(clk))
		return PTR_ERR(clk);

	/* second mem resource is the vendor-specific register region */
	ci->base = devm_platform_ioremap_resource(pdev, 1);
	if (IS_ERR(ci->base))
		return PTR_ERR(ci->base);

	/* expose the two PHY POR bits as a reset controller */
	ci->rcdev.owner = THIS_MODULE;
	ci->rcdev.ops = &ci_hdrc_msm_reset_ops;
	ci->rcdev.of_node = pdev->dev.of_node;
	ci->rcdev.nr_resets = 2;
	ret = devm_reset_controller_register(&pdev->dev, &ci->rcdev);
	if (ret)
		return ret;

	/* the optional "fs" clock is only enabled around the core reset */
	ret = clk_prepare_enable(ci->fs_clk);
	if (ret)
		return ret;

	reset_control_assert(reset);
	usleep_range(10000, 12000);
	reset_control_deassert(reset);

	clk_disable_unprepare(ci->fs_clk);

	ret = clk_prepare_enable(ci->core_clk);
	if (ret)
		return ret;

	ret = clk_prepare_enable(ci->iface_clk);
	if (ret)
		goto err_iface;

	ret = ci_hdrc_msm_mux_phy(ci, pdev);
	if (ret)
		goto err_mux;

	/* detect an HSIC child PHY; notify_event skips a ULPI fix for it */
	ulpi_node = of_get_child_by_name(pdev->dev.of_node, "ulpi");
	if (ulpi_node) {
		phy_node = of_get_next_available_child(ulpi_node, NULL);
		ci->hsic = of_device_is_compatible(phy_node, "qcom,usb-hsic-phy");
		of_node_put(phy_node);
	}
	of_node_put(ulpi_node);

	plat_ci = ci_hdrc_add_device(&pdev->dev, pdev->resource,
				     pdev->num_resources, &ci->pdata);
	if (IS_ERR(plat_ci)) {
		ret = PTR_ERR(plat_ci);
		if (ret != -EPROBE_DEFER)
			dev_err(&pdev->dev, "ci_hdrc_add_device failed!\n");
		goto err_mux;
	}

	ci->ci = plat_ci;

	pm_runtime_set_active(&pdev->dev);
	pm_runtime_no_callbacks(&pdev->dev);
	pm_runtime_enable(&pdev->dev);

	return 0;

err_mux:
	clk_disable_unprepare(ci->iface_clk);
err_iface:
	clk_disable_unprepare(ci->core_clk);
	return ret;
}
/* Undo probe: remove the core device, then release clocks in reverse order */
static void ci_hdrc_msm_remove(struct platform_device *pdev)
{
	struct ci_hdrc_msm *ci = platform_get_drvdata(pdev);

	pm_runtime_disable(&pdev->dev);
	ci_hdrc_remove_device(ci->ci);
	clk_disable_unprepare(ci->iface_clk);
	clk_disable_unprepare(ci->core_clk);
}
/* Device-tree match table and platform driver registration */
static const struct of_device_id msm_ci_dt_match[] = {
	{ .compatible = "qcom,ci-hdrc", },
	{ }
};
MODULE_DEVICE_TABLE(of, msm_ci_dt_match);

static struct platform_driver ci_hdrc_msm_driver = {
	.probe = ci_hdrc_msm_probe,
	.remove_new = ci_hdrc_msm_remove,
	.driver = {
		.name = "msm_hsusb",
		.of_match_table = msm_ci_dt_match,
	},
};
module_platform_driver(ci_hdrc_msm_driver);

MODULE_ALIAS("platform:msm_hsusb");
MODULE_ALIAS("platform:ci13xxx_msm");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/usb/chipidea/ci_hdrc_msm.c |
// SPDX-License-Identifier: GPL-2.0
/*
* ci_hdrc_pci.c - MIPS USB IP core family device controller
*
* Copyright (C) 2008 Chipidea - MIPS Technologies, Inc. All rights reserved.
*
* Author: David Lopo
*/
#include <linux/platform_device.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/usb/gadget.h>
#include <linux/usb/chipidea.h>
#include <linux/usb/usb_phy_generic.h>
/* driver name */
#define UDC_DRIVER_NAME "ci_hdrc_pci"
/* Per-device glue state: the registered core device and the nop PHY */
struct ci_hdrc_pci {
	struct platform_device *ci;
	struct platform_device *phy;
};
/******************************************************************************
* PCI block
*****************************************************************************/
/* generic: capability registers at the default offset */
static struct ci_hdrc_platform_data pci_platdata = {
	.name = UDC_DRIVER_NAME,
	.capoffset = DEF_CAPOFFSET,
};

/* Intel Langwell: capability registers at offset 0 */
static struct ci_hdrc_platform_data langwell_pci_platdata = {
	.name = UDC_DRIVER_NAME,
	.capoffset = 0,
};

/* Intel Penwell: like Langwell, with a 200 power budget */
static struct ci_hdrc_platform_data penwell_pci_platdata = {
	.name = UDC_DRIVER_NAME,
	.capoffset = 0,
	.power_budget = 200,
};
/**
* ci_hdrc_pci_probe: PCI probe
* @pdev: USB device controller being probed
* @id: PCI hotplug ID connecting controller to UDC framework
*
* This function returns an error code
* Allocates basic PCI resources for this USB device controller, and then
* invokes the udc_probe() method to start the UDC associated with it
*/
static int ci_hdrc_pci_probe(struct pci_dev *pdev,
			     const struct pci_device_id *id)
{
	struct ci_hdrc_platform_data *platdata = (void *)id->driver_data;
	struct ci_hdrc_pci *ci;
	struct resource res[3];
	int retval = 0, nres = 2;

	if (!platdata) {
		dev_err(&pdev->dev, "device doesn't provide driver data\n");
		return -ENODEV;
	}

	ci = devm_kzalloc(&pdev->dev, sizeof(*ci), GFP_KERNEL);
	if (!ci)
		return -ENOMEM;

	retval = pcim_enable_device(pdev);
	if (retval)
		return retval;

	if (!pdev->irq) {
		dev_err(&pdev->dev, "No IRQ, check BIOS/PCI setup!");
		return -ENODEV;
	}

	pci_set_master(pdev);
	pci_try_set_mwi(pdev);

	/* register a nop PHY */
	ci->phy = usb_phy_generic_register();
	if (IS_ERR(ci->phy))
		return PTR_ERR(ci->phy);

	/* translate BAR 0 and the PCI irq into platform resources */
	memset(res, 0, sizeof(res));
	res[0].start = pci_resource_start(pdev, 0);
	res[0].end = pci_resource_end(pdev, 0);
	res[0].flags = IORESOURCE_MEM;
	res[1].start = pdev->irq;
	res[1].flags = IORESOURCE_IRQ;

	ci->ci = ci_hdrc_add_device(&pdev->dev, res, nres, platdata);
	if (IS_ERR(ci->ci)) {
		dev_err(&pdev->dev, "ci_hdrc_add_device failed!\n");
		usb_phy_generic_unregister(ci->phy);
		return PTR_ERR(ci->ci);
	}

	pci_set_drvdata(pdev, ci);

	return 0;
}
/**
* ci_hdrc_pci_remove: PCI remove
* @pdev: USB Device Controller being removed
*
* Reverses the effect of ci_hdrc_pci_probe(),
* first invoking the udc_remove() and then releases
* all PCI resources allocated for this USB device controller
*/
static void ci_hdrc_pci_remove(struct pci_dev *pdev)
{
	struct ci_hdrc_pci *ci = pci_get_drvdata(pdev);

	/* mirror of probe: remove the core device, then the nop PHY */
	ci_hdrc_remove_device(ci->ci);
	usb_phy_generic_unregister(ci->phy);
}
/*
* PCI device table
* PCI device structure
*
* Check "pci.h" for details
*
* Note: ehci-pci driver may try to probe the device first. You have to add an
* ID to the bypass_pci_id_table in ehci-pci driver to prevent this.
*/
/* PCI id table and driver registration */
static const struct pci_device_id ci_hdrc_pci_id_table[] = {
	{
		PCI_DEVICE(0x153F, 0x1004),
		.driver_data = (kernel_ulong_t)&pci_platdata,
	},
	{
		PCI_DEVICE(0x153F, 0x1006),
		.driver_data = (kernel_ulong_t)&pci_platdata,
	},
	{
		PCI_VDEVICE(INTEL, 0x0811),
		.driver_data = (kernel_ulong_t)&langwell_pci_platdata,
	},
	{
		PCI_VDEVICE(INTEL, 0x0829),
		.driver_data = (kernel_ulong_t)&penwell_pci_platdata,
	},
	{
		/* Intel Clovertrail */
		PCI_VDEVICE(INTEL, 0xe006),
		.driver_data = (kernel_ulong_t)&penwell_pci_platdata,
	},
	{ 0 } /* end: all zeroes */
};
MODULE_DEVICE_TABLE(pci, ci_hdrc_pci_id_table);

static struct pci_driver ci_hdrc_pci_driver = {
	.name = UDC_DRIVER_NAME,
	.id_table = ci_hdrc_pci_id_table,
	.probe = ci_hdrc_pci_probe,
	.remove = ci_hdrc_pci_remove,
};
module_pci_driver(ci_hdrc_pci_driver);

MODULE_AUTHOR("MIPS - David Lopo <[email protected]>");
MODULE_DESCRIPTION("MIPS CI13XXX USB Peripheral Controller");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:ci13xxx_pci");
| linux-master | drivers/usb/chipidea/ci_hdrc_pci.c |
// SPDX-License-Identifier: GPL-2.0
/*
* udc.c - ChipIdea UDC driver
*
* Copyright (C) 2008 Chipidea - MIPS Technologies, Inc. All rights reserved.
*
* Author: David Lopo
*/
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/dmapool.h>
#include <linux/err.h>
#include <linux/irqreturn.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/pm_runtime.h>
#include <linux/pinctrl/consumer.h>
#include <linux/usb/ch9.h>
#include <linux/usb/gadget.h>
#include <linux/usb/otg-fsm.h>
#include <linux/usb/chipidea.h>
#include "ci.h"
#include "udc.h"
#include "bits.h"
#include "otg.h"
#include "otg_fsm.h"
#include "trace.h"
/* control endpoint description: EP0 OUT/IN, max packet CTRL_PAYLOAD_MAX */
static const struct usb_endpoint_descriptor
ctrl_endpt_out_desc = {
	.bLength = USB_DT_ENDPOINT_SIZE,
	.bDescriptorType = USB_DT_ENDPOINT,
	.bEndpointAddress = USB_DIR_OUT,
	.bmAttributes = USB_ENDPOINT_XFER_CONTROL,
	.wMaxPacketSize = cpu_to_le16(CTRL_PAYLOAD_MAX),
};

static const struct usb_endpoint_descriptor
ctrl_endpt_in_desc = {
	.bLength = USB_DT_ENDPOINT_SIZE,
	.bDescriptorType = USB_DT_ENDPOINT,
	.bEndpointAddress = USB_DIR_IN,
	.bmAttributes = USB_ENDPOINT_XFER_CONTROL,
	.wMaxPacketSize = cpu_to_le16(CTRL_PAYLOAD_MAX),
};

/* forward declaration: called from _hardware_enqueue() */
static int reprime_dtd(struct ci_hdrc *ci, struct ci_hw_ep *hwep,
		struct td_node *node);
/**
* hw_ep_bit: calculates the bit number
* @num: endpoint number
* @dir: endpoint direction
*
* This function returns bit number
*/
/* TX endpoints occupy the upper half (bits 16..) of the per-ep registers */
static inline int hw_ep_bit(int num, int dir)
{
	if (dir == TX)
		return num + 16;
	return num;
}
/* Map endpoint index to its register bit; TX half starts at bit 16 */
static inline int ep_to_bit(struct ci_hdrc *ci, int n)
{
	int half = ci->hw_ep_max / 2;

	if (n >= half)
		n += 16 - half;

	return n;
}
/**
 * hw_device_state: enables/disables interrupts (execute without interruption)
 * @ci: the controller
 * @dma: 0 => disable, !0 => enable and set dma engine
 *
 * When enabling, @dma is also written to ENDPTLISTADDR as the endpoint
 * (queue head) list base address.  This function returns an error code.
 */
static int hw_device_state(struct ci_hdrc *ci, u32 dma)
{
	if (dma) {
		hw_write(ci, OP_ENDPTLISTADDR, ~0, dma);
		/* interrupt, error, port change, reset, sleep/suspend */
		hw_write(ci, OP_USBINTR, ~0,
			 USBi_UI|USBi_UEI|USBi_PCI|USBi_URI|USBi_SLI);
	} else {
		hw_write(ci, OP_USBINTR, ~0, 0);
	}
	return 0;
}
/**
 * hw_ep_flush: flush endpoint fifo (execute without interruption)
 * @ci: the controller
 * @num: endpoint number
 * @dir: endpoint direction
 *
 * Busy-waits until the flush completes; re-flushes as long as the
 * endpoint status bit remains set (a transfer got primed meanwhile).
 * This function returns an error code.
 */
static int hw_ep_flush(struct ci_hdrc *ci, int num, int dir)
{
	int n = hw_ep_bit(num, dir);

	do {
		/* flush any pending transfer */
		hw_write(ci, OP_ENDPTFLUSH, ~0, BIT(n));
		while (hw_read(ci, OP_ENDPTFLUSH, BIT(n)))
			cpu_relax();
	} while (hw_read(ci, OP_ENDPTSTAT, BIT(n)));

	return 0;
}
/**
 * hw_ep_disable: disables endpoint (execute without interruption)
 * @ci: the controller
 * @num: endpoint number
 * @dir: endpoint direction
 *
 * Clears only the enable bit for the given direction.
 * This function returns an error code.
 */
static int hw_ep_disable(struct ci_hdrc *ci, int num, int dir)
{
	hw_write(ci, OP_ENDPTCTRL + num,
		 (dir == TX) ? ENDPTCTRL_TXE : ENDPTCTRL_RXE, 0);
	return 0;
}
/**
* hw_ep_enable: enables endpoint (execute without interruption)
* @ci: the controller
* @num: endpoint number
* @dir: endpoint direction
* @type: endpoint type
*
* This function returns an error code
*/
static int hw_ep_enable(struct ci_hdrc *ci, int num, int dir, int type)
{
	u32 type_mask, stall_bit, toggle_bit, enable_bit;
	u32 mask, data;

	/* pick the per-direction fields of the ENDPTCTRL register */
	if (dir == TX) {
		type_mask  = ENDPTCTRL_TXT;
		stall_bit  = ENDPTCTRL_TXS;
		toggle_bit = ENDPTCTRL_TXR;
		enable_bit = ENDPTCTRL_TXE;
	} else {
		type_mask  = ENDPTCTRL_RXT;
		stall_bit  = ENDPTCTRL_RXS;
		toggle_bit = ENDPTCTRL_RXR;
		enable_bit = ENDPTCTRL_RXE;
	}

	/* set type, unstall, reset the data toggle and enable in one write */
	mask = type_mask | stall_bit | toggle_bit | enable_bit;
	data = (type << __ffs(type_mask)) | toggle_bit | enable_bit;

	hw_write(ci, OP_ENDPTCTRL + num, mask, data);
	return 0;
}
/**
* hw_ep_get_halt: return endpoint halt status
* @ci: the controller
* @num: endpoint number
* @dir: endpoint direction
*
* This function returns 1 if endpoint halted
*/
static int hw_ep_get_halt(struct ci_hdrc *ci, int num, int dir)
{
	u32 stall_bit = (dir == TX) ? ENDPTCTRL_TXS : ENDPTCTRL_RXS;

	/* normalize the register mask to 0/1 */
	return !!hw_read(ci, OP_ENDPTCTRL + num, stall_bit);
}
/**
 * hw_ep_prime: primes endpoint (execute without interruption)
 * @ci: the controller
 * @num: endpoint number
 * @dir: endpoint direction
 * @is_ctrl: true if control endpoint
 *
 * Returns -EAGAIN when a setup packet arrived while priming a control
 * OUT endpoint, otherwise 0.
 */
static int hw_ep_prime(struct ci_hdrc *ci, int num, int dir, int is_ctrl)
{
	int n = hw_ep_bit(num, dir);

	/* Synchronize before ep prime */
	wmb();

	if (is_ctrl && dir == RX && hw_read(ci, OP_ENDPTSETUPSTAT, BIT(num)))
		return -EAGAIN;

	hw_write(ci, OP_ENDPTPRIME, ~0, BIT(n));

	while (hw_read(ci, OP_ENDPTPRIME, BIT(n)))
		cpu_relax();
	if (is_ctrl && dir == RX && hw_read(ci, OP_ENDPTSETUPSTAT, BIT(num)))
		return -EAGAIN;

	/* status should be tested according with manual but it doesn't work */
	return 0;
}

/**
 * hw_ep_set_halt: configures ep halt & resets data toggle after clear (execute
 *                 without interruption)
 * @ci: the controller
 * @num: endpoint number
 * @dir: endpoint direction
 * @value: true => stall, false => unstall
 *
 * Retries the register write until the hardware reports the requested
 * halt state.  This function returns an error code.
 */
static int hw_ep_set_halt(struct ci_hdrc *ci, int num, int dir, int value)
{
	if (value != 0 && value != 1)
		return -EINVAL;

	do {
		enum ci_hw_regs reg = OP_ENDPTCTRL + num;
		u32 mask_xs = (dir == TX) ? ENDPTCTRL_TXS : ENDPTCTRL_RXS;
		u32 mask_xr = (dir == TX) ? ENDPTCTRL_TXR : ENDPTCTRL_RXR;

		/* data toggle - reserved for EP0 but it's in ESS */
		hw_write(ci, reg, mask_xs|mask_xr,
			value ? mask_xs : mask_xr);
	} while (value != hw_ep_get_halt(ci, num, dir));

	return 0;
}
/**
* hw_port_is_high_speed: test if port is high speed
* @ci: the controller
*
* This function returns true if high speed port
*/
static int hw_port_is_high_speed(struct ci_hdrc *ci)
{
return ci->hw_bank.lpm ? hw_read(ci, OP_DEVLC, DEVLC_PSPD) :
hw_read(ci, OP_PORTSC, PORTSC_HSP);
}
/**
 * hw_test_and_clear_complete: test & clear complete status (execute without
 *                             interruption)
 * @ci: the controller
 * @n: endpoint number
 *
 * This function returns complete status
 */
static int hw_test_and_clear_complete(struct ci_hdrc *ci, int n)
{
	n = ep_to_bit(ci, n);
	return hw_test_and_clear(ci, OP_ENDPTCOMPLETE, BIT(n));
}

/**
 * hw_test_and_clear_intr_active: test & clear active interrupts (execute
 *                                without interruption)
 * @ci: the controller
 *
 * This function returns active interrupts
 */
static u32 hw_test_and_clear_intr_active(struct ci_hdrc *ci)
{
	/* only interrupts that are both pending and enabled */
	u32 reg = hw_read_intr_status(ci) & hw_read_intr_enable(ci);

	/* USBSTS is write-1-to-clear */
	hw_write(ci, OP_USBSTS, ~0, reg);
	return reg;
}
/*
 * Both helpers below toggle USBCMD_SUTW, the setup tripwire bit used to
 * read the 8-byte setup packet coherently (see the controller manual).
 */

/**
 * hw_test_and_clear_setup_guard: test & clear setup guard (execute without
 *                                interruption)
 * @ci: the controller
 *
 * This function returns guard value
 */
static int hw_test_and_clear_setup_guard(struct ci_hdrc *ci)
{
	return hw_test_and_write(ci, OP_USBCMD, USBCMD_SUTW, 0);
}

/**
 * hw_test_and_set_setup_guard: test & set setup guard (execute without
 *                              interruption)
 * @ci: the controller
 *
 * This function returns guard value
 */
static int hw_test_and_set_setup_guard(struct ci_hdrc *ci)
{
	return hw_test_and_write(ci, OP_USBCMD, USBCMD_SUTW, USBCMD_SUTW);
}
/**
 * hw_usb_set_address: configures USB address (execute without interruption)
 * @ci: the controller
 * @value: new USB address
 *
 * This function explicitly sets the address, without the "USBADRA" (advance)
 * feature, which is not supported by older versions of the controller.
 */
static void hw_usb_set_address(struct ci_hdrc *ci, u8 value)
{
	hw_write(ci, OP_DEVICEADDR, DEVICEADDR_USBADR,
		 value << __ffs(DEVICEADDR_USBADR));
}

/**
 * hw_usb_reset: restart device after a bus reset (execute without
 *               interruption)
 * @ci: the controller
 *
 * Resets the device address, flushes all endpoints and clears the setup
 * and complete semaphores.  This function returns an error code.
 */
static int hw_usb_reset(struct ci_hdrc *ci)
{
	hw_usb_set_address(ci, 0);

	/* ESS flushes only at end?!? */
	hw_write(ci, OP_ENDPTFLUSH, ~0, ~0);

	/* clear setup token semaphores */
	hw_write(ci, OP_ENDPTSETUPSTAT, 0, 0);

	/* clear complete status */
	hw_write(ci, OP_ENDPTCOMPLETE, 0, 0);

	/* wait until all bits cleared */
	while (hw_read(ci, OP_ENDPTPRIME, ~0))
		udelay(10); /* not RTOS friendly */

	/* reset all endpoints ? */

	/* reset internal status and wait for further instructions
	   no need to verify the port reset status (ESS does it) */

	return 0;
}
/******************************************************************************
* UTIL block
*****************************************************************************/
/*
 * add_td_to_list: allocate and append one transfer descriptor (dTD)
 * @hwep: endpoint
 * @hwreq: request the dTD belongs to
 * @length: number of bytes this dTD should transfer
 * @s: scatterlist entry the buffer comes from, or NULL for a linear buffer
 *
 * The new node is hardware-linked after the request's current last dTD
 * and added to hwreq->tds.  Returns 0 on success or -ENOMEM.
 */
static int add_td_to_list(struct ci_hw_ep *hwep, struct ci_hw_req *hwreq,
		unsigned int length, struct scatterlist *s)
{
	int i;
	u32 temp;
	struct td_node *lastnode, *node = kzalloc(sizeof(struct td_node),
					GFP_ATOMIC);

	if (node == NULL)
		return -ENOMEM;

	node->ptr = dma_pool_zalloc(hwep->td_pool, GFP_ATOMIC, &node->dma);
	if (node->ptr == NULL) {
		kfree(node);
		return -ENOMEM;
	}

	/* token: total bytes to transfer plus the ACTIVE status bit */
	node->ptr->token = cpu_to_le32(length << __ffs(TD_TOTAL_BYTES));
	node->ptr->token &= cpu_to_le32(TD_TOTAL_BYTES);
	node->ptr->token |= cpu_to_le32(TD_STATUS_ACTIVE);

	if (hwep->type == USB_ENDPOINT_XFER_ISOC && hwep->dir == TX) {
		/* MULT field: packets per (micro)frame, rounded up */
		u32 mul = hwreq->req.length / hwep->ep.maxpacket;

		if (hwreq->req.length == 0
				|| hwreq->req.length % hwep->ep.maxpacket)
			mul++;
		node->ptr->token |= cpu_to_le32(mul << __ffs(TD_MULTO));
	}

	if (s) {
		temp = (u32) (sg_dma_address(s) + hwreq->req.actual);
		/* spare capacity, reusable by later sg entries */
		node->td_remaining_size = CI_MAX_BUF_SIZE - length;
	} else {
		temp = (u32) (hwreq->req.dma + hwreq->req.actual);
	}

	if (length) {
		node->ptr->page[0] = cpu_to_le32(temp);
		for (i = 1; i < TD_PAGE_COUNT; i++) {
			u32 page = temp + i * CI_HDRC_PAGE_SIZE;

			page &= ~TD_RESERVED_MASK;
			node->ptr->page[i] = cpu_to_le32(page);
		}
	}

	/* req.actual is (ab)used here as a running "bytes queued" cursor */
	hwreq->req.actual += length;

	if (!list_empty(&hwreq->tds)) {
		/* get the last entry */
		lastnode = list_entry(hwreq->tds.prev,
				struct td_node, td);
		lastnode->ptr->next = cpu_to_le32(node->dma);
	}

	INIT_LIST_HEAD(&node->td);
	list_add_tail(&node->td, &hwreq->tds);

	return 0;
}
/**
* _usb_addr: calculates endpoint address from direction & number
* @ep: endpoint
*/
static inline u8 _usb_addr(struct ci_hw_ep *ep)
{
	u8 addr = ep->num;

	/* IN endpoints carry the direction bit */
	if (ep->dir == TX)
		addr |= USB_ENDPOINT_DIR_MASK;

	return addr;
}
/*
 * prepare_td_for_non_sg: build the dTD chain for a linear buffer
 * @hwep: endpoint
 * @hwreq: request
 *
 * Splits the request into dTDs of at most TD_PAGE_COUNT pages each;
 * appends a trailing zero-length dTD when req->zero demands a ZLP.
 */
static int prepare_td_for_non_sg(struct ci_hw_ep *hwep,
		struct ci_hw_req *hwreq)
{
	unsigned int rest = hwreq->req.length;
	int pages = TD_PAGE_COUNT;
	int ret = 0;

	if (rest == 0) {
		/* a zero-length request still needs one dTD */
		ret = add_td_to_list(hwep, hwreq, 0, NULL);
		if (ret < 0)
			return ret;
	}

	/*
	 * The first buffer could be not page aligned.
	 * In that case we have to span into one extra td.
	 */
	if (hwreq->req.dma % PAGE_SIZE)
		pages--;

	while (rest > 0) {
		unsigned int count = min(hwreq->req.length - hwreq->req.actual,
			(unsigned int)(pages * CI_HDRC_PAGE_SIZE));

		ret = add_td_to_list(hwep, hwreq, count, NULL);
		if (ret < 0)
			return ret;

		rest -= count;
	}

	if (hwreq->req.zero && hwreq->req.length && hwep->dir == TX
	    && (hwreq->req.length % hwep->ep.maxpacket == 0)) {
		/* trailing ZLP for IN transfers of exact multiple length */
		ret = add_td_to_list(hwep, hwreq, 0, NULL);
		if (ret < 0)
			return ret;
	}

	return ret;
}
/*
 * prepare_td_per_sg: build dTDs covering one scatterlist entry
 *
 * Each dTD covers at most CI_MAX_BUF_SIZE bytes.  Resets the
 * req.actual queueing cursor used by add_td_to_list().
 */
static int prepare_td_per_sg(struct ci_hw_ep *hwep, struct ci_hw_req *hwreq,
		struct scatterlist *s)
{
	unsigned int rest = sg_dma_len(s);
	int ret = 0;

	hwreq->req.actual = 0;
	while (rest > 0) {
		unsigned int count = min_t(unsigned int, rest,
				CI_MAX_BUF_SIZE);

		ret = add_td_to_list(hwep, hwreq, count, s);
		if (ret < 0)
			return ret;

		rest -= count;
	}

	return ret;
}
/*
 * ci_add_buffer_entry: pack another scatterlist entry into an existing dTD
 * @node: dTD with spare page slots (td_remaining_size bytes left)
 * @s: scatterlist entry to append
 *
 * Extends the dTD's total-bytes token and fills the page pointers
 * starting at the first unused slot.
 */
static void ci_add_buffer_entry(struct td_node *node, struct scatterlist *s)
{
	int empty_td_slot_index = (CI_MAX_BUF_SIZE - node->td_remaining_size)
			/ CI_HDRC_PAGE_SIZE;
	int i;
	u32 token;

	token = le32_to_cpu(node->ptr->token) + (sg_dma_len(s) << __ffs(TD_TOTAL_BYTES));
	node->ptr->token = cpu_to_le32(token);

	for (i = empty_td_slot_index; i < TD_PAGE_COUNT; i++) {
		u32 page = (u32) sg_dma_address(s) +
			(i - empty_td_slot_index) * CI_HDRC_PAGE_SIZE;

		page &= ~TD_RESERVED_MASK;
		node->ptr->page[i] = cpu_to_le32(page);
	}
}
/*
 * prepare_td_for_sg: build the dTD chain for a scatter-gather request
 *
 * Requires page-aligned entries, no req->zero and a non-zero length.
 * Consecutive entries are packed into the previous dTD's spare page
 * slots when they fit.  Returns 0 on success or -EINVAL/-ENOMEM.
 */
static int prepare_td_for_sg(struct ci_hw_ep *hwep, struct ci_hw_req *hwreq)
{
	struct usb_request *req = &hwreq->req;
	struct scatterlist *s = req->sg;
	int ret = 0, i = 0;
	struct td_node *node = NULL;

	if (!s || req->zero || req->length == 0) {
		dev_err(hwep->ci->dev, "not supported operation for sg\n");
		return -EINVAL;
	}

	while (i++ < req->num_mapped_sgs) {
		if (sg_dma_address(s) % PAGE_SIZE) {
			dev_err(hwep->ci->dev, "not page aligned sg buffer\n");
			return -EINVAL;
		}

		if (node && (node->td_remaining_size >= sg_dma_len(s))) {
			/* entry fits into the previous dTD's spare slots */
			ci_add_buffer_entry(node, s);
			node->td_remaining_size -= sg_dma_len(s);
		} else {
			ret = prepare_td_per_sg(hwep, hwreq, s);
			if (ret)
				return ret;

			node = list_entry(hwreq->tds.prev,
				struct td_node, td);
		}

		s = sg_next(s);
	}

	return ret;
}
/**
 * _hardware_enqueue: configures a request at hardware level
 * @hwep: endpoint
 * @hwreq: request
 *
 * Maps the request for DMA, builds its dTD chain, hot-links it behind
 * any request already queued on the endpoint and primes the endpoint
 * when needed.  This function returns an error code.
 */
static int _hardware_enqueue(struct ci_hw_ep *hwep, struct ci_hw_req *hwreq)
{
	struct ci_hdrc *ci = hwep->ci;
	int ret = 0;
	struct td_node *firstnode, *lastnode;

	/* don't queue twice */
	if (hwreq->req.status == -EALREADY)
		return -EALREADY;

	hwreq->req.status = -EALREADY;

	ret = usb_gadget_map_request_by_dev(ci->dev->parent,
					    &hwreq->req, hwep->dir);
	if (ret)
		return ret;

	if (hwreq->req.num_mapped_sgs)
		ret = prepare_td_for_sg(hwep, hwreq);
	else
		ret = prepare_td_for_non_sg(hwep, hwreq);

	if (ret)
		return ret;

	lastnode = list_entry(hwreq->tds.prev,
		struct td_node, td);

	/* terminate the chain and request an IOC on the last dTD */
	lastnode->ptr->next = cpu_to_le32(TD_TERMINATE);
	if (!hwreq->req.no_interrupt)
		lastnode->ptr->token |= cpu_to_le32(TD_IOC);

	list_for_each_entry_safe(firstnode, lastnode, &hwreq->tds, td)
		trace_ci_prepare_td(hwep, hwreq, firstnode);

	firstnode = list_first_entry(&hwreq->tds, struct td_node, td);

	/* make sure the dTDs are in memory before the controller sees them */
	wmb();

	/* prepare_td_* used req.actual as a cursor; reset for completion */
	hwreq->req.actual = 0;
	if (!list_empty(&hwep->qh.queue)) {
		struct ci_hw_req *hwreqprev;
		int n = hw_ep_bit(hwep->num, hwep->dir);
		int tmp_stat;
		struct td_node *prevlastnode;
		u32 next = firstnode->dma & TD_ADDR_MASK;

		/* hot-link the new chain behind the queued request */
		hwreqprev = list_entry(hwep->qh.queue.prev,
				struct ci_hw_req, queue);
		prevlastnode = list_entry(hwreqprev->tds.prev,
				struct td_node, td);

		prevlastnode->ptr->next = cpu_to_le32(next);
		wmb();

		if (ci->rev == CI_REVISION_22) {
			/* rev 2.2 quirk: reprime if the endpoint went idle */
			if (!hw_read(ci, OP_ENDPTSTAT, BIT(n)))
				reprime_dtd(ci, hwep, prevlastnode);
		}

		if (hw_read(ci, OP_ENDPTPRIME, BIT(n)))
			goto done;
		do {
			/* ATDTW semaphore: retry status read until stable */
			hw_write(ci, OP_USBCMD, USBCMD_ATDTW, USBCMD_ATDTW);
			tmp_stat = hw_read(ci, OP_ENDPTSTAT, BIT(n));
		} while (!hw_read(ci, OP_USBCMD, USBCMD_ATDTW));
		hw_write(ci, OP_USBCMD, USBCMD_ATDTW, 0);
		if (tmp_stat)
			goto done;
	}

	/* QH configuration */
	hwep->qh.ptr->td.next = cpu_to_le32(firstnode->dma);
	hwep->qh.ptr->td.token &=
		cpu_to_le32(~(TD_STATUS_HALTED|TD_STATUS_ACTIVE));

	if (hwep->type == USB_ENDPOINT_XFER_ISOC && hwep->dir == RX) {
		/* MULT field: packets per (micro)frame, rounded up */
		u32 mul = hwreq->req.length / hwep->ep.maxpacket;

		if (hwreq->req.length == 0
				|| hwreq->req.length % hwep->ep.maxpacket)
			mul++;
		hwep->qh.ptr->cap |= cpu_to_le32(mul << __ffs(QH_MULT));
	}

	ret = hw_ep_prime(ci, hwep->num, hwep->dir,
			   hwep->type == USB_ENDPOINT_XFER_CONTROL);
done:
	return ret;
}
/**
 * free_pending_td: release the endpoint's deferred TD
 * @hwep: endpoint
 *
 * Frees the TD whose cleanup was postponed by _hardware_dequeue() and
 * clears the pending slot.
 */
static void free_pending_td(struct ci_hw_ep *hwep)
{
	struct td_node *node = hwep->pending_td;

	hwep->pending_td = NULL;
	dma_pool_free(hwep->td_pool, node->ptr, node->dma);
	kfree(node);
}
/*
 * reprime_dtd: point the endpoint QH back at @node and prime it again.
 *
 * Used to recover when the controller lost a prime (silicon quirks handled
 * by the callers). Clears HALTED/ACTIVE in the QH token before priming.
 */
static int reprime_dtd(struct ci_hdrc *ci, struct ci_hw_ep *hwep,
		struct td_node *node)
{
	hwep->qh.ptr->td.next = cpu_to_le32(node->dma);
	hwep->qh.ptr->td.token &=
		cpu_to_le32(~(TD_STATUS_HALTED | TD_STATUS_ACTIVE));
	return hw_ep_prime(ci, hwep->num, hwep->dir,
		hwep->type == USB_ENDPOINT_XFER_CONTROL);
}
/**
 * _hardware_dequeue: handles a request at hardware level
 * @hwep: endpoint
 * @hwreq: request
 *
 * Walks the request's TDs, translating hardware status tokens into errno
 * values and accumulating the transferred byte count.
 *
 * This function returns an error code, or the number of bytes actually
 * transferred on success.
 */
static int _hardware_dequeue(struct ci_hw_ep *hwep, struct ci_hw_req *hwreq)
{
	u32 tmptoken;
	struct td_node *node, *tmpnode;
	unsigned remaining_length;
	unsigned actual = hwreq->req.length;
	struct ci_hdrc *ci = hwep->ci;
	/* only requests previously handed to the hardware are valid here */
	if (hwreq->req.status != -EALREADY)
		return -EINVAL;
	hwreq->req.status = 0;
	list_for_each_entry_safe(node, tmpnode, &hwreq->tds, td) {
		tmptoken = le32_to_cpu(node->ptr->token);
		trace_ci_complete_td(hwep, hwreq, node);
		if ((TD_STATUS_ACTIVE & tmptoken) != 0) {
			/* TD not yet processed by the controller */
			int n = hw_ep_bit(hwep->num, hwep->dir);
			if (ci->rev == CI_REVISION_24)
				/* rev 2.4 quirk: re-prime if the endpoint
				 * went idle with an active TD pending */
				if (!hw_read(ci, OP_ENDPTSTAT, BIT(n)))
					reprime_dtd(ci, hwep, node);
			hwreq->req.status = -EALREADY;
			return -EBUSY;
		}
		remaining_length = (tmptoken & TD_TOTAL_BYTES);
		remaining_length >>= __ffs(TD_TOTAL_BYTES);
		actual -= remaining_length;
		/* status temporarily holds the raw token bits; mapped to an
		 * errno just below (or reset to 0 on the next iteration) */
		hwreq->req.status = tmptoken & TD_STATUS;
		if ((TD_STATUS_HALTED & hwreq->req.status)) {
			hwreq->req.status = -EPIPE;
			break;
		} else if ((TD_STATUS_DT_ERR & hwreq->req.status)) {
			hwreq->req.status = -EPROTO;
			break;
		} else if ((TD_STATUS_TR_ERR & hwreq->req.status)) {
			hwreq->req.status = -EILSEQ;
			break;
		}
		if (remaining_length) {
			/* a short IN transfer means the controller did not
			 * send everything we asked for: protocol error */
			if (hwep->dir == TX) {
				hwreq->req.status = -EPROTO;
				break;
			}
		}
		/*
		 * As the hardware could still address the freed td
		 * which will run the udc unusable, the cleanup of the
		 * td has to be delayed by one.
		 */
		if (hwep->pending_td)
			free_pending_td(hwep);
		hwep->pending_td = node;
		list_del_init(&node->td);
	}
	usb_gadget_unmap_request_by_dev(hwep->ci->dev->parent,
			&hwreq->req, hwep->dir);
	hwreq->req.actual += actual;
	if (hwreq->req.status)
		return hwreq->req.status;
	return hwreq->req.actual;
}
/**
 * _ep_nuke: dequeues all endpoint requests
 * @hwep: endpoint
 *
 * Flushes the endpoint FIFO, frees every queued request's TDs, and
 * completes each request with -ESHUTDOWN.
 *
 * This function returns an error code
 * Caller must hold lock (temporarily released around the completion
 * callback, as the sparse annotations indicate)
 */
static int _ep_nuke(struct ci_hw_ep *hwep)
__releases(hwep->lock)
__acquires(hwep->lock)
{
	struct td_node *node, *tmpnode;
	if (hwep == NULL)
		return -EINVAL;
	hw_ep_flush(hwep->ci, hwep->num, hwep->dir);
	while (!list_empty(&hwep->qh.queue)) {
		/* pop oldest request */
		struct ci_hw_req *hwreq = list_entry(hwep->qh.queue.next,
						     struct ci_hw_req, queue);
		list_for_each_entry_safe(node, tmpnode, &hwreq->tds, td) {
			dma_pool_free(hwep->td_pool, node->ptr, node->dma);
			list_del_init(&node->td);
			node->ptr = NULL;
			kfree(node);
		}
		list_del_init(&hwreq->queue);
		hwreq->req.status = -ESHUTDOWN;
		if (hwreq->req.complete != NULL) {
			/* drop the lock: the gadget's completion handler may
			 * re-enter the UDC (e.g. queue another request) */
			spin_unlock(hwep->lock);
			usb_gadget_giveback_request(&hwep->ep, &hwreq->req);
			spin_lock(hwep->lock);
		}
	}
	if (hwep->pending_td)
		free_pending_td(hwep);
	return 0;
}
/*
 * _ep_set_halt: set or clear the endpoint halt (STALL) condition.
 *
 * @check_transfer: when true, refuse to halt a TX endpoint that still has
 * queued transfers (usb_ep_set_halt() semantics); internal callers pass
 * false to force the stall.
 */
static int _ep_set_halt(struct usb_ep *ep, int value, bool check_transfer)
{
	struct ci_hw_ep *hwep = container_of(ep, struct ci_hw_ep, ep);
	int direction, retval = 0;
	unsigned long flags;
	if (ep == NULL || hwep->ep.desc == NULL)
		return -EINVAL;
	/* isochronous endpoints cannot be halted per the USB spec */
	if (usb_endpoint_xfer_isoc(hwep->ep.desc))
		return -EOPNOTSUPP;
	spin_lock_irqsave(hwep->lock, flags);
	if (value && hwep->dir == TX && check_transfer &&
		!list_empty(&hwep->qh.queue) &&
			!usb_endpoint_xfer_control(hwep->ep.desc)) {
		spin_unlock_irqrestore(hwep->lock, flags);
		return -EAGAIN;
	}
	direction = hwep->dir;
	/* a control endpoint has both directions: apply the halt to each,
	 * toggling hwep->dir until we are back where we started */
	do {
		retval |= hw_ep_set_halt(hwep->ci, hwep->num, hwep->dir, value);
		if (!value)
			hwep->wedge = 0;
		if (hwep->type == USB_ENDPOINT_XFER_CONTROL)
			hwep->dir = (hwep->dir == TX) ? RX : TX;
	} while (hwep->dir != direction);
	spin_unlock_irqrestore(hwep->lock, flags);
	return retval;
}
/**
 * _gadget_stop_activity: stops all USB activity, flushes & disables all endpts
 * @gadget: gadget
 *
 * This function returns an error code
 */
static int _gadget_stop_activity(struct usb_gadget *gadget)
{
	struct usb_ep *ep;
	struct ci_hdrc *ci = container_of(gadget, struct ci_hdrc, gadget);
	unsigned long flags;
	/* flush all endpoints */
	gadget_for_each_ep(ep, gadget) {
		usb_ep_fifo_flush(ep);
	}
	/* ep0 is not on the gadget's ep_list, so flush it explicitly */
	usb_ep_fifo_flush(&ci->ep0out->ep);
	usb_ep_fifo_flush(&ci->ep0in->ep);
	/* make sure to disable all endpoints */
	gadget_for_each_ep(ep, gadget) {
		usb_ep_disable(ep);
	}
	/* drop the preallocated ep0 status-phase request, if any */
	if (ci->status != NULL) {
		usb_ep_free_request(&ci->ep0in->ep, ci->status);
		ci->status = NULL;
	}
	spin_lock_irqsave(&ci->lock, flags);
	ci->gadget.speed = USB_SPEED_UNKNOWN;
	ci->remote_wakeup = 0;
	ci->suspended = 0;
	spin_unlock_irqrestore(&ci->lock, flags);
	return 0;
}
/******************************************************************************
 * ISR block
 *****************************************************************************/
/**
 * isr_reset_handler: USB reset interrupt handler
 * @ci: UDC device
 *
 * This function resets USB engine after a bus reset occurred.
 * Called with ci->lock held; the lock is released around the calls that
 * may sleep or call back into the gadget driver.
 */
static void isr_reset_handler(struct ci_hdrc *ci)
__releases(ci->lock)
__acquires(ci->lock)
{
	int retval;
	spin_unlock(&ci->lock);
	/* only report the reset if we had actually enumerated before */
	if (ci->gadget.speed != USB_SPEED_UNKNOWN)
		usb_gadget_udc_reset(&ci->gadget, ci->driver);
	retval = _gadget_stop_activity(&ci->gadget);
	if (retval)
		goto done;
	retval = hw_usb_reset(ci);
	if (retval)
		goto done;
	/* preallocate the ep0 status-phase request for later setup handling */
	ci->status = usb_ep_alloc_request(&ci->ep0in->ep, GFP_ATOMIC);
	if (ci->status == NULL)
		retval = -ENOMEM;
done:
	spin_lock(&ci->lock);
	if (retval)
		dev_err(ci->dev, "error: %i\n", retval);
}
/**
 * isr_get_status_complete: get_status request complete function
 * @ep: endpoint
 * @req: request handled
 *
 * Frees the buffer and the request allocated by isr_get_status_response().
 * Caller must release lock
 */
static void isr_get_status_complete(struct usb_ep *ep, struct usb_request *req)
{
	if (!ep || !req)
		return;

	kfree(req->buf);
	usb_ep_free_request(ep, req);
}
/**
 * _ep_queue: queues (submits) an I/O request to an endpoint
 * @ep: endpoint
 * @req: request
 * @gfp_flags: GFP flags (not used)
 *
 * Caller must hold lock
 * This function returns an error code
 */
static int _ep_queue(struct usb_ep *ep, struct usb_request *req,
		    gfp_t __maybe_unused gfp_flags)
{
	struct ci_hw_ep *hwep = container_of(ep, struct ci_hw_ep, ep);
	struct ci_hw_req *hwreq = container_of(req, struct ci_hw_req, req);
	struct ci_hdrc *ci = hwep->ci;
	int retval = 0;
	if (ep == NULL || req == NULL || hwep->ep.desc == NULL)
		return -EINVAL;
	if (hwep->type == USB_ENDPOINT_XFER_CONTROL) {
		/* a control data stage goes to the ep matching the current
		 * setup direction, not necessarily the one we were given */
		if (req->length)
			hwep = (ci->ep0_dir == RX) ?
			       ci->ep0out : ci->ep0in;
		if (!list_empty(&hwep->qh.queue)) {
			/* a stale control transfer is still pending: abort it */
			_ep_nuke(hwep);
			dev_warn(hwep->ci->dev, "endpoint ctrl %X nuked\n",
				 _usb_addr(hwep));
		}
	}
	if (usb_endpoint_xfer_isoc(hwep->ep.desc) &&
	    hwreq->req.length > hwep->ep.mult * hwep->ep.maxpacket) {
		dev_err(hwep->ci->dev, "request length too big for isochronous\n");
		return -EMSGSIZE;
	}
	/* first nuke then test link, e.g. previous status has not sent */
	if (!list_empty(&hwreq->queue)) {
		dev_err(hwep->ci->dev, "request already in queue\n");
		return -EBUSY;
	}
	/* push request */
	hwreq->req.status = -EINPROGRESS;
	hwreq->req.actual = 0;
	retval = _hardware_enqueue(hwep, hwreq);
	if (retval == -EALREADY)
		retval = 0;
	if (!retval)
		list_add_tail(&hwreq->queue, &hwep->qh.queue);
	return retval;
}
/**
 * isr_get_status_response: get_status request response
 * @ci: ci struct
 * @setup: setup request packet
 *
 * Allocates a 2-byte status reply and queues it on ep0-in. The buffer and
 * request are freed by isr_get_status_complete() on completion.
 *
 * This function returns an error code
 */
static int isr_get_status_response(struct ci_hdrc *ci,
				struct usb_ctrlrequest *setup)
__releases(hwep->lock)
__acquires(hwep->lock)
{
	struct ci_hw_ep *hwep = ci->ep0in;
	struct usb_request *req = NULL;
	gfp_t gfp_flags = GFP_ATOMIC;
	int dir, num, retval;
	if (hwep == NULL || setup == NULL)
		return -EINVAL;
	/* drop the lock for the allocation, which may call back into us */
	spin_unlock(hwep->lock);
	req = usb_ep_alloc_request(&hwep->ep, gfp_flags);
	spin_lock(hwep->lock);
	if (req == NULL)
		return -ENOMEM;
	req->complete = isr_get_status_complete;
	req->length = 2;
	req->buf = kzalloc(req->length, gfp_flags);
	if (req->buf == NULL) {
		retval = -ENOMEM;
		goto err_free_req;
	}
	if ((setup->bRequestType & USB_RECIP_MASK) == USB_RECIP_DEVICE) {
		/* device status: bit0 self-powered, bit1 remote wakeup */
		*(u16 *)req->buf = (ci->remote_wakeup << 1) |
			ci->gadget.is_selfpowered;
	} else if ((setup->bRequestType & USB_RECIP_MASK) \
		   == USB_RECIP_ENDPOINT) {
		/* endpoint status: halt bit of the addressed endpoint */
		dir = (le16_to_cpu(setup->wIndex) & USB_ENDPOINT_DIR_MASK) ?
			TX : RX;
		num = le16_to_cpu(setup->wIndex) & USB_ENDPOINT_NUMBER_MASK;
		*(u16 *)req->buf = hw_ep_get_halt(ci, num, dir);
	}
	/* else do nothing; reserved for future use */
	retval = _ep_queue(&hwep->ep, req, gfp_flags);
	if (retval)
		goto err_free_buf;
	return 0;
 err_free_buf:
	kfree(req->buf);
 err_free_req:
	spin_unlock(hwep->lock);
	usb_ep_free_request(&hwep->ep, req);
	spin_lock(hwep->lock);
	return retval;
}
/**
 * isr_setup_status_complete: setup_status request complete function
 * @ep: endpoint
 * @req: request handled
 *
 * Caller must release lock. Put the port in test mode if test mode
 * feature is selected.
 */
static void
isr_setup_status_complete(struct usb_ep *ep, struct usb_request *req)
{
	struct ci_hdrc *ci = req->context;
	unsigned long flags;
	if (req->status < 0)
		return;
	/* a SET_ADDRESS only takes effect after its status stage completes */
	if (ci->setaddr) {
		hw_usb_set_address(ci, ci->address);
		ci->setaddr = false;
		if (ci->address)
			usb_gadget_set_state(&ci->gadget, USB_STATE_ADDRESS);
	}
	spin_lock_irqsave(&ci->lock, flags);
	if (ci->test_mode)
		hw_port_test_set(ci, ci->test_mode);
	spin_unlock_irqrestore(&ci->lock, flags);
}
/**
 * isr_setup_status_phase: queues the status phase of a setup transaction
 * @ci: ci struct
 *
 * This function returns an error code
 */
static int isr_setup_status_phase(struct ci_hdrc *ci)
{
	struct ci_hw_ep *hwep;
	/*
	 * Unexpected USB controller behavior, caused by bad signal integrity
	 * or ground reference problems, can lead to isr_setup_status_phase
	 * being called with ci->status equal to NULL.
	 * If this situation occurs, you should review your USB hardware design.
	 */
	if (WARN_ON_ONCE(!ci->status))
		return -EPIPE;
	/* the status stage flows opposite to the data stage direction */
	hwep = (ci->ep0_dir == TX) ? ci->ep0out : ci->ep0in;
	ci->status->context = ci;
	ci->status->complete = isr_setup_status_complete;
	return _ep_queue(&hwep->ep, ci->status, GFP_ATOMIC);
}
/**
 * isr_tr_complete_low: transaction complete low level handler
 * @hwep: endpoint
 *
 * Dequeues completed requests from @hwep and gives them back to the
 * gadget driver.
 *
 * This function returns an error code
 * Caller must hold lock
 */
static int isr_tr_complete_low(struct ci_hw_ep *hwep)
__releases(hwep->lock)
__acquires(hwep->lock)
{
	struct ci_hw_req *hwreq, *hwreqtemp;
	struct ci_hw_ep *hweptemp = hwep;
	int retval = 0;
	list_for_each_entry_safe(hwreq, hwreqtemp, &hwep->qh.queue,
			queue) {
		retval = _hardware_dequeue(hwep, hwreq);
		if (retval < 0)
			/* -EBUSY: request still in flight, stop here */
			break;
		list_del_init(&hwreq->queue);
		if (hwreq->req.complete != NULL) {
			spin_unlock(hwep->lock);
			/* a control data stage always completes on ep0-in */
			if ((hwep->type == USB_ENDPOINT_XFER_CONTROL) &&
					hwreq->req.length)
				hweptemp = hwep->ci->ep0in;
			usb_gadget_giveback_request(&hweptemp->ep, &hwreq->req);
			spin_lock(hwep->lock);
		}
	}
	if (retval == -EBUSY)
		retval = 0;
	return retval;
}
/*
 * otg_a_alt_hnp_support: handle SET_FEATURE(A_ALT_HNP_SUPPORT) by warning
 * the user and acknowledging the request with a status phase.
 */
static int otg_a_alt_hnp_support(struct ci_hdrc *ci)
{
	dev_warn(&ci->gadget.dev,
		"connect the device to an alternate port if you want HNP\n");
	return isr_setup_status_phase(ci);
}
/**
 * isr_setup_packet_handler: setup packet handler
 * @ci: UDC descriptor
 *
 * Decodes a chapter-9 setup packet, handling the standard requests the
 * controller driver must service itself and delegating the rest to the
 * gadget driver. Called with ci->lock held; the lock is dropped around
 * callbacks into the gadget.
 */
static void isr_setup_packet_handler(struct ci_hdrc *ci)
__releases(ci->lock)
__acquires(ci->lock)
{
	struct ci_hw_ep *hwep = &ci->ci_hw_ep[0];
	struct usb_ctrlrequest req;
	int type, num, dir, err = -EINVAL;
	u8 tmode = 0;
	/*
	 * Flush data and handshake transactions of previous
	 * setup packet.
	 */
	_ep_nuke(ci->ep0out);
	_ep_nuke(ci->ep0in);
	/* read_setup_packet: the setup guard detects and retries a copy
	 * torn by a newly arriving setup packet */
	do {
		hw_test_and_set_setup_guard(ci);
		memcpy(&req, &hwep->qh.ptr->setup, sizeof(req));
	} while (!hw_test_and_clear_setup_guard(ci));
	type = req.bRequestType;
	ci->ep0_dir = (type & USB_DIR_IN) ? TX : RX;
	switch (req.bRequest) {
	case USB_REQ_CLEAR_FEATURE:
		if (type == (USB_DIR_OUT|USB_RECIP_ENDPOINT) &&
				le16_to_cpu(req.wValue) ==
				USB_ENDPOINT_HALT) {
			if (req.wLength != 0)
				break;
			num  = le16_to_cpu(req.wIndex);
			dir = (num & USB_ENDPOINT_DIR_MASK) ? TX : RX;
			num &= USB_ENDPOINT_NUMBER_MASK;
			/* IN endpoints live in the upper half of ci_hw_ep[] */
			if (dir == TX)
				num += ci->hw_ep_max / 2;
			/* a wedged endpoint ignores CLEAR_FEATURE(HALT) */
			if (!ci->ci_hw_ep[num].wedge) {
				spin_unlock(&ci->lock);
				err = usb_ep_clear_halt(
					&ci->ci_hw_ep[num].ep);
				spin_lock(&ci->lock);
				if (err)
					break;
			}
			err = isr_setup_status_phase(ci);
		} else if (type == (USB_DIR_OUT|USB_RECIP_DEVICE) &&
				le16_to_cpu(req.wValue) ==
				USB_DEVICE_REMOTE_WAKEUP) {
			if (req.wLength != 0)
				break;
			ci->remote_wakeup = 0;
			err = isr_setup_status_phase(ci);
		} else {
			goto delegate;
		}
		break;
	case USB_REQ_GET_STATUS:
		if ((type != (USB_DIR_IN|USB_RECIP_DEVICE) ||
			le16_to_cpu(req.wIndex) == OTG_STS_SELECTOR) &&
			type != (USB_DIR_IN|USB_RECIP_ENDPOINT) &&
			type != (USB_DIR_IN|USB_RECIP_INTERFACE))
			goto delegate;
		if (le16_to_cpu(req.wLength) != 2 ||
		    le16_to_cpu(req.wValue)  != 0)
			break;
		err = isr_get_status_response(ci, &req);
		break;
	case USB_REQ_SET_ADDRESS:
		if (type != (USB_DIR_OUT|USB_RECIP_DEVICE))
			goto delegate;
		if (le16_to_cpu(req.wLength) != 0 ||
		    le16_to_cpu(req.wIndex)  != 0)
			break;
		/* address is latched by isr_setup_status_complete() only
		 * after the status stage finishes */
		ci->address = (u8)le16_to_cpu(req.wValue);
		ci->setaddr = true;
		err = isr_setup_status_phase(ci);
		break;
	case USB_REQ_SET_FEATURE:
		if (type == (USB_DIR_OUT|USB_RECIP_ENDPOINT) &&
				le16_to_cpu(req.wValue) ==
				USB_ENDPOINT_HALT) {
			if (req.wLength != 0)
				break;
			num  = le16_to_cpu(req.wIndex);
			dir = (num & USB_ENDPOINT_DIR_MASK) ? TX : RX;
			num &= USB_ENDPOINT_NUMBER_MASK;
			if (dir == TX)
				num += ci->hw_ep_max / 2;
			spin_unlock(&ci->lock);
			err = _ep_set_halt(&ci->ci_hw_ep[num].ep, 1, false);
			spin_lock(&ci->lock);
			if (!err)
				isr_setup_status_phase(ci);
		} else if (type == (USB_DIR_OUT|USB_RECIP_DEVICE)) {
			if (req.wLength != 0)
				break;
			switch (le16_to_cpu(req.wValue)) {
			case USB_DEVICE_REMOTE_WAKEUP:
				ci->remote_wakeup = 1;
				err = isr_setup_status_phase(ci);
				break;
			case USB_DEVICE_TEST_MODE:
				/* test selector is in the high byte of wIndex */
				tmode = le16_to_cpu(req.wIndex) >> 8;
				switch (tmode) {
				case USB_TEST_J:
				case USB_TEST_K:
				case USB_TEST_SE0_NAK:
				case USB_TEST_PACKET:
				case USB_TEST_FORCE_ENABLE:
					/* applied after the status stage by
					 * isr_setup_status_complete() */
					ci->test_mode = tmode;
					err = isr_setup_status_phase(
							ci);
					break;
				default:
					break;
				}
				break;
			case USB_DEVICE_B_HNP_ENABLE:
				if (ci_otg_is_fsm_mode(ci)) {
					ci->gadget.b_hnp_enable = 1;
					err = isr_setup_status_phase(
							ci);
				}
				break;
			case USB_DEVICE_A_ALT_HNP_SUPPORT:
				if (ci_otg_is_fsm_mode(ci))
					err = otg_a_alt_hnp_support(ci);
				break;
			case USB_DEVICE_A_HNP_SUPPORT:
				if (ci_otg_is_fsm_mode(ci)) {
					ci->gadget.a_hnp_support = 1;
					err = isr_setup_status_phase(
							ci);
				}
				break;
			default:
				goto delegate;
			}
		} else {
			goto delegate;
		}
		break;
	default:
delegate:
		/* hand everything else to the gadget driver */
		if (req.wLength == 0)   /* no data phase */
			ci->ep0_dir = TX;
		spin_unlock(&ci->lock);
		err = ci->driver->setup(&ci->gadget, &req);
		spin_lock(&ci->lock);
		break;
	}
	if (err < 0) {
		/* protocol stall: reject the request by halting ep0 */
		spin_unlock(&ci->lock);
		if (_ep_set_halt(&hwep->ep, 1, false))
			dev_err(ci->dev, "error: _ep_set_halt\n");
		spin_lock(&ci->lock);
	}
}
/**
 * isr_tr_complete_handler: transaction complete interrupt handler
 * @ci: UDC descriptor
 *
 * This function handles traffic events: per-endpoint transfer completions
 * first, then any pending setup packet on ep0.
 */
static void isr_tr_complete_handler(struct ci_hdrc *ci)
__releases(ci->lock)
__acquires(ci->lock)
{
	unsigned i;
	int err;
	for (i = 0; i < ci->hw_ep_max; i++) {
		struct ci_hw_ep *hwep  = &ci->ci_hw_ep[i];
		if (hwep->ep.desc == NULL)
			continue;   /* not configured */
		if (hw_test_and_clear_complete(ci, i)) {
			err = isr_tr_complete_low(hwep);
			if (hwep->type == USB_ENDPOINT_XFER_CONTROL) {
				if (err > 0)   /* needs status phase */
					err = isr_setup_status_phase(ci);
				if (err < 0) {
					/* stall ep0 on failure */
					spin_unlock(&ci->lock);
					if (_ep_set_halt(&hwep->ep, 1, false))
						dev_err(ci->dev,
						"error: _ep_set_halt\n");
					spin_lock(&ci->lock);
				}
			}
		}
		/* Only handle setup packet below */
		if (i == 0 &&
			hw_test_and_clear(ci, OP_ENDPTSETUPSTAT, BIT(0)))
			isr_setup_packet_handler(ci);
	}
}
/******************************************************************************
 * ENDPT block
 *****************************************************************************/
/*
 * ep_enable: configure endpoint, making it usable
 *
 * Check usb_ep_enable() at "usb_gadget.h" for details
 */
static int ep_enable(struct usb_ep *ep,
		     const struct usb_endpoint_descriptor *desc)
{
	struct ci_hw_ep *hwep = container_of(ep, struct ci_hw_ep, ep);
	int retval = 0;
	unsigned long flags;
	u32 cap = 0;
	if (ep == NULL || desc == NULL)
		return -EINVAL;
	spin_lock_irqsave(hwep->lock, flags);
	/* only internal SW should enable ctrl endpts */
	if (!list_empty(&hwep->qh.queue)) {
		dev_warn(hwep->ci->dev, "enabling a non-empty endpoint!\n");
		spin_unlock_irqrestore(hwep->lock, flags);
		return -EBUSY;
	}
	/* cache the descriptor's properties on the endpoint */
	hwep->ep.desc = desc;
	hwep->dir  = usb_endpoint_dir_in(desc) ? TX : RX;
	hwep->num  = usb_endpoint_num(desc);
	hwep->type = usb_endpoint_type(desc);
	hwep->ep.maxpacket = usb_endpoint_maxp(desc);
	hwep->ep.mult = usb_endpoint_maxp_mult(desc);
	/* program the queue head capabilities word */
	if (hwep->type == USB_ENDPOINT_XFER_CONTROL)
		cap |= QH_IOS;   /* interrupt on setup */
	cap |= QH_ZLT;
	cap |= (hwep->ep.maxpacket << __ffs(QH_MAX_PKT)) & QH_MAX_PKT;
	/*
	 * For ISO-TX, we set mult at QH as the largest value, and use
	 * MultO at TD as real mult value.
	 */
	if (hwep->type == USB_ENDPOINT_XFER_ISOC && hwep->dir == TX)
		cap |= 3 << __ffs(QH_MULT);
	hwep->qh.ptr->cap = cpu_to_le32(cap);
	hwep->qh.ptr->td.next |= cpu_to_le32(TD_TERMINATE);   /* needed? */
	if (hwep->num != 0 && hwep->type == USB_ENDPOINT_XFER_CONTROL) {
		dev_err(hwep->ci->dev, "Set control xfer at non-ep0\n");
		retval = -EINVAL;
	}
	/*
	 * Enable endpoints in the HW other than ep0 as ep0
	 * is always enabled
	 */
	if (hwep->num)
		retval |= hw_ep_enable(hwep->ci, hwep->num, hwep->dir,
				       hwep->type);
	spin_unlock_irqrestore(hwep->lock, flags);
	return retval;
}
/*
 * ep_disable: endpoint is no longer usable
 *
 * Check usb_ep_disable() at "usb_gadget.h" for details
 */
static int ep_disable(struct usb_ep *ep)
{
	struct ci_hw_ep *hwep = container_of(ep, struct ci_hw_ep, ep);
	int direction, retval = 0;
	unsigned long flags;
	if (ep == NULL)
		return -EINVAL;
	else if (hwep->ep.desc == NULL)
		return -EBUSY;
	spin_lock_irqsave(hwep->lock, flags);
	/* nothing to do if the controller is already disconnected */
	if (hwep->ci->gadget.speed == USB_SPEED_UNKNOWN) {
		spin_unlock_irqrestore(hwep->lock, flags);
		return 0;
	}
	/* only internal SW should disable ctrl endpts */
	direction = hwep->dir;
	/* a control endpoint is disabled in both directions (see the
	 * matching loop in _ep_set_halt) */
	do {
		retval |= _ep_nuke(hwep);
		retval |= hw_ep_disable(hwep->ci, hwep->num, hwep->dir);
		if (hwep->type == USB_ENDPOINT_XFER_CONTROL)
			hwep->dir = (hwep->dir == TX) ? RX : TX;
	} while (hwep->dir != direction);
	hwep->ep.desc = NULL;
	spin_unlock_irqrestore(hwep->lock, flags);
	return retval;
}
/*
 * ep_alloc_request: allocate a request object to use with this endpoint
 *
 * Check usb_ep_alloc_request() at "usb_gadget.h" for details
 */
static struct usb_request *ep_alloc_request(struct usb_ep *ep, gfp_t gfp_flags)
{
	struct ci_hw_req *hwreq = NULL;

	if (ep != NULL)
		hwreq = kzalloc(sizeof(*hwreq), gfp_flags);

	if (hwreq == NULL)
		return NULL;

	INIT_LIST_HEAD(&hwreq->queue);
	INIT_LIST_HEAD(&hwreq->tds);

	return &hwreq->req;
}
/*
 * ep_free_request: frees a request object
 *
 * Check usb_ep_free_request() at "usb_gadget.h" for details
 */
static void ep_free_request(struct usb_ep *ep, struct usb_request *req)
{
	struct ci_hw_ep *hwep  = container_of(ep,  struct ci_hw_ep, ep);
	struct ci_hw_req *hwreq = container_of(req, struct ci_hw_req, req);
	struct td_node *node, *tmpnode;
	unsigned long flags;
	if (ep == NULL || req == NULL) {
		return;
	} else if (!list_empty(&hwreq->queue)) {
		/* still queued on the endpoint: refuse to free it */
		dev_err(hwep->ci->dev, "freeing queued request\n");
		return;
	}
	spin_lock_irqsave(hwep->lock, flags);
	/* release any TDs still attached to the request */
	list_for_each_entry_safe(node, tmpnode, &hwreq->tds, td) {
		dma_pool_free(hwep->td_pool, node->ptr, node->dma);
		list_del_init(&node->td);
		node->ptr = NULL;
		kfree(node);
	}
	kfree(hwreq);
	spin_unlock_irqrestore(hwep->lock, flags);
}
/*
 * ep_queue: queues (submits) an I/O request to an endpoint
 *
 * Check usb_ep_queue()* at usb_gadget.h" for details
 */
static int ep_queue(struct usb_ep *ep, struct usb_request *req,
		    gfp_t __maybe_unused gfp_flags)
{
	struct ci_hw_ep *hwep = container_of(ep, struct ci_hw_ep, ep);
	unsigned long flags;
	int retval = 0;

	if (ep == NULL || req == NULL || hwep->ep.desc == NULL)
		return -EINVAL;

	spin_lock_irqsave(hwep->lock, flags);
	/* silently succeed when the controller is disconnected */
	if (hwep->ci->gadget.speed != USB_SPEED_UNKNOWN)
		retval = _ep_queue(ep, req, gfp_flags);
	spin_unlock_irqrestore(hwep->lock, flags);

	return retval;
}
/*
 * ep_dequeue: dequeues (cancels, unlinks) an I/O request from an endpoint
 *
 * Check usb_ep_dequeue() at "usb_gadget.h" for details
 */
static int ep_dequeue(struct usb_ep *ep, struct usb_request *req)
{
	struct ci_hw_ep *hwep  = container_of(ep,  struct ci_hw_ep, ep);
	struct ci_hw_req *hwreq = container_of(req, struct ci_hw_req, req);
	unsigned long flags;
	struct td_node *node, *tmpnode;
	/* only a request currently owned by the hardware (-EALREADY, see
	 * _hardware_enqueue) and still linked on the endpoint can be pulled */
	if (ep == NULL || req == NULL || hwreq->req.status != -EALREADY ||
		hwep->ep.desc == NULL || list_empty(&hwreq->queue) ||
		list_empty(&hwep->qh.queue))
		return -EINVAL;
	spin_lock_irqsave(hwep->lock, flags);
	if (hwep->ci->gadget.speed != USB_SPEED_UNKNOWN)
		hw_ep_flush(hwep->ci, hwep->num, hwep->dir);
	list_for_each_entry_safe(node, tmpnode, &hwreq->tds, td) {
		dma_pool_free(hwep->td_pool, node->ptr, node->dma);
		list_del(&node->td);
		kfree(node);
	}
	/* pop request */
	list_del_init(&hwreq->queue);
	usb_gadget_unmap_request(&hwep->ci->gadget, req, hwep->dir);
	req->status = -ECONNRESET;
	if (hwreq->req.complete != NULL) {
		/* drop the lock across the gadget's completion callback */
		spin_unlock(hwep->lock);
		usb_gadget_giveback_request(&hwep->ep, &hwreq->req);
		spin_lock(hwep->lock);
	}
	spin_unlock_irqrestore(hwep->lock, flags);
	return 0;
}
/*
 * ep_set_halt: sets the endpoint halt feature
 *
 * Thin wrapper: gadget-facing halt requests must respect pending TX
 * transfers, hence check_transfer = true.
 *
 * Check usb_ep_set_halt() at "usb_gadget.h" for details
 */
static int ep_set_halt(struct usb_ep *ep, int value)
{
	return _ep_set_halt(ep, value, true);
}
/*
 * ep_set_wedge: sets the halt feature and ignores clear requests
 *
 * Check usb_ep_set_wedge() at "usb_gadget.h" for details
 */
static int ep_set_wedge(struct usb_ep *ep)
{
	struct ci_hw_ep *hwep = container_of(ep, struct ci_hw_ep, ep);
	unsigned long flags;
	if (ep == NULL || hwep->ep.desc == NULL)
		return -EINVAL;
	spin_lock_irqsave(hwep->lock, flags);
	/* the wedge flag makes CLEAR_FEATURE(HALT) a no-op for this ep
	 * (checked in isr_setup_packet_handler) */
	hwep->wedge = 1;
	spin_unlock_irqrestore(hwep->lock, flags);
	return usb_ep_set_halt(ep);
}
/*
 * ep_fifo_flush: flushes contents of a fifo
 *
 * Check usb_ep_fifo_flush() at "usb_gadget.h" for details
 */
static void ep_fifo_flush(struct usb_ep *ep)
{
	struct ci_hw_ep *hwep = container_of(ep, struct ci_hw_ep, ep);
	unsigned long flags;

	if (ep == NULL) {
		/*
		 * Can't use hwep here: container_of() on a NULL ep yields an
		 * invalid pointer, so dereferencing hwep->ci (as the old
		 * error path did) would be an out-of-bounds access.
		 */
		pr_err("%s: -EINVAL\n", __func__);
		return;
	}

	spin_lock_irqsave(hwep->lock, flags);
	/* nothing to flush once the controller is disconnected */
	if (hwep->ci->gadget.speed == USB_SPEED_UNKNOWN) {
		spin_unlock_irqrestore(hwep->lock, flags);
		return;
	}

	hw_ep_flush(hwep->ci, hwep->num, hwep->dir);
	spin_unlock_irqrestore(hwep->lock, flags);
}
/*
 * Endpoint-specific part of the API to the USB controller hardware.
 * These ops are installed on every ci_hw_ep in init_eps().
 * Check "usb_gadget.h" for details
 */
static const struct usb_ep_ops usb_ep_ops = {
	.enable	       = ep_enable,
	.disable       = ep_disable,
	.alloc_request = ep_alloc_request,
	.free_request  = ep_free_request,
	.queue	       = ep_queue,
	.dequeue       = ep_dequeue,
	.set_halt      = ep_set_halt,
	.set_wedge     = ep_set_wedge,
	.fifo_flush    = ep_fifo_flush,
};
/******************************************************************************
 * GADGET block
 *****************************************************************************/
/*
 * ci_udc_get_frame: report the current USB frame number.
 */
static int ci_udc_get_frame(struct usb_gadget *_gadget)
{
	struct ci_hdrc *ci = container_of(_gadget, struct ci_hdrc, gadget);
	unsigned long flags;
	int frindex;

	spin_lock_irqsave(&ci->lock, flags);
	frindex = hw_read(ci, OP_FRINDEX, 0x3fff);
	spin_unlock_irqrestore(&ci->lock, flags);

	/* the low 3 bits of FRINDEX are the microframe counter */
	return frindex >> 3;
}
/*
 * ci_hdrc_gadget_connect: caller makes sure gadget driver is binded
 *
 * @is_active non-zero: power up and start the controller, report POWERED;
 * otherwise tear everything down and report NOTATTACHED.
 */
static void ci_hdrc_gadget_connect(struct usb_gadget *_gadget, int is_active)
{
	struct ci_hdrc *ci = container_of(_gadget, struct ci_hdrc, gadget);
	if (is_active) {
		pm_runtime_get_sync(ci->dev);
		hw_device_reset(ci);
		spin_lock_irq(&ci->lock);
		if (ci->driver) {
			hw_device_state(ci, ci->ep0out->qh.dma);
			usb_gadget_set_state(_gadget, USB_STATE_POWERED);
			spin_unlock_irq(&ci->lock);
			usb_udc_vbus_handler(_gadget, true);
		} else {
			spin_unlock_irq(&ci->lock);
		}
	} else {
		usb_udc_vbus_handler(_gadget, false);
		if (ci->driver)
			ci->driver->disconnect(&ci->gadget);
		/* stop the controller before flushing endpoints */
		hw_device_state(ci, 0);
		if (ci->platdata->notify_event)
			ci->platdata->notify_event(ci,
			CI_HDRC_CONTROLLER_STOPPED_EVENT);
		_gadget_stop_activity(&ci->gadget);
		pm_runtime_put_sync(ci->dev);
		usb_gadget_set_state(_gadget, USB_STATE_NOTATTACHED);
	}
}
/*
 * ci_udc_vbus_session: VBUS state change notification from the transceiver.
 *
 * Records the new state, informs the PHY/charger layers and the platform,
 * then (if a gadget driver is bound) connects or disconnects accordingly.
 */
static int ci_udc_vbus_session(struct usb_gadget *_gadget, int is_active)
{
	struct ci_hdrc *ci = container_of(_gadget, struct ci_hdrc, gadget);
	unsigned long flags;
	int ret = 0;
	spin_lock_irqsave(&ci->lock, flags);
	ci->vbus_active = is_active;
	spin_unlock_irqrestore(&ci->lock, flags);
	if (ci->usb_phy)
		usb_phy_set_charger_state(ci->usb_phy, is_active ?
			USB_CHARGER_PRESENT : USB_CHARGER_ABSENT);
	if (ci->platdata->notify_event)
		ret = ci->platdata->notify_event(ci,
				CI_HDRC_CONTROLLER_VBUS_EVENT);
	if (ci->usb_phy) {
		if (is_active)
			usb_phy_set_event(ci->usb_phy, USB_EVENT_VBUS);
		else
			usb_phy_set_event(ci->usb_phy, USB_EVENT_NONE);
	}
	if (ci->driver)
		ci_hdrc_gadget_connect(_gadget, is_active);
	return ret;
}
/*
 * ci_udc_wakeup: initiate remote wakeup signalling on the port.
 *
 * Only valid when the host enabled remote wakeup and the port is
 * currently suspended.
 */
static int ci_udc_wakeup(struct usb_gadget *_gadget)
{
	struct ci_hdrc *ci = container_of(_gadget, struct ci_hdrc, gadget);
	unsigned long flags;
	int ret = 0;
	spin_lock_irqsave(&ci->lock, flags);
	if (ci->gadget.speed == USB_SPEED_UNKNOWN) {
		spin_unlock_irqrestore(&ci->lock, flags);
		return 0;
	}
	if (!ci->remote_wakeup) {
		ret = -EOPNOTSUPP;
		goto out;
	}
	if (!hw_read(ci, OP_PORTSC, PORTSC_SUSP)) {
		ret = -EINVAL;
		goto out;
	}
	/* force port resume */
	hw_write(ci, OP_PORTSC, PORTSC_FPR, PORTSC_FPR);
out:
	spin_unlock_irqrestore(&ci->lock, flags);
	return ret;
}
/*
 * ci_udc_vbus_draw: forward the configured VBUS current draw to the PHY.
 */
static int ci_udc_vbus_draw(struct usb_gadget *_gadget, unsigned ma)
{
	struct ci_hdrc *ci = container_of(_gadget, struct ci_hdrc, gadget);

	if (!ci->usb_phy)
		return -ENOTSUPP;

	return usb_phy_set_power(ci->usb_phy, ma);
}
/*
 * ci_udc_selfpowered: record the self-powered status reported in
 * GET_STATUS(device) responses (see isr_get_status_response()).
 */
static int ci_udc_selfpowered(struct usb_gadget *_gadget, int is_on)
{
	struct ci_hdrc *ci = container_of(_gadget, struct ci_hdrc, gadget);
	struct ci_hw_ep *hwep = ci->ep0in;
	unsigned long flags;
	spin_lock_irqsave(hwep->lock, flags);
	_gadget->is_selfpowered = (is_on != 0);
	spin_unlock_irqrestore(hwep->lock, flags);
	return 0;
}
/* Change Data+ pullup status
 * this func is used by usb_gadget_connect/disconnect
 */
static int ci_udc_pullup(struct usb_gadget *_gadget, int is_on)
{
	struct ci_hdrc *ci = container_of(_gadget, struct ci_hdrc, gadget);
	/*
	 * Data+ pullup controlled by OTG state machine in OTG fsm mode;
	 * and don't touch Data+ in host mode for dual role config.
	 */
	if (ci_otg_is_fsm_mode(ci) || ci->role == CI_ROLE_HOST)
		return 0;
	pm_runtime_get_sync(ci->dev);
	/* RS (Run/Stop) bit controls the D+ pullup */
	if (is_on)
		hw_write(ci, OP_USBCMD, USBCMD_RS, USBCMD_RS);
	else
		hw_write(ci, OP_USBCMD, USBCMD_RS, 0);
	pm_runtime_put_sync(ci->dev);
	return 0;
}
/* Forward declarations: referenced by usb_gadget_ops before they are
 * defined below. */
static int ci_udc_start(struct usb_gadget *gadget,
			struct usb_gadget_driver *driver);
static int ci_udc_stop(struct usb_gadget *gadget);
/*
 * ci_udc_match_ep: custom endpoint matching for epautoconf.
 *
 * For ISOC IN descriptors, hand out the highest-numbered unclaimed IN
 * endpoint; everything else falls back to the default matcher.
 */
static struct usb_ep *ci_udc_match_ep(struct usb_gadget *gadget,
			 struct usb_endpoint_descriptor *desc,
			 struct usb_ss_ep_comp_descriptor *comp_desc)
{
	struct ci_hdrc *ci = container_of(gadget, struct ci_hdrc, gadget);
	struct usb_ep *ep;

	if (!usb_endpoint_xfer_isoc(desc) || !usb_endpoint_dir_in(desc))
		return NULL;

	/* walk the list backwards so the highest endpoint wins */
	list_for_each_entry_reverse(ep, &ci->gadget.ep_list, ep_list) {
		if (ep->caps.dir_in && !ep->claimed)
			return ep;
	}

	return NULL;
}
/*
 * Device operations part of the API to the USB controller hardware,
 * which don't involve endpoints (or i/o).
 * Installed on ci->gadget by the probe path.
 * Check "usb_gadget.h" for details
 */
static const struct usb_gadget_ops usb_gadget_ops = {
	.get_frame	= ci_udc_get_frame,
	.vbus_session	= ci_udc_vbus_session,
	.wakeup		= ci_udc_wakeup,
	.set_selfpowered = ci_udc_selfpowered,
	.pullup		= ci_udc_pullup,
	.vbus_draw	= ci_udc_vbus_draw,
	.udc_start	= ci_udc_start,
	.udc_stop	= ci_udc_stop,
	.match_ep 	= ci_udc_match_ep,
};
/*
 * init_eps: initialize every hardware endpoint structure.
 *
 * The ci_hw_ep array holds all OUT endpoints in its lower half and all IN
 * endpoints in its upper half (k = i + j * hw_ep_max/2). Allocation
 * failures are accumulated in retval rather than aborting the loops.
 * Returns 0 on success or -ENOMEM.
 */
static int init_eps(struct ci_hdrc *ci)
{
	int retval = 0, i, j;
	for (i = 0; i < ci->hw_ep_max/2; i++)
		for (j = RX; j <= TX; j++) {
			int k = i + j * ci->hw_ep_max/2;
			struct ci_hw_ep *hwep = &ci->ci_hw_ep[k];
			scnprintf(hwep->name, sizeof(hwep->name), "ep%i%s", i,
					(j == TX)  ? "in" : "out");
			hwep->ci          = ci;
			hwep->lock        = &ci->lock;
			hwep->td_pool     = ci->td_pool;
			hwep->ep.name      = hwep->name;
			hwep->ep.ops       = &usb_ep_ops;
			if (i == 0) {
				hwep->ep.caps.type_control = true;
			} else {
				hwep->ep.caps.type_iso = true;
				hwep->ep.caps.type_bulk = true;
				hwep->ep.caps.type_int = true;
			}
			if (j == TX)
				hwep->ep.caps.dir_in = true;
			else
				hwep->ep.caps.dir_out = true;
			/*
			 * for ep0: maxP defined in desc, for other
			 * eps, maxP is set by epautoconfig() called
			 * by gadget layer
			 */
			usb_ep_set_maxpacket_limit(&hwep->ep, (unsigned short)~0);
			INIT_LIST_HEAD(&hwep->qh.queue);
			hwep->qh.ptr = dma_pool_zalloc(ci->qh_pool, GFP_KERNEL,
						       &hwep->qh.dma);
			if (hwep->qh.ptr == NULL)
				retval = -ENOMEM;
			/*
			 * set up shorthands for ep0 out and in endpoints,
			 * don't add to gadget's ep_list
			 */
			if (i == 0) {
				if (j == RX)
					ci->ep0out = hwep;
				else
					ci->ep0in = hwep;
				usb_ep_set_maxpacket_limit(&hwep->ep, CTRL_PAYLOAD_MAX);
				continue;
			}
			list_add_tail(&hwep->ep.ep_list, &ci->gadget.ep_list);
		}
	return retval;
}
/*
 * destroy_eps: release per-endpoint resources allocated by init_eps()
 * (the queue-head DMA memory and any deferred pending TD).
 */
static void destroy_eps(struct ci_hdrc *ci)
{
	int i;
	for (i = 0; i < ci->hw_ep_max; i++) {
		struct ci_hw_ep *hwep = &ci->ci_hw_ep[i];
		if (hwep->pending_td)
			free_pending_td(hwep);
		dma_pool_free(ci->qh_pool, hwep->qh.ptr, hwep->qh.dma);
	}
}
/**
 * ci_udc_start: register a gadget driver
 * @gadget: our gadget
 * @driver: the driver being registered
 *
 * Enables ep0 in both directions, records the driver and, depending on
 * OTG/VBUS state, brings the connection up.
 *
 * Interrupts are enabled here.
 */
static int ci_udc_start(struct usb_gadget *gadget,
			 struct usb_gadget_driver *driver)
{
	struct ci_hdrc *ci = container_of(gadget, struct ci_hdrc, gadget);
	int retval;
	if (driver->disconnect == NULL)
		return -EINVAL;
	ci->ep0out->ep.desc = &ctrl_endpt_out_desc;
	retval = usb_ep_enable(&ci->ep0out->ep);
	if (retval)
		return retval;
	ci->ep0in->ep.desc = &ctrl_endpt_in_desc;
	retval = usb_ep_enable(&ci->ep0in->ep);
	if (retval)
		return retval;
	ci->driver = driver;
	/* Start otg fsm for B-device */
	if (ci_otg_is_fsm_mode(ci) && ci->fsm.id) {
		ci_hdrc_otg_fsm_start(ci);
		return retval;
	}
	if (ci->vbus_active)
		ci_hdrc_gadget_connect(gadget, 1);
	else
		usb_udc_vbus_handler(&ci->gadget, false);
	return retval;
}
/*
 * ci_udc_stop_for_otg_fsm: adjust the OTG state machine when the gadget
 * driver is unregistered. No-op outside OTG FSM mode.
 */
static void ci_udc_stop_for_otg_fsm(struct ci_hdrc *ci)
{
	if (!ci_otg_is_fsm_mode(ci))
		return;
	mutex_lock(&ci->fsm.lock);
	if (ci->fsm.otg->state == OTG_STATE_A_PERIPHERAL) {
		/* kick the FSM so the A-device can leave peripheral mode */
		ci->fsm.a_bidl_adis_tmout = 1;
		ci_hdrc_otg_fsm_start(ci);
	} else if (ci->fsm.otg->state == OTG_STATE_B_PERIPHERAL) {
		ci->fsm.protocol = PROTO_UNDEF;
		ci->fsm.otg->state = OTG_STATE_UNDEFINED;
	}
	mutex_unlock(&ci->fsm.lock);
}
/*
 * ci_udc_stop: unregister a gadget driver
 */
static int ci_udc_stop(struct usb_gadget *gadget)
{
	struct ci_hdrc *ci = container_of(gadget, struct ci_hdrc, gadget);
	unsigned long flags;
	spin_lock_irqsave(&ci->lock, flags);
	ci->driver = NULL;
	if (ci->vbus_active) {
		hw_device_state(ci, 0);
		/* drop the lock: the platform callback and the endpoint
		 * teardown below may sleep or call back into the UDC */
		spin_unlock_irqrestore(&ci->lock, flags);
		if (ci->platdata->notify_event)
			ci->platdata->notify_event(ci,
			CI_HDRC_CONTROLLER_STOPPED_EVENT);
		_gadget_stop_activity(&ci->gadget);
		spin_lock_irqsave(&ci->lock, flags);
		pm_runtime_put(ci->dev);
	}
	spin_unlock_irqrestore(&ci->lock, flags);
	ci_udc_stop_for_otg_fsm(ci);
	return 0;
}
/******************************************************************************
* BUS block
*****************************************************************************/
/*
 * udc_irq: ci interrupt handler
 *
 * This function returns IRQ_HANDLED if the IRQ has been handled
 * It locks access to registers
 */
static irqreturn_t udc_irq(struct ci_hdrc *ci)
{
	irqreturn_t retval;
	u32 intr;

	if (ci == NULL)
		return IRQ_HANDLED;

	spin_lock(&ci->lock);

	/* with shared registers, only service the IRQ in device mode */
	if (ci->platdata->flags & CI_HDRC_REGS_SHARED) {
		if (hw_read(ci, OP_USBMODE, USBMODE_CM) !=
				USBMODE_CM_DC) {
			spin_unlock(&ci->lock);
			return IRQ_NONE;
		}
	}

	intr = hw_test_and_clear_intr_active(ci);

	if (intr) {
		/* order defines priority - do NOT change it */

		/* USB bus reset */
		if (USBi_URI & intr)
			isr_reset_handler(ci);

		/* port change: latch speed, handle resume-from-suspend */
		if (USBi_PCI & intr) {
			ci->gadget.speed = hw_port_is_high_speed(ci) ?
				USB_SPEED_HIGH : USB_SPEED_FULL;
			if (ci->usb_phy)
				usb_phy_set_event(ci->usb_phy,
					USB_EVENT_ENUMERATED);
			if (ci->suspended) {
				if (ci->driver->resume) {
					/* driver callback may sleep: drop lock */
					spin_unlock(&ci->lock);
					ci->driver->resume(&ci->gadget);
					spin_lock(&ci->lock);
				}
				ci->suspended = 0;
				usb_gadget_set_state(&ci->gadget,
					ci->resume_state);
			}
		}

		/* transfer completion */
		if (USBi_UI & intr)
			isr_tr_complete_handler(ci);

		/* suspend interrupt: save state and notify the driver */
		if ((USBi_SLI & intr) && !(ci->suspended)) {
			ci->suspended = 1;
			ci->resume_state = ci->gadget.state;
			if (ci->gadget.speed != USB_SPEED_UNKNOWN &&
			    ci->driver->suspend) {
				spin_unlock(&ci->lock);
				ci->driver->suspend(&ci->gadget);
				spin_lock(&ci->lock);
			}
			usb_gadget_set_state(&ci->gadget,
					USB_STATE_SUSPENDED);
		}
		retval = IRQ_HANDLED;
	} else {
		retval = IRQ_NONE;
	}
	spin_unlock(&ci->lock);

	return retval;
}
/**
 * udc_start: initialize gadget role
 * @ci: chipidea controller
 *
 * Sets up the gadget structure, creates the QH/TD dma pools, initializes
 * the endpoints and registers the UDC with the gadget core. On failure,
 * everything allocated so far is unwound via the goto chain.
 * Returns 0 on success or a negative errno.
 */
static int udc_start(struct ci_hdrc *ci)
{
	struct device *dev = ci->dev;
	struct usb_otg_caps *otg_caps = &ci->platdata->ci_otg_caps;
	int retval = 0;

	ci->gadget.ops = &usb_gadget_ops;
	ci->gadget.speed = USB_SPEED_UNKNOWN;
	ci->gadget.max_speed = USB_SPEED_HIGH;
	ci->gadget.name = ci->platdata->name;
	ci->gadget.otg_caps = otg_caps;
	ci->gadget.sg_supported = 1;
	ci->gadget.irq = ci->irq;

	if (ci->platdata->flags & CI_HDRC_REQUIRES_ALIGNED_DMA)
		ci->gadget.quirk_avoids_skb_reserve = 1;

	/* advertise OTG only if at least one OTG capability is enabled */
	if (ci->is_otg && (otg_caps->hnp_support || otg_caps->srp_support ||
						otg_caps->adp_support))
		ci->gadget.is_otg = 1;

	INIT_LIST_HEAD(&ci->gadget.ep_list);

	/* alloc resources */
	ci->qh_pool = dma_pool_create("ci_hw_qh", dev->parent,
				       sizeof(struct ci_hw_qh),
				       64, CI_HDRC_PAGE_SIZE);
	if (ci->qh_pool == NULL)
		return -ENOMEM;

	ci->td_pool = dma_pool_create("ci_hw_td", dev->parent,
				       sizeof(struct ci_hw_td),
				       64, CI_HDRC_PAGE_SIZE);
	if (ci->td_pool == NULL) {
		retval = -ENOMEM;
		goto free_qh_pool;
	}

	retval = init_eps(ci);
	if (retval)
		goto free_pools;

	/* ep0in is the default control endpoint exposed to the core */
	ci->gadget.ep0 = &ci->ep0in->ep;

	retval = usb_add_gadget_udc(dev, &ci->gadget);
	if (retval)
		goto destroy_eps;

	return retval;

destroy_eps:
	destroy_eps(ci);
free_pools:
	dma_pool_destroy(ci->td_pool);
free_qh_pool:
	dma_pool_destroy(ci->qh_pool);
	return retval;
}
/*
 * ci_hdrc_gadget_destroy: parent remove must call this to remove UDC
 *
 * No interrupts active, the IRQ has been released
 */
void ci_hdrc_gadget_destroy(struct ci_hdrc *ci)
{
	/* nothing to undo if the gadget role was never initialized */
	if (!ci->roles[CI_ROLE_GADGET])
		return;

	usb_del_gadget_udc(&ci->gadget);

	/* reverse order of udc_start allocations */
	destroy_eps(ci);
	dma_pool_destroy(ci->td_pool);
	dma_pool_destroy(ci->qh_pool);
}
/*
 * udc_id_switch_for_device: prepare the controller for the device role
 *
 * Selects the device pinctrl state when one is provided and, on OTG
 * capable hardware, clears and enables the B-session-valid interrupt.
 */
static int udc_id_switch_for_device(struct ci_hdrc *ci)
{
	if (ci->platdata->pins_device)
		pinctrl_select_state(ci->platdata->pctl,
				     ci->platdata->pins_device);

	if (ci->is_otg)
		/* Clear and enable BSV irq */
		hw_write_otgsc(ci, OTGSC_BSVIS | OTGSC_BSVIE,
					OTGSC_BSVIS | OTGSC_BSVIE);

	return 0;
}
/*
 * udc_id_switch_for_host: leave the device role before switching to host
 *
 * Disables the BSV interrupt, marks VBUS inactive and restores the
 * default pinctrl state if a device state had been configured.
 */
static void udc_id_switch_for_host(struct ci_hdrc *ci)
{
	/*
	 * host doesn't care B_SESSION_VALID event
	 * so clear and disable BSV irq
	 */
	if (ci->is_otg)
		hw_write_otgsc(ci, OTGSC_BSVIE | OTGSC_BSVIS, OTGSC_BSVIS);

	ci->vbus_active = 0;

	if (ci->platdata->pins_device && ci->platdata->pins_default)
		pinctrl_select_state(ci->platdata->pctl,
				     ci->platdata->pins_default);
}
#ifdef CONFIG_PM_SLEEP
/*
 * udc_suspend: gadget-role suspend hook (system sleep)
 *
 * Plants a marker value in OP_ENDPTLISTADDR so udc_resume() can detect
 * whether register contents survived the sleep (i.e. power was kept).
 */
static void udc_suspend(struct ci_hdrc *ci)
{
	/*
	 * Set OP_ENDPTLISTADDR to be non-zero for
	 * checking if controller resume from power lost
	 * in non-host mode.
	 */
	if (hw_read(ci, OP_ENDPTLISTADDR, ~0) == 0)
		hw_write(ci, OP_ENDPTLISTADDR, ~0, ~0);
}
/*
 * udc_resume: gadget-role resume hook (system wake)
 * @power_lost: true if the controller lost power during sleep
 *
 * On power loss, re-arms the BSV interrupt for OTG hardware and reports
 * a VBUS disconnect if a session was active. In either case the marker
 * written by udc_suspend() is cleared again.
 */
static void udc_resume(struct ci_hdrc *ci, bool power_lost)
{
	if (power_lost) {
		if (ci->is_otg)
			hw_write_otgsc(ci, OTGSC_BSVIS | OTGSC_BSVIE,
					OTGSC_BSVIS | OTGSC_BSVIE);
		if (ci->vbus_active)
			usb_gadget_vbus_disconnect(&ci->gadget);
	}

	/* Restore value 0 if it was set for power lost check */
	if (hw_read(ci, OP_ENDPTLISTADDR, ~0) == 0xFFFFFFFF)
		hw_write(ci, OP_ENDPTLISTADDR, ~0, 0);
}
#endif
/**
 * ci_hdrc_gadget_init - initialize device related bits
 * @ci: the controller
 *
 * This function initializes the gadget, if the device is "device capable".
 * Returns -ENXIO when the hardware has no device capability, -ENOMEM on
 * allocation failure, or the result of udc_start() otherwise.
 */
int ci_hdrc_gadget_init(struct ci_hdrc *ci)
{
	struct ci_role_driver *rdrv;
	int ret;

	/* bail out if the hardware is not device capable */
	if (!hw_read(ci, CAP_DCCPARAMS, DCCPARAMS_DC))
		return -ENXIO;

	rdrv = devm_kzalloc(ci->dev, sizeof(*rdrv), GFP_KERNEL);
	if (!rdrv)
		return -ENOMEM;

	rdrv->start	= udc_id_switch_for_device;
	rdrv->stop	= udc_id_switch_for_host;
#ifdef CONFIG_PM_SLEEP
	rdrv->suspend	= udc_suspend;
	rdrv->resume	= udc_resume;
#endif
	rdrv->irq	= udc_irq;
	rdrv->name	= "gadget";

	/* only publish the gadget role if udc_start() succeeded */
	ret = udc_start(ci);
	if (!ret)
		ci->roles[CI_ROLE_GADGET] = rdrv;

	return ret;
}
| linux-master | drivers/usb/chipidea/udc.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* Copyright 2012 Freescale Semiconductor, Inc.
*/
#include <linux/module.h>
#include <linux/of.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/delay.h>
#include <linux/platform_device.h>
#include <linux/usb/otg.h>
#include "ci_hdrc_imx.h"
#define MX25_USB_PHY_CTRL_OFFSET 0x08
#define MX25_BM_EXTERNAL_VBUS_DIVIDER BIT(23)
#define MX25_EHCI_INTERFACE_SINGLE_UNI (2 << 0)
#define MX25_EHCI_INTERFACE_DIFF_UNI (0 << 0)
#define MX25_EHCI_INTERFACE_MASK (0xf)
#define MX25_OTG_SIC_SHIFT 29
#define MX25_OTG_SIC_MASK (0x3 << MX25_OTG_SIC_SHIFT)
#define MX25_OTG_PM_BIT BIT(24)
#define MX25_OTG_PP_BIT BIT(11)
#define MX25_OTG_OCPOL_BIT BIT(3)
#define MX25_H1_SIC_SHIFT 21
#define MX25_H1_SIC_MASK (0x3 << MX25_H1_SIC_SHIFT)
#define MX25_H1_PP_BIT BIT(18)
#define MX25_H1_PM_BIT BIT(16)
#define MX25_H1_IPPUE_UP_BIT BIT(7)
#define MX25_H1_IPPUE_DOWN_BIT BIT(6)
#define MX25_H1_TLL_BIT BIT(5)
#define MX25_H1_USBTE_BIT BIT(4)
#define MX25_H1_OCPOL_BIT BIT(2)
#define MX27_H1_PM_BIT BIT(8)
#define MX27_H2_PM_BIT BIT(16)
#define MX27_OTG_PM_BIT BIT(24)
#define MX53_USB_OTG_PHY_CTRL_0_OFFSET 0x08
#define MX53_USB_OTG_PHY_CTRL_1_OFFSET 0x0c
#define MX53_USB_CTRL_1_OFFSET 0x10
#define MX53_USB_CTRL_1_H2_XCVR_CLK_SEL_MASK (0x11 << 2)
#define MX53_USB_CTRL_1_H2_XCVR_CLK_SEL_ULPI BIT(2)
#define MX53_USB_CTRL_1_H3_XCVR_CLK_SEL_MASK (0x11 << 6)
#define MX53_USB_CTRL_1_H3_XCVR_CLK_SEL_ULPI BIT(6)
#define MX53_USB_UH2_CTRL_OFFSET 0x14
#define MX53_USB_UH3_CTRL_OFFSET 0x18
#define MX53_USB_CLKONOFF_CTRL_OFFSET 0x24
#define MX53_USB_CLKONOFF_CTRL_H2_INT60CKOFF BIT(21)
#define MX53_USB_CLKONOFF_CTRL_H3_INT60CKOFF BIT(22)
#define MX53_BM_OVER_CUR_DIS_H1 BIT(5)
#define MX53_BM_OVER_CUR_DIS_OTG BIT(8)
#define MX53_BM_OVER_CUR_DIS_UHx BIT(30)
#define MX53_USB_CTRL_1_UH2_ULPI_EN BIT(26)
#define MX53_USB_CTRL_1_UH3_ULPI_EN BIT(27)
#define MX53_USB_UHx_CTRL_WAKE_UP_EN BIT(7)
#define MX53_USB_UHx_CTRL_ULPI_INT_EN BIT(8)
#define MX53_USB_PHYCTRL1_PLLDIV_MASK 0x3
#define MX53_USB_PLL_DIV_24_MHZ 0x01
#define MX6_BM_NON_BURST_SETTING BIT(1)
#define MX6_BM_OVER_CUR_DIS BIT(7)
#define MX6_BM_OVER_CUR_POLARITY BIT(8)
#define MX6_BM_PWR_POLARITY BIT(9)
#define MX6_BM_WAKEUP_ENABLE BIT(10)
#define MX6_BM_UTMI_ON_CLOCK BIT(13)
#define MX6_BM_ID_WAKEUP BIT(16)
#define MX6_BM_VBUS_WAKEUP BIT(17)
#define MX6SX_BM_DPDM_WAKEUP_EN BIT(29)
#define MX6_BM_WAKEUP_INTR BIT(31)
#define MX6_USB_HSIC_CTRL_OFFSET 0x10
/* Send resume signal without 480Mhz PHY clock */
#define MX6SX_BM_HSIC_AUTO_RESUME BIT(23)
/* set before portsc.suspendM = 1 */
#define MX6_BM_HSIC_DEV_CONN BIT(21)
/* HSIC enable */
#define MX6_BM_HSIC_EN BIT(12)
/* Force HSIC module 480M clock on, even when in Host is in suspend mode */
#define MX6_BM_HSIC_CLK_ON BIT(11)
#define MX6_USB_OTG1_PHY_CTRL 0x18
/* For imx6dql, it is host-only controller, for later imx6, it is otg's */
#define MX6_USB_OTG2_PHY_CTRL 0x1c
#define MX6SX_USB_VBUS_WAKEUP_SOURCE(v) (v << 8)
#define MX6SX_USB_VBUS_WAKEUP_SOURCE_VBUS MX6SX_USB_VBUS_WAKEUP_SOURCE(0)
#define MX6SX_USB_VBUS_WAKEUP_SOURCE_AVALID MX6SX_USB_VBUS_WAKEUP_SOURCE(1)
#define MX6SX_USB_VBUS_WAKEUP_SOURCE_BVALID MX6SX_USB_VBUS_WAKEUP_SOURCE(2)
#define MX6SX_USB_VBUS_WAKEUP_SOURCE_SESS_END MX6SX_USB_VBUS_WAKEUP_SOURCE(3)
#define VF610_OVER_CUR_DIS BIT(7)
#define MX7D_USBNC_USB_CTRL2 0x4
#define MX7D_USB_VBUS_WAKEUP_SOURCE_MASK 0x3
#define MX7D_USB_VBUS_WAKEUP_SOURCE(v) (v << 0)
#define MX7D_USB_VBUS_WAKEUP_SOURCE_VBUS MX7D_USB_VBUS_WAKEUP_SOURCE(0)
#define MX7D_USB_VBUS_WAKEUP_SOURCE_AVALID MX7D_USB_VBUS_WAKEUP_SOURCE(1)
#define MX7D_USB_VBUS_WAKEUP_SOURCE_BVALID MX7D_USB_VBUS_WAKEUP_SOURCE(2)
#define MX7D_USB_VBUS_WAKEUP_SOURCE_SESS_END MX7D_USB_VBUS_WAKEUP_SOURCE(3)
#define MX7D_USBNC_AUTO_RESUME BIT(2)
/* The default DM/DP value is pull-down */
#define MX7D_USBNC_USB_CTRL2_OPMODE(v) (v << 6)
#define MX7D_USBNC_USB_CTRL2_OPMODE_NON_DRIVING MX7D_USBNC_USB_CTRL2_OPMODE(1)
#define MX7D_USBNC_USB_CTRL2_OPMODE_OVERRIDE_MASK (BIT(7) | BIT(6))
#define MX7D_USBNC_USB_CTRL2_OPMODE_OVERRIDE_EN BIT(8)
#define MX7D_USBNC_USB_CTRL2_DP_OVERRIDE_VAL BIT(12)
#define MX7D_USBNC_USB_CTRL2_DP_OVERRIDE_EN BIT(13)
#define MX7D_USBNC_USB_CTRL2_DM_OVERRIDE_VAL BIT(14)
#define MX7D_USBNC_USB_CTRL2_DM_OVERRIDE_EN BIT(15)
#define MX7D_USBNC_USB_CTRL2_DP_DM_MASK (BIT(12) | BIT(13) | \
BIT(14) | BIT(15))
#define MX7D_USB_OTG_PHY_CFG2_CHRG_CHRGSEL BIT(0)
#define MX7D_USB_OTG_PHY_CFG2_CHRG_VDATDETENB0 BIT(1)
#define MX7D_USB_OTG_PHY_CFG2_CHRG_VDATSRCENB0 BIT(2)
#define MX7D_USB_OTG_PHY_CFG2_CHRG_DCDENB BIT(3)
#define MX7D_USB_OTG_PHY_CFG2_DRVVBUS0 BIT(16)
#define MX7D_USB_OTG_PHY_CFG2 0x34
#define MX7D_USB_OTG_PHY_STATUS 0x3c
#define MX7D_USB_OTG_PHY_STATUS_LINE_STATE0 BIT(0)
#define MX7D_USB_OTG_PHY_STATUS_LINE_STATE1 BIT(1)
#define MX7D_USB_OTG_PHY_STATUS_VBUS_VLD BIT(3)
#define MX7D_USB_OTG_PHY_STATUS_CHRGDET BIT(29)
#define MX7D_USB_OTG_PHY_CFG1 0x30
#define TXPREEMPAMPTUNE0_BIT 28
#define TXPREEMPAMPTUNE0_MASK (3 << 28)
#define TXRISETUNE0_BIT 24
#define TXRISETUNE0_MASK (3 << 24)
#define TXVREFTUNE0_BIT 20
#define TXVREFTUNE0_MASK (0xf << 20)
#define MX6_USB_OTG_WAKEUP_BITS (MX6_BM_WAKEUP_ENABLE | MX6_BM_VBUS_WAKEUP | \
MX6_BM_ID_WAKEUP | MX6SX_BM_DPDM_WAKEUP_EN)
/* SoC-specific hooks implemented by each usbmisc variant */
struct usbmisc_ops {
	/* It's called once when probe a usb device */
	int (*init)(struct imx_usbmisc_data *data);
	/* It's called once after adding a usb device */
	int (*post)(struct imx_usbmisc_data *data);
	/* It's called when we need to enable/disable usb wakeup */
	int (*set_wakeup)(struct imx_usbmisc_data *data, bool enabled);
	/* It's called before setting portsc.suspendM */
	int (*hsic_set_connect)(struct imx_usbmisc_data *data);
	/* It's called during suspend/resume */
	int (*hsic_set_clk)(struct imx_usbmisc_data *data, bool enabled);
	/* usb charger detection */
	int (*charger_detection)(struct imx_usbmisc_data *data);
	/* It's called when system resume from usb power lost */
	int (*power_lost_check)(struct imx_usbmisc_data *data);
	/* It's called to switch the VBUS valid comparator on or off */
	void (*vbus_comparator_on)(struct imx_usbmisc_data *data, bool on);
};
/* Per-instance state of the usbmisc (non-core) register block */
struct imx_usbmisc {
	void __iomem *base;		/* mapped non-core register region */
	spinlock_t lock;		/* serializes register read-modify-write */
	const struct usbmisc_ops *ops;	/* SoC-specific callbacks */
};
static inline bool is_imx53_usbmisc(struct imx_usbmisc_data *data);
/*
 * usbmisc_imx25_init: configure the i.MX25 OTG (index 0) or host1
 * (index 1) port: serial interface selection, power/over-current
 * polarity and related control bits.
 */
static int usbmisc_imx25_init(struct imx_usbmisc_data *data)
{
	struct imx_usbmisc *usbmisc = dev_get_drvdata(data->dev);
	unsigned long flags;
	u32 val = 0;

	if (data->index > 1)
		return -EINVAL;

	spin_lock_irqsave(&usbmisc->lock, flags);
	switch (data->index) {
	case 0:
		val = readl(usbmisc->base);
		val &= ~(MX25_OTG_SIC_MASK | MX25_OTG_PP_BIT);
		val |= (MX25_EHCI_INTERFACE_DIFF_UNI & MX25_EHCI_INTERFACE_MASK) << MX25_OTG_SIC_SHIFT;
		val |= (MX25_OTG_PM_BIT | MX25_OTG_OCPOL_BIT);

		/*
		 * If the polarity is not configured assume active high for
		 * historical reasons.
		 */
		if (data->oc_pol_configured && data->oc_pol_active_low)
			val &= ~MX25_OTG_OCPOL_BIT;

		writel(val, usbmisc->base);
		break;
	case 1:
		val = readl(usbmisc->base);
		val &= ~(MX25_H1_SIC_MASK | MX25_H1_PP_BIT | MX25_H1_IPPUE_UP_BIT);
		val |= (MX25_EHCI_INTERFACE_SINGLE_UNI & MX25_EHCI_INTERFACE_MASK) << MX25_H1_SIC_SHIFT;
		val |= (MX25_H1_PM_BIT | MX25_H1_OCPOL_BIT | MX25_H1_TLL_BIT |
			MX25_H1_USBTE_BIT | MX25_H1_IPPUE_DOWN_BIT);

		/*
		 * If the polarity is not configured assume active high for
		 * historical reasons.
		 */
		if (data->oc_pol_configured && data->oc_pol_active_low)
			val &= ~MX25_H1_OCPOL_BIT;

		writel(val, usbmisc->base);
		break;
	}
	spin_unlock_irqrestore(&usbmisc->lock, flags);

	return 0;
}
/*
 * usbmisc_imx25_post: post-probe PHY setup for the i.MX25 OTG port
 *
 * Only controller index 0 is touched; the external VBUS divider bit in
 * the PHY control register is set or cleared according to data->evdo.
 */
static int usbmisc_imx25_post(struct imx_usbmisc_data *data)
{
	struct imx_usbmisc *usbmisc = dev_get_drvdata(data->dev);
	void __iomem *reg;
	unsigned long flags;
	u32 val;

	/* NOTE(review): init checks index > 1, post allows 2 — confirm intent */
	if (data->index > 2)
		return -EINVAL;

	if (data->index)
		return 0;

	spin_lock_irqsave(&usbmisc->lock, flags);
	reg = usbmisc->base + MX25_USB_PHY_CTRL_OFFSET;
	val = readl(reg);

	if (data->evdo)
		val |= MX25_BM_EXTERNAL_VBUS_DIVIDER;
	else
		val &= ~MX25_BM_EXTERNAL_VBUS_DIVIDER;

	writel(val, reg);
	spin_unlock_irqrestore(&usbmisc->lock, flags);
	usleep_range(5000, 10000); /* needed to stabilize voltage */

	return 0;
}
/*
 * usbmisc_imx27_init: set or clear the power-mode bit of the selected
 * i.MX27 port (OTG, host1 or host2) according to data->disable_oc.
 */
static int usbmisc_imx27_init(struct imx_usbmisc_data *data)
{
	struct imx_usbmisc *usbmisc = dev_get_drvdata(data->dev);
	unsigned long flags;
	u32 bit;

	/* pick the PM bit that matches this controller instance */
	if (data->index == 0)
		bit = MX27_OTG_PM_BIT;
	else if (data->index == 1)
		bit = MX27_H1_PM_BIT;
	else if (data->index == 2)
		bit = MX27_H2_PM_BIT;
	else
		return -EINVAL;

	spin_lock_irqsave(&usbmisc->lock, flags);
	if (data->disable_oc)
		bit |= readl(usbmisc->base);
	else
		bit = readl(usbmisc->base) & ~bit;
	writel(bit, usbmisc->base);
	spin_unlock_irqrestore(&usbmisc->lock, flags);

	return 0;
}
/*
 * usbmisc_imx53_init: configure one of the four i.MX51/i.MX53 ports
 *
 * Always selects the 24 MHz PHY reference clock, then per port:
 * over-current disable for OTG/H1, and for H2/H3 optional ULPI mode
 * (transceiver clock select, wakeup/ULPI interrupts, and — on real
 * i.MX53 only — disabling the internal 60 MHz clock) plus over-current
 * disable.
 */
static int usbmisc_imx53_init(struct imx_usbmisc_data *data)
{
	struct imx_usbmisc *usbmisc = dev_get_drvdata(data->dev);
	void __iomem *reg = NULL;
	unsigned long flags;
	u32 val = 0;

	if (data->index > 3)
		return -EINVAL;

	/* Select a 24 MHz reference clock for the PHY  */
	val = readl(usbmisc->base + MX53_USB_OTG_PHY_CTRL_1_OFFSET);
	val &= ~MX53_USB_PHYCTRL1_PLLDIV_MASK;
	val |= MX53_USB_PLL_DIV_24_MHZ;
	writel(val, usbmisc->base + MX53_USB_OTG_PHY_CTRL_1_OFFSET);

	spin_lock_irqsave(&usbmisc->lock, flags);

	switch (data->index) {
	case 0:
		if (data->disable_oc) {
			reg = usbmisc->base + MX53_USB_OTG_PHY_CTRL_0_OFFSET;
			val = readl(reg) | MX53_BM_OVER_CUR_DIS_OTG;
			writel(val, reg);
		}
		break;
	case 1:
		if (data->disable_oc) {
			reg = usbmisc->base + MX53_USB_OTG_PHY_CTRL_0_OFFSET;
			val = readl(reg) | MX53_BM_OVER_CUR_DIS_H1;
			writel(val, reg);
		}
		break;
	case 2:
		if (data->ulpi) {
			/* set USBH2 into ULPI-mode. */
			reg = usbmisc->base + MX53_USB_CTRL_1_OFFSET;
			val = readl(reg) | MX53_USB_CTRL_1_UH2_ULPI_EN;
			/* select ULPI clock */
			val &= ~MX53_USB_CTRL_1_H2_XCVR_CLK_SEL_MASK;
			val |= MX53_USB_CTRL_1_H2_XCVR_CLK_SEL_ULPI;
			writel(val, reg);
			/* Set interrupt wake up enable */
			reg = usbmisc->base + MX53_USB_UH2_CTRL_OFFSET;
			val = readl(reg) | MX53_USB_UHx_CTRL_WAKE_UP_EN
				| MX53_USB_UHx_CTRL_ULPI_INT_EN;
			writel(val, reg);
			if (is_imx53_usbmisc(data)) {
				/* Disable internal 60Mhz clock */
				reg = usbmisc->base +
					MX53_USB_CLKONOFF_CTRL_OFFSET;
				val = readl(reg) |
					MX53_USB_CLKONOFF_CTRL_H2_INT60CKOFF;
				writel(val, reg);
			}

		}
		if (data->disable_oc) {
			reg = usbmisc->base + MX53_USB_UH2_CTRL_OFFSET;
			val = readl(reg) | MX53_BM_OVER_CUR_DIS_UHx;
			writel(val, reg);
		}
		break;
	case 3:
		if (data->ulpi) {
			/* set USBH3 into ULPI-mode. */
			reg = usbmisc->base + MX53_USB_CTRL_1_OFFSET;
			val = readl(reg) | MX53_USB_CTRL_1_UH3_ULPI_EN;
			/* select ULPI clock */
			val &= ~MX53_USB_CTRL_1_H3_XCVR_CLK_SEL_MASK;
			val |= MX53_USB_CTRL_1_H3_XCVR_CLK_SEL_ULPI;
			writel(val, reg);
			/* Set interrupt wake up enable */
			reg = usbmisc->base + MX53_USB_UH3_CTRL_OFFSET;
			val = readl(reg) | MX53_USB_UHx_CTRL_WAKE_UP_EN
				| MX53_USB_UHx_CTRL_ULPI_INT_EN;
			writel(val, reg);

			if (is_imx53_usbmisc(data)) {
				/* Disable internal 60Mhz clock */
				reg = usbmisc->base +
					MX53_USB_CLKONOFF_CTRL_OFFSET;
				val = readl(reg) |
					MX53_USB_CLKONOFF_CTRL_H3_INT60CKOFF;
				writel(val, reg);
			}
		}
		if (data->disable_oc) {
			reg = usbmisc->base + MX53_USB_UH3_CTRL_OFFSET;
			val = readl(reg) | MX53_BM_OVER_CUR_DIS_UHx;
			writel(val, reg);
		}
		break;
	}

	spin_unlock_irqrestore(&usbmisc->lock, flags);

	return 0;
}
/*
 * usbmisc_wakeup_setting: compute the wakeup-enable bit mask for a port
 *
 * Starts from all wakeup sources and strips ID wakeup when the port is
 * not OTG (or uses an external ID pin), and VBUS wakeup when the port is
 * host-only (or uses external VBUS).
 */
static u32 usbmisc_wakeup_setting(struct imx_usbmisc_data *data)
{
	u32 setting = MX6_USB_OTG_WAKEUP_BITS;

	if (data->ext_id || data->available_role != USB_DR_MODE_OTG)
		setting &= ~MX6_BM_ID_WAKEUP;

	if (data->ext_vbus || data->available_role == USB_DR_MODE_HOST)
		setting &= ~MX6_BM_VBUS_WAKEUP;

	return setting;
}
/*
 * usbmisc_imx6q_set_wakeup: enable or disable USB wakeup for one port
 *
 * When enabling, the wakeup bits are recomputed from the port's
 * capabilities; when disabling, a pending wakeup interrupt is logged
 * before all wakeup bits are cleared.
 */
static int usbmisc_imx6q_set_wakeup
	(struct imx_usbmisc_data *data, bool enabled)
{
	struct imx_usbmisc *usbmisc = dev_get_drvdata(data->dev);
	unsigned long flags;
	u32 val;
	int ret = 0;

	if (data->index > 3)
		return -EINVAL;

	spin_lock_irqsave(&usbmisc->lock, flags);
	/* each controller has its own 32-bit register at index * 4 */
	val = readl(usbmisc->base + data->index * 4);
	if (enabled) {
		val &= ~MX6_USB_OTG_WAKEUP_BITS;
		val |= usbmisc_wakeup_setting(data);
	} else {
		if (val & MX6_BM_WAKEUP_INTR)
			pr_debug("wakeup int at ci_hdrc.%d\n", data->index);
		val &= ~MX6_USB_OTG_WAKEUP_BITS;
	}
	writel(val, usbmisc->base + data->index * 4);
	spin_unlock_irqrestore(&usbmisc->lock, flags);

	return ret;
}
/*
 * usbmisc_imx6q_init: per-controller non-core init for i.MX6
 *
 * Configures over-current disable/polarity and power polarity, applies
 * the SoC non-burst setting and, for HSIC-capable instances, enables the
 * HSIC clock path. All wakeup sources are disabled at the end.
 */
static int usbmisc_imx6q_init(struct imx_usbmisc_data *data)
{
	struct imx_usbmisc *usbmisc = dev_get_drvdata(data->dev);
	unsigned long flags;
	u32 reg;

	if (data->index > 3)
		return -EINVAL;

	spin_lock_irqsave(&usbmisc->lock, flags);

	reg = readl(usbmisc->base + data->index * 4);
	if (data->disable_oc) {
		reg |= MX6_BM_OVER_CUR_DIS;
	} else {
		reg &= ~MX6_BM_OVER_CUR_DIS;

		/*
		 * If the polarity is not configured keep it as setup by the
		 * bootloader.
		 */
		if (data->oc_pol_configured && data->oc_pol_active_low)
			reg |= MX6_BM_OVER_CUR_POLARITY;
		else if (data->oc_pol_configured)
			reg &= ~MX6_BM_OVER_CUR_POLARITY;
	}
	/* If the polarity is not set keep it as setup by the bootlader */
	if (data->pwr_pol == 1)
		reg |= MX6_BM_PWR_POLARITY;
	writel(reg, usbmisc->base + data->index * 4);

	/* SoC non-burst setting */
	reg = readl(usbmisc->base + data->index * 4);
	writel(reg | MX6_BM_NON_BURST_SETTING,
			usbmisc->base + data->index * 4);

	/* For HSIC controller */
	if (data->hsic) {
		reg = readl(usbmisc->base + data->index * 4);
		writel(reg | MX6_BM_UTMI_ON_CLOCK,
			usbmisc->base + data->index * 4);

		/* HSIC instances map to slots 2/3 of the HSIC_CTRL bank */
		reg = readl(usbmisc->base + MX6_USB_HSIC_CTRL_OFFSET
			+ (data->index - 2) * 4);
		reg |= MX6_BM_HSIC_EN | MX6_BM_HSIC_CLK_ON;
		writel(reg, usbmisc->base + MX6_USB_HSIC_CTRL_OFFSET
			+ (data->index - 2) * 4);
	}

	spin_unlock_irqrestore(&usbmisc->lock, flags);

	usbmisc_imx6q_set_wakeup(data, false);

	return 0;
}
/*
 * usbmisc_imx6_hsic_get_reg_offset: HSIC control register offset for a
 * controller instance, or -EINVAL if the index cannot be an HSIC port.
 */
static int usbmisc_imx6_hsic_get_reg_offset(struct imx_usbmisc_data *data)
{
	switch (data->index) {
	case 2:
	case 3:
		/* pre-i.MX7D SoCs: HSIC controllers are instances 2 and 3 */
		return (data->index - 2) * 4;
	case 0:
		/*
		 * For SoCs like i.MX7D and later, each USB controller has
		 * its own non-core register region. For SoCs before i.MX7D,
		 * the first two USB controllers are non-HSIC controllers.
		 */
		return 0;
	default:
		dev_err(data->dev, "index is error for usbmisc\n");
		return -EINVAL;
	}
}
/*
 * usbmisc_imx6_hsic_set_connect: mark the HSIC device as connected
 *
 * Sets MX6_BM_HSIC_DEV_CONN (required before portsc.suspendM is set)
 * if it is not already set. Returns a negative errno for a non-HSIC
 * index.
 */
static int usbmisc_imx6_hsic_set_connect(struct imx_usbmisc_data *data)
{
	unsigned long flags;
	u32 val;
	struct imx_usbmisc *usbmisc = dev_get_drvdata(data->dev);
	int offset;

	spin_lock_irqsave(&usbmisc->lock, flags);
	offset = usbmisc_imx6_hsic_get_reg_offset(data);
	if (offset < 0) {
		spin_unlock_irqrestore(&usbmisc->lock, flags);
		return offset;
	}

	val = readl(usbmisc->base + MX6_USB_HSIC_CTRL_OFFSET + offset);
	if (!(val & MX6_BM_HSIC_DEV_CONN))
		writel(val | MX6_BM_HSIC_DEV_CONN,
			usbmisc->base + MX6_USB_HSIC_CTRL_OFFSET + offset);

	spin_unlock_irqrestore(&usbmisc->lock, flags);

	return 0;
}
/*
 * usbmisc_imx6_hsic_set_clk: force the HSIC 480M clock on or off
 *
 * HSIC_EN is always (re)asserted; CLK_ON is then set or cleared per @on.
 * Used around suspend/resume. Returns a negative errno for a non-HSIC
 * index.
 */
static int usbmisc_imx6_hsic_set_clk(struct imx_usbmisc_data *data, bool on)
{
	unsigned long flags;
	u32 val;
	struct imx_usbmisc *usbmisc = dev_get_drvdata(data->dev);
	int offset;

	spin_lock_irqsave(&usbmisc->lock, flags);
	offset = usbmisc_imx6_hsic_get_reg_offset(data);
	if (offset < 0) {
		spin_unlock_irqrestore(&usbmisc->lock, flags);
		return offset;
	}

	val = readl(usbmisc->base + MX6_USB_HSIC_CTRL_OFFSET + offset);
	val |= MX6_BM_HSIC_EN | MX6_BM_HSIC_CLK_ON;
	if (on)
		val |= MX6_BM_HSIC_CLK_ON;
	else
		val &= ~MX6_BM_HSIC_CLK_ON;

	writel(val, usbmisc->base + MX6_USB_HSIC_CTRL_OFFSET + offset);
	spin_unlock_irqrestore(&usbmisc->lock, flags);

	return 0;
}
/*
 * usbmisc_imx6sx_init: i.MX6SX init on top of the common i.MX6 init
 *
 * For the two OTG-capable ports it selects bvalid as the VBUS wakeup
 * source and disables dp/dm wakeup in device mode; for HSIC instances it
 * additionally enables automatic resume without the 480 MHz PHY clock.
 *
 * Fix: the HSIC AUTO_RESUME read-modify-write previously ran without the
 * usbmisc spinlock, racing against other accessors of the same register
 * (usbmisc_imx6_hsic_set_clk/set_connect, which take the lock). It is
 * now serialized like every other register update in this driver.
 */
static int usbmisc_imx6sx_init(struct imx_usbmisc_data *data)
{
	void __iomem *reg = NULL;
	unsigned long flags;
	struct imx_usbmisc *usbmisc = dev_get_drvdata(data->dev);
	u32 val;

	usbmisc_imx6q_init(data);

	if (data->index == 0 || data->index == 1) {
		reg = usbmisc->base + MX6_USB_OTG1_PHY_CTRL + data->index * 4;
		spin_lock_irqsave(&usbmisc->lock, flags);
		/* Set vbus wakeup source as bvalid */
		val = readl(reg);
		writel(val | MX6SX_USB_VBUS_WAKEUP_SOURCE_BVALID, reg);
		/*
		 * Disable dp/dm wakeup in device mode when vbus is
		 * not there.
		 */
		val = readl(usbmisc->base + data->index * 4);
		writel(val & ~MX6SX_BM_DPDM_WAKEUP_EN,
			usbmisc->base + data->index * 4);
		spin_unlock_irqrestore(&usbmisc->lock, flags);
	}

	/* For HSIC controller */
	if (data->hsic) {
		spin_lock_irqsave(&usbmisc->lock, flags);
		val = readl(usbmisc->base + MX6_USB_HSIC_CTRL_OFFSET);
		val |= MX6SX_BM_HSIC_AUTO_RESUME;
		writel(val, usbmisc->base + MX6_USB_HSIC_CTRL_OFFSET);
		spin_unlock_irqrestore(&usbmisc->lock, flags);
	}

	return 0;
}
/*
 * usbmisc_vf610_init: optionally disable over-current detection on the
 * single Vybrid misc register instance.
 */
static int usbmisc_vf610_init(struct imx_usbmisc_data *data)
{
	struct imx_usbmisc *usbmisc = dev_get_drvdata(data->dev);

	/*
	 * Vybrid only has one misc register set, but in two different
	 * areas. These is reflected in two instances of this driver.
	 */
	if (data->index >= 1)
		return -EINVAL;

	if (!data->disable_oc)
		return 0;

	writel(readl(usbmisc->base) | VF610_OVER_CUR_DIS, usbmisc->base);

	return 0;
}
/*
 * usbmisc_imx7d_set_wakeup: enable or disable USB wakeup (i.MX7D)
 *
 * i.MX7D has one non-core register region per controller, so the base
 * register is used directly. When disabling, a pending wakeup interrupt
 * is logged before the wakeup bits are cleared.
 */
static int usbmisc_imx7d_set_wakeup
	(struct imx_usbmisc_data *data, bool enabled)
{
	struct imx_usbmisc *usbmisc = dev_get_drvdata(data->dev);
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&usbmisc->lock, flags);
	val = readl(usbmisc->base);
	if (enabled) {
		val &= ~MX6_USB_OTG_WAKEUP_BITS;
		val |= usbmisc_wakeup_setting(data);
		writel(val, usbmisc->base);
	} else {
		if (val & MX6_BM_WAKEUP_INTR)
			dev_dbg(data->dev, "wakeup int\n");
		writel(val & ~MX6_USB_OTG_WAKEUP_BITS, usbmisc->base);
	}
	spin_unlock_irqrestore(&usbmisc->lock, flags);

	return 0;
}
/*
 * usbmisc_imx7d_init: non-core init for i.MX7D
 *
 * Configures over-current disable/polarity, power polarity and the SoC
 * non-burst setting. For non-HSIC operation it selects bvalid as the
 * VBUS wakeup source, enables auto-resume and applies optional PHY
 * signal-quality tuning (pre-emphasis, DC level, rise/fall time) when
 * the corresponding values are within range. Wakeup is disabled at the
 * end.
 */
static int usbmisc_imx7d_init(struct imx_usbmisc_data *data)
{
	struct imx_usbmisc *usbmisc = dev_get_drvdata(data->dev);
	unsigned long flags;
	u32 reg;

	if (data->index >= 1)
		return -EINVAL;

	spin_lock_irqsave(&usbmisc->lock, flags);
	reg = readl(usbmisc->base);
	if (data->disable_oc) {
		reg |= MX6_BM_OVER_CUR_DIS;
	} else {
		reg &= ~MX6_BM_OVER_CUR_DIS;

		/*
		 * If the polarity is not configured keep it as setup by the
		 * bootloader.
		 */
		if (data->oc_pol_configured && data->oc_pol_active_low)
			reg |= MX6_BM_OVER_CUR_POLARITY;
		else if (data->oc_pol_configured)
			reg &= ~MX6_BM_OVER_CUR_POLARITY;
	}
	/* If the polarity is not set keep it as setup by the bootlader */
	if (data->pwr_pol == 1)
		reg |= MX6_BM_PWR_POLARITY;
	writel(reg, usbmisc->base);

	/* SoC non-burst setting */
	reg = readl(usbmisc->base);
	writel(reg | MX6_BM_NON_BURST_SETTING, usbmisc->base);

	if (!data->hsic) {
		reg = readl(usbmisc->base + MX7D_USBNC_USB_CTRL2);
		reg &= ~MX7D_USB_VBUS_WAKEUP_SOURCE_MASK;
		writel(reg | MX7D_USB_VBUS_WAKEUP_SOURCE_BVALID
			| MX7D_USBNC_AUTO_RESUME,
			usbmisc->base + MX7D_USBNC_USB_CTRL2);

		/* PHY tuning for signal quality */
		reg = readl(usbmisc->base + MX7D_USB_OTG_PHY_CFG1);
		/* a negative tuning value means "not configured" — skip */
		if (data->emp_curr_control >= 0 &&
			data->emp_curr_control <=
			(TXPREEMPAMPTUNE0_MASK >> TXPREEMPAMPTUNE0_BIT)) {
			reg &= ~TXPREEMPAMPTUNE0_MASK;
			reg |= (data->emp_curr_control << TXPREEMPAMPTUNE0_BIT);
		}

		if (data->dc_vol_level_adjust >= 0 &&
			data->dc_vol_level_adjust <=
			(TXVREFTUNE0_MASK >> TXVREFTUNE0_BIT)) {
			reg &= ~TXVREFTUNE0_MASK;
			reg |= (data->dc_vol_level_adjust << TXVREFTUNE0_BIT);
		}

		if (data->rise_fall_time_adjust >= 0 &&
			data->rise_fall_time_adjust <=
			(TXRISETUNE0_MASK >> TXRISETUNE0_BIT)) {
			reg &= ~TXRISETUNE0_MASK;
			reg |= (data->rise_fall_time_adjust << TXRISETUNE0_BIT);
		}

		writel(reg, usbmisc->base + MX7D_USB_OTG_PHY_CFG1);
	}

	spin_unlock_irqrestore(&usbmisc->lock, flags);

	usbmisc_imx7d_set_wakeup(data, false);

	return 0;
}
/*
 * imx7d_charger_secondary_detection: BC 1.2 secondary detection
 *
 * Distinguishes a dedicated charging port (DCP) from a charging
 * downstream port (CDP) by driving VDM_SRC on D- with IDP_SINK on D+
 * and sampling the charger detect comparator. Stores the result in
 * usb_phy->chg_type. Always returns 0.
 */
static int imx7d_charger_secondary_detection(struct imx_usbmisc_data *data)
{
	struct imx_usbmisc *usbmisc = dev_get_drvdata(data->dev);
	struct usb_phy *usb_phy = data->usb_phy;
	int val;
	unsigned long flags;

	/* Clear VDATSRCENB0 to disable VDP_SRC and IDM_SNK required by BC 1.2 spec */
	spin_lock_irqsave(&usbmisc->lock, flags);
	val = readl(usbmisc->base + MX7D_USB_OTG_PHY_CFG2);
	val &= ~MX7D_USB_OTG_PHY_CFG2_CHRG_VDATSRCENB0;
	writel(val, usbmisc->base + MX7D_USB_OTG_PHY_CFG2);
	spin_unlock_irqrestore(&usbmisc->lock, flags);

	/* TVDMSRC_DIS */
	msleep(20);

	/* VDM_SRC is connected to D- and IDP_SINK is connected to D+ */
	spin_lock_irqsave(&usbmisc->lock, flags);
	val = readl(usbmisc->base + MX7D_USB_OTG_PHY_CFG2);
	writel(val | MX7D_USB_OTG_PHY_CFG2_CHRG_VDATSRCENB0 |
			MX7D_USB_OTG_PHY_CFG2_CHRG_VDATDETENB0 |
			MX7D_USB_OTG_PHY_CFG2_CHRG_CHRGSEL,
				usbmisc->base + MX7D_USB_OTG_PHY_CFG2);
	spin_unlock_irqrestore(&usbmisc->lock, flags);

	/* TVDMSRC_ON */
	msleep(40);

	/*
	 * Per BC 1.2, check voltage of D+:
	 * DCP: if greater than VDAT_REF;
	 * CDP: if less than VDAT_REF.
	 */
	val = readl(usbmisc->base + MX7D_USB_OTG_PHY_STATUS);
	if (val & MX7D_USB_OTG_PHY_STATUS_CHRGDET) {
		dev_dbg(data->dev, "It is a dedicate charging port\n");
		usb_phy->chg_type = DCP_TYPE;
	} else {
		dev_dbg(data->dev, "It is a charging downstream port\n");
		usb_phy->chg_type = CDP_TYPE;
	}

	return 0;
}
/*
 * imx7_disable_charger_detector: tear down BC 1.2 detection circuitry
 *
 * Clears all charger-detection enables in PHY CFG2 and removes the
 * OPMODE override in USB_CTRL2 (override value first, then the override
 * enable) so normal D+/D- operation resumes.
 */
static void imx7_disable_charger_detector(struct imx_usbmisc_data *data)
{
	struct imx_usbmisc *usbmisc = dev_get_drvdata(data->dev);
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&usbmisc->lock, flags);
	val = readl(usbmisc->base + MX7D_USB_OTG_PHY_CFG2);
	val &= ~(MX7D_USB_OTG_PHY_CFG2_CHRG_DCDENB |
			MX7D_USB_OTG_PHY_CFG2_CHRG_VDATSRCENB0 |
			MX7D_USB_OTG_PHY_CFG2_CHRG_VDATDETENB0 |
			MX7D_USB_OTG_PHY_CFG2_CHRG_CHRGSEL);
	writel(val, usbmisc->base + MX7D_USB_OTG_PHY_CFG2);

	/* Set OPMODE to be 2'b00 and disable its override */
	val = readl(usbmisc->base + MX7D_USBNC_USB_CTRL2);
	val &= ~MX7D_USBNC_USB_CTRL2_OPMODE_OVERRIDE_MASK;
	writel(val, usbmisc->base + MX7D_USBNC_USB_CTRL2);

	val = readl(usbmisc->base + MX7D_USBNC_USB_CTRL2);
	writel(val & ~MX7D_USBNC_USB_CTRL2_OPMODE_OVERRIDE_EN,
			usbmisc->base + MX7D_USBNC_USB_CTRL2);
	spin_unlock_irqrestore(&usbmisc->lock, flags);
}
/*
 * imx7d_charger_data_contact_detect: BC 1.2 data contact detect (DCD)
 *
 * Enables DCD, then polls LINE_STATE0 for up to ~100 iterations waiting
 * for the data pins to make stable contact (6 consecutive low samples).
 * DCD is disabled again before returning. Returns 0 on contact, -ENXIO
 * when the poll times out (pure power supply, no data lines).
 */
static int imx7d_charger_data_contact_detect(struct imx_usbmisc_data *data)
{
	struct imx_usbmisc *usbmisc = dev_get_drvdata(data->dev);
	unsigned long flags;
	u32 val;
	int i, data_pin_contact_count = 0;

	/* Enable Data Contact Detect (DCD) per the USB BC 1.2 */
	spin_lock_irqsave(&usbmisc->lock, flags);
	val = readl(usbmisc->base + MX7D_USB_OTG_PHY_CFG2);
	writel(val | MX7D_USB_OTG_PHY_CFG2_CHRG_DCDENB,
			usbmisc->base + MX7D_USB_OTG_PHY_CFG2);
	spin_unlock_irqrestore(&usbmisc->lock, flags);

	for (i = 0; i < 100; i = i + 1) {
		val = readl(usbmisc->base + MX7D_USB_OTG_PHY_STATUS);
		if (!(val & MX7D_USB_OTG_PHY_STATUS_LINE_STATE0)) {
			if (data_pin_contact_count++ > 5)
				/* Data pin makes contact */
				break;
			usleep_range(5000, 10000);
		} else {
			/* contact lost: restart the debounce count */
			data_pin_contact_count = 0;
			usleep_range(5000, 6000);
		}
	}

	/* Disable DCD after finished data contact check */
	spin_lock_irqsave(&usbmisc->lock, flags);
	val = readl(usbmisc->base + MX7D_USB_OTG_PHY_CFG2);
	writel(val & ~MX7D_USB_OTG_PHY_CFG2_CHRG_DCDENB,
			usbmisc->base + MX7D_USB_OTG_PHY_CFG2);
	spin_unlock_irqrestore(&usbmisc->lock, flags);

	if (i == 100) {
		dev_err(data->dev,
			"VBUS is coming from a dedicated power supply.\n");
		return -ENXIO;
	}

	return 0;
}
/*
 * imx7d_charger_primary_detection: BC 1.2 primary detection
 *
 * Drives VDP_SRC on D+ with IDM_SINK on D- and samples the comparator
 * after TVDPSRC_ON. If no charger is detected the port is a standard
 * downstream port (SDP) and usb_phy->chg_type is set accordingly;
 * otherwise chg_type is left untouched for secondary detection.
 * Always returns 0.
 */
static int imx7d_charger_primary_detection(struct imx_usbmisc_data *data)
{
	struct imx_usbmisc *usbmisc = dev_get_drvdata(data->dev);
	struct usb_phy *usb_phy = data->usb_phy;
	unsigned long flags;
	u32 val;

	/* VDP_SRC is connected to D+ and IDM_SINK is connected to D- */
	spin_lock_irqsave(&usbmisc->lock, flags);
	val = readl(usbmisc->base + MX7D_USB_OTG_PHY_CFG2);
	val &= ~MX7D_USB_OTG_PHY_CFG2_CHRG_CHRGSEL;
	writel(val | MX7D_USB_OTG_PHY_CFG2_CHRG_VDATSRCENB0 |
			MX7D_USB_OTG_PHY_CFG2_CHRG_VDATDETENB0,
				usbmisc->base + MX7D_USB_OTG_PHY_CFG2);
	spin_unlock_irqrestore(&usbmisc->lock, flags);

	/* TVDPSRC_ON */
	msleep(40);

	/* Check if D- is less than VDAT_REF to determine an SDP per BC 1.2 */
	val = readl(usbmisc->base + MX7D_USB_OTG_PHY_STATUS);
	if (!(val & MX7D_USB_OTG_PHY_STATUS_CHRGDET)) {
		dev_dbg(data->dev, "It is a standard downstream port\n");
		usb_phy->chg_type = SDP_TYPE;
	}

	return 0;
}
/*
 * Whole charger detection process:
 * 1. OPMODE override to be non-driving
 * 2. Data contact check
 * 3. Primary detection
 * 4. Secondary detection
 * 5. Disable charger detection
 *
 * Requires a valid VBUS; returns -EINVAL otherwise, or the first error
 * from the detection steps. Secondary detection only runs when primary
 * detection did not identify an SDP.
 */
static int imx7d_charger_detection(struct imx_usbmisc_data *data)
{
	struct imx_usbmisc *usbmisc = dev_get_drvdata(data->dev);
	struct usb_phy *usb_phy = data->usb_phy;
	unsigned long flags;
	u32 val;
	int ret;

	/* Check if vbus is valid */
	val = readl(usbmisc->base + MX7D_USB_OTG_PHY_STATUS);
	if (!(val & MX7D_USB_OTG_PHY_STATUS_VBUS_VLD)) {
		dev_err(data->dev, "vbus is error\n");
		return -EINVAL;
	}

	/*
	 * Keep OPMODE to be non-driving mode during the whole
	 * charger detection process.
	 */
	spin_lock_irqsave(&usbmisc->lock, flags);
	val = readl(usbmisc->base + MX7D_USBNC_USB_CTRL2);
	val &= ~MX7D_USBNC_USB_CTRL2_OPMODE_OVERRIDE_MASK;
	val |= MX7D_USBNC_USB_CTRL2_OPMODE_NON_DRIVING;
	writel(val, usbmisc->base + MX7D_USBNC_USB_CTRL2);

	/* write the override value first, then enable the override */
	val = readl(usbmisc->base + MX7D_USBNC_USB_CTRL2);
	writel(val | MX7D_USBNC_USB_CTRL2_OPMODE_OVERRIDE_EN,
			usbmisc->base + MX7D_USBNC_USB_CTRL2);
	spin_unlock_irqrestore(&usbmisc->lock, flags);

	ret = imx7d_charger_data_contact_detect(data);
	if (ret)
		return ret;

	ret = imx7d_charger_primary_detection(data);
	if (!ret && usb_phy->chg_type != SDP_TYPE)
		ret = imx7d_charger_secondary_detection(data);

	imx7_disable_charger_detector(data);

	return ret;
}
/*
 * usbmisc_imx7d_vbus_comparator_on: switch the VBUS valid comparator
 * @on: true to assert DRVVBUS0, false to deassert it
 *
 * No-op for HSIC instances, which have no VBUS comparator to manage.
 */
static void usbmisc_imx7d_vbus_comparator_on(struct imx_usbmisc_data *data,
					     bool on)
{
	unsigned long flags;
	struct imx_usbmisc *usbmisc = dev_get_drvdata(data->dev);
	u32 val;

	if (data->hsic)
		return;

	spin_lock_irqsave(&usbmisc->lock, flags);
	/*
	 * Disable VBUS valid comparator when in suspend mode,
	 * when OTG is disabled and DRVVBUS0 is asserted case
	 * the Bandgap circuitry and VBUS Valid comparator are
	 * still powered, even in Suspend or Sleep mode.
	 */
	val = readl(usbmisc->base + MX7D_USB_OTG_PHY_CFG2);
	if (on)
		val |= MX7D_USB_OTG_PHY_CFG2_DRVVBUS0;
	else
		val &= ~MX7D_USB_OTG_PHY_CFG2_DRVVBUS0;

	writel(val, usbmisc->base + MX7D_USB_OTG_PHY_CFG2);
	spin_unlock_irqrestore(&usbmisc->lock, flags);
}
/*
 * usbmisc_imx7ulp_init: non-core init for i.MX7ULP
 *
 * Same over-current/power-polarity and non-burst setup as i.MX7D, then:
 * for HSIC, enables the UTMI clock, the HSIC path, and auto-resume in
 * the non-core block; otherwise selects bvalid as the VBUS wakeup
 * source. Wakeup is disabled at the end.
 */
static int usbmisc_imx7ulp_init(struct imx_usbmisc_data *data)
{
	struct imx_usbmisc *usbmisc = dev_get_drvdata(data->dev);
	unsigned long flags;
	u32 reg;

	if (data->index >= 1)
		return -EINVAL;

	spin_lock_irqsave(&usbmisc->lock, flags);
	reg = readl(usbmisc->base);
	if (data->disable_oc) {
		reg |= MX6_BM_OVER_CUR_DIS;
	} else {
		reg &= ~MX6_BM_OVER_CUR_DIS;

		/*
		 * If the polarity is not configured keep it as setup by the
		 * bootloader.
		 */
		if (data->oc_pol_configured && data->oc_pol_active_low)
			reg |= MX6_BM_OVER_CUR_POLARITY;
		else if (data->oc_pol_configured)
			reg &= ~MX6_BM_OVER_CUR_POLARITY;
	}

	/* If the polarity is not set keep it as setup by the bootlader */
	if (data->pwr_pol == 1)
		reg |= MX6_BM_PWR_POLARITY;

	writel(reg, usbmisc->base);

	/* SoC non-burst setting */
	reg = readl(usbmisc->base);
	writel(reg | MX6_BM_NON_BURST_SETTING, usbmisc->base);

	if (data->hsic) {
		reg = readl(usbmisc->base);
		writel(reg | MX6_BM_UTMI_ON_CLOCK, usbmisc->base);

		reg = readl(usbmisc->base + MX6_USB_HSIC_CTRL_OFFSET);
		reg |= MX6_BM_HSIC_EN | MX6_BM_HSIC_CLK_ON;
		writel(reg, usbmisc->base + MX6_USB_HSIC_CTRL_OFFSET);

		/*
		 * For non-HSIC controller, the autoresume is enabled
		 * at MXS PHY driver (usbphy_ctrl bit18).
		 */
		reg = readl(usbmisc->base + MX7D_USBNC_USB_CTRL2);
		writel(reg | MX7D_USBNC_AUTO_RESUME,
			usbmisc->base + MX7D_USBNC_USB_CTRL2);
	} else {
		reg = readl(usbmisc->base + MX7D_USBNC_USB_CTRL2);
		reg &= ~MX7D_USB_VBUS_WAKEUP_SOURCE_MASK;
		writel(reg | MX7D_USB_VBUS_WAKEUP_SOURCE_BVALID,
			 usbmisc->base + MX7D_USBNC_USB_CTRL2);
	}

	spin_unlock_irqrestore(&usbmisc->lock, flags);

	usbmisc_imx7d_set_wakeup(data, false);

	return 0;
}
/*
 * usbmisc_imx7d_power_lost_check - detect power loss across suspend.
 *
 * Reads the first usbmisc register under the lock and compares it with
 * its power-on-reset default; a match means the block lost power and
 * must be re-initialized.  Returns 1 on power loss, 0 otherwise.
 */
static int usbmisc_imx7d_power_lost_check(struct imx_usbmisc_data *data)
{
	struct imx_usbmisc *usbmisc = dev_get_drvdata(data->dev);
	unsigned long flags;
	u32 reg;

	spin_lock_irqsave(&usbmisc->lock, flags);
	reg = readl(usbmisc->base);
	spin_unlock_irqrestore(&usbmisc->lock, flags);

	/* 0x30001000 is the power-on reset value of this register */
	return (reg == 0x30001000) ? 1 : 0;
}
/*
 * usbmisc_imx6sx_power_lost_check - detect power loss across suspend.
 *
 * Like the i.MX7D variant but the i.MX6SX has one 32-bit usbmisc
 * register per controller instance, indexed by data->index.
 * Returns 1 when the register reads back as its power-on-reset value
 * (power was lost), 0 otherwise.
 */
static int usbmisc_imx6sx_power_lost_check(struct imx_usbmisc_data *data)
{
	struct imx_usbmisc *usbmisc = dev_get_drvdata(data->dev);
	unsigned long flags;
	u32 reg;

	spin_lock_irqsave(&usbmisc->lock, flags);
	reg = readl(usbmisc->base + data->index * 4);
	spin_unlock_irqrestore(&usbmisc->lock, flags);

	/* 0x30001000 is the power-on reset value of this register */
	return (reg == 0x30001000) ? 1 : 0;
}
/* Per-SoC operation tables; matched via the DT table at the bottom. */
static const struct usbmisc_ops imx25_usbmisc_ops = {
	.init = usbmisc_imx25_init,
	.post = usbmisc_imx25_post,
};
static const struct usbmisc_ops imx27_usbmisc_ops = {
	.init = usbmisc_imx27_init,
};
/* i.MX51 shares the i.MX53 init sequence */
static const struct usbmisc_ops imx51_usbmisc_ops = {
	.init = usbmisc_imx53_init,
};
static const struct usbmisc_ops imx53_usbmisc_ops = {
	.init = usbmisc_imx53_init,
};
static const struct usbmisc_ops imx6q_usbmisc_ops = {
	.set_wakeup = usbmisc_imx6q_set_wakeup,
	.init = usbmisc_imx6q_init,
	.hsic_set_connect = usbmisc_imx6_hsic_set_connect,
	.hsic_set_clk   = usbmisc_imx6_hsic_set_clk,
};
static const struct usbmisc_ops vf610_usbmisc_ops = {
	.init = usbmisc_vf610_init,
};
/* i.MX6SX adds power-lost detection on top of the i.MX6Q feature set */
static const struct usbmisc_ops imx6sx_usbmisc_ops = {
	.set_wakeup = usbmisc_imx6q_set_wakeup,
	.init = usbmisc_imx6sx_init,
	.hsic_set_connect = usbmisc_imx6_hsic_set_connect,
	.hsic_set_clk = usbmisc_imx6_hsic_set_clk,
	.power_lost_check = usbmisc_imx6sx_power_lost_check,
};
/* i.MX7D: charger detection and a controllable VBUS comparator */
static const struct usbmisc_ops imx7d_usbmisc_ops = {
	.init = usbmisc_imx7d_init,
	.set_wakeup = usbmisc_imx7d_set_wakeup,
	.charger_detection = imx7d_charger_detection,
	.power_lost_check = usbmisc_imx7d_power_lost_check,
	.vbus_comparator_on = usbmisc_imx7d_vbus_comparator_on,
};
static const struct usbmisc_ops imx7ulp_usbmisc_ops = {
	.init = usbmisc_imx7ulp_init,
	.set_wakeup = usbmisc_imx7d_set_wakeup,
	.hsic_set_connect = usbmisc_imx6_hsic_set_connect,
	.hsic_set_clk = usbmisc_imx6_hsic_set_clk,
	.power_lost_check = usbmisc_imx7d_power_lost_check,
};
static inline bool is_imx53_usbmisc(struct imx_usbmisc_data *data)
{
struct imx_usbmisc *usbmisc = dev_get_drvdata(data->dev);
return usbmisc->ops == &imx53_usbmisc_ops;
}
/*
 * imx_usbmisc_init - run the SoC-specific init hook, if any.
 * A NULL @data (no usbmisc node) is a no-op and returns 0.
 */
int imx_usbmisc_init(struct imx_usbmisc_data *data)
{
	struct imx_usbmisc *usbmisc;

	if (!data)
		return 0;

	usbmisc = dev_get_drvdata(data->dev);
	if (usbmisc->ops->init)
		return usbmisc->ops->init(data);

	return 0;
}
EXPORT_SYMBOL_GPL(imx_usbmisc_init);
/*
 * imx_usbmisc_init_post - post-init hook plus default wakeup disarm.
 *
 * Runs the optional ->post step and then disables wakeup; either
 * failure is logged and propagated.  NULL @data is a no-op.
 */
int imx_usbmisc_init_post(struct imx_usbmisc_data *data)
{
	struct imx_usbmisc *usbmisc;
	int err;

	if (!data)
		return 0;

	usbmisc = dev_get_drvdata(data->dev);

	if (usbmisc->ops->post) {
		err = usbmisc->ops->post(data);
		if (err) {
			dev_err(data->dev, "post init failed, ret=%d\n", err);
			return err;
		}
	}

	if (usbmisc->ops->set_wakeup) {
		err = usbmisc->ops->set_wakeup(data, false);
		if (err) {
			dev_err(data->dev, "set_wakeup failed, ret=%d\n", err);
			return err;
		}
	}

	return 0;
}
EXPORT_SYMBOL_GPL(imx_usbmisc_init_post);
/*
 * imx_usbmisc_hsic_set_connect - HSIC connect hook; no-op unless this
 * instance is an HSIC port and the SoC provides the callback.
 */
int imx_usbmisc_hsic_set_connect(struct imx_usbmisc_data *data)
{
	struct imx_usbmisc *usbmisc;

	if (!data)
		return 0;

	usbmisc = dev_get_drvdata(data->dev);
	if (data->hsic && usbmisc->ops->hsic_set_connect)
		return usbmisc->ops->hsic_set_connect(data);

	return 0;
}
EXPORT_SYMBOL_GPL(imx_usbmisc_hsic_set_connect);
/*
 * imx_usbmisc_charger_detection - run or reset charger detection.
 * @connect: true when a cable was plugged, false when removed.
 *
 * On connect, runs the SoC charger-detection hook and records the
 * resulting charger state on the USB PHY; on disconnect, clears the
 * charger state.  Returns the hook's result, -EINVAL for NULL @data,
 * or -ENOTSUPP when the SoC has no detection hook.
 */
int imx_usbmisc_charger_detection(struct imx_usbmisc_data *data, bool connect)
{
	struct imx_usbmisc *usbmisc;
	struct usb_phy *usb_phy;
	int ret;

	if (!data)
		return -EINVAL;

	usbmisc = dev_get_drvdata(data->dev);
	usb_phy = data->usb_phy;
	if (!usbmisc->ops->charger_detection)
		return -ENOTSUPP;

	if (!connect) {
		/* cable removed: forget whatever was detected */
		usb_phy->chg_state = USB_CHARGER_ABSENT;
		usb_phy->chg_type = UNKNOWN_TYPE;
		return 0;
	}

	ret = usbmisc->ops->charger_detection(data);
	if (ret) {
		dev_err(data->dev,
			"Error occurs during detection: %d\n",
			ret);
		usb_phy->chg_state = USB_CHARGER_ABSENT;
	} else {
		usb_phy->chg_state = USB_CHARGER_PRESENT;
	}

	return ret;
}
/*
 * imx_usbmisc_suspend - put the non-core logic into its suspend state.
 * @data: usbmisc data for this controller instance (NULL is a no-op)
 * @wakeup: arm the wakeup logic before suspending
 *
 * Turns the VBUS comparator off, optionally arms wakeup, and gates the
 * HSIC clock for HSIC ports.  Returns 0 on success or a negative errno.
 */
int imx_usbmisc_suspend(struct imx_usbmisc_data *data, bool wakeup)
{
	struct imx_usbmisc *usbmisc;
	int ret = 0;

	if (!data)
		return 0;

	usbmisc = dev_get_drvdata(data->dev);

	/* the comparator must not stay powered across suspend */
	if (usbmisc->ops->vbus_comparator_on)
		usbmisc->ops->vbus_comparator_on(data, false);

	if (wakeup && usbmisc->ops->set_wakeup)
		ret = usbmisc->ops->set_wakeup(data, true);
	if (ret) {
		dev_err(data->dev, "set_wakeup failed, ret=%d\n", ret);
		return ret;
	}

	if (usbmisc->ops->hsic_set_clk && data->hsic)
		ret = usbmisc->ops->hsic_set_clk(data, false);
	if (ret) {
		/* was a copy-paste of "set_wakeup failed"; report the real call */
		dev_err(data->dev, "hsic_set_clk failed, ret=%d\n", ret);
		return ret;
	}

	return ret;
}
EXPORT_SYMBOL_GPL(imx_usbmisc_suspend);
/*
 * imx_usbmisc_resume - restore the non-core logic after resume.
 * @data: usbmisc data for this controller instance (NULL is a no-op)
 * @wakeup: wakeup was armed on suspend and must be disarmed now
 *
 * Re-initializes the block if power was lost while suspended, disarms
 * wakeup, ungates the HSIC clock and re-enables the VBUS comparator.
 * On HSIC clock failure the wakeup setting is rolled back.
 * Returns 0 on success or a negative errno.
 */
int imx_usbmisc_resume(struct imx_usbmisc_data *data, bool wakeup)
{
	struct imx_usbmisc *usbmisc;
	int ret = 0;

	if (!data)
		return 0;

	usbmisc = dev_get_drvdata(data->dev);

	if (usbmisc->ops->power_lost_check)
		ret = usbmisc->ops->power_lost_check(data);
	if (ret > 0) {
		/* re-init if resume from power lost */
		ret = imx_usbmisc_init(data);
		if (ret) {
			dev_err(data->dev, "re-init failed, ret=%d\n", ret);
			return ret;
		}
	}

	if (wakeup && usbmisc->ops->set_wakeup)
		ret = usbmisc->ops->set_wakeup(data, false);
	if (ret) {
		dev_err(data->dev, "set_wakeup failed, ret=%d\n", ret);
		return ret;
	}

	if (usbmisc->ops->hsic_set_clk && data->hsic)
		ret = usbmisc->ops->hsic_set_clk(data, true);
	if (ret) {
		/* was a copy-paste of "set_wakeup failed"; report the real call */
		dev_err(data->dev, "hsic_set_clk failed, ret=%d\n", ret);
		goto hsic_set_clk_fail;
	}

	if (usbmisc->ops->vbus_comparator_on)
		usbmisc->ops->vbus_comparator_on(data, true);

	return 0;

hsic_set_clk_fail:
	/* re-arm wakeup so the suspend state stays consistent */
	if (wakeup && usbmisc->ops->set_wakeup)
		usbmisc->ops->set_wakeup(data, true);
	return ret;
}
EXPORT_SYMBOL_GPL(imx_usbmisc_resume);
/* DT match table: compatible string -> per-SoC ops table */
static const struct of_device_id usbmisc_imx_dt_ids[] = {
	{
		.compatible = "fsl,imx25-usbmisc",
		.data = &imx25_usbmisc_ops,
	},
	{
		/* i.MX35 is register-compatible with i.MX25 here */
		.compatible = "fsl,imx35-usbmisc",
		.data = &imx25_usbmisc_ops,
	},
	{
		.compatible = "fsl,imx27-usbmisc",
		.data = &imx27_usbmisc_ops,
	},
	{
		.compatible = "fsl,imx51-usbmisc",
		.data = &imx51_usbmisc_ops,
	},
	{
		.compatible = "fsl,imx53-usbmisc",
		.data = &imx53_usbmisc_ops,
	},
	{
		.compatible = "fsl,imx6q-usbmisc",
		.data = &imx6q_usbmisc_ops,
	},
	{
		.compatible = "fsl,vf610-usbmisc",
		.data = &vf610_usbmisc_ops,
	},
	{
		.compatible = "fsl,imx6sx-usbmisc",
		.data = &imx6sx_usbmisc_ops,
	},
	{
		/* i.MX6UL reuses the i.MX6SX ops */
		.compatible = "fsl,imx6ul-usbmisc",
		.data = &imx6sx_usbmisc_ops,
	},
	{
		.compatible = "fsl,imx7d-usbmisc",
		.data = &imx7d_usbmisc_ops,
	},
	{
		.compatible = "fsl,imx7ulp-usbmisc",
		.data = &imx7ulp_usbmisc_ops,
	},
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, usbmisc_imx_dt_ids);
/*
 * usbmisc_imx_probe - allocate per-device state, map the register block
 * and bind the SoC-specific ops selected by the DT match.
 */
static int usbmisc_imx_probe(struct platform_device *pdev)
{
	struct imx_usbmisc *usbmisc;

	usbmisc = devm_kzalloc(&pdev->dev, sizeof(*usbmisc), GFP_KERNEL);
	if (!usbmisc)
		return -ENOMEM;

	spin_lock_init(&usbmisc->lock);

	usbmisc->base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(usbmisc->base))
		return PTR_ERR(usbmisc->base);

	usbmisc->ops = of_device_get_match_data(&pdev->dev);
	platform_set_drvdata(pdev, usbmisc);

	return 0;
}
/* Plain platform driver; no remove needed, everything is devm-managed. */
static struct platform_driver usbmisc_imx_driver = {
	.probe = usbmisc_imx_probe,
	.driver = {
		.name = "usbmisc_imx",
		.of_match_table = usbmisc_imx_dt_ids,
	},
};
module_platform_driver(usbmisc_imx_driver);
MODULE_ALIAS("platform:usbmisc-imx");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("driver for imx usb non-core registers");
MODULE_AUTHOR("Richard Zhao <[email protected]>");
| linux-master | drivers/usb/chipidea/usbmisc_imx.c |
// SPDX-License-Identifier: GPL-2.0
/*
* core.c - ChipIdea USB IP core family device controller
*
* Copyright (C) 2008 Chipidea - MIPS Technologies, Inc. All rights reserved.
* Copyright (C) 2020 NXP
*
* Author: David Lopo
* Peter Chen <[email protected]>
*
* Main Features:
* - Four transfers are supported, usbtest is passed
* - USB Certification for gadget: CH9 and Mass Storage are passed
* - Low power mode
* - USB wakeup
*/
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/extcon.h>
#include <linux/phy/phy.h>
#include <linux/platform_device.h>
#include <linux/module.h>
#include <linux/idr.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/pm_runtime.h>
#include <linux/pinctrl/consumer.h>
#include <linux/usb/ch9.h>
#include <linux/usb/gadget.h>
#include <linux/usb/otg.h>
#include <linux/usb/chipidea.h>
#include <linux/usb/of.h>
#include <linux/of.h>
#include <linux/regulator/consumer.h>
#include <linux/usb/ehci_def.h>
#include "ci.h"
#include "udc.h"
#include "bits.h"
#include "host.h"
#include "otg.h"
#include "otg_fsm.h"
/*
 * Controller register map: byte offsets of each capability/operational
 * register, indexed by enum ci_hw_regs.  Two layouts exist depending on
 * whether the core was synthesized with the Low Power Mode feature; the
 * LPM variant moves TESTMODE and everything from OTGSC onwards.
 */
static const u8 ci_regs_nolpm[] = {
	[CAP_CAPLENGTH]		= 0x00U,
	[CAP_HCCPARAMS]		= 0x08U,
	[CAP_DCCPARAMS]		= 0x24U,
	[CAP_TESTMODE]		= 0x38U,
	[OP_USBCMD]		= 0x00U,
	[OP_USBSTS]		= 0x04U,
	[OP_USBINTR]		= 0x08U,
	[OP_FRINDEX]		= 0x0CU,
	[OP_DEVICEADDR]		= 0x14U,
	[OP_ENDPTLISTADDR]	= 0x18U,
	[OP_TTCTRL]		= 0x1CU,
	[OP_BURSTSIZE]		= 0x20U,
	[OP_ULPI_VIEWPORT]	= 0x30U,
	[OP_PORTSC]		= 0x44U,
	[OP_DEVLC]		= 0x84U,
	[OP_OTGSC]		= 0x64U,
	[OP_USBMODE]		= 0x68U,
	[OP_ENDPTSETUPSTAT]	= 0x6CU,
	[OP_ENDPTPRIME]		= 0x70U,
	[OP_ENDPTFLUSH]		= 0x74U,
	[OP_ENDPTSTAT]		= 0x78U,
	[OP_ENDPTCOMPLETE]	= 0x7CU,
	[OP_ENDPTCTRL]		= 0x80U,
};
/* Register offsets for cores synthesized with Low Power Mode support */
static const u8 ci_regs_lpm[] = {
	[CAP_CAPLENGTH]		= 0x00U,
	[CAP_HCCPARAMS]		= 0x08U,
	[CAP_DCCPARAMS]		= 0x24U,
	[CAP_TESTMODE]		= 0xFCU,
	[OP_USBCMD]		= 0x00U,
	[OP_USBSTS]		= 0x04U,
	[OP_USBINTR]		= 0x08U,
	[OP_FRINDEX]		= 0x0CU,
	[OP_DEVICEADDR]		= 0x14U,
	[OP_ENDPTLISTADDR]	= 0x18U,
	[OP_TTCTRL]		= 0x1CU,
	[OP_BURSTSIZE]		= 0x20U,
	[OP_ULPI_VIEWPORT]	= 0x30U,
	[OP_PORTSC]		= 0x44U,
	[OP_DEVLC]		= 0x84U,
	[OP_OTGSC]		= 0xC4U,
	[OP_USBMODE]		= 0xC8U,
	[OP_ENDPTSETUPSTAT]	= 0xD8U,
	[OP_ENDPTPRIME]		= 0xDCU,
	[OP_ENDPTFLUSH]		= 0xE0U,
	[OP_ENDPTSTAT]		= 0xE4U,
	[OP_ENDPTCOMPLETE]	= 0xE8U,
	[OP_ENDPTCTRL]		= 0xECU,
};
/*
 * hw_alloc_regmap - precompute the absolute address of every register.
 * @ci: the controller
 * @is_lpm: use the LPM register layout
 *
 * Registers up to (but excluding) OP_ENDPTCTRL get base + table offset,
 * where the base is the capability bank for CAP_* indices and the
 * operational bank otherwise.  The per-endpoint ENDPTCTRL registers are
 * a contiguous array, so the remaining indices are addressed as
 * ENDPTCTRL + 4 bytes per endpoint.
 */
static void hw_alloc_regmap(struct ci_hdrc *ci, bool is_lpm)
{
	int i;
	for (i = 0; i < OP_ENDPTCTRL; i++)
		ci->hw_bank.regmap[i] =
			(i <= CAP_LAST ? ci->hw_bank.cap : ci->hw_bank.op) +
			(is_lpm ? ci_regs_lpm[i] : ci_regs_nolpm[i]);
	for (; i <= OP_LAST; i++)
		ci->hw_bank.regmap[i] = ci->hw_bank.op +
			4 * (i - OP_ENDPTCTRL) +
			(is_lpm
			 ? ci_regs_lpm[OP_ENDPTCTRL]
			 : ci_regs_nolpm[OP_ENDPTCTRL]);
}
static enum ci_revision ci_get_revision(struct ci_hdrc *ci)
{
int ver = hw_read_id_reg(ci, ID_ID, VERSION) >> __ffs(VERSION);
enum ci_revision rev = CI_REVISION_UNKNOWN;
if (ver == 0x2) {
rev = hw_read_id_reg(ci, ID_ID, REVISION)
>> __ffs(REVISION);
rev += CI_REVISION_20;
} else if (ver == 0x0) {
rev = CI_REVISION_1X;
}
return rev;
}
/**
 * hw_read_intr_enable: returns interrupt enable register
 *
 * @ci: the controller
 *
 * This function returns the full USBINTR register contents.
 */
u32 hw_read_intr_enable(struct ci_hdrc *ci)
{
	return hw_read(ci, OP_USBINTR, ~0);
}
/**
 * hw_read_intr_status: returns interrupt status register
 *
 * @ci: the controller
 *
 * This function returns the full USBSTS register contents.
 */
u32 hw_read_intr_status(struct ci_hdrc *ci)
{
	return hw_read(ci, OP_USBSTS, ~0);
}
/**
 * hw_port_test_set: writes port test mode (execute without interruption)
 * @ci: the controller
 * @mode: new value for the PORTSC PTC field
 *
 * Returns 0 on success or -EINVAL for a mode outside the 0..7 range
 * defined by the PTC field.
 */
int hw_port_test_set(struct ci_hdrc *ci, u8 mode)
{
	const u8 max_test_mode = 7;

	if (mode > max_test_mode)
		return -EINVAL;

	hw_write(ci, OP_PORTSC, PORTSC_PTC, mode << __ffs(PORTSC_PTC));
	return 0;
}
/**
 * hw_port_test_get: reads port test mode value
 *
 * @ci: the controller
 *
 * This function returns the PTC field of PORTSC, shifted down to bit 0.
 */
u8 hw_port_test_get(struct ci_hdrc *ci)
{
	return hw_read(ci, OP_PORTSC, PORTSC_PTC) >> __ffs(PORTSC_PTC);
}
/* Wait for the PHY's status outputs to settle after leaving low power. */
static void hw_wait_phy_stable(void)
{
	/*
	 * The phy needs some delay to output the stable status from low
	 * power mode. And for OTGSC, the status inputs are debounced
	 * using a 1 ms time constant, so, delay 2ms for controller to get
	 * the stable status, like vbus and id when the phy leaves low power.
	 */
	usleep_range(2000, 2500);
}
/*
 * The PHY enters/leaves low power mode.
 *
 * The PHCD (PHY clock disable) bit lives in DEVLC on LPM cores and in
 * PORTSC otherwise; only toggle it when the requested state differs
 * from the current one.
 */
static void ci_hdrc_enter_lpm_common(struct ci_hdrc *ci, bool enable)
{
	enum ci_hw_regs reg = ci->hw_bank.lpm ? OP_DEVLC : OP_PORTSC;
	bool lpm = !!(hw_read(ci, reg, PORTSC_PHCD(ci->hw_bank.lpm)));
	if (enable && !lpm)
		hw_write(ci, reg, PORTSC_PHCD(ci->hw_bank.lpm),
				PORTSC_PHCD(ci->hw_bank.lpm));
	else if (!enable && lpm)
		hw_write(ci, reg, PORTSC_PHCD(ci->hw_bank.lpm),
				0);
}
/*
 * ci_hdrc_enter_lpm - dispatch to the platform's low-power-mode hook
 * (defaults to ci_hdrc_enter_lpm_common via ci_get_platdata).
 *
 * Note: the original "return f(...)" returned a void expression from a
 * void function, which is an ISO C constraint violation (C11 6.8.6.4)
 * that only compiles as a GNU extension; a plain call is equivalent.
 */
static void ci_hdrc_enter_lpm(struct ci_hdrc *ci, bool enable)
{
	ci->platdata->enter_lpm(ci, enable);
}
/*
 * hw_device_init - discover the register banks and basic capabilities.
 * @ci: the controller
 * @base: mapped base of the register window
 *
 * Locates the capability and operational banks, builds the register
 * map (switching to the LPM layout when HCCPARAMS reports LPM), caches
 * the endpoint count, wakes the PHY, and masks/acks all interrupts.
 * Returns 0 on success or -ENODEV if the core reports more endpoints
 * than the driver supports.
 */
static int hw_device_init(struct ci_hdrc *ci, void __iomem *base)
{
	u32 reg;
	/* bank is a module variable */
	ci->hw_bank.abs = base;
	ci->hw_bank.cap = ci->hw_bank.abs;
	ci->hw_bank.cap += ci->platdata->capoffset;
	/* CAPLENGTH (low byte of the first cap word) gives the op-bank offset */
	ci->hw_bank.op = ci->hw_bank.cap + (ioread32(ci->hw_bank.cap) & 0xff);
	hw_alloc_regmap(ci, false);
	reg = hw_read(ci, CAP_HCCPARAMS, HCCPARAMS_LEN) >>
		__ffs(HCCPARAMS_LEN);
	ci->hw_bank.lpm  = reg;
	/* rebuild the map with the LPM layout if the core has LPM */
	if (reg)
		hw_alloc_regmap(ci, !!reg);
	ci->hw_bank.size = ci->hw_bank.op - ci->hw_bank.abs;
	ci->hw_bank.size += OP_LAST;
	ci->hw_bank.size /= sizeof(u32);
	reg = hw_read(ci, CAP_DCCPARAMS, DCCPARAMS_DEN) >>
		__ffs(DCCPARAMS_DEN);
	ci->hw_ep_max = reg * 2;   /* cache hw ENDPT_MAX (DEN counts pairs) */
	if (ci->hw_ep_max > ENDPT_MAX)
		return -ENODEV;
	/* make sure the PHY is running before touching more registers */
	ci_hdrc_enter_lpm(ci, false);
	/* Disable all interrupts bits */
	hw_write(ci, OP_USBINTR, 0xffffffff, 0);
	/* Clear all interrupts status bits*/
	hw_write(ci, OP_USBSTS, 0xffffffff, 0xffffffff);
	ci->rev = ci_get_revision(ci);
	dev_dbg(ci->dev,
		"revision: %d, lpm: %d; cap: %px op: %px\n",
		ci->rev, ci->hw_bank.lpm, ci->hw_bank.cap, ci->hw_bank.op);
	/* setup lock mode ? */
	/* ENDPTSETUPSTAT is '0' by default */
	/* HCSPARAMS.bf.ppc SHOULD BE zero for device */
	return 0;
}
/**
 * hw_phymode_configure: program the PHY interface type into the core
 * @ci: the controller
 *
 * Translates platdata->phy_mode into the PTS/PTW (and STS for serial)
 * fields, writing DEVLC on LPM cores and PORTSC otherwise.  Unknown
 * modes leave the hardware untouched.
 */
void hw_phymode_configure(struct ci_hdrc *ci)
{
	u32 portsc, lpm, sts = 0;
	switch (ci->platdata->phy_mode) {
	case USBPHY_INTERFACE_MODE_UTMI:
		portsc = PORTSC_PTS(PTS_UTMI);
		lpm = DEVLC_PTS(PTS_UTMI);
		break;
	case USBPHY_INTERFACE_MODE_UTMIW:
		/* UTMI wide: 16-bit data path, so also set PTW */
		portsc = PORTSC_PTS(PTS_UTMI) | PORTSC_PTW;
		lpm = DEVLC_PTS(PTS_UTMI) | DEVLC_PTW;
		break;
	case USBPHY_INTERFACE_MODE_ULPI:
		portsc = PORTSC_PTS(PTS_ULPI);
		lpm = DEVLC_PTS(PTS_ULPI);
		break;
	case USBPHY_INTERFACE_MODE_SERIAL:
		portsc = PORTSC_PTS(PTS_SERIAL);
		lpm = DEVLC_PTS(PTS_SERIAL);
		sts = 1;
		break;
	case USBPHY_INTERFACE_MODE_HSIC:
		portsc = PORTSC_PTS(PTS_HSIC);
		lpm = DEVLC_PTS(PTS_HSIC);
		break;
	default:
		return;
	}
	if (ci->hw_bank.lpm) {
		hw_write(ci, OP_DEVLC, DEVLC_PTS(7) | DEVLC_PTW, lpm);
		if (sts)
			hw_write(ci, OP_DEVLC, DEVLC_STS, DEVLC_STS);
	} else {
		hw_write(ci, OP_PORTSC, PORTSC_PTS(7) | PORTSC_PTW, portsc);
		if (sts)
			hw_write(ci, OP_PORTSC, PORTSC_STS, PORTSC_STS);
	}
}
EXPORT_SYMBOL_GPL(hw_phymode_configure);
/**
 * _ci_usb_phy_init: initialize phy taking in account both phy and usb_phy
 * interfaces
 * @ci: the controller
 *
 * Uses the generic PHY framework when ci->phy is set (init then power
 * on, undoing init if power-on fails), otherwise falls back to the
 * legacy USB PHY.  Returns an error code if the phy failed to init.
 */
static int _ci_usb_phy_init(struct ci_hdrc *ci)
{
	int ret;

	if (!ci->phy)
		return usb_phy_init(ci->usb_phy);

	ret = phy_init(ci->phy);
	if (ret)
		return ret;

	ret = phy_power_on(ci->phy);
	if (ret)
		phy_exit(ci->phy);

	return ret;
}
/**
 * ci_usb_phy_exit: deinitialize phy taking in account both phy and usb_phy
 * interfaces
 * @ci: the controller
 *
 * Does nothing when the glue layer owns the PHY lifecycle
 * (CI_HDRC_OVERRIDE_PHY_CONTROL).
 */
static void ci_usb_phy_exit(struct ci_hdrc *ci)
{
	if (ci->platdata->flags & CI_HDRC_OVERRIDE_PHY_CONTROL)
		return;

	if (!ci->phy) {
		usb_phy_shutdown(ci->usb_phy);
		return;
	}

	phy_power_off(ci->phy);
	phy_exit(ci->phy);
}
/**
 * ci_usb_phy_init: initialize phy according to different phy type
 * @ci: the controller
 *
 * This function returns an error code if usb_phy_init has failed
 *
 * The order between PHY init and programming the port's PHY mode is
 * interface-dependent: UTMI-class PHYs must be running before the mode
 * is written, while ULPI/serial need the mode set first.
 */
static int ci_usb_phy_init(struct ci_hdrc *ci)
{
	int ret;
	/* glue layer owns the PHY entirely; nothing to do here */
	if (ci->platdata->flags & CI_HDRC_OVERRIDE_PHY_CONTROL)
		return 0;
	switch (ci->platdata->phy_mode) {
	case USBPHY_INTERFACE_MODE_UTMI:
	case USBPHY_INTERFACE_MODE_UTMIW:
	case USBPHY_INTERFACE_MODE_HSIC:
		/* PHY first, then port mode */
		ret = _ci_usb_phy_init(ci);
		if (!ret)
			hw_wait_phy_stable();
		else
			return ret;
		hw_phymode_configure(ci);
		break;
	case USBPHY_INTERFACE_MODE_ULPI:
	case USBPHY_INTERFACE_MODE_SERIAL:
		/* port mode first, then PHY */
		hw_phymode_configure(ci);
		ret = _ci_usb_phy_init(ci);
		if (ret)
			return ret;
		break;
	default:
		/* unknown interface: just bring the PHY up */
		ret = _ci_usb_phy_init(ci);
		if (!ret)
			hw_wait_phy_stable();
	}
	return ret;
}
/**
 * ci_platform_configure: do controller configure
 * @ci: the controller
 *
 * Applies platdata tuning that depends on the current USBMODE: stream
 * disable per role, forced full speed, TT threshold, interrupt
 * threshold (ITC) and AHB/endpoint burst sizes.
 */
void ci_platform_configure(struct ci_hdrc *ci)
{
	bool is_device_mode, is_host_mode;
	is_device_mode = hw_read(ci, OP_USBMODE, USBMODE_CM) == USBMODE_CM_DC;
	is_host_mode = hw_read(ci, OP_USBMODE, USBMODE_CM) == USBMODE_CM_HC;
	if (is_device_mode) {
		phy_set_mode(ci->phy, PHY_MODE_USB_DEVICE);
		if (ci->platdata->flags & CI_HDRC_DISABLE_DEVICE_STREAMING)
			hw_write(ci, OP_USBMODE, USBMODE_CI_SDIS,
				 USBMODE_CI_SDIS);
	}
	if (is_host_mode) {
		phy_set_mode(ci->phy, PHY_MODE_USB_HOST);
		if (ci->platdata->flags & CI_HDRC_DISABLE_HOST_STREAMING)
			hw_write(ci, OP_USBMODE, USBMODE_CI_SDIS,
				 USBMODE_CI_SDIS);
	}
	if (ci->platdata->flags & CI_HDRC_FORCE_FULLSPEED) {
		/* PFSC bit lives in DEVLC on LPM cores, PORTSC otherwise */
		if (ci->hw_bank.lpm)
			hw_write(ci, OP_DEVLC, DEVLC_PFSC, DEVLC_PFSC);
		else
			hw_write(ci, OP_PORTSC, PORTSC_PFSC, PORTSC_PFSC);
	}
	if (ci->platdata->flags & CI_HDRC_SET_NON_ZERO_TTHA)
		hw_write(ci, OP_TTCTRL, TTCTRL_TTHA_MASK, TTCTRL_TTHA);
	/* interrupt threshold control: ITC field is bits 23:16 of USBCMD */
	hw_write(ci, OP_USBCMD, 0xff0000, ci->platdata->itc_setting << 16);
	if (ci->platdata->flags & CI_HDRC_OVERRIDE_AHB_BURST)
		hw_write_id_reg(ci, ID_SBUSCFG, AHBBRST_MASK,
			ci->platdata->ahb_burst_config);
	/* override burst size, take effect only when ahb_burst_config is 0 */
	if (!hw_read_id_reg(ci, ID_SBUSCFG, AHBBRST_MASK)) {
		if (ci->platdata->flags & CI_HDRC_OVERRIDE_TX_BURST)
			hw_write(ci, OP_BURSTSIZE, TX_BURST_MASK,
				ci->platdata->tx_burst_size << __ffs(TX_BURST_MASK));
		if (ci->platdata->flags & CI_HDRC_OVERRIDE_RX_BURST)
			hw_write(ci, OP_BURSTSIZE, RX_BURST_MASK,
				ci->platdata->rx_burst_size);
	}
}
/**
 * hw_controller_reset: do controller reset
 * @ci: the controller
 *
 * Sets USBCMD.RST and polls until the hardware clears it, giving the
 * core roughly 10ms before giving up.  Returns 0 on success or
 * -ETIMEDOUT.
 */
static int hw_controller_reset(struct ci_hdrc *ci)
{
	int polls = 0;

	hw_write(ci, OP_USBCMD, USBCMD_RST, USBCMD_RST);
	for (;;) {
		if (!hw_read(ci, OP_USBCMD, USBCMD_RST))
			return 0;
		udelay(10);
		if (polls++ > 1000)
			return -ETIMEDOUT;
	}
}
/**
 * hw_device_reset: resets chip (execute without interruption)
 * @ci: the controller
 *
 * Flushes and stops all endpoints, resets the core, lets the glue
 * layer react to the reset event, then switches the core to device
 * mode with setup lockout disabled.  This function returns an error
 * code.
 */
int hw_device_reset(struct ci_hdrc *ci)
{
	int ret;
	/* should flush & stop before reset */
	hw_write(ci, OP_ENDPTFLUSH, ~0, ~0);
	hw_write(ci, OP_USBCMD, USBCMD_RS, 0);
	ret = hw_controller_reset(ci);
	if (ret) {
		dev_err(ci->dev, "error resetting controller, ret=%d\n", ret);
		return ret;
	}
	/* glue layers may need to redo their setup after a core reset */
	if (ci->platdata->notify_event) {
		ret = ci->platdata->notify_event(ci,
			CI_HDRC_CONTROLLER_RESET_EVENT);
		if (ret)
			return ret;
	}
	/* USBMODE should be configured step by step */
	hw_write(ci, OP_USBMODE, USBMODE_CM, USBMODE_CM_IDLE);
	hw_write(ci, OP_USBMODE, USBMODE_CM, USBMODE_CM_DC);
	/* HW >= 2.3 */
	hw_write(ci, OP_USBMODE, USBMODE_SLOM, USBMODE_SLOM);
	if (hw_read(ci, OP_USBMODE, USBMODE_CM) != USBMODE_CM_DC) {
		dev_err(ci->dev, "cannot enter in %s device mode\n",
			ci_role(ci)->name);
		dev_err(ci->dev, "lpm = %i\n", ci->hw_bank.lpm);
		return -ENODEV;
	}
	ci_platform_configure(ci);
	return 0;
}
/*
 * ci_irq_handler - top-level interrupt dispatch.
 *
 * Handles, in priority order: wakeup from low power (defers the real
 * work to the runtime-resume path), OTG FSM events, ID pin changes,
 * B-session-valid (VBUS) changes, and finally the current role's own
 * interrupt handler.
 */
static irqreturn_t ci_irq_handler(int irq, void *data)
{
	struct ci_hdrc *ci = data;
	irqreturn_t ret = IRQ_NONE;
	u32 otgsc = 0;
	if (ci->in_lpm) {
		/*
		 * Registers cannot be touched while in low power; mask the
		 * IRQ and let pm_runtime_get() resume the controller, which
		 * will re-enable it.
		 */
		disable_irq_nosync(irq);
		ci->wakeup_int = true;
		pm_runtime_get(ci->dev);
		return IRQ_HANDLED;
	}
	if (ci->is_otg) {
		otgsc = hw_read_otgsc(ci, ~0);
		if (ci_otg_is_fsm_mode(ci)) {
			ret = ci_otg_fsm_irq(ci);
			if (ret == IRQ_HANDLED)
				return ret;
		}
	}
	/*
	 * Handle id change interrupt, it indicates device/host function
	 * switch.
	 */
	if (ci->is_otg && (otgsc & OTGSC_IDIE) && (otgsc & OTGSC_IDIS)) {
		ci->id_event = true;
		/* Clear ID change irq status */
		hw_write_otgsc(ci, OTGSC_IDIS, OTGSC_IDIS);
		ci_otg_queue_work(ci);
		return IRQ_HANDLED;
	}
	/*
	 * Handle vbus change interrupt, it indicates device connection
	 * and disconnection events.
	 */
	if (ci->is_otg && (otgsc & OTGSC_BSVIE) && (otgsc & OTGSC_BSVIS)) {
		ci->b_sess_valid_event = true;
		/* Clear BSV irq */
		hw_write_otgsc(ci, OTGSC_BSVIS, OTGSC_BSVIS);
		ci_otg_queue_work(ci);
		return IRQ_HANDLED;
	}
	/* Handle device/host interrupt */
	if (ci->role != CI_ROLE_END)
		ret = ci_role(ci)->irq(ci);
	return ret;
}
/*
 * ci_irq - invoke the IRQ handler synchronously from process context
 * (extcon notifier / role switch), with local interrupts disabled so it
 * runs under the same conditions as a real hardware interrupt.
 */
static void ci_irq(struct ci_hdrc *ci)
{
	unsigned long flags;
	local_irq_save(flags);
	ci_irq_handler(ci->irq, ci);
	local_irq_restore(flags);
}
/*
 * ci_cable_notifier - extcon callback for the simulated ID/VBUS cables.
 * Records the new cable state and kicks the IRQ path so the role
 * machinery reacts to it.
 */
static int ci_cable_notifier(struct notifier_block *nb, unsigned long event,
			     void *ptr)
{
	struct ci_hdrc_cable *cable =
		container_of(nb, struct ci_hdrc_cable, nb);

	cable->connected = event;
	cable->changed = true;

	ci_irq(cable->ci);
	return NOTIFY_DONE;
}
static enum usb_role ci_usb_role_switch_get(struct usb_role_switch *sw)
{
struct ci_hdrc *ci = usb_role_switch_get_drvdata(sw);
enum usb_role role;
unsigned long flags;
spin_lock_irqsave(&ci->lock, flags);
role = ci_role_to_usb_role(ci);
spin_unlock_irqrestore(&ci->lock, flags);
return role;
}
/*
 * ci_usb_role_switch_set - apply a role requested via the role switch.
 *
 * Maps the requested role onto the two simulated extcon cables
 * (HOST: ID connected; DEVICE: VBUS connected; NONE: neither) and runs
 * the IRQ path so the role machinery processes the change.
 */
static int ci_usb_role_switch_set(struct usb_role_switch *sw,
				  enum usb_role role)
{
	struct ci_hdrc *ci = usb_role_switch_get_drvdata(sw);
	struct ci_hdrc_cable *id = &ci->platdata->id_extcon;
	struct ci_hdrc_cable *vbus = &ci->platdata->vbus_extcon;

	id->changed = true;
	id->connected = (role == USB_ROLE_HOST);
	vbus->changed = true;
	vbus->connected = (role == USB_ROLE_DEVICE);

	ci_irq(ci);
	return 0;
}
/*
 * ci_get_role - pick the initial role at probe time.
 *
 * Dual-role OTG controllers read the ID pin (and enable its interrupt);
 * dual-role non-OTG controllers default to gadget; single-role
 * controllers get whichever role they support.
 */
static enum ci_role ci_get_role(struct ci_hdrc *ci)
{
	enum ci_role role;
	if (ci->roles[CI_ROLE_HOST] && ci->roles[CI_ROLE_GADGET]) {
		if (ci->is_otg) {
			role = ci_otg_role(ci);
			hw_write_otgsc(ci, OTGSC_IDIE, OTGSC_IDIE);
		} else {
			/*
			 * If the controller is not OTG capable, but support
			 * role switch, the default role is gadget, and the
			 * user can switch it through debugfs.
			 */
			role = CI_ROLE_GADGET;
		}
	} else {
		role = ci->roles[CI_ROLE_HOST] ? CI_ROLE_HOST
					: CI_ROLE_GADGET;
	}
	return role;
}
/* Role-switch descriptor; .fwnode is filled in by ci_get_platdata(). */
static struct usb_role_switch_desc ci_role_switch = {
	.set = ci_usb_role_switch_set,
	.get = ci_usb_role_switch_get,
	.allow_userspace_control = true,
};
/*
 * ci_get_platdata - populate platform data from device properties.
 * @dev: the glue-layer device whose of_node carries the USB properties
 * @platdata: platform data to fill in
 *
 * Reads PHY mode, dr_mode, the VBUS regulator, OTG capabilities, ITC
 * and burst tuning, extcon cables, role-switch and pinctrl states.
 * Values the glue layer already set are left alone.  Returns 0 or a
 * negative errno (including -EPROBE_DEFER for a not-yet-ready VBUS
 * regulator).
 */
static int ci_get_platdata(struct device *dev,
			   struct ci_hdrc_platform_data *platdata)
{
	struct extcon_dev *ext_vbus, *ext_id;
	struct ci_hdrc_cable *cable;
	int ret;
	/* glue-layer settings win; DT only fills what is still unset */
	if (!platdata->phy_mode)
		platdata->phy_mode = of_usb_get_phy_mode(dev->of_node);
	if (!platdata->dr_mode)
		platdata->dr_mode = usb_get_dr_mode(dev);
	if (platdata->dr_mode == USB_DR_MODE_UNKNOWN)
		platdata->dr_mode = USB_DR_MODE_OTG;
	if (platdata->dr_mode != USB_DR_MODE_PERIPHERAL) {
		/* Get the vbus regulator */
		platdata->reg_vbus = devm_regulator_get_optional(dev, "vbus");
		if (PTR_ERR(platdata->reg_vbus) == -EPROBE_DEFER) {
			return -EPROBE_DEFER;
		} else if (PTR_ERR(platdata->reg_vbus) == -ENODEV) {
			/* no vbus regulator is needed */
			platdata->reg_vbus = NULL;
		} else if (IS_ERR(platdata->reg_vbus)) {
			dev_err(dev, "Getting regulator error: %ld\n",
				PTR_ERR(platdata->reg_vbus));
			return PTR_ERR(platdata->reg_vbus);
		}
		/* Get TPL support */
		if (!platdata->tpl_support)
			platdata->tpl_support =
				of_usb_host_tpl_support(dev->of_node);
	}
	if (platdata->dr_mode == USB_DR_MODE_OTG) {
		/* We can support HNP and SRP of OTG 2.0 */
		platdata->ci_otg_caps.otg_rev = 0x0200;
		platdata->ci_otg_caps.hnp_support = true;
		platdata->ci_otg_caps.srp_support = true;
		/* Update otg capabilities by DT properties */
		ret = of_usb_update_otg_caps(dev->of_node,
					&platdata->ci_otg_caps);
		if (ret)
			return ret;
	}
	if (usb_get_maximum_speed(dev) == USB_SPEED_FULL)
		platdata->flags |= CI_HDRC_FORCE_FULLSPEED;
	of_property_read_u32(dev->of_node, "phy-clkgate-delay-us",
				     &platdata->phy_clkgate_delay_us);
	/* interrupt threshold: default 1 micro-frame, DT may override */
	platdata->itc_setting = 1;
	of_property_read_u32(dev->of_node, "itc-setting",
			&platdata->itc_setting);
	/* -EINVAL just means "property absent"; other errors are fatal */
	ret = of_property_read_u32(dev->of_node, "ahb-burst-config",
				&platdata->ahb_burst_config);
	if (!ret) {
		platdata->flags |= CI_HDRC_OVERRIDE_AHB_BURST;
	} else if (ret != -EINVAL) {
		dev_err(dev, "failed to get ahb-burst-config\n");
		return ret;
	}
	ret = of_property_read_u32(dev->of_node, "tx-burst-size-dword",
				&platdata->tx_burst_size);
	if (!ret) {
		platdata->flags |= CI_HDRC_OVERRIDE_TX_BURST;
	} else if (ret != -EINVAL) {
		dev_err(dev, "failed to get tx-burst-size-dword\n");
		return ret;
	}
	ret = of_property_read_u32(dev->of_node, "rx-burst-size-dword",
				&platdata->rx_burst_size);
	if (!ret) {
		platdata->flags |= CI_HDRC_OVERRIDE_RX_BURST;
	} else if (ret != -EINVAL) {
		dev_err(dev, "failed to get rx-burst-size-dword\n");
		return ret;
	}
	if (of_property_read_bool(dev->of_node, "non-zero-ttctrl-ttha"))
		platdata->flags |= CI_HDRC_SET_NON_ZERO_TTHA;
	ext_id = ERR_PTR(-ENODEV);
	ext_vbus = ERR_PTR(-ENODEV);
	if (of_property_read_bool(dev->of_node, "extcon")) {
		/* Each one of them is not mandatory */
		ext_vbus = extcon_get_edev_by_phandle(dev, 0);
		if (IS_ERR(ext_vbus) && PTR_ERR(ext_vbus) != -ENODEV)
			return PTR_ERR(ext_vbus);
		ext_id = extcon_get_edev_by_phandle(dev, 1);
		if (IS_ERR(ext_id) && PTR_ERR(ext_id) != -ENODEV)
			return PTR_ERR(ext_id);
	}
	/* seed the VBUS cable from the current extcon state */
	cable = &platdata->vbus_extcon;
	cable->nb.notifier_call = ci_cable_notifier;
	cable->edev = ext_vbus;
	if (!IS_ERR(ext_vbus)) {
		ret = extcon_get_state(cable->edev, EXTCON_USB);
		if (ret)
			cable->connected = true;
		else
			cable->connected = false;
	}
	/* likewise for the ID cable (host-role detection) */
	cable = &platdata->id_extcon;
	cable->nb.notifier_call = ci_cable_notifier;
	cable->edev = ext_id;
	if (!IS_ERR(ext_id)) {
		ret = extcon_get_state(cable->edev, EXTCON_USB_HOST);
		if (ret)
			cable->connected = true;
		else
			cable->connected = false;
	}
	if (device_property_read_bool(dev, "usb-role-switch"))
		ci_role_switch.fwnode = dev->fwnode;
	/* optional pinctrl states for default/host/device role switching */
	platdata->pctl = devm_pinctrl_get(dev);
	if (!IS_ERR(platdata->pctl)) {
		struct pinctrl_state *p;
		p = pinctrl_lookup_state(platdata->pctl, "default");
		if (!IS_ERR(p))
			platdata->pins_default = p;
		p = pinctrl_lookup_state(platdata->pctl, "host");
		if (!IS_ERR(p))
			platdata->pins_host = p;
		p = pinctrl_lookup_state(platdata->pctl, "device");
		if (!IS_ERR(p))
			platdata->pins_device = p;
	}
	if (!platdata->enter_lpm)
		platdata->enter_lpm = ci_hdrc_enter_lpm_common;
	return 0;
}
/*
 * ci_extcon_register - hook the ID and VBUS extcon notifiers up.
 * Either cable may be absent; only present ones get a notifier.
 * Returns 0 or the first registration error.
 */
static int ci_extcon_register(struct ci_hdrc *ci)
{
	struct ci_hdrc_cable *id = &ci->platdata->id_extcon;
	struct ci_hdrc_cable *vbus = &ci->platdata->vbus_extcon;
	int ret;

	id->ci = ci;
	vbus->ci = ci;

	if (!IS_ERR_OR_NULL(id->edev)) {
		ret = devm_extcon_register_notifier(ci->dev, id->edev,
						    EXTCON_USB_HOST, &id->nb);
		if (ret < 0) {
			dev_err(ci->dev, "register ID failed\n");
			return ret;
		}
	}

	if (!IS_ERR_OR_NULL(vbus->edev)) {
		ret = devm_extcon_register_notifier(ci->dev, vbus->edev,
						    EXTCON_USB, &vbus->nb);
		if (ret < 0) {
			dev_err(ci->dev, "register VBUS failed\n");
			return ret;
		}
	}

	return 0;
}
/* IDs for the "ci_hdrc.N" core devices created by the glue layers */
static DEFINE_IDA(ci_ida);
/*
 * ci_hdrc_add_device - create the "ci_hdrc" core platform device.
 * @dev: the glue-layer parent device
 * @res: resources (registers, irq) to hand to the core device
 * @nres: number of resources
 * @platdata: platform data, completed here via ci_get_platdata()
 *
 * Returns the new platform device or an ERR_PTR; on any failure the
 * allocated ID and device are released.
 */
struct platform_device *ci_hdrc_add_device(struct device *dev,
			struct resource *res, int nres,
			struct ci_hdrc_platform_data *platdata)
{
	struct platform_device *pdev;
	int id, ret;
	ret = ci_get_platdata(dev, platdata);
	if (ret)
		return ERR_PTR(ret);
	id = ida_simple_get(&ci_ida, 0, 0, GFP_KERNEL);
	if (id < 0)
		return ERR_PTR(id);
	pdev = platform_device_alloc("ci_hdrc", id);
	if (!pdev) {
		ret = -ENOMEM;
		goto put_id;
	}
	pdev->dev.parent = dev;
	/* core device shares the glue layer's DT node */
	device_set_of_node_from_dev(&pdev->dev, dev);
	ret = platform_device_add_resources(pdev, res, nres);
	if (ret)
		goto err;
	ret = platform_device_add_data(pdev, platdata, sizeof(*platdata));
	if (ret)
		goto err;
	ret = platform_device_add(pdev);
	if (ret)
		goto err;
	return pdev;
err:
	platform_device_put(pdev);
put_id:
	ida_simple_remove(&ci_ida, id);
	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(ci_hdrc_add_device);
/* Tear down a core device made by ci_hdrc_add_device() and free its ID. */
void ci_hdrc_remove_device(struct platform_device *pdev)
{
	/* save the id before unregister frees the device */
	int id = pdev->id;
	platform_device_unregister(pdev);
	ida_simple_remove(&ci_ida, id);
}
EXPORT_SYMBOL_GPL(ci_hdrc_remove_device);
/**
* ci_hdrc_query_available_role: get runtime available operation mode
*
* The glue layer can get current operation mode (host/peripheral/otg)
* This function should be called after ci core device has created.
*
* @pdev: the platform device of ci core.
*
* Return runtime usb_dr_mode.
*/
enum usb_dr_mode ci_hdrc_query_available_role(struct platform_device *pdev)
{
struct ci_hdrc *ci = platform_get_drvdata(pdev);
if (!ci)
return USB_DR_MODE_UNKNOWN;
if (ci->roles[CI_ROLE_HOST] && ci->roles[CI_ROLE_GADGET])
return USB_DR_MODE_OTG;
else if (ci->roles[CI_ROLE_HOST])
return USB_DR_MODE_HOST;
else if (ci->roles[CI_ROLE_GADGET])
return USB_DR_MODE_PERIPHERAL;
else
return USB_DR_MODE_UNKNOWN;
}
EXPORT_SYMBOL_GPL(ci_hdrc_query_available_role);
/* Tear down gadget, host and (if applicable) OTG role state. */
static inline void ci_role_destroy(struct ci_hdrc *ci)
{
	ci_hdrc_gadget_destroy(ci);
	ci_hdrc_host_destroy(ci);
	/* OTG machinery only exists on OTG-capable dual-role setups */
	if (ci->is_otg && ci->roles[CI_ROLE_GADGET])
		ci_hdrc_otg_destroy(ci);
}
/*
 * ci_get_otg_capable - decide whether this core is treated as OTG.
 *
 * A core is OTG-capable when DCCPARAMS reports both device and host
 * capability, unless the platform explicitly opted out via
 * CI_HDRC_DUAL_ROLE_NOT_OTG.  OTG interrupts are quiesced here.
 */
static void ci_get_otg_capable(struct ci_hdrc *ci)
{
	if (ci->platdata->flags & CI_HDRC_DUAL_ROLE_NOT_OTG)
		ci->is_otg = false;
	else
		ci->is_otg = (hw_read(ci, CAP_DCCPARAMS,
				DCCPARAMS_DC | DCCPARAMS_HC)
					== (DCCPARAMS_DC | DCCPARAMS_HC));
	if (ci->is_otg) {
		dev_dbg(ci->dev, "It is OTG capable controller\n");
		/* Disable and clear all OTG irq */
		hw_write_otgsc(ci, OTGSC_INT_EN_BITS | OTGSC_INT_STATUS_BITS,
							OTGSC_INT_STATUS_BITS);
	}
}
/* sysfs "role" read: print the active role's name, or nothing. */
static ssize_t role_show(struct device *dev, struct device_attribute *attr,
			  char *buf)
{
	struct ci_hdrc *ci = dev_get_drvdata(dev);

	if (ci->role == CI_ROLE_END)
		return 0;

	return sprintf(buf, "%s\n", ci_role(ci)->name);
}
/*
 * role_store - sysfs "role" write: manually switch host/gadget role.
 *
 * Only valid on dual-role configurations.  The switch runs with the
 * device resumed, the IRQ masked and ci->mutex held so it cannot race
 * the interrupt-driven role machinery.
 */
static ssize_t role_store(struct device *dev,
			  struct device_attribute *attr, const char *buf, size_t n)
{
	struct ci_hdrc *ci = dev_get_drvdata(dev);
	enum ci_role role;
	int ret;
	if (!(ci->roles[CI_ROLE_HOST] && ci->roles[CI_ROLE_GADGET])) {
		dev_warn(dev, "Current configuration is not dual-role, quit\n");
		return -EPERM;
	}
	/* match the written string against the known role names */
	for (role = CI_ROLE_HOST; role < CI_ROLE_END; role++)
		if (!strncmp(buf, ci->roles[role]->name,
			     strlen(ci->roles[role]->name)))
			break;
	if (role == CI_ROLE_END)
		return -EINVAL;
	mutex_lock(&ci->mutex);
	/* already in the requested role: nothing to do */
	if (role == ci->role) {
		mutex_unlock(&ci->mutex);
		return n;
	}
	pm_runtime_get_sync(dev);
	disable_irq(ci->irq);
	ci_role_stop(ci);
	ret = ci_role_start(ci, role);
	/* a fresh gadget role must observe the current VBUS state */
	if (!ret && ci->role == CI_ROLE_GADGET)
		ci_handle_vbus_change(ci);
	enable_irq(ci->irq);
	pm_runtime_put_sync(dev);
	mutex_unlock(&ci->mutex);
	return (ret == 0) ? n : ret;
}
static DEVICE_ATTR_RW(role);
/* sysfs attributes exposed by the core device (currently just "role") */
static struct attribute *ci_attrs[] = {
	&dev_attr_role.attr,
	NULL,
};
ATTRIBUTE_GROUPS(ci);
/*
 * ci_hdrc_probe() - bind the core ChipIdea HDRC driver
 *
 * Maps the register bank, locates a PHY (platform-provided first, then a
 * generic PHY, then a legacy DT USB PHY, then any registered USB PHY as
 * last resort), initializes the role(s) selected by dr_mode, starts the
 * initial role, and only then requests the interrupt. The error labels
 * at the bottom unwind these steps in strict reverse order.
 */
static int ci_hdrc_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct ci_hdrc *ci;
	struct resource *res;
	void __iomem *base;
	int ret;
	enum usb_dr_mode dr_mode;

	if (!dev_get_platdata(dev)) {
		dev_err(dev, "platform data missing\n");
		return -ENODEV;
	}

	base = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
	if (IS_ERR(base))
		return PTR_ERR(base);

	ci = devm_kzalloc(dev, sizeof(*ci), GFP_KERNEL);
	if (!ci)
		return -ENOMEM;

	spin_lock_init(&ci->lock);
	mutex_init(&ci->mutex);
	ci->dev = dev;
	ci->platdata = dev_get_platdata(dev);
	/* cache the platform quirk flags as booleans */
	ci->imx28_write_fix = !!(ci->platdata->flags &
		CI_HDRC_IMX28_WRITE_FIX);
	ci->supports_runtime_pm = !!(ci->platdata->flags &
		CI_HDRC_SUPPORTS_RUNTIME_PM);
	ci->has_portsc_pec_bug = !!(ci->platdata->flags &
		CI_HDRC_HAS_PORTSC_PEC_MISSED);
	platform_set_drvdata(pdev, ci);

	ret = hw_device_init(ci, base);
	if (ret < 0) {
		dev_err(dev, "can't initialize hardware\n");
		return -ENODEV;
	}

	ret = ci_ulpi_init(ci);
	if (ret)
		return ret;

	if (ci->platdata->phy) {
		ci->phy = ci->platdata->phy;
	} else if (ci->platdata->usb_phy) {
		ci->usb_phy = ci->platdata->usb_phy;
	} else {
		/* Look for a generic PHY first */
		ci->phy = devm_phy_get(dev->parent, "usb-phy");

		if (PTR_ERR(ci->phy) == -EPROBE_DEFER) {
			ret = -EPROBE_DEFER;
			goto ulpi_exit;
		} else if (IS_ERR(ci->phy)) {
			ci->phy = NULL;
		}

		/* Look for a legacy USB PHY from device-tree next */
		if (!ci->phy) {
			ci->usb_phy = devm_usb_get_phy_by_phandle(dev->parent,
								  "phys", 0);

			if (PTR_ERR(ci->usb_phy) == -EPROBE_DEFER) {
				ret = -EPROBE_DEFER;
				goto ulpi_exit;
			} else if (IS_ERR(ci->usb_phy)) {
				ci->usb_phy = NULL;
			}
		}

		/* Look for any registered legacy USB PHY as last resort */
		if (!ci->phy && !ci->usb_phy) {
			ci->usb_phy = devm_usb_get_phy(dev->parent,
						       USB_PHY_TYPE_USB2);

			if (PTR_ERR(ci->usb_phy) == -EPROBE_DEFER) {
				ret = -EPROBE_DEFER;
				goto ulpi_exit;
			} else if (IS_ERR(ci->usb_phy)) {
				ci->usb_phy = NULL;
			}
		}

		/* No USB PHY was found in the end */
		if (!ci->phy && !ci->usb_phy) {
			ret = -ENXIO;
			goto ulpi_exit;
		}
	}

	ret = ci_usb_phy_init(ci);
	if (ret) {
		dev_err(dev, "unable to init phy: %d\n", ret);
		goto ulpi_exit;
	}

	ci->hw_bank.phys = res->start;

	ci->irq = platform_get_irq(pdev, 0);
	if (ci->irq < 0) {
		ret = ci->irq;
		goto deinit_phy;
	}

	ci_get_otg_capable(ci);

	dr_mode = ci->platdata->dr_mode;
	/* initialize role(s) before the interrupt is requested */
	if (dr_mode == USB_DR_MODE_OTG || dr_mode == USB_DR_MODE_HOST) {
		ret = ci_hdrc_host_init(ci);
		if (ret) {
			/* -ENXIO just means "no host here", not a failure */
			if (ret == -ENXIO)
				dev_info(dev, "doesn't support host\n");
			else
				goto deinit_phy;
		}
	}

	if (dr_mode == USB_DR_MODE_OTG || dr_mode == USB_DR_MODE_PERIPHERAL) {
		ret = ci_hdrc_gadget_init(ci);
		if (ret) {
			if (ret == -ENXIO)
				dev_info(dev, "doesn't support gadget\n");
			else
				goto deinit_host;
		}
	}

	if (!ci->roles[CI_ROLE_HOST] && !ci->roles[CI_ROLE_GADGET]) {
		dev_err(dev, "no supported roles\n");
		ret = -ENODEV;
		goto deinit_gadget;
	}

	if (ci->is_otg && ci->roles[CI_ROLE_GADGET]) {
		ret = ci_hdrc_otg_init(ci);
		if (ret) {
			dev_err(dev, "init otg fails, ret = %d\n", ret);
			goto deinit_gadget;
		}
	}

	if (ci_role_switch.fwnode) {
		ci_role_switch.driver_data = ci;
		ci->role_switch = usb_role_switch_register(dev,
					&ci_role_switch);
		if (IS_ERR(ci->role_switch)) {
			ret = PTR_ERR(ci->role_switch);
			goto deinit_otg;
		}
	}

	ci->role = ci_get_role(ci);
	if (!ci_otg_is_fsm_mode(ci)) {
		/* only update vbus status for peripheral */
		if (ci->role == CI_ROLE_GADGET) {
			/* Pull down DP for possible charger detection */
			hw_write(ci, OP_USBCMD, USBCMD_RS, 0);
			ci_handle_vbus_change(ci);
		}

		ret = ci_role_start(ci, ci->role);
		if (ret) {
			dev_err(dev, "can't start %s role\n",
					ci_role(ci)->name);
			goto stop;
		}
	}

	ret = devm_request_irq(dev, ci->irq, ci_irq_handler, IRQF_SHARED,
			ci->platdata->name, ci);
	if (ret)
		goto stop;

	ret = ci_extcon_register(ci);
	if (ret)
		goto stop;

	if (ci->supports_runtime_pm) {
		pm_runtime_set_active(&pdev->dev);
		pm_runtime_enable(&pdev->dev);
		pm_runtime_set_autosuspend_delay(&pdev->dev, 2000);
		pm_runtime_mark_last_busy(ci->dev);
		pm_runtime_use_autosuspend(&pdev->dev);
	}

	if (ci_otg_is_fsm_mode(ci))
		ci_hdrc_otg_fsm_start(ci);

	device_set_wakeup_capable(&pdev->dev, true);
	dbg_create_files(ci);

	return 0;

	/* error unwinding, strictly in reverse order of the steps above */
stop:
	if (ci->role_switch)
		usb_role_switch_unregister(ci->role_switch);
deinit_otg:
	if (ci->is_otg && ci->roles[CI_ROLE_GADGET])
		ci_hdrc_otg_destroy(ci);
deinit_gadget:
	ci_hdrc_gadget_destroy(ci);
deinit_host:
	ci_hdrc_host_destroy(ci);
deinit_phy:
	ci_usb_phy_exit(ci);
ulpi_exit:
	ci_ulpi_exit(ci);

	return ret;
}
/*
 * ci_hdrc_remove() - unbind the driver, undoing ci_hdrc_probe()
 *
 * Runtime PM is disabled with a reference held so the device stays
 * powered for register access during teardown; the controller is put
 * into low power mode before the PHY and ULPI are shut down.
 */
static void ci_hdrc_remove(struct platform_device *pdev)
{
	struct ci_hdrc *ci = platform_get_drvdata(pdev);

	if (ci->role_switch)
		usb_role_switch_unregister(ci->role_switch);

	if (ci->supports_runtime_pm) {
		pm_runtime_get_sync(&pdev->dev);
		pm_runtime_disable(&pdev->dev);
		pm_runtime_put_noidle(&pdev->dev);
	}

	dbg_remove_files(ci);
	ci_role_destroy(ci);
	ci_hdrc_enter_lpm(ci, true);
	ci_usb_phy_exit(ci);
	ci_ulpi_exit(ci);
}
#ifdef CONFIG_PM
/* Prepare wakeup by SRP before suspend */
static void ci_otg_fsm_suspend_for_srp(struct ci_hdrc *ci)
{
	/* only relevant for an A-device (ID pin low) idling in A_IDLE */
	if ((ci->fsm.otg->state == OTG_STATE_A_IDLE) &&
				!hw_read_otgsc(ci, OTGSC_ID)) {
		/* keep port power on and arm wake-on-connect in PORTSC */
		hw_write(ci, OP_PORTSC, PORTSC_W1C_BITS | PORTSC_PP,
							PORTSC_PP);
		hw_write(ci, OP_PORTSC, PORTSC_W1C_BITS | PORTSC_WKCN,
							PORTSC_WKCN);
	}
}
/* Handle SRP when wakeup by data pulse */
static void ci_otg_fsm_wakeup_by_srp(struct ci_hdrc *ci)
{
	if ((ci->fsm.otg->state == OTG_STATE_A_IDLE) &&
		(ci->fsm.a_bus_drop == 1) && (ci->fsm.a_bus_req == 0)) {
		if (!hw_read_otgsc(ci, OTGSC_ID)) {
			/* still an A-device: record SRP and re-drive the bus */
			ci->fsm.a_srp_det = 1;
			ci->fsm.a_bus_drop = 0;
		} else {
			/* ID flipped while suspended: we are a B-device now */
			ci->fsm.id = 1;
		}
		/* let the OTG FSM worker act on the updated inputs */
		ci_otg_queue_work(ci);
	}
}
/*
 * ci_controller_suspend() - put the controller into low power mode
 *
 * The IRQ is masked for the whole transition so a wakeup interrupt
 * cannot be serviced half-way through. An optional platform-specified
 * delay is honoured before the PHY clock is gated.
 */
static void ci_controller_suspend(struct ci_hdrc *ci)
{
	disable_irq(ci->irq);
	ci_hdrc_enter_lpm(ci, true);
	if (ci->platdata->phy_clkgate_delay_us)
		usleep_range(ci->platdata->phy_clkgate_delay_us,
			     ci->platdata->phy_clkgate_delay_us + 50);
	usb_phy_set_suspend(ci->usb_phy, 1);
	ci->in_lpm = true;
	enable_irq(ci->irq);
}
/*
 * Handle the wakeup interrupt triggered by extcon connector
 * We need to call ci_irq again for extcon since the first
 * interrupt (wakeup int) only let the controller be out of
 * low power mode, but not handle any interrupts.
 */
static void ci_extcon_wakeup_int(struct ci_hdrc *ci)
{
	struct ci_hdrc_cable *cable_id, *cable_vbus;
	u32 otgsc = hw_read_otgsc(ci, ~0);

	cable_id = &ci->platdata->id_extcon;
	cable_vbus = &ci->platdata->vbus_extcon;

	/* re-run the handler if an enabled ID-change interrupt is pending */
	if ((!IS_ERR(cable_id->edev) || ci->role_switch)
		&& ci->is_otg &&
		(otgsc & OTGSC_IDIE) && (otgsc & OTGSC_IDIS))
		ci_irq(ci);

	/* likewise for a pending B-session-valid change */
	if ((!IS_ERR(cable_vbus->edev) || ci->role_switch)
		&& ci->is_otg &&
		(otgsc & OTGSC_BSVIE) && (otgsc & OTGSC_BSVIS))
		ci_irq(ci);
}
/*
 * ci_controller_resume() - bring the controller out of low power mode
 *
 * Reverses ci_controller_suspend(): leave LPM, resume ULPI, then wake
 * the PHY. If the resume was caused by a wakeup interrupt, the runtime-PM
 * reference taken at wakeup time is dropped, the IRQ (masked during
 * suspend) is re-enabled, and any pending SRP/extcon events are handled.
 */
static int ci_controller_resume(struct device *dev)
{
	struct ci_hdrc *ci = dev_get_drvdata(dev);
	int ret;

	dev_dbg(dev, "at %s\n", __func__);

	/* resuming while not in LPM indicates unbalanced PM calls */
	if (!ci->in_lpm) {
		WARN_ON(1);
		return 0;
	}

	ci_hdrc_enter_lpm(ci, false);

	ret = ci_ulpi_resume(ci);
	if (ret)
		return ret;

	if (ci->usb_phy) {
		usb_phy_set_suspend(ci->usb_phy, 0);
		usb_phy_set_wakeup(ci->usb_phy, false);
		hw_wait_phy_stable();
	}

	ci->in_lpm = false;
	if (ci->wakeup_int) {
		ci->wakeup_int = false;
		pm_runtime_mark_last_busy(ci->dev);
		pm_runtime_put_autosuspend(ci->dev);
		enable_irq(ci->irq);
		if (ci_otg_is_fsm_mode(ci))
			ci_otg_fsm_wakeup_by_srp(ci);
		ci_extcon_wakeup_int(ci);
	}

	return 0;
}
#ifdef CONFIG_PM_SLEEP
/*
 * ci_suspend() - system sleep suspend callback
 *
 * Flushes pending OTG work, forces the controller runtime-active, runs
 * the role's own suspend hook, arms wakeup sources if permitted, and
 * finally enters low power mode.
 */
static int ci_suspend(struct device *dev)
{
	struct ci_hdrc *ci = dev_get_drvdata(dev);

	if (ci->wq)
		flush_workqueue(ci->wq);
	/*
	 * The controller must be runtime-active across system suspend.
	 * Otherwise, if another driver's suspend fails, the PM core could
	 * resume this device while its parent is still runtime-suspended:
	 * our runtime suspend would have completed even though the
	 * parent's suspend had not yet started.
	 */
	if (ci->in_lpm)
		pm_runtime_resume(dev);

	/* still in LPM here means the forced resume above failed */
	if (ci->in_lpm) {
		WARN_ON(1);
		return 0;
	}

	/* Extra routine per role before system suspend */
	if (ci->role != CI_ROLE_END && ci_role(ci)->suspend)
		ci_role(ci)->suspend(ci);

	if (device_may_wakeup(dev)) {
		if (ci_otg_is_fsm_mode(ci))
			ci_otg_fsm_suspend_for_srp(ci);

		usb_phy_set_wakeup(ci->usb_phy, true);
		enable_irq_wake(ci->irq);
	}

	ci_controller_suspend(ci);

	return 0;
}
/*
 * ci_handle_power_lost() - re-sync role state after power was lost in sleep
 *
 * With the hardware state wiped, the role derived from the current ID pin
 * may no longer match the software state; switch roles if needed, or
 * re-report vbus to the gadget when still acting as a peripheral.
 */
static void ci_handle_power_lost(struct ci_hdrc *ci)
{
	enum ci_role role;

	disable_irq_nosync(ci->irq);
	if (!ci_otg_is_fsm_mode(ci)) {
		role = ci_get_role(ci);

		if (ci->role != role) {
			ci_handle_id_switch(ci);
		} else if (role == CI_ROLE_GADGET) {
			/* vbus still present: reconnect the gadget */
			if (ci->is_otg && hw_read_otgsc(ci, OTGSC_BSV))
				usb_gadget_vbus_connect(&ci->gadget);
		}
	}

	enable_irq(ci->irq);
}
/*
 * ci_resume() - system sleep resume callback
 *
 * Detects whether power was lost during sleep, resumes the controller,
 * re-initializes the PHY if needed, runs the role's resume hook, and
 * re-arms runtime PM with the device marked active.
 */
static int ci_resume(struct device *dev)
{
	struct ci_hdrc *ci = dev_get_drvdata(dev);
	bool power_lost;
	int ret;

	/* Since ASYNCLISTADDR (host mode) and ENDPTLISTADDR (device
	 * mode) share the same register address. We can check if
	 * controller resume from power lost based on this address
	 * due to this register will be reset after power lost.
	 */
	power_lost = !hw_read(ci, OP_ENDPTLISTADDR, ~0);

	if (device_may_wakeup(dev))
		disable_irq_wake(ci->irq);

	ret = ci_controller_resume(dev);
	if (ret)
		return ret;

	if (power_lost) {
		/* shutdown and re-init for phy */
		ci_usb_phy_exit(ci);
		ci_usb_phy_init(ci);
	}

	/* Extra routine per role after system resume */
	if (ci->role != CI_ROLE_END && ci_role(ci)->resume)
		ci_role(ci)->resume(ci, power_lost);

	if (power_lost)
		ci_handle_power_lost(ci);

	/* re-sync runtime PM state: hardware is now active again */
	if (ci->supports_runtime_pm) {
		pm_runtime_disable(dev);
		pm_runtime_set_active(dev);
		pm_runtime_enable(dev);
	}

	return ret;
}
#endif /* CONFIG_PM_SLEEP */
/*
 * ci_runtime_suspend() - runtime PM suspend callback
 *
 * Arms SRP detection when in OTG FSM mode, enables PHY wakeup, then
 * enters low power mode. Suspending while already in LPM indicates
 * unbalanced runtime-PM calls and is flagged with a WARN.
 */
static int ci_runtime_suspend(struct device *dev)
{
	struct ci_hdrc *ci = dev_get_drvdata(dev);

	dev_dbg(dev, "at %s\n", __func__);

	if (ci->in_lpm) {
		WARN_ON(1);
		return 0;
	}

	if (ci_otg_is_fsm_mode(ci))
		ci_otg_fsm_suspend_for_srp(ci);

	usb_phy_set_wakeup(ci->usb_phy, true);
	ci_controller_suspend(ci);

	return 0;
}
/* Runtime PM resume: shares the full resume path with system resume */
static int ci_runtime_resume(struct device *dev)
{
	return ci_controller_resume(dev);
}
#endif /* CONFIG_PM */
/* System sleep and runtime PM callbacks (stubbed out without CONFIG_PM) */
static const struct dev_pm_ops ci_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(ci_suspend, ci_resume)
	SET_RUNTIME_PM_OPS(ci_runtime_suspend, ci_runtime_resume, NULL)
};

static struct platform_driver ci_hdrc_driver = {
	.probe = ci_hdrc_probe,
	.remove_new = ci_hdrc_remove,
	.driver = {
		.name = "ci_hdrc",
		.pm = &ci_pm_ops,
		/* exposes the "role" sysfs attribute */
		.dev_groups = ci_groups,
	},
};
/*
 * Explicit module_init (rather than module_platform_driver()) so the
 * host glue can be initialized before the platform driver registers.
 */
static int __init ci_hdrc_platform_register(void)
{
	ci_hdrc_host_driver_init();
	return platform_driver_register(&ci_hdrc_driver);
}
module_init(ci_hdrc_platform_register);

static void __exit ci_hdrc_platform_unregister(void)
{
	platform_driver_unregister(&ci_hdrc_driver);
}
module_exit(ci_hdrc_platform_unregister);

MODULE_ALIAS("platform:ci_hdrc");
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("David Lopo <[email protected]>");
MODULE_DESCRIPTION("ChipIdea HDRC Driver");
| linux-master | drivers/usb/chipidea/core.c |
// SPDX-License-Identifier: GPL-2.0
#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/types.h>
#include <linux/spinlock.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <linux/uaccess.h>
#include <linux/usb/ch9.h>
#include <linux/usb/gadget.h>
#include <linux/usb/phy.h>
#include <linux/usb/otg.h>
#include <linux/usb/otg-fsm.h>
#include <linux/usb/chipidea.h>
#include "ci.h"
#include "udc.h"
#include "bits.h"
#include "otg.h"
/*
 * ci_device_show: prints information about device capabilities and status
 *
 * Gadget fields are always printed; driver fields only once a gadget
 * driver is actually bound.
 */
static int ci_device_show(struct seq_file *s, void *data)
{
	struct ci_hdrc *ci = s->private;
	struct usb_gadget *gadget = &ci->gadget;

	seq_printf(s, "speed = %d\n", gadget->speed);
	seq_printf(s, "max_speed = %d\n", gadget->max_speed);
	seq_printf(s, "is_otg = %d\n", gadget->is_otg);
	seq_printf(s, "is_a_peripheral = %d\n", gadget->is_a_peripheral);
	seq_printf(s, "b_hnp_enable = %d\n", gadget->b_hnp_enable);
	seq_printf(s, "a_hnp_support = %d\n", gadget->a_hnp_support);
	seq_printf(s, "a_alt_hnp_support = %d\n", gadget->a_alt_hnp_support);
	seq_printf(s, "name = %s\n",
		   (gadget->name ? gadget->name : ""));

	/* no gadget driver bound yet: nothing more to report */
	if (!ci->driver)
		return 0;

	seq_printf(s, "gadget function = %s\n",
		   (ci->driver->function ? ci->driver->function : ""));
	seq_printf(s, "gadget max speed = %d\n", ci->driver->max_speed);

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(ci_device);
/*
 * ci_port_test_show: reads port test mode
 *
 * The device is held runtime-active and the controller lock taken
 * around the register read.
 */
static int ci_port_test_show(struct seq_file *s, void *data)
{
	struct ci_hdrc *ci = s->private;
	unsigned long flags;
	unsigned mode;

	pm_runtime_get_sync(ci->dev);
	spin_lock_irqsave(&ci->lock, flags);
	mode = hw_port_test_get(ci);
	spin_unlock_irqrestore(&ci->lock, flags);
	pm_runtime_put_sync(ci->dev);

	seq_printf(s, "mode = %u\n", mode);

	return 0;
}
/*
 * ci_port_test_write: writes port test mode
 *
 * Accepts a decimal mode number (0-255) from userspace and programs it
 * into the port test register under the controller lock.
 */
static ssize_t ci_port_test_write(struct file *file, const char __user *ubuf,
				  size_t count, loff_t *ppos)
{
	struct seq_file *s = file->private_data;
	struct ci_hdrc *ci = s->private;
	unsigned long flags;
	unsigned mode;
	char buf[32];
	int ret;

	/* bound the copy so the buffer always has room for the NUL */
	count = min_t(size_t, sizeof(buf) - 1, count);
	if (copy_from_user(buf, ubuf, count))
		return -EFAULT;

	/* sscanf requires a zero terminated string */
	buf[count] = '\0';

	if (sscanf(buf, "%u", &mode) != 1)
		return -EINVAL;

	if (mode > 255)
		return -EBADRQC;

	pm_runtime_get_sync(ci->dev);
	spin_lock_irqsave(&ci->lock, flags);
	ret = hw_port_test_set(ci, mode);
	spin_unlock_irqrestore(&ci->lock, flags);
	pm_runtime_put_sync(ci->dev);

	return ret ? ret : count;
}
/* seq_file open hook for the read/write "port_test" debugfs entry */
static int ci_port_test_open(struct inode *inode, struct file *file)
{
	return single_open(file, ci_port_test_show, inode->i_private);
}

/* custom fops: "port_test" is the only writable debugfs entry */
static const struct file_operations ci_port_test_fops = {
	.open = ci_port_test_open,
	.write = ci_port_test_write,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};
/*
 * ci_qheads_show: DMA contents of all queue heads
 *
 * Endpoints are stored RX-first: index i is the RX half and
 * i + hw_ep_max/2 the matching TX half of the same endpoint number.
 */
static int ci_qheads_show(struct seq_file *s, void *data)
{
	struct ci_hdrc *ci = s->private;
	unsigned long flags;
	unsigned i, j;

	if (ci->role != CI_ROLE_GADGET) {
		seq_printf(s, "not in gadget mode\n");
		return 0;
	}

	spin_lock_irqsave(&ci->lock, flags);
	for (i = 0; i < ci->hw_ep_max/2; i++) {
		struct ci_hw_ep *hweprx = &ci->ci_hw_ep[i];
		struct ci_hw_ep *hweptx =
			&ci->ci_hw_ep[i + ci->hw_ep_max/2];
		seq_printf(s, "EP=%02i: RX=%08X TX=%08X\n",
			   i, (u32)hweprx->qh.dma, (u32)hweptx->qh.dma);
		/* dump the queue head word by word */
		for (j = 0; j < (sizeof(struct ci_hw_qh)/sizeof(u32)); j++)
			seq_printf(s, " %04X: %08X %08X\n", j,
				   *((u32 *)hweprx->qh.ptr + j),
				   *((u32 *)hweptx->qh.ptr + j));
	}
	spin_unlock_irqrestore(&ci->lock, flags);

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(ci_qheads);
/*
 * ci_requests_show: DMA contents of all requests currently queued (all endpts)
 */
static int ci_requests_show(struct seq_file *s, void *data)
{
	struct ci_hdrc *ci = s->private;
	unsigned long flags;
	struct ci_hw_req *req = NULL;
	struct td_node *node, *tmpnode;
	unsigned i, j, qsize = sizeof(struct ci_hw_td)/sizeof(u32);

	if (ci->role != CI_ROLE_GADGET) {
		seq_printf(s, "not in gadget mode\n");
		return 0;
	}

	spin_lock_irqsave(&ci->lock, flags);
	for (i = 0; i < ci->hw_ep_max; i++)
		list_for_each_entry(req, &ci->ci_hw_ep[i].qh.queue, queue) {
			list_for_each_entry_safe(node, tmpnode, &req->tds, td) {
				/* first half of the array is RX, second TX */
				seq_printf(s, "EP=%02i: TD=%08X %s\n",
					   i % (ci->hw_ep_max / 2),
					   (u32)node->dma,
					   ((i < ci->hw_ep_max/2) ?
					   "RX" : "TX"));

				for (j = 0; j < qsize; j++)
					seq_printf(s, " %04X: %08X\n", j,
						   *((u32 *)node->ptr + j));
			}
		}
	spin_unlock_irqrestore(&ci->lock, flags);

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(ci_requests);
/*
 * ci_otg_show() - dump the OTG finite-state-machine state and inputs
 *
 * Prints nothing unless the controller is operating in OTG FSM mode.
 */
static int ci_otg_show(struct seq_file *s, void *unused)
{
	struct ci_hdrc *ci = s->private;
	struct otg_fsm *fsm;

	if (!ci || !ci_otg_is_fsm_mode(ci))
		return 0;

	fsm = &ci->fsm;

	/* ------ State ----- */
	seq_printf(s, "OTG state: %s\n\n",
		   usb_otg_state_string(ci->otg.state));

	/* ------ State Machine Variables ----- */
	seq_printf(s, "a_bus_drop: %d\n", fsm->a_bus_drop);

	seq_printf(s, "a_bus_req: %d\n", fsm->a_bus_req);

	seq_printf(s, "a_srp_det: %d\n", fsm->a_srp_det);

	seq_printf(s, "a_vbus_vld: %d\n", fsm->a_vbus_vld);

	seq_printf(s, "b_conn: %d\n", fsm->b_conn);

	seq_printf(s, "adp_change: %d\n", fsm->adp_change);

	seq_printf(s, "power_up: %d\n", fsm->power_up);

	seq_printf(s, "a_bus_resume: %d\n", fsm->a_bus_resume);

	seq_printf(s, "a_bus_suspend: %d\n", fsm->a_bus_suspend);

	seq_printf(s, "a_conn: %d\n", fsm->a_conn);

	seq_printf(s, "b_bus_req: %d\n", fsm->b_bus_req);

	seq_printf(s, "b_bus_suspend: %d\n", fsm->b_bus_suspend);

	seq_printf(s, "b_se0_srp: %d\n", fsm->b_se0_srp);

	seq_printf(s, "b_ssend_srp: %d\n", fsm->b_ssend_srp);

	seq_printf(s, "b_sess_vld: %d\n", fsm->b_sess_vld);

	seq_printf(s, "b_srp_done: %d\n", fsm->b_srp_done);

	seq_printf(s, "drv_vbus: %d\n", fsm->drv_vbus);

	seq_printf(s, "loc_conn: %d\n", fsm->loc_conn);

	seq_printf(s, "loc_sof: %d\n", fsm->loc_sof);

	seq_printf(s, "adp_prb: %d\n", fsm->adp_prb);

	seq_printf(s, "id: %d\n", fsm->id);

	seq_printf(s, "protocol: %d\n", fsm->protocol);

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(ci_otg);
/*
 * ci_registers_show() - dump the key operational registers
 *
 * Refuses to run while the controller is in low power mode, since
 * register access would then be unsafe.
 */
static int ci_registers_show(struct seq_file *s, void *unused)
{
	struct ci_hdrc *ci = s->private;
	u32 tmp_reg;

	if (!ci || ci->in_lpm)
		return -EPERM;

	/* ------ Registers ----- */
	tmp_reg = hw_read_intr_enable(ci);
	seq_printf(s, "USBINTR reg: %08x\n", tmp_reg);

	tmp_reg = hw_read_intr_status(ci);
	seq_printf(s, "USBSTS reg: %08x\n", tmp_reg);

	tmp_reg = hw_read(ci, OP_USBMODE, ~0);
	seq_printf(s, "USBMODE reg: %08x\n", tmp_reg);

	tmp_reg = hw_read(ci, OP_USBCMD, ~0);
	seq_printf(s, "USBCMD reg: %08x\n", tmp_reg);

	tmp_reg = hw_read(ci, OP_PORTSC, ~0);
	seq_printf(s, "PORTSC reg: %08x\n", tmp_reg);

	/* OTGSC only exists on OTG-capable controllers */
	if (ci->is_otg) {
		tmp_reg = hw_read_otgsc(ci, ~0);
		seq_printf(s, "OTGSC reg: %08x\n", tmp_reg);
	}

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(ci_registers);
/**
 * dbg_create_files - initializes the debugfs attribute interface
 * @ci: device
 *
 * Creates the controller's debugfs directory and entries. debugfs
 * creation is best-effort, so errors are intentionally ignored and
 * nothing is returned.
 */
void dbg_create_files(struct ci_hdrc *ci)
{
	struct dentry *dir;

	dir = debugfs_create_dir(dev_name(ci->dev), usb_debug_root);

	/* octal permissions preferred over S_IRUGO/S_IWUSR (checkpatch) */
	debugfs_create_file("device", 0444, dir, ci, &ci_device_fops);
	debugfs_create_file("port_test", 0644, dir, ci, &ci_port_test_fops);
	debugfs_create_file("qheads", 0444, dir, ci, &ci_qheads_fops);
	debugfs_create_file("requests", 0444, dir, ci, &ci_requests_fops);

	if (ci_otg_is_fsm_mode(ci))
		debugfs_create_file("otg", 0444, dir, ci, &ci_otg_fops);

	debugfs_create_file("registers", 0444, dir, ci, &ci_registers_fops);
}
/**
 * dbg_remove_files: destroys the attribute interface
 * @ci: device
 *
 * Removes the per-device debugfs directory created by dbg_create_files().
 */
void dbg_remove_files(struct ci_hdrc *ci)
{
	debugfs_lookup_and_remove(dev_name(ci->dev), usb_debug_root);
}
| linux-master | drivers/usb/chipidea/debug.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (c) 2016, NVIDIA Corporation
*/
#include <linux/clk.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/reset.h>
#include <linux/usb.h>
#include <linux/usb/chipidea.h>
#include <linux/usb/hcd.h>
#include <linux/usb/of.h>
#include <linux/usb/phy.h>
#include <soc/tegra/common.h>
#include "../host/ehci.h"
#include "ci.h"
/* Per-device state of the Tegra glue around the ChipIdea core */
struct tegra_usb {
	struct ci_hdrc_platform_data data;	/* handed to ci_hdrc_add_device() */
	struct platform_device *dev;		/* the ci_hdrc child device */
	const struct tegra_usb_soc_info *soc;
	struct usb_phy *phy;
	struct clk *clk;

	bool needs_double_reset;	/* DT "nvidia,needs-double-reset" */
};
/* Per-SoC configuration selected through the OF match data */
struct tegra_usb_soc_info {
	unsigned long flags;		/* CI_HDRC_* platform flags */
	unsigned int txfifothresh;	/* TXFILLTUNING threshold value */
	enum usb_dr_mode dr_mode;
};
/*
 * SoC tables: EHCI (host-only) vs UDC (dr_mode from DT) variants; the
 * TX FIFO threshold differs between Tegra20 (10) and Tegra30+ (16).
 */
static const struct tegra_usb_soc_info tegra20_ehci_soc_info = {
	.flags = CI_HDRC_REQUIRES_ALIGNED_DMA |
		 CI_HDRC_OVERRIDE_PHY_CONTROL |
		 CI_HDRC_SUPPORTS_RUNTIME_PM,
	.dr_mode = USB_DR_MODE_HOST,
	.txfifothresh = 10,
};

static const struct tegra_usb_soc_info tegra30_ehci_soc_info = {
	.flags = CI_HDRC_REQUIRES_ALIGNED_DMA |
		 CI_HDRC_OVERRIDE_PHY_CONTROL |
		 CI_HDRC_SUPPORTS_RUNTIME_PM,
	.dr_mode = USB_DR_MODE_HOST,
	.txfifothresh = 16,
};

static const struct tegra_usb_soc_info tegra20_udc_soc_info = {
	.flags = CI_HDRC_REQUIRES_ALIGNED_DMA |
		 CI_HDRC_OVERRIDE_PHY_CONTROL |
		 CI_HDRC_SUPPORTS_RUNTIME_PM,
	.dr_mode = USB_DR_MODE_UNKNOWN,
	.txfifothresh = 10,
};

static const struct tegra_usb_soc_info tegra30_udc_soc_info = {
	.flags = CI_HDRC_REQUIRES_ALIGNED_DMA |
		 CI_HDRC_OVERRIDE_PHY_CONTROL |
		 CI_HDRC_SUPPORTS_RUNTIME_PM,
	.dr_mode = USB_DR_MODE_UNKNOWN,
	.txfifothresh = 16,
};
/* Tegra114/124 UDC reuse the Tegra30 configuration */
static const struct of_device_id tegra_usb_of_match[] = {
	{
		.compatible = "nvidia,tegra20-ehci",
		.data = &tegra20_ehci_soc_info,
	}, {
		.compatible = "nvidia,tegra30-ehci",
		.data = &tegra30_ehci_soc_info,
	}, {
		.compatible = "nvidia,tegra20-udc",
		.data = &tegra20_udc_soc_info,
	}, {
		.compatible = "nvidia,tegra30-udc",
		.data = &tegra30_udc_soc_info,
	}, {
		.compatible = "nvidia,tegra114-udc",
		.data = &tegra30_udc_soc_info,
	}, {
		.compatible = "nvidia,tegra124-udc",
		.data = &tegra30_udc_soc_info,
	}, {
		/* sentinel */
	}
};
MODULE_DEVICE_TABLE(of, tegra_usb_of_match);
/*
 * tegra_usb_reset_controller() - pulse the shared "usb" reset line
 *
 * The reset control is shared between controllers, so it is deasserted
 * first to initialize the shared state, then asserted and deasserted to
 * perform the actual reset pulse.
 */
static int tegra_usb_reset_controller(struct device *dev)
{
	struct reset_control *rst, *rst_utmi;
	struct device_node *phy_np;
	int err;

	rst = devm_reset_control_get_shared(dev, "usb");
	if (IS_ERR(rst)) {
		dev_err(dev, "can't get ehci reset: %pe\n", rst);
		return PTR_ERR(rst);
	}

	phy_np = of_parse_phandle(dev->of_node, "nvidia,phy", 0);
	if (!phy_np)
		return -ENOENT;

	/*
	 * The 1st USB controller contains some UTMI pad registers that are
	 * global for all the controllers on the chip. Those registers are
	 * also cleared when reset is asserted to the 1st controller.
	 */
	rst_utmi = of_reset_control_get_shared(phy_np, "utmi-pads");
	if (IS_ERR(rst_utmi)) {
		dev_warn(dev, "can't get utmi-pads reset from the PHY\n");
		dev_warn(dev, "continuing, but please update your DT\n");
	} else {
		/*
		 * PHY driver performs UTMI-pads reset in a case of a
		 * non-legacy DT.
		 */
		reset_control_put(rst_utmi);
	}

	of_node_put(phy_np);

	/* reset control is shared, hence initialize it first */
	err = reset_control_deassert(rst);
	if (err)
		return err;

	err = reset_control_assert(rst);
	if (err)
		return err;

	udelay(1);

	err = reset_control_deassert(rst);
	if (err)
		return err;

	return 0;
}
/*
 * tegra_usb_notify_event() - ChipIdea core event hook
 *
 * On controller reset (host mode only) the TDI PHY LPM support is
 * cleared and the SoC-specific TX FIFO threshold programmed into
 * TXFILLTUNING.
 */
static int tegra_usb_notify_event(struct ci_hdrc *ci, unsigned int event)
{
	struct tegra_usb *usb = dev_get_drvdata(ci->dev->parent);
	struct ehci_hcd *ehci;

	switch (event) {
	case CI_HDRC_CONTROLLER_RESET_EVENT:
		if (ci->hcd) {
			ehci = hcd_to_ehci(ci->hcd);
			ehci->has_tdi_phy_lpm = false;
			ehci_writel(ehci, usb->soc->txfifothresh << 16,
				    &ehci->regs->txfill_tuning);
		}
		break;
	}

	return 0;
}
/*
 * tegra_usb_internal_port_reset() - double port-reset workaround
 *
 * Issues up to two PORT_RESET pulses and polls for Port Enable; used
 * for ports flagged with "nvidia,needs-double-reset". Called with
 * ehci->lock held; the lock is dropped (and IRQs restored via @flags)
 * around the sleeping poll loop and re-taken before returning.
 */
static int tegra_usb_internal_port_reset(struct ehci_hcd *ehci,
					 u32 __iomem *portsc_reg,
					 unsigned long *flags)
{
	u32 saved_usbintr, temp;
	unsigned int i, tries;
	int retval = 0;

	saved_usbintr = ehci_readl(ehci, &ehci->regs->intr_enable);
	/* disable USB interrupt */
	ehci_writel(ehci, 0, &ehci->regs->intr_enable);
	spin_unlock_irqrestore(&ehci->lock, *flags);

	/*
	 * Here we have to do Port Reset at most twice for
	 * Port Enable bit to be set.
	 */
	for (i = 0; i < 2; i++) {
		temp = ehci_readl(ehci, portsc_reg);
		temp |= PORT_RESET;
		ehci_writel(ehci, temp, portsc_reg);
		fsleep(10000);
		temp &= ~PORT_RESET;
		ehci_writel(ehci, temp, portsc_reg);
		fsleep(1000);
		tries = 100;
		do {
			fsleep(1000);
			/*
			 * Up to this point, Port Enable bit is
			 * expected to be set after 2 ms waiting.
			 * USB1 usually takes extra 45 ms, for safety,
			 * we take 100 ms as timeout.
			 */
			temp = ehci_readl(ehci, portsc_reg);
		} while (!(temp & PORT_PE) && tries--);
		if (temp & PORT_PE)
			break;
	}
	if (i == 2)
		retval = -ETIMEDOUT;

	/*
	 * Clear Connect Status Change bit if it's set.
	 * We can't clear PORT_PEC. It will also cause PORT_PE to be cleared.
	 */
	if (temp & PORT_CSC)
		ehci_writel(ehci, PORT_CSC, portsc_reg);

	/*
	 * Write to clear any interrupt status bits that might be set
	 * during port reset.
	 */
	temp = ehci_readl(ehci, &ehci->regs->status);
	ehci_writel(ehci, temp, &ehci->regs->status);

	/* restore original interrupt-enable bits */
	spin_lock_irqsave(&ehci->lock, *flags);
	ehci_writel(ehci, saved_usbintr, &ehci->regs->intr_enable);

	return retval;
}
/*
 * tegra_ehci_hub_control() - platform hub_control hook
 *
 * Intercepts USB_PORT_FEAT_RESET for ports needing the double-reset
 * workaround; sets *done when the request was fully handled so the
 * generic EHCI code is skipped. Called with ehci->lock held.
 */
static int tegra_ehci_hub_control(struct ci_hdrc *ci, u16 typeReq, u16 wValue,
				  u16 wIndex, char *buf, u16 wLength,
				  bool *done, unsigned long *flags)
{
	struct tegra_usb *usb = dev_get_drvdata(ci->dev->parent);
	struct ehci_hcd *ehci = hcd_to_ehci(ci->hcd);
	u32 __iomem *status_reg;
	int retval = 0;

	/* hub ports are 1-based; index into the 0-based register array */
	status_reg = &ehci->regs->port_status[(wIndex & 0xff) - 1];

	switch (typeReq) {
	case SetPortFeature:
		if (wValue != USB_PORT_FEAT_RESET || !usb->needs_double_reset)
			break;

		/* for USB1 port we need to issue Port Reset twice internally */
		retval = tegra_usb_internal_port_reset(ehci, status_reg, flags);
		*done = true;
		break;
	}

	return retval;
}
/* Platform enter_lpm hook: delegate LPM entirely to the Tegra PHY */
static void tegra_usb_enter_lpm(struct ci_hdrc *ci, bool enable)
{
	/*
	 * Touching any register which belongs to AHB clock domain will
	 * hang CPU if USB controller is put into low power mode because
	 * AHB USB clock is gated on Tegra in the LPM.
	 *
	 * Tegra PHY has a separate register for checking the clock status
	 * and usb_phy_set_suspend() takes care of gating/ungating the clocks
	 * and restoring the PHY state on Tegra. Hence DEVLC/PORTSC registers
	 * shouldn't be touched directly by the CI driver.
	 */
	usb_phy_set_suspend(ci->usb_phy, enable);
}
/*
 * tegra_usb_probe() - set up the Tegra glue and register the CI core
 *
 * Acquires PHY, clock and OPP table, powers the device via runtime PM,
 * resets the controller, initializes the PHY, and finally creates the
 * ci_hdrc child device with the SoC-specific platform data.
 */
static int tegra_usb_probe(struct platform_device *pdev)
{
	const struct tegra_usb_soc_info *soc;
	struct tegra_usb *usb;
	int err;

	usb = devm_kzalloc(&pdev->dev, sizeof(*usb), GFP_KERNEL);
	if (!usb)
		return -ENOMEM;

	platform_set_drvdata(pdev, usb);

	soc = of_device_get_match_data(&pdev->dev);
	if (!soc) {
		dev_err(&pdev->dev, "failed to match OF data\n");
		return -EINVAL;
	}

	usb->phy = devm_usb_get_phy_by_phandle(&pdev->dev, "nvidia,phy", 0);
	if (IS_ERR(usb->phy))
		return dev_err_probe(&pdev->dev, PTR_ERR(usb->phy),
				     "failed to get PHY\n");

	usb->clk = devm_clk_get(&pdev->dev, NULL);
	if (IS_ERR(usb->clk)) {
		err = PTR_ERR(usb->clk);
		dev_err(&pdev->dev, "failed to get clock: %d\n", err);
		return err;
	}

	err = devm_tegra_core_dev_init_opp_table_common(&pdev->dev);
	if (err)
		return err;

	pm_runtime_enable(&pdev->dev);
	err = pm_runtime_resume_and_get(&pdev->dev);
	if (err)
		return err;

	if (device_property_present(&pdev->dev, "nvidia,needs-double-reset"))
		usb->needs_double_reset = true;

	err = tegra_usb_reset_controller(&pdev->dev);
	if (err) {
		dev_err(&pdev->dev, "failed to reset controller: %d\n", err);
		goto fail_power_off;
	}

	/*
	 * USB controller registers shouldn't be touched before PHY is
	 * initialized, otherwise CPU will hang because clocks are gated.
	 * PHY driver controls gating of internal USB clocks on Tegra.
	 */
	err = usb_phy_init(usb->phy);
	if (err)
		goto fail_power_off;

	/* setup and register ChipIdea HDRC device */
	usb->soc = soc;
	usb->data.name = "tegra-usb";
	usb->data.flags = soc->flags;
	usb->data.usb_phy = usb->phy;
	usb->data.dr_mode = soc->dr_mode;
	usb->data.capoffset = DEF_CAPOFFSET;
	usb->data.enter_lpm = tegra_usb_enter_lpm;
	usb->data.hub_control = tegra_ehci_hub_control;
	usb->data.notify_event = tegra_usb_notify_event;

	/* Tegra PHY driver currently doesn't support LPM for ULPI */
	if (of_usb_get_phy_mode(pdev->dev.of_node) == USBPHY_INTERFACE_MODE_ULPI)
		usb->data.flags &= ~CI_HDRC_SUPPORTS_RUNTIME_PM;

	usb->dev = ci_hdrc_add_device(&pdev->dev, pdev->resource,
				      pdev->num_resources, &usb->data);
	if (IS_ERR(usb->dev)) {
		err = PTR_ERR(usb->dev);
		dev_err(&pdev->dev, "failed to add HDRC device: %d\n", err);
		goto phy_shutdown;
	}

	return 0;

phy_shutdown:
	usb_phy_shutdown(usb->phy);
fail_power_off:
	pm_runtime_put_sync_suspend(&pdev->dev);
	pm_runtime_force_suspend(&pdev->dev);

	return err;
}
/* Undo tegra_usb_probe(): drop the CI child, PHY, and PM references */
static void tegra_usb_remove(struct platform_device *pdev)
{
	struct tegra_usb *usb = platform_get_drvdata(pdev);

	ci_hdrc_remove_device(usb->dev);
	usb_phy_shutdown(usb->phy);

	pm_runtime_put_sync_suspend(&pdev->dev);
	pm_runtime_force_suspend(&pdev->dev);
}
/* Runtime resume: ungate the controller clock */
static int __maybe_unused tegra_usb_runtime_resume(struct device *dev)
{
	struct tegra_usb *usb = dev_get_drvdata(dev);
	int ret = clk_prepare_enable(usb->clk);

	if (ret < 0) {
		dev_err(dev, "failed to enable clock: %d\n", ret);
		return ret;
	}

	return 0;
}
/* Runtime suspend: gate the controller clock */
static int __maybe_unused tegra_usb_runtime_suspend(struct device *dev)
{
	struct tegra_usb *tegra = dev_get_drvdata(dev);

	clk_disable_unprepare(tegra->clk);
	return 0;
}
/* Only runtime PM ops; system sleep is handled through runtime PM */
static const struct dev_pm_ops tegra_usb_pm = {
	SET_RUNTIME_PM_OPS(tegra_usb_runtime_suspend, tegra_usb_runtime_resume,
			   NULL)
};

static struct platform_driver tegra_usb_driver = {
	.driver = {
		.name = "tegra-usb",
		.of_match_table = tegra_usb_of_match,
		.pm = &tegra_usb_pm,
	},
	.probe = tegra_usb_probe,
	.remove_new = tegra_usb_remove,
};
module_platform_driver(tegra_usb_driver);

MODULE_DESCRIPTION("NVIDIA Tegra USB driver");
MODULE_AUTHOR("Thierry Reding <[email protected]>");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/usb/chipidea/ci_hdrc_tegra.c |
// SPDX-License-Identifier: GPL-2.0
/*
* host.c - ChipIdea USB host controller driver
*
* Copyright (c) 2012 Intel Corporation
*
* Author: Alexander Shishkin
*/
#include <linux/kernel.h>
#include <linux/io.h>
#include <linux/usb.h>
#include <linux/usb/hcd.h>
#include <linux/usb/chipidea.h>
#include <linux/regulator/consumer.h>
#include <linux/pinctrl/consumer.h>
#include "../host/ehci.h"
#include "ci.h"
#include "bits.h"
#include "host.h"
/* hc_driver template, filled in via ehci_init_driver() elsewhere */
static struct hc_driver __read_mostly ci_ehci_hc_driver;
/* original EHCI bus_suspend, saved so it can be wrapped */
static int (*orig_bus_suspend)(struct usb_hcd *hcd);

/* Per-HCD private data, stored in ehci_hcd::priv */
struct ehci_ci_priv {
	struct regulator *reg_vbus;	/* NULL when vbus is not driver-managed */
	bool enabled;			/* current regulator state */
};

/* Bookkeeping for DMA bounce buffers (original buffer + allocation) */
struct ci_hdrc_dma_aligned_buffer {
	void *kmalloc_ptr;
	void *old_xfer_buffer;
	u8 data[];
};
/*
 * ehci_ci_portpower() - EHCI port_power override
 * @hcd: the host controller
 * @portnum: port number (unused; only single-port regulators supported)
 * @enable: true to power the port, false to unpower it
 *
 * Toggles the vbus regulator (tracked in priv->enabled to avoid
 * unbalanced regulator calls), optionally drives vbus through the PHY,
 * and applies the HSIC force-high-speed workaround when powering on.
 *
 * Returns 0 on success or the regulator error.
 */
static int ehci_ci_portpower(struct usb_hcd *hcd, int portnum, bool enable)
{
	struct ehci_hcd *ehci = hcd_to_ehci(hcd);
	struct ehci_ci_priv *priv = (struct ehci_ci_priv *)ehci->priv;
	struct device *dev = hcd->self.controller;
	struct ci_hdrc *ci = dev_get_drvdata(dev);
	int ret = 0;
	int port = HCS_N_PORTS(ehci->hcs_params);

	if (priv->reg_vbus && enable != priv->enabled) {
		/* a single regulator cannot serve multiple ports */
		if (port > 1) {
			dev_warn(dev,
				"Not support multi-port regulator control\n");
			return 0;
		}
		if (enable)
			ret = regulator_enable(priv->reg_vbus);
		else
			ret = regulator_disable(priv->reg_vbus);
		if (ret) {
			dev_err(dev,
				"Failed to %s vbus regulator, ret=%d\n",
				enable ? "enable" : "disable", ret);
			return ret;
		}
		priv->enabled = enable;
	}

	if (ci->platdata->flags & CI_HDRC_PHY_VBUS_CONTROL) {
		if (enable)
			usb_phy_vbus_on(ci->usb_phy);
		else
			usb_phy_vbus_off(ci->usb_phy);
	}

	if (enable && (ci->platdata->phy_mode == USBPHY_INTERFACE_MODE_HSIC)) {
		/*
		 * Marvell 28nm HSIC PHY requires forcing the port to HS mode.
		 * As HSIC is always HS, this should be safe for others.
		 */
		hw_port_test_set(ci, 5);
		hw_port_test_set(ci, 0);
	}
	return 0;
}
/* note: a stray ';' after the closing brace was removed */
/*
 * ehci_ci_reset() - EHCI reset override
 *
 * Runs the generic ehci_setup(), disables the EHCI I/O watchdog, lets
 * the platform react to the reset event, then reapplies the platform's
 * register configuration.
 */
static int ehci_ci_reset(struct usb_hcd *hcd)
{
	struct device *dev = hcd->self.controller;
	struct ci_hdrc *ci = dev_get_drvdata(dev);
	struct ehci_hcd *ehci = hcd_to_ehci(hcd);
	int ret;

	ret = ehci_setup(hcd);
	if (ret)
		return ret;

	ehci->need_io_watchdog = 0;

	if (ci->platdata->notify_event) {
		ret = ci->platdata->notify_event(ci,
				CI_HDRC_CONTROLLER_RESET_EVENT);
		if (ret)
			return ret;
	}

	ci_platform_configure(ci);

	return ret;
}
/* Overrides applied to the generic EHCI driver for the CI controller */
static const struct ehci_driver_overrides ehci_ci_overrides = {
	.extra_priv_size = sizeof(struct ehci_ci_priv),
	.port_power = ehci_ci_portpower,
	.reset = ehci_ci_reset,
};
/* Role IRQ handler for host mode: forward to the HCD core */
static irqreturn_t host_irq(struct ci_hdrc *ci)
{
	return usb_hcd_irq(ci->irq, ci->hcd);
}
/*
 * host_start() - start the host role
 *
 * Creates and configures an EHCI HCD over the CI register bank, sets up
 * vbus handling (either an early-on regulator or per-port control via
 * ehci_ci_portpower()), selects host pinctrl state, and registers the
 * HCD. On OTG FSM configurations the HCD is also wired into ci->otg.
 */
static int host_start(struct ci_hdrc *ci)
{
	struct usb_hcd *hcd;
	struct ehci_hcd *ehci;
	struct ehci_ci_priv *priv;
	int ret;

	if (usb_disabled())
		return -ENODEV;

	hcd = __usb_create_hcd(&ci_ehci_hc_driver, ci->dev->parent,
			       ci->dev, dev_name(ci->dev), NULL);
	if (!hcd)
		return -ENOMEM;

	dev_set_drvdata(ci->dev, ci);
	hcd->rsrc_start = ci->hw_bank.phys;
	hcd->rsrc_len = ci->hw_bank.size;
	hcd->regs = ci->hw_bank.abs;
	hcd->has_tt = 1;

	hcd->power_budget = ci->platdata->power_budget;
	hcd->tpl_support = ci->platdata->tpl_support;
	/* the CI core already initialized the PHY; the HCD must not redo it */
	if (ci->phy || ci->usb_phy) {
		hcd->skip_phy_initialization = 1;
		if (ci->usb_phy)
			hcd->usb_phy = ci->usb_phy;
	}

	ehci = hcd_to_ehci(hcd);
	ehci->caps = ci->hw_bank.cap;
	ehci->has_hostpc = ci->hw_bank.lpm;
	ehci->has_tdi_phy_lpm = ci->hw_bank.lpm;
	ehci->imx28_write_fix = ci->imx28_write_fix;
	ehci->has_ci_pec_bug = ci->has_portsc_pec_bug;

	priv = (struct ehci_ci_priv *)ehci->priv;
	priv->reg_vbus = NULL;

	if (ci->platdata->reg_vbus && !ci_otg_is_fsm_mode(ci)) {
		if (ci->platdata->flags & CI_HDRC_TURN_VBUS_EARLY_ON) {
			/* vbus is switched on once, here, and stays on */
			ret = regulator_enable(ci->platdata->reg_vbus);
			if (ret) {
				dev_err(ci->dev,
				"Failed to enable vbus regulator, ret=%d\n",
				ret);
				goto put_hcd;
			}
		} else {
			/* defer to per-port control in ehci_ci_portpower() */
			priv->reg_vbus = ci->platdata->reg_vbus;
		}
	}

	if (ci->platdata->pins_host)
		pinctrl_select_state(ci->platdata->pctl,
				     ci->platdata->pins_host);

	ci->hcd = hcd;

	ret = usb_add_hcd(hcd, 0, 0);
	if (ret) {
		ci->hcd = NULL;
		goto disable_reg;
	} else {
		struct usb_otg *otg = &ci->otg;

		if (ci_otg_is_fsm_mode(ci)) {
			otg->host = &hcd->self;
			hcd->self.otg_port = 1;
		}

		if (ci->platdata->notify_event &&
			(ci->platdata->flags & CI_HDRC_IMX_IS_HSIC))
			ci->platdata->notify_event
				(ci, CI_HDRC_IMX_HSIC_ACTIVE_EVENT);
	}

	return ret;

disable_reg:
	if (ci->platdata->reg_vbus && !ci_otg_is_fsm_mode(ci) &&
			(ci->platdata->flags & CI_HDRC_TURN_VBUS_EARLY_ON))
		regulator_disable(ci->platdata->reg_vbus);
put_hcd:
	usb_put_hcd(hcd);

	return ret;
}
/*
 * host_stop - tear down the EHCI host role
 *
 * Mirrors host_start(): notify the platform, remove and release the HCD,
 * disable the early-on VBUS regulator, and restore the default pinctrl
 * state.  Safe to call when the HCD was never created (ci->hcd == NULL).
 */
static void host_stop(struct ci_hdrc *ci)
{
	struct usb_hcd *hcd = ci->hcd;

	if (hcd) {
		if (ci->platdata->notify_event)
			ci->platdata->notify_event(ci,
				CI_HDRC_CONTROLLER_STOPPED_EVENT);
		usb_remove_hcd(hcd);
		/* Mark role gone before draining the IRQ handler. */
		ci->role = CI_ROLE_END;
		synchronize_irq(ci->irq);
		usb_put_hcd(hcd);
		if (ci->platdata->reg_vbus && !ci_otg_is_fsm_mode(ci) &&
			(ci->platdata->flags & CI_HDRC_TURN_VBUS_EARLY_ON))
				regulator_disable(ci->platdata->reg_vbus);
	}
	ci->hcd = NULL;
	ci->otg.host = NULL;

	if (ci->platdata->pins_host && ci->platdata->pins_default)
		pinctrl_select_state(ci->platdata->pctl,
				     ci->platdata->pins_default);
}
/* Stop the host only when it is the active role and an HCD exists. */
void ci_hdrc_host_destroy(struct ci_hdrc *ci)
{
	if (ci->role != CI_ROLE_HOST || !ci->hcd)
		return;

	host_stop(ci);
}
/*
 * The below code is based on tegra ehci driver.
 *
 * Intercept hub requests that need ChipIdea-specific handling (port
 * suspend sequencing and HSIC wakeup programming); everything else falls
 * through to the generic ehci_hub_control().
 */
static int ci_ehci_hub_control(
	struct usb_hcd *hcd,
	u16 typeReq,
	u16 wValue,
	u16 wIndex,
	char *buf,
	u16 wLength
)
{
	struct ehci_hcd *ehci = hcd_to_ehci(hcd);
	unsigned int ports = HCS_N_PORTS(ehci->hcs_params);
	u32 __iomem *status_reg;
	u32 temp, port_index;
	unsigned long flags;
	int retval = 0;
	bool done = false;
	struct device *dev = hcd->self.controller;
	struct ci_hdrc *ci = dev_get_drvdata(dev);

	/* wIndex is 1-based per the USB hub spec; convert to array index. */
	port_index = wIndex & 0xff;
	port_index -= (port_index > 0);
	status_reg = &ehci->regs->port_status[port_index];

	spin_lock_irqsave(&ehci->lock, flags);

	/* Platform glue gets first crack; it may fully handle the request. */
	if (ci->platdata->hub_control) {
		retval = ci->platdata->hub_control(ci, typeReq, wValue, wIndex,
						   buf, wLength, &done, &flags);
		if (done)
			goto done;
	}

	if (typeReq == SetPortFeature && wValue == USB_PORT_FEAT_SUSPEND) {
		if (!wIndex || wIndex > ports) {
			retval = -EPIPE;
			goto done;
		}

		temp = ehci_readl(ehci, status_reg);
		/* Port must be enabled and not resetting to be suspended. */
		if ((temp & PORT_PE) == 0 || (temp & PORT_RESET) != 0) {
			retval = -EPIPE;
			goto done;
		}

		/* Arm disconnect/overcurrent wakeups, drop connect wakeup. */
		temp &= ~(PORT_RWC_BITS | PORT_WKCONN_E);
		temp |= PORT_WKDISC_E | PORT_WKOC_E;
		ehci_writel(ehci, temp | PORT_SUSPEND, status_reg);

		/*
		 * If a transaction is in progress, there may be a delay in
		 * suspending the port. Poll until the port is suspended.
		 */
		if (ehci_handshake(ehci, status_reg, PORT_SUSPEND,
			PORT_SUSPEND, 5000))
			ehci_err(ehci, "timeout waiting for SUSPEND\n");

		if (ci->platdata->flags & CI_HDRC_IMX_IS_HSIC) {
			if (ci->platdata->notify_event)
				ci->platdata->notify_event(ci,
					CI_HDRC_IMX_HSIC_SUSPEND_EVENT);

			/* HSIC uses out-of-band wakeup; clear port wakeups. */
			temp = ehci_readl(ehci, status_reg);
			temp &= ~(PORT_WKDISC_E | PORT_WKCONN_E);
			ehci_writel(ehci, temp, status_reg);
		}

		set_bit(port_index, &ehci->suspended_ports);
		goto done;
	}

	/*
	 * After resume has finished, it needs do some post resume
	 * operation for some SoCs.
	 */
	else if (typeReq == ClearPortFeature &&
		wValue == USB_PORT_FEAT_C_SUSPEND) {
		/* Make sure the resume has finished, it should be finished */
		if (ehci_handshake(ehci, status_reg, PORT_RESUME, 0, 25000))
			ehci_err(ehci, "timeout waiting for resume\n");
	}

	spin_unlock_irqrestore(&ehci->lock, flags);

	/* Handle the hub control events here */
	return ehci_hub_control(hcd, typeReq, wValue, wIndex, buf, wLength);
done:
	spin_unlock_irqrestore(&ehci->lock, flags);
	return retval;
}
/*
 * ci_ehci_bus_suspend - wrapper around the core EHCI bus_suspend
 *
 * Runs the saved core suspend first, then applies ChipIdea-specific
 * fixups on the first connected port (set RUN early so remote wakeup
 * resume gets SOFs in time; strip HSIC port wakeups).
 */
static int ci_ehci_bus_suspend(struct usb_hcd *hcd)
{
	struct ehci_hcd *ehci = hcd_to_ehci(hcd);
	struct device *dev = hcd->self.controller;
	struct ci_hdrc *ci = dev_get_drvdata(dev);
	int port;
	u32 tmp;

	int ret = orig_bus_suspend(hcd);

	if (ret)
		return ret;

	port = HCS_N_PORTS(ehci->hcs_params);
	while (port--) {
		u32 __iomem *reg = &ehci->regs->port_status[port];
		u32 portsc = ehci_readl(ehci, reg);

		if (portsc & PORT_CONNECT) {
			/*
			 * For chipidea, the resume signal will be ended
			 * automatically, so for remote wakeup case, the
			 * usbcmd.rs may not be set before the resume has
			 * ended if other resume paths consumes too much
			 * time (~24ms), in that case, the SOF will not
			 * send out within 3ms after resume ends, then the
			 * high speed device will enter full speed mode.
			 */
			tmp = ehci_readl(ehci, &ehci->regs->command);
			tmp |= CMD_RUN;
			ehci_writel(ehci, tmp, &ehci->regs->command);
			/*
			 * It needs a short delay between set RS bit and PHCD.
			 */
			usleep_range(150, 200);
			/*
			 * Need to clear WKCN and WKOC for imx HSIC,
			 * otherwise, there will be wakeup event.
			 */
			if (ci->platdata->flags & CI_HDRC_IMX_IS_HSIC) {
				tmp = ehci_readl(ehci, reg);
				tmp &= ~(PORT_WKDISC_E | PORT_WKCONN_E);
				ehci_writel(ehci, tmp, reg);
			}

			/* Only the first connected port needs the fixup. */
			break;
		}
	}

	return 0;
}
/*
 * ci_hdrc_free_dma_aligned_buffer - undo ci_hdrc_alloc_dma_aligned_buffer()
 *
 * If the URB used a bounce buffer, copy IN data back to the caller's
 * original buffer, restore the original transfer_buffer pointer and free
 * the bounce allocation.  No-op for URBs we did not bounce.
 */
static void ci_hdrc_free_dma_aligned_buffer(struct urb *urb)
{
	struct ci_hdrc_dma_aligned_buffer *temp;
	size_t length;

	if (!(urb->transfer_flags & URB_ALIGNED_TEMP_BUFFER))
		return;

	/* transfer_buffer points at temp->data inside our wrapper struct. */
	temp = container_of(urb->transfer_buffer,
			    struct ci_hdrc_dma_aligned_buffer, data);

	if (usb_urb_dir_in(urb)) {
		/*
		 * Isoc URBs may have gaps between packets, so copy the whole
		 * buffer; otherwise only the bytes actually received.
		 */
		if (usb_pipeisoc(urb->pipe))
			length = urb->transfer_buffer_length;
		else
			length = urb->actual_length;

		memcpy(temp->old_xfer_buffer, temp->data, length);
	}
	urb->transfer_buffer = temp->old_xfer_buffer;
	kfree(temp->kmalloc_ptr);

	urb->transfer_flags &= ~URB_ALIGNED_TEMP_BUFFER;
}
/*
 * ci_hdrc_alloc_dma_aligned_buffer - bounce misaligned URB buffers
 *
 * Some ChipIdea integrations require 32-byte-aligned DMA buffers.  When a
 * non-SG URB's buffer is misaligned, allocate an aligned bounce buffer,
 * copy OUT data into it and swap it into the URB, flagging the URB so
 * ci_hdrc_free_dma_aligned_buffer() restores it later.
 */
static int ci_hdrc_alloc_dma_aligned_buffer(struct urb *urb, gfp_t mem_flags)
{
	struct ci_hdrc_dma_aligned_buffer *temp, *kmalloc_ptr;
	const unsigned int ci_hdrc_usb_dma_align = 32;
	size_t kmalloc_size;

	/* Nothing to do for SG transfers, empty or already-aligned buffers. */
	if (urb->num_sgs || urb->sg || urb->transfer_buffer_length == 0 ||
	    !((uintptr_t)urb->transfer_buffer & (ci_hdrc_usb_dma_align - 1)))
		return 0;

	/* Allocate a buffer with enough padding for alignment */
	kmalloc_size = urb->transfer_buffer_length +
		sizeof(struct ci_hdrc_dma_aligned_buffer) +
		ci_hdrc_usb_dma_align - 1;

	kmalloc_ptr = kmalloc(kmalloc_size, mem_flags);
	if (!kmalloc_ptr)
		return -ENOMEM;

	/* Position our struct dma_aligned_buffer such that data is aligned */
	temp = PTR_ALIGN(kmalloc_ptr + 1, ci_hdrc_usb_dma_align) - 1;

	temp->kmalloc_ptr = kmalloc_ptr;
	temp->old_xfer_buffer = urb->transfer_buffer;
	if (usb_urb_dir_out(urb))
		memcpy(temp->data, urb->transfer_buffer,
		       urb->transfer_buffer_length);
	urb->transfer_buffer = temp->data;

	urb->transfer_flags |= URB_ALIGNED_TEMP_BUFFER;

	return 0;
}
/*
 * Bounce the buffer if needed, then do the normal HCD DMA mapping.
 * On mapping failure the bounce buffer is released again.
 */
static int ci_hdrc_map_urb_for_dma(struct usb_hcd *hcd, struct urb *urb,
				   gfp_t mem_flags)
{
	int ret = ci_hdrc_alloc_dma_aligned_buffer(urb, mem_flags);

	if (ret)
		return ret;

	ret = usb_hcd_map_urb_for_dma(hcd, urb, mem_flags);
	if (ret)
		ci_hdrc_free_dma_aligned_buffer(urb);

	return ret;
}
/* Unmap DMA first, then release any bounce buffer and restore the URB. */
static void ci_hdrc_unmap_urb_for_dma(struct usb_hcd *hcd, struct urb *urb)
{
	usb_hcd_unmap_urb_for_dma(hcd, urb);
	ci_hdrc_free_dma_aligned_buffer(urb);
}
#ifdef CONFIG_PM_SLEEP
/* System-sleep hook: delegate to the EHCI core suspend path. */
static void ci_hdrc_host_suspend(struct ci_hdrc *ci)
{
	ehci_suspend(ci->hcd, device_may_wakeup(ci->dev));
}

/*
 * System-sleep hook: @power_lost is forwarded to ehci_resume() —
 * presumably forcing a controller re-init when power was lost during
 * sleep; confirm against ehci_resume()'s parameter semantics.
 */
static void ci_hdrc_host_resume(struct ci_hdrc *ci, bool power_lost)
{
	ehci_resume(ci->hcd, power_lost);
}
#endif
/*
 * ci_hdrc_host_init - register the host role with the ChipIdea core
 *
 * Returns -ENXIO when the silicon lacks host capability (DCCPARAMS.HC
 * unset), -ENOMEM on allocation failure, 0 on success.  Installs the
 * URB bounce-buffer hooks only when the platform requires aligned DMA.
 */
int ci_hdrc_host_init(struct ci_hdrc *ci)
{
	struct ci_role_driver *rdrv;

	if (!hw_read(ci, CAP_DCCPARAMS, DCCPARAMS_HC))
		return -ENXIO;

	rdrv = devm_kzalloc(ci->dev, sizeof(struct ci_role_driver), GFP_KERNEL);
	if (!rdrv)
		return -ENOMEM;

	rdrv->start = host_start;
	rdrv->stop = host_stop;
#ifdef CONFIG_PM_SLEEP
	rdrv->suspend = ci_hdrc_host_suspend;
	rdrv->resume = ci_hdrc_host_resume;
#endif
	rdrv->irq = host_irq;
	rdrv->name = "host";
	ci->roles[CI_ROLE_HOST] = rdrv;

	/* Note: mutates the shared ci_ehci_hc_driver template. */
	if (ci->platdata->flags & CI_HDRC_REQUIRES_ALIGNED_DMA) {
		ci_ehci_hc_driver.map_urb_for_dma = ci_hdrc_map_urb_for_dma;
		ci_ehci_hc_driver.unmap_urb_for_dma = ci_hdrc_unmap_urb_for_dma;
	}

	return 0;
}
/*
 * ci_hdrc_host_driver_init - one-time setup of the shared hc_driver
 *
 * Saves the core bus_suspend so our wrapper can chain to it, then
 * installs the ChipIdea-specific bus_suspend and hub_control handlers.
 */
void ci_hdrc_host_driver_init(void)
{
	ehci_init_driver(&ci_ehci_hc_driver, &ehci_ci_overrides);
	orig_bus_suspend = ci_ehci_hc_driver.bus_suspend;
	ci_ehci_hc_driver.bus_suspend = ci_ehci_bus_suspend;
	ci_ehci_hc_driver.hub_control = ci_ehci_hub_control;
}
/* linux-master | drivers/usb/chipidea/host.c */
// SPDX-License-Identifier: GPL-2.0+
/*
* Copyright 2012 Freescale Semiconductor, Inc.
* Copyright (C) 2012 Marek Vasut <[email protected]>
* on behalf of DENX Software Engineering GmbH
*/
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/usb/chipidea.h>
#include <linux/usb/of.h>
#include <linux/clk.h>
#include <linux/pinctrl/consumer.h>
#include <linux/pm_qos.h>
#include "ci.h"
#include "ci_hdrc_imx.h"
/* Per-SoC quirk flags handed through to the ChipIdea core. */
struct ci_hdrc_imx_platform_flag {
	unsigned int flags;
};

static const struct ci_hdrc_imx_platform_flag imx23_usb_data = {
	.flags = CI_HDRC_TURN_VBUS_EARLY_ON |
		CI_HDRC_DISABLE_STREAMING,
};

static const struct ci_hdrc_imx_platform_flag imx27_usb_data = {
	.flags = CI_HDRC_DISABLE_STREAMING,
};

/* i.MX28 additionally needs the double-write errata workaround. */
static const struct ci_hdrc_imx_platform_flag imx28_usb_data = {
	.flags = CI_HDRC_IMX28_WRITE_FIX |
		CI_HDRC_TURN_VBUS_EARLY_ON |
		CI_HDRC_DISABLE_STREAMING,
};

static const struct ci_hdrc_imx_platform_flag imx6q_usb_data = {
	.flags = CI_HDRC_SUPPORTS_RUNTIME_PM |
		CI_HDRC_TURN_VBUS_EARLY_ON |
		CI_HDRC_DISABLE_STREAMING,
};

static const struct ci_hdrc_imx_platform_flag imx6sl_usb_data = {
	.flags = CI_HDRC_SUPPORTS_RUNTIME_PM |
		CI_HDRC_TURN_VBUS_EARLY_ON |
		CI_HDRC_DISABLE_HOST_STREAMING,
};

static const struct ci_hdrc_imx_platform_flag imx6sx_usb_data = {
	.flags = CI_HDRC_SUPPORTS_RUNTIME_PM |
		CI_HDRC_TURN_VBUS_EARLY_ON |
		CI_HDRC_DISABLE_HOST_STREAMING,
};

static const struct ci_hdrc_imx_platform_flag imx6ul_usb_data = {
	.flags = CI_HDRC_SUPPORTS_RUNTIME_PM |
		CI_HDRC_TURN_VBUS_EARLY_ON |
		CI_HDRC_DISABLE_DEVICE_STREAMING,
};

static const struct ci_hdrc_imx_platform_flag imx7d_usb_data = {
	.flags = CI_HDRC_SUPPORTS_RUNTIME_PM,
};

static const struct ci_hdrc_imx_platform_flag imx7ulp_usb_data = {
	.flags = CI_HDRC_SUPPORTS_RUNTIME_PM |
		CI_HDRC_HAS_PORTSC_PEC_MISSED |
		CI_HDRC_PMQOS,
};

static const struct ci_hdrc_imx_platform_flag imx8ulp_usb_data = {
	.flags = CI_HDRC_SUPPORTS_RUNTIME_PM |
		CI_HDRC_HAS_PORTSC_PEC_MISSED,
};
/* Device-tree match table; .data selects the per-SoC quirk flags above. */
static const struct of_device_id ci_hdrc_imx_dt_ids[] = {
	{ .compatible = "fsl,imx23-usb", .data = &imx23_usb_data},
	{ .compatible = "fsl,imx28-usb", .data = &imx28_usb_data},
	{ .compatible = "fsl,imx27-usb", .data = &imx27_usb_data},
	{ .compatible = "fsl,imx6q-usb", .data = &imx6q_usb_data},
	{ .compatible = "fsl,imx6sl-usb", .data = &imx6sl_usb_data},
	{ .compatible = "fsl,imx6sx-usb", .data = &imx6sx_usb_data},
	{ .compatible = "fsl,imx6ul-usb", .data = &imx6ul_usb_data},
	{ .compatible = "fsl,imx7d-usb", .data = &imx7d_usb_data},
	{ .compatible = "fsl,imx7ulp-usb", .data = &imx7ulp_usb_data},
	{ .compatible = "fsl,imx8ulp-usb", .data = &imx8ulp_usb_data},
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, ci_hdrc_imx_dt_ids);
/* Per-instance glue state for the i.MX ChipIdea wrapper. */
struct ci_hdrc_imx_data {
	struct usb_phy *phy;
	struct platform_device *ci_pdev;	/* the ci_hdrc child device */
	struct clk *clk;			/* single-clock platforms */
	struct imx_usbmisc_data *usbmisc_data;
	bool supports_runtime_pm;
	bool override_phy_control;
	bool in_lpm;				/* clocks gated, in low-power */
	struct pinctrl *pinctrl;		/* HSIC-only, may be NULL */
	struct pinctrl_state *pinctrl_hsic_active;
	struct regulator *hsic_pad_regulator;	/* HSIC-only, may be NULL */
	/* SoC before i.mx6 (except imx23/imx28) needs three clks */
	bool need_three_clks;
	struct clk *clk_ipg;
	struct clk *clk_ahb;
	struct clk *clk_per;
	/* --------------------------------- */
	struct pm_qos_request pm_qos_req;	/* used with CI_HDRC_PMQOS */
	const struct ci_hdrc_imx_platform_flag *plat_data;
};
/* Common functions shared by usbmisc drivers */
/*
 * usbmisc_get_init_data - parse the fsl,usbmisc phandle and DT policy
 *
 * Returns NULL when the node has no fsl,usbmisc property (no usbmisc
 * needed), an ERR_PTR on parse/defer errors, or an allocated (devm)
 * imx_usbmisc_data describing over-current/VBUS polarity, ULPI mode and
 * the PicoPHY tuning values.  -EPROBE_DEFER is returned until the
 * usbmisc driver has bound.
 */
static struct imx_usbmisc_data *usbmisc_get_init_data(struct device *dev)
{
	struct platform_device *misc_pdev;
	struct device_node *np = dev->of_node;
	struct of_phandle_args args;
	struct imx_usbmisc_data *data;
	int ret;

	/*
	 * In case the fsl,usbmisc property is not present this device doesn't
	 * need usbmisc. Return NULL (which is no error here)
	 */
	if (!of_get_property(np, "fsl,usbmisc", NULL))
		return NULL;

	data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
	if (!data)
		return ERR_PTR(-ENOMEM);

	ret = of_parse_phandle_with_args(np, "fsl,usbmisc", "#index-cells",
					0, &args);
	if (ret) {
		dev_err(dev, "Failed to parse property fsl,usbmisc, errno %d\n",
			ret);
		return ERR_PTR(ret);
	}

	data->index = args.args[0];

	/*
	 * NOTE(review): of_find_device_by_node() takes a device reference;
	 * it is only put on the defer path below — the kept reference for
	 * data->dev appears intentional (device must outlive us).
	 */
	misc_pdev = of_find_device_by_node(args.np);
	of_node_put(args.np);

	if (!misc_pdev)
		return ERR_PTR(-EPROBE_DEFER);

	if (!platform_get_drvdata(misc_pdev)) {
		put_device(&misc_pdev->dev);
		return ERR_PTR(-EPROBE_DEFER);
	}
	data->dev = &misc_pdev->dev;

	/*
	 * Check the various over current related properties. If over current
	 * detection is disabled we're not interested in the polarity.
	 */
	if (of_property_read_bool(np, "disable-over-current")) {
		data->disable_oc = 1;
	} else if (of_property_read_bool(np, "over-current-active-high")) {
		data->oc_pol_active_low = 0;
		data->oc_pol_configured = 1;
	} else if (of_property_read_bool(np, "over-current-active-low")) {
		data->oc_pol_active_low = 1;
		data->oc_pol_configured = 1;
	} else {
		dev_warn(dev, "No over current polarity defined\n");
	}

	data->pwr_pol = of_property_read_bool(np, "power-active-high");
	data->evdo = of_property_read_bool(np, "external-vbus-divider");

	if (of_usb_get_phy_mode(np) == USBPHY_INTERFACE_MODE_ULPI)
		data->ulpi = 1;

	/* PicoPHY tuning knobs; -1 means "leave hardware default". */
	if (of_property_read_u32(np, "samsung,picophy-pre-emp-curr-control",
			&data->emp_curr_control))
		data->emp_curr_control = -1;
	if (of_property_read_u32(np, "samsung,picophy-dc-vol-level-adjust",
			&data->dc_vol_level_adjust))
		data->dc_vol_level_adjust = -1;
	if (of_property_read_u32(np, "fsl,picophy-rise-fall-time-adjust",
			&data->rise_fall_time_adjust))
		data->rise_fall_time_adjust = -1;

	return data;
}
/* End of common functions shared by usbmisc drivers*/
/*
 * imx_get_clks - look up the clocks for this SoC generation
 *
 * Newer SoCs expose a single unnamed clock; older ones need the
 * ipg/ahb/per triple.  Probing for "ipg" first decides which scheme
 * applies and sets data->need_three_clks accordingly.
 */
static int imx_get_clks(struct device *dev)
{
	struct ci_hdrc_imx_data *data = dev_get_drvdata(dev);
	int ret = 0;

	data->clk_ipg = devm_clk_get(dev, "ipg");
	if (IS_ERR(data->clk_ipg)) {
		/* If the platform only needs one clocks */
		data->clk = devm_clk_get(dev, NULL);
		if (IS_ERR(data->clk)) {
			ret = PTR_ERR(data->clk);
			dev_err(dev,
				"Failed to get clks, err=%ld,%ld\n",
				PTR_ERR(data->clk), PTR_ERR(data->clk_ipg));
			return ret;
		}
		return ret;
	}

	data->clk_ahb = devm_clk_get(dev, "ahb");
	if (IS_ERR(data->clk_ahb)) {
		ret = PTR_ERR(data->clk_ahb);
		dev_err(dev,
			"Failed to get ahb clock, err=%d\n", ret);
		return ret;
	}

	data->clk_per = devm_clk_get(dev, "per");
	if (IS_ERR(data->clk_per)) {
		ret = PTR_ERR(data->clk_per);
		dev_err(dev,
			"Failed to get per clock, err=%d\n", ret);
		return ret;
	}

	data->need_three_clks = true;
	return ret;
}
/*
 * imx_prepare_enable_clks - enable the clock(s) found by imx_get_clks()
 *
 * Enables ipg -> ahb -> per in order; on failure, already-enabled clocks
 * are rolled back so the function leaves no clock enabled on error.
 */
static int imx_prepare_enable_clks(struct device *dev)
{
	struct ci_hdrc_imx_data *data = dev_get_drvdata(dev);
	int ret = 0;

	if (data->need_three_clks) {
		ret = clk_prepare_enable(data->clk_ipg);
		if (ret) {
			dev_err(dev,
				"Failed to prepare/enable ipg clk, err=%d\n",
				ret);
			return ret;
		}

		ret = clk_prepare_enable(data->clk_ahb);
		if (ret) {
			dev_err(dev,
				"Failed to prepare/enable ahb clk, err=%d\n",
				ret);
			clk_disable_unprepare(data->clk_ipg);
			return ret;
		}

		ret = clk_prepare_enable(data->clk_per);
		if (ret) {
			dev_err(dev,
				"Failed to prepare/enable per clk, err=%d\n",
				ret);
			clk_disable_unprepare(data->clk_ahb);
			clk_disable_unprepare(data->clk_ipg);
			return ret;
		}
	} else {
		ret = clk_prepare_enable(data->clk);
		if (ret) {
			dev_err(dev,
				"Failed to prepare/enable clk, err=%d\n",
				ret);
			return ret;
		}
	}

	return ret;
}
/* Disable whatever imx_prepare_enable_clks() enabled. */
static void imx_disable_unprepare_clks(struct device *dev)
{
	struct ci_hdrc_imx_data *data = dev_get_drvdata(dev);

	if (!data->need_three_clks) {
		clk_disable_unprepare(data->clk);
		return;
	}

	/* Reverse of the enable order: per, then ahb, then ipg. */
	clk_disable_unprepare(data->clk_per);
	clk_disable_unprepare(data->clk_ahb);
	clk_disable_unprepare(data->clk_ipg);
}
/*
 * ci_hdrc_imx_notify_event - callback from the ChipIdea core
 *
 * Handles HSIC pinctrl activation, HSIC connect on suspend, and charger
 * detection on VBUS changes.  Unknown events are ignored (return 0).
 */
static int ci_hdrc_imx_notify_event(struct ci_hdrc *ci, unsigned int event)
{
	struct device *dev = ci->dev->parent;
	struct ci_hdrc_imx_data *data = dev_get_drvdata(dev);
	int ret = 0;
	struct imx_usbmisc_data *mdata = data->usbmisc_data;

	switch (event) {
	case CI_HDRC_IMX_HSIC_ACTIVE_EVENT:
		/* Switch the HSIC pads from "idle" to "active". */
		if (data->pinctrl) {
			ret = pinctrl_select_state(data->pinctrl,
					data->pinctrl_hsic_active);
			if (ret)
				dev_err(dev,
					"hsic_active select failed, err=%d\n",
					ret);
		}
		break;
	case CI_HDRC_IMX_HSIC_SUSPEND_EVENT:
		ret = imx_usbmisc_hsic_set_connect(mdata);
		if (ret)
			dev_err(dev,
				"hsic_set_connect failed, err=%d\n", ret);
		break;
	case CI_HDRC_CONTROLLER_VBUS_EVENT:
		/* Run (or cancel) charger detection on VBUS rise/fall. */
		if (ci->vbus_active)
			ret = imx_usbmisc_charger_detection(mdata, true);
		else
			ret = imx_usbmisc_charger_detection(mdata, false);
		if (ci->usb_phy)
			schedule_work(&ci->usb_phy->chg_work);
		break;
	default:
		break;
	}

	return ret;
}
/*
 * ci_hdrc_imx_probe - bind the i.MX glue and register the ci_hdrc core
 *
 * Setup order matters: usbmisc data, optional HSIC pinctrl/pad regulator,
 * PM-QoS request, clocks, PHY lookup, usbmisc init, then the ci_hdrc
 * child device.  Error labels unwind in reverse.  On failure,
 * data->ci_pdev is cleared so a later shutdown/remove skips teardown.
 */
static int ci_hdrc_imx_probe(struct platform_device *pdev)
{
	struct ci_hdrc_imx_data *data;
	struct ci_hdrc_platform_data pdata = {
		.name = dev_name(&pdev->dev),
		.capoffset = DEF_CAPOFFSET,
		.notify_event = ci_hdrc_imx_notify_event,
	};
	int ret;
	const struct ci_hdrc_imx_platform_flag *imx_platform_flag;
	struct device_node *np = pdev->dev.of_node;
	struct device *dev = &pdev->dev;

	imx_platform_flag = of_device_get_match_data(&pdev->dev);

	data = devm_kzalloc(&pdev->dev, sizeof(*data), GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	data->plat_data = imx_platform_flag;
	pdata.flags |= imx_platform_flag->flags;
	platform_set_drvdata(pdev, data);
	data->usbmisc_data = usbmisc_get_init_data(dev);
	if (IS_ERR(data->usbmisc_data))
		return PTR_ERR(data->usbmisc_data);

	/* HSIC ports need dedicated pinctrl states and (maybe) a pad supply. */
	if ((of_usb_get_phy_mode(dev->of_node) == USBPHY_INTERFACE_MODE_HSIC)
		&& data->usbmisc_data) {
		pdata.flags |= CI_HDRC_IMX_IS_HSIC;
		data->usbmisc_data->hsic = 1;
		data->pinctrl = devm_pinctrl_get(dev);
		if (PTR_ERR(data->pinctrl) == -ENODEV)
			data->pinctrl = NULL;
		else if (IS_ERR(data->pinctrl))
			return dev_err_probe(dev, PTR_ERR(data->pinctrl),
					     "pinctrl get failed\n");

		data->hsic_pad_regulator =
				devm_regulator_get_optional(dev, "hsic");
		if (PTR_ERR(data->hsic_pad_regulator) == -ENODEV) {
			/* no pad regulator is needed */
			data->hsic_pad_regulator = NULL;
		} else if (IS_ERR(data->hsic_pad_regulator))
			return dev_err_probe(dev, PTR_ERR(data->hsic_pad_regulator),
					     "Get HSIC pad regulator error\n");

		if (data->hsic_pad_regulator) {
			ret = regulator_enable(data->hsic_pad_regulator);
			if (ret) {
				dev_err(dev,
					"Failed to enable HSIC pad regulator\n");
				return ret;
			}
		}
	}

	/* HSIC pinctrl handling */
	if (data->pinctrl) {
		struct pinctrl_state *pinctrl_hsic_idle;

		pinctrl_hsic_idle = pinctrl_lookup_state(data->pinctrl, "idle");
		if (IS_ERR(pinctrl_hsic_idle)) {
			dev_err(dev,
				"pinctrl_hsic_idle lookup failed, err=%ld\n",
				PTR_ERR(pinctrl_hsic_idle));
			return PTR_ERR(pinctrl_hsic_idle);
		}

		ret = pinctrl_select_state(data->pinctrl, pinctrl_hsic_idle);
		if (ret) {
			dev_err(dev, "hsic_idle select failed, err=%d\n", ret);
			return ret;
		}

		/* "active" is applied later, on the HSIC_ACTIVE event. */
		data->pinctrl_hsic_active = pinctrl_lookup_state(data->pinctrl,
								"active");
		if (IS_ERR(data->pinctrl_hsic_active)) {
			dev_err(dev,
				"pinctrl_hsic_active lookup failed, err=%ld\n",
				PTR_ERR(data->pinctrl_hsic_active));
			return PTR_ERR(data->pinctrl_hsic_active);
		}
	}

	if (pdata.flags & CI_HDRC_PMQOS)
		cpu_latency_qos_add_request(&data->pm_qos_req, 0);

	ret = imx_get_clks(dev);
	if (ret)
		goto disable_hsic_regulator;

	ret = imx_prepare_enable_clks(dev);
	if (ret)
		goto disable_hsic_regulator;

	/* Try the legacy "fsl,usbphy" phandle first, then generic "phys". */
	data->phy = devm_usb_get_phy_by_phandle(dev, "fsl,usbphy", 0);
	if (IS_ERR(data->phy)) {
		ret = PTR_ERR(data->phy);
		if (ret != -ENODEV) {
			dev_err_probe(dev, ret, "Failed to parse fsl,usbphy\n");
			goto err_clk;
		}
		data->phy = devm_usb_get_phy_by_phandle(dev, "phys", 0);
		if (IS_ERR(data->phy)) {
			ret = PTR_ERR(data->phy);
			if (ret == -ENODEV) {
				/* No PHY at all is acceptable. */
				data->phy = NULL;
			} else {
				dev_err_probe(dev, ret, "Failed to parse phys\n");
				goto err_clk;
			}
		}
	}

	pdata.usb_phy = data->phy;
	if (data->usbmisc_data)
		data->usbmisc_data->usb_phy = data->phy;

	/* imx51/imx53 + ULPI: this glue drives the PHY, not the core. */
	if ((of_device_is_compatible(np, "fsl,imx53-usb") ||
	     of_device_is_compatible(np, "fsl,imx51-usb")) && pdata.usb_phy &&
	    of_usb_get_phy_mode(np) == USBPHY_INTERFACE_MODE_ULPI) {
		pdata.flags |= CI_HDRC_OVERRIDE_PHY_CONTROL;
		data->override_phy_control = true;
		usb_phy_init(pdata.usb_phy);
	}

	if (pdata.flags & CI_HDRC_SUPPORTS_RUNTIME_PM)
		data->supports_runtime_pm = true;

	ret = imx_usbmisc_init(data->usbmisc_data);
	if (ret) {
		dev_err(dev, "usbmisc init failed, ret=%d\n", ret);
		goto err_clk;
	}

	data->ci_pdev = ci_hdrc_add_device(dev,
				pdev->resource, pdev->num_resources,
				&pdata);
	if (IS_ERR(data->ci_pdev)) {
		ret = PTR_ERR(data->ci_pdev);
		dev_err_probe(dev, ret, "ci_hdrc_add_device failed\n");
		goto err_clk;
	}

	if (data->usbmisc_data) {
		if (!IS_ERR(pdata.id_extcon.edev) ||
		    of_property_read_bool(np, "usb-role-switch"))
			data->usbmisc_data->ext_id = 1;

		if (!IS_ERR(pdata.vbus_extcon.edev) ||
		    of_property_read_bool(np, "usb-role-switch"))
			data->usbmisc_data->ext_vbus = 1;

		/* usbmisc needs to know dr mode to choose wakeup setting */
		data->usbmisc_data->available_role =
			ci_hdrc_query_available_role(data->ci_pdev);
	}

	ret = imx_usbmisc_init_post(data->usbmisc_data);
	if (ret) {
		dev_err(dev, "usbmisc post failed, ret=%d\n", ret);
		goto disable_device;
	}

	if (data->supports_runtime_pm) {
		pm_runtime_set_active(dev);
		pm_runtime_enable(dev);
	}

	device_set_wakeup_capable(dev, true);

	return 0;

disable_device:
	ci_hdrc_remove_device(data->ci_pdev);
err_clk:
	imx_disable_unprepare_clks(dev);
disable_hsic_regulator:
	if (data->hsic_pad_regulator)
		/* don't overwrite original ret (cf. EPROBE_DEFER) */
		regulator_disable(data->hsic_pad_regulator);
	if (pdata.flags & CI_HDRC_PMQOS)
		cpu_latency_qos_remove_request(&data->pm_qos_req);
	data->ci_pdev = NULL;
	return ret;
}
/*
 * ci_hdrc_imx_remove - unwind ci_hdrc_imx_probe()
 *
 * data->ci_pdev is NULL when probe failed (or after a prior remove), so
 * the checks below make the teardown safe for the shutdown-after-failed-
 * probe path as well.
 */
static void ci_hdrc_imx_remove(struct platform_device *pdev)
{
	struct ci_hdrc_imx_data *data = platform_get_drvdata(pdev);

	if (data->supports_runtime_pm) {
		/* Hold the device active while we dismantle it. */
		pm_runtime_get_sync(&pdev->dev);
		pm_runtime_disable(&pdev->dev);
		pm_runtime_put_noidle(&pdev->dev);
	}
	if (data->ci_pdev)
		ci_hdrc_remove_device(data->ci_pdev);
	if (data->override_phy_control)
		usb_phy_shutdown(data->phy);
	if (data->ci_pdev) {
		imx_disable_unprepare_clks(&pdev->dev);
		if (data->plat_data->flags & CI_HDRC_PMQOS)
			cpu_latency_qos_remove_request(&data->pm_qos_req);
		if (data->hsic_pad_regulator)
			regulator_disable(data->hsic_pad_regulator);
	}
}

/* Shutdown is a full remove: quiesce the controller before reboot. */
static void ci_hdrc_imx_shutdown(struct platform_device *pdev)
{
	ci_hdrc_imx_remove(pdev);
}
/*
 * imx_controller_suspend - gate clocks and enter low-power mode
 *
 * Wakeup is enabled in usbmisc when this is a runtime (auto) suspend or
 * the device may wake the system.  Sets data->in_lpm on success.
 */
static int __maybe_unused imx_controller_suspend(struct device *dev,
						 pm_message_t msg)
{
	struct ci_hdrc_imx_data *data = dev_get_drvdata(dev);
	int ret = 0;

	dev_dbg(dev, "at %s\n", __func__);

	ret = imx_usbmisc_suspend(data->usbmisc_data,
				  PMSG_IS_AUTO(msg) || device_may_wakeup(dev));
	if (ret) {
		dev_err(dev,
			"usbmisc suspend failed, ret=%d\n", ret);
		return ret;
	}

	imx_disable_unprepare_clks(dev);
	if (data->plat_data->flags & CI_HDRC_PMQOS)
		cpu_latency_qos_remove_request(&data->pm_qos_req);

	data->in_lpm = true;

	return 0;
}
/*
 * imx_controller_resume - inverse of imx_controller_suspend()
 *
 * Warns (and bails out successfully) if called while not in low-power
 * mode; on usbmisc failure the clocks are gated again so the state
 * stays consistent.
 */
static int __maybe_unused imx_controller_resume(struct device *dev,
						pm_message_t msg)
{
	struct ci_hdrc_imx_data *data = dev_get_drvdata(dev);
	int ret = 0;

	dev_dbg(dev, "at %s\n", __func__);

	if (!data->in_lpm) {
		WARN_ON(1);
		return 0;
	}

	if (data->plat_data->flags & CI_HDRC_PMQOS)
		cpu_latency_qos_add_request(&data->pm_qos_req, 0);

	ret = imx_prepare_enable_clks(dev);
	if (ret)
		return ret;

	data->in_lpm = false;

	ret = imx_usbmisc_resume(data->usbmisc_data,
				 PMSG_IS_AUTO(msg) || device_may_wakeup(dev));
	if (ret) {
		dev_err(dev, "usbmisc resume failed, ret=%d\n", ret);
		goto clk_disable;
	}

	return 0;

clk_disable:
	imx_disable_unprepare_clks(dev);
	return ret;
}
/*
 * System-sleep suspend: skip if runtime PM already put us in LPM,
 * otherwise suspend the controller and park the pins in sleep state.
 */
static int __maybe_unused ci_hdrc_imx_suspend(struct device *dev)
{
	int ret;

	struct ci_hdrc_imx_data *data = dev_get_drvdata(dev);

	if (data->in_lpm)
		/* The core's suspend doesn't run */
		return 0;

	ret = imx_controller_suspend(dev, PMSG_SUSPEND);
	if (ret)
		return ret;

	pinctrl_pm_select_sleep_state(dev);
	return ret;
}
/*
 * System-sleep resume: restore default pins, resume the controller, and
 * resynchronize the runtime-PM state machine to "active".
 */
static int __maybe_unused ci_hdrc_imx_resume(struct device *dev)
{
	struct ci_hdrc_imx_data *data = dev_get_drvdata(dev);
	int ret;

	pinctrl_pm_select_default_state(dev);
	ret = imx_controller_resume(dev, PMSG_RESUME);
	if (!ret && data->supports_runtime_pm) {
		/* Re-arm runtime PM now that we are demonstrably active. */
		pm_runtime_disable(dev);
		pm_runtime_set_active(dev);
		pm_runtime_enable(dev);
	}

	return ret;
}
/* Runtime suspend: entering LPM twice would be a framework bug — warn. */
static int __maybe_unused ci_hdrc_imx_runtime_suspend(struct device *dev)
{
	struct ci_hdrc_imx_data *data = dev_get_drvdata(dev);

	if (WARN_ON(data->in_lpm))
		return 0;

	return imx_controller_suspend(dev, PMSG_AUTO_SUSPEND);
}
/* Runtime resume: imx_controller_resume() already guards double-resume. */
static int __maybe_unused ci_hdrc_imx_runtime_resume(struct device *dev)
{
	return imx_controller_resume(dev, PMSG_AUTO_RESUME);
}

static const struct dev_pm_ops ci_hdrc_imx_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(ci_hdrc_imx_suspend, ci_hdrc_imx_resume)
	SET_RUNTIME_PM_OPS(ci_hdrc_imx_runtime_suspend,
			ci_hdrc_imx_runtime_resume, NULL)
};

static struct platform_driver ci_hdrc_imx_driver = {
	.probe = ci_hdrc_imx_probe,
	.remove_new = ci_hdrc_imx_remove,
	.shutdown = ci_hdrc_imx_shutdown,
	.driver = {
		.name = "imx_usb",
		.of_match_table = ci_hdrc_imx_dt_ids,
		.pm = &ci_hdrc_imx_pm_ops,
	},
};

module_platform_driver(ci_hdrc_imx_driver);

MODULE_ALIAS("platform:imx-usb");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("CI HDRC i.MX USB binding");
MODULE_AUTHOR("Marek Vasut <[email protected]>");
MODULE_AUTHOR("Richard Zhao <[email protected]>");
/* linux-master | drivers/usb/chipidea/ci_hdrc_imx.c */
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2014 Marvell Technology Group Ltd.
*
* Antoine Tenart <[email protected]>
*/
#include <linux/clk.h>
#include <linux/dma-mapping.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/phy/phy.h>
#include <linux/platform_device.h>
#include <linux/usb/chipidea.h>
#include <linux/usb/hcd.h>
#include <linux/usb/ulpi.h>
#include "ci.h"
/* Per-device state: the registered ci_hdrc child and its optional clock. */
struct ci_hdrc_usb2_priv {
	struct platform_device *ci_pdev;
	struct clk *clk;
};

/* Used when no platform data and no specific compatible match data. */
static const struct ci_hdrc_platform_data ci_default_pdata = {
	.capoffset = DEF_CAPOFFSET,
	.flags = CI_HDRC_DISABLE_STREAMING,
};

static const struct ci_hdrc_platform_data ci_zynq_pdata = {
	.capoffset = DEF_CAPOFFSET,
	.flags = CI_HDRC_PHY_VBUS_CONTROL,
};

static const struct ci_hdrc_platform_data ci_zevio_pdata = {
	.capoffset = DEF_CAPOFFSET,
	.flags = CI_HDRC_REGS_SHARED | CI_HDRC_FORCE_FULLSPEED,
};

static const struct of_device_id ci_hdrc_usb2_of_match[] = {
	{ .compatible = "chipidea,usb2" },
	{ .compatible = "xlnx,zynq-usb-2.20a", .data = &ci_zynq_pdata },
	{ .compatible = "lsi,zevio-usb", .data = &ci_zevio_pdata },
	{ }
};
MODULE_DEVICE_TABLE(of, ci_hdrc_usb2_of_match);
/*
 * ci_hdrc_usb2_probe - generic USB2 glue probe
 *
 * Builds platform data (board pdata, else the default, possibly
 * overridden by the OF match data), enables the optional clock, and
 * registers the ci_hdrc child device.
 */
static int ci_hdrc_usb2_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct ci_hdrc_usb2_priv *priv;
	struct ci_hdrc_platform_data *ci_pdata = dev_get_platdata(dev);
	int ret;
	const struct of_device_id *match;

	if (!ci_pdata) {
		ci_pdata = devm_kmalloc(dev, sizeof(*ci_pdata), GFP_KERNEL);
		if (!ci_pdata)
			return -ENOMEM;
		*ci_pdata = ci_default_pdata;	/* struct copy */
	}

	/* NOTE(review): match data overwrites any board-supplied pdata. */
	match = of_match_device(ci_hdrc_usb2_of_match, &pdev->dev);
	if (match && match->data) {
		/* struct copy */
		*ci_pdata = *(struct ci_hdrc_platform_data *)match->data;
	}

	priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	priv->clk = devm_clk_get_optional(dev, NULL);
	if (IS_ERR(priv->clk))
		return PTR_ERR(priv->clk);

	ret = clk_prepare_enable(priv->clk);
	if (ret) {
		dev_err(dev, "failed to enable the clock: %d\n", ret);
		return ret;
	}

	ci_pdata->name = dev_name(dev);

	priv->ci_pdev = ci_hdrc_add_device(dev, pdev->resource,
					   pdev->num_resources, ci_pdata);
	if (IS_ERR(priv->ci_pdev)) {
		ret = PTR_ERR(priv->ci_pdev);
		if (ret != -EPROBE_DEFER)
			dev_err(dev,
				"failed to register ci_hdrc platform device: %d\n",
				ret);
		goto clk_err;
	}

	platform_set_drvdata(pdev, priv);

	/* Child device handles PM; this glue only forwards state. */
	pm_runtime_no_callbacks(dev);
	pm_runtime_enable(dev);

	return 0;

clk_err:
	clk_disable_unprepare(priv->clk);
	return ret;
}
/* Unwind probe: disable runtime PM, remove the child, gate the clock. */
static void ci_hdrc_usb2_remove(struct platform_device *pdev)
{
	struct ci_hdrc_usb2_priv *priv = platform_get_drvdata(pdev);

	pm_runtime_disable(&pdev->dev);
	ci_hdrc_remove_device(priv->ci_pdev);
	clk_disable_unprepare(priv->clk);
}
static struct platform_driver ci_hdrc_usb2_driver = {
	.probe	= ci_hdrc_usb2_probe,
	.remove_new = ci_hdrc_usb2_remove,
	.driver	= {
		.name = "chipidea-usb2",
		.of_match_table = of_match_ptr(ci_hdrc_usb2_of_match),
	},
};
module_platform_driver(ci_hdrc_usb2_driver);

MODULE_DESCRIPTION("ChipIdea HDRC USB2 binding for ci13xxx");
MODULE_AUTHOR("Antoine Tenart <[email protected]>");
MODULE_LICENSE("GPL");
/* linux-master | drivers/usb/chipidea/ci_hdrc_usb2.c */
// SPDX-License-Identifier: GPL-2.0
/*
* otg.c - ChipIdea USB IP core OTG driver
*
* Copyright (C) 2013 Freescale Semiconductor, Inc.
*
* Author: Peter Chen
*/
/*
* This file mainly handles otgsc register, OTG fsm operations for HNP and SRP
* are also included.
*/
#include <linux/usb/otg.h>
#include <linux/usb/gadget.h>
#include <linux/usb/chipidea.h>
#include "ci.h"
#include "bits.h"
#include "otg.h"
#include "otg_fsm.h"
/**
* hw_read_otgsc - returns otgsc register bits value.
* @ci: the controller
* @mask: bitfield mask
*/
u32 hw_read_otgsc(struct ci_hdrc *ci, u32 mask)
{
	struct ci_hdrc_cable *cable;
	u32 val = hw_read(ci, OP_OTGSC, mask);

	/*
	 * If using extcon framework for VBUS and/or ID signal
	 * detection overwrite OTGSC register value
	 */
	cable = &ci->platdata->vbus_extcon;
	if (!IS_ERR(cable->edev) || ci->role_switch) {
		/* Synthesize the B-session-valid status/enable bits. */
		if (cable->changed)
			val |= OTGSC_BSVIS;
		else
			val &= ~OTGSC_BSVIS;

		if (cable->connected)
			val |= OTGSC_BSV;
		else
			val &= ~OTGSC_BSV;

		if (cable->enabled)
			val |= OTGSC_BSVIE;
		else
			val &= ~OTGSC_BSVIE;
	}

	cable = &ci->platdata->id_extcon;
	if (!IS_ERR(cable->edev) || ci->role_switch) {
		/* Synthesize the ID status/enable bits the same way. */
		if (cable->changed)
			val |= OTGSC_IDIS;
		else
			val &= ~OTGSC_IDIS;

		if (cable->connected)
			val &= ~OTGSC_ID; /* host */
		else
			val |= OTGSC_ID; /* device */

		if (cable->enabled)
			val |= OTGSC_IDIE;
		else
			val &= ~OTGSC_IDIE;
	}

	/* Callers only see the bits they asked for. */
	return val & mask;
}
/**
* hw_write_otgsc - updates target bits of OTGSC register.
* @ci: the controller
* @mask: bitfield mask
* @data: to be written
*/
void hw_write_otgsc(struct ci_hdrc *ci, u32 mask, u32 data)
{
	struct ci_hdrc_cable *cable;

	cable = &ci->platdata->vbus_extcon;
	if (!IS_ERR(cable->edev) || ci->role_switch) {
		/* Writing BSVIS acknowledges the synthetic "changed" event. */
		if (data & mask & OTGSC_BSVIS)
			cable->changed = false;

		/* Don't enable vbus interrupt if using external notifier */
		if (data & mask & OTGSC_BSVIE) {
			cable->enabled = true;
			data &= ~OTGSC_BSVIE;
		} else if (mask & OTGSC_BSVIE) {
			cable->enabled = false;
		}
	}

	cable = &ci->platdata->id_extcon;
	if (!IS_ERR(cable->edev) || ci->role_switch) {
		/* Same acknowledgement handling for the ID change bit. */
		if (data & mask & OTGSC_IDIS)
			cable->changed = false;

		/* Don't enable id interrupt if using external notifier */
		if (data & mask & OTGSC_IDIE) {
			cable->enabled = true;
			data &= ~OTGSC_IDIE;
		} else if (mask & OTGSC_IDIE) {
			cable->enabled = false;
		}
	}

	/* Status bits are write-1-to-clear, so always allow them through. */
	hw_write(ci, OP_OTGSC, mask | OTGSC_INT_STATUS_BITS, data);
}
/**
* ci_otg_role - pick role based on ID pin state
* @ci: the controller
*/
/* ID pin high (floating) means B-device/gadget; low means A-device/host. */
enum ci_role ci_otg_role(struct ci_hdrc *ci)
{
	if (hw_read_otgsc(ci, OTGSC_ID))
		return CI_ROLE_GADGET;

	return CI_ROLE_HOST;
}
/*
 * Propagate a B-session-valid (VBUS) edge to the gadget framework.
 * Only connect/disconnect when the hardware state disagrees with the
 * cached vbus_active state; no-op for non-OTG controllers.
 */
void ci_handle_vbus_change(struct ci_hdrc *ci)
{
	if (!ci->is_otg)
		return;

	if (hw_read_otgsc(ci, OTGSC_BSV) && !ci->vbus_active)
		usb_gadget_vbus_connect(&ci->gadget);
	else if (!hw_read_otgsc(ci, OTGSC_BSV) && ci->vbus_active)
		usb_gadget_vbus_disconnect(&ci->gadget);
}
/**
* hw_wait_vbus_lower_bsv - When we switch to device mode, the vbus value
* should be lower than OTGSC_BSV before connecting
* to host.
*
* @ci: the controller
*
* This function returns an error code if timeout
*/
/*
 * Poll until VBUS drops below the B-session-valid threshold, giving up
 * after five seconds.  Returns 0 on success, -ETIMEDOUT otherwise.
 */
static int hw_wait_vbus_lower_bsv(struct ci_hdrc *ci)
{
	u32 mask = OTGSC_BSV;
	unsigned long deadline = jiffies + msecs_to_jiffies(5000);

	for (;;) {
		if (!hw_read_otgsc(ci, mask))
			return 0;

		if (time_after(jiffies, deadline)) {
			dev_err(ci->dev, "timeout waiting for %08x in OTGSC\n",
					mask);
			return -ETIMEDOUT;
		}

		msleep(20);
	}
}
/*
 * ci_handle_id_switch - switch between host and gadget on an ID change
 *
 * Serialized by ci->mutex.  Stops the current role, optionally waits for
 * VBUS to fall before starting gadget mode, then starts the new role and
 * replays any VBUS state that changed meanwhile.
 */
void ci_handle_id_switch(struct ci_hdrc *ci)
{
	enum ci_role role;

	mutex_lock(&ci->mutex);
	role = ci_otg_role(ci);
	if (role != ci->role) {
		dev_dbg(ci->dev, "switching from %s to %s\n",
			ci_role(ci)->name, ci->roles[role]->name);

		if (ci->vbus_active && ci->role == CI_ROLE_GADGET)
			/*
			 * vbus disconnect event is lost due to role
			 * switch occurs during system suspend.
			 */
			usb_gadget_vbus_disconnect(&ci->gadget);

		ci_role_stop(ci);

		if (role == CI_ROLE_GADGET &&
				IS_ERR(ci->platdata->vbus_extcon.edev))
			/*
			 * Wait vbus lower than OTGSC_BSV before connecting
			 * to host. If connecting status is from an external
			 * connector instead of register, we don't need to
			 * care vbus on the board, since it will not affect
			 * external connector status.
			 */
			hw_wait_vbus_lower_bsv(ci);

		ci_role_start(ci, role);
		/* vbus change may have already occurred */
		if (role == CI_ROLE_GADGET)
			ci_handle_vbus_change(ci);
	}
	mutex_unlock(&ci->mutex);
}
/**
 * ci_otg_work - process deferred OTG (VBUS/ID) events
 * @work: work struct embedded in the controller
 *
 * The IRQ handler disables the interrupt before scheduling this work;
 * every exit path re-enables it.
 */
static void ci_otg_work(struct work_struct *work)
{
	struct ci_hdrc *ci = container_of(work, struct ci_hdrc, work);

	/* in OTG FSM mode the state machine may consume the event itself */
	if (ci_otg_is_fsm_mode(ci) && !ci_otg_fsm_work(ci)) {
		enable_irq(ci->irq);
		return;
	}

	pm_runtime_get_sync(ci->dev);

	if (ci->id_event) {
		ci->id_event = false;
		ci_handle_id_switch(ci);
	}

	if (ci->b_sess_valid_event) {
		ci->b_sess_valid_event = false;
		ci_handle_vbus_change(ci);
	}

	pm_runtime_put_sync(ci->dev);

	enable_irq(ci->irq);
}
/**
* ci_hdrc_otg_init - initialize otg struct
* @ci: the controller
*/
int ci_hdrc_otg_init(struct ci_hdrc *ci)
{
INIT_WORK(&ci->work, ci_otg_work);
ci->wq = create_freezable_workqueue("ci_otg");
if (!ci->wq) {
dev_err(ci->dev, "can't create workqueue\n");
return -ENODEV;
}
if (ci_otg_is_fsm_mode(ci))
return ci_hdrc_otg_fsm_init(ci);
return 0;
}
/**
 * ci_hdrc_otg_destroy - destroy otg struct
 * @ci: the controller
 */
void ci_hdrc_otg_destroy(struct ci_hdrc *ci)
{
	if (ci->wq)
		destroy_workqueue(ci->wq);

	/* Disable all OTG irq and clear status */
	hw_write_otgsc(ci, OTGSC_INT_EN_BITS | OTGSC_INT_STATUS_BITS,
		       OTGSC_INT_STATUS_BITS);

	if (ci_otg_is_fsm_mode(ci))
		ci_hdrc_otg_fsm_remove(ci);
}
| linux-master | drivers/usb/chipidea/otg.c |
// SPDX-License-Identifier: GPL-2.0
/*
* FOTG210 UDC Driver supports Bulk transfer so far
*
* Copyright (C) 2013 Faraday Technology Corporation
*
* Author : Yuan-Hsin Chen <[email protected]>
*/
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/usb/ch9.h>
#include <linux/usb/gadget.h>
#include <linux/usb/otg.h>
#include <linux/usb/phy.h>
#include "fotg210.h"
#include "fotg210-udc.h"
#define DRIVER_DESC "FOTG210 USB Device Controller Driver"
#define DRIVER_VERSION "30-April-2013"
/* Name under which the UDC registers with the gadget core */
static const char udc_name[] = "fotg210_udc";
/* Endpoint names: ep0 is the control endpoint, ep1-ep4 are generic */
static const char * const fotg210_ep_name[] = {
	"ep0", "ep1", "ep2", "ep3", "ep4"};
/* Acknowledge interrupts by clearing @mask in the status register @offset. */
static void fotg210_ack_int(struct fotg210_udc *fotg210, u32 offset, u32 mask)
{
	u32 status = ioread32(fotg210->reg + offset);

	iowrite32(status & ~mask, fotg210->reg + offset);
}
static void fotg210_disable_fifo_int(struct fotg210_ep *ep)
{
u32 value = ioread32(ep->fotg210->reg + FOTG210_DMISGR1);
if (ep->dir_in)
value |= DMISGR1_MF_IN_INT(ep->epnum - 1);
else
value |= DMISGR1_MF_OUTSPK_INT(ep->epnum - 1);
iowrite32(value, ep->fotg210->reg + FOTG210_DMISGR1);
}
static void fotg210_enable_fifo_int(struct fotg210_ep *ep)
{
u32 value = ioread32(ep->fotg210->reg + FOTG210_DMISGR1);
if (ep->dir_in)
value &= ~DMISGR1_MF_IN_INT(ep->epnum - 1);
else
value &= ~DMISGR1_MF_OUTSPK_INT(ep->epnum - 1);
iowrite32(value, ep->fotg210->reg + FOTG210_DMISGR1);
}
static void fotg210_set_cxdone(struct fotg210_udc *fotg210)
{
u32 value = ioread32(fotg210->reg + FOTG210_DCFESR);
value |= DCFESR_CX_DONE;
iowrite32(value, fotg210->reg + FOTG210_DCFESR);
}
/*
 * Complete @req on @ep with @status: unlink it, hand it back to the
 * gadget driver, then update the endpoint's interrupt/control state.
 *
 * Must be called with the controller spinlock held; the lock is
 * dropped around the completion callback.
 */
static void fotg210_done(struct fotg210_ep *ep, struct fotg210_request *req,
			 int status)
{
	list_del_init(&req->queue);

	/* don't modify queue heads during completion callback */
	if (ep->fotg210->gadget.speed == USB_SPEED_UNKNOWN)
		req->req.status = -ESHUTDOWN;
	else
		req->req.status = status;

	/* the callback may queue new requests, so it must run unlocked */
	spin_unlock(&ep->fotg210->lock);
	usb_gadget_giveback_request(&ep->ep, &req->req);
	spin_lock(&ep->fotg210->lock);

	if (ep->epnum) {
		/* no more pending work: silence this endpoint's FIFO irq */
		if (list_empty(&ep->queue))
			fotg210_disable_fifo_int(ep);
	} else {
		/* ep0: signal the hardware the control stage is done */
		fotg210_set_cxdone(ep->fotg210);
	}
}
/* Associate @epnum with a FIFO in both directions and enable the FIFO. */
static void fotg210_fifo_ep_mapping(struct fotg210_ep *ep, u32 epnum,
				    u32 dir_in)
{
	void __iomem *base = ep->fotg210->reg;
	u32 v;

	/*
	 * The hardware requires the ep->fifo and fifo->ep associations
	 * to be programmed separately.
	 */

	/* associate a FIFO with the endpoint */
	v = ioread32(base + FOTG210_EPMAP);
	v &= ~EPMAP_FIFONOMSK(epnum, dir_in);
	v |= EPMAP_FIFONO(epnum, dir_in);
	iowrite32(v, base + FOTG210_EPMAP);

	/* associate the endpoint with the FIFO */
	v = ioread32(base + FOTG210_FIFOMAP);
	v &= ~FIFOMAP_EPNOMSK(epnum);
	v |= FIFOMAP_EPNO(epnum);
	iowrite32(v, base + FOTG210_FIFOMAP);

	/* finally switch the FIFO on */
	v = ioread32(base + FOTG210_FIFOCF);
	v |= FIFOCF_FIFO_EN(epnum - 1);
	iowrite32(v, base + FOTG210_FIFOCF);
}
/* Program the transfer direction of the FIFO backing @epnum. */
static void fotg210_set_fifo_dir(struct fotg210_ep *ep, u32 epnum, u32 dir_in)
{
	void __iomem *addr = ep->fotg210->reg + FOTG210_FIFOMAP;
	u32 bit = dir_in ? FIFOMAP_DIRIN(epnum - 1) : FIFOMAP_DIROUT(epnum - 1);

	iowrite32(ioread32(addr) | bit, addr);
}
/* Program the USB transfer type (bulk/int/iso) for @epnum's FIFO. */
static void fotg210_set_tfrtype(struct fotg210_ep *ep, u32 epnum, u32 type)
{
	void __iomem *addr = ep->fotg210->reg + FOTG210_FIFOCF;

	iowrite32(ioread32(addr) | FIFOCF_TYPE(type, epnum - 1), addr);
}
/* Program the max packet size for @epnum in the direction's MPS register. */
static void fotg210_set_mps(struct fotg210_ep *ep, u32 epnum, u32 mps,
			    u32 dir_in)
{
	void __iomem *addr = ep->fotg210->reg +
		(dir_in ? FOTG210_INEPMPSR(epnum) : FOTG210_OUTEPMPSR(epnum));

	iowrite32(ioread32(addr) | INOUTEPMPSR_MPS(mps), addr);
}
/*
 * Program all hardware state for @ep (direction, type, MPS, FIFO
 * mapping) and publish it in the device's endpoint table.
 * Always returns 0.
 */
static int fotg210_config_ep(struct fotg210_ep *ep,
			     const struct usb_endpoint_descriptor *desc)
{
	struct fotg210_udc *fotg210 = ep->fotg210;

	fotg210_set_fifo_dir(ep, ep->epnum, ep->dir_in);
	fotg210_set_tfrtype(ep, ep->epnum, ep->type);
	fotg210_set_mps(ep, ep->epnum, ep->ep.maxpacket, ep->dir_in);
	fotg210_fifo_ep_mapping(ep, ep->epnum, ep->dir_in);

	/* make the endpoint reachable by number for irq/setup handling */
	fotg210->ep[ep->epnum] = ep;

	return 0;
}
/* usb_ep_ops .enable: cache descriptor properties and program hardware. */
static int fotg210_ep_enable(struct usb_ep *_ep,
			     const struct usb_endpoint_descriptor *desc)
{
	struct fotg210_ep *ep = container_of(_ep, struct fotg210_ep, ep);

	ep->desc = desc;
	ep->epnum = usb_endpoint_num(desc);
	ep->type = usb_endpoint_type(desc);
	ep->dir_in = usb_endpoint_dir_in(desc);
	ep->ep.maxpacket = usb_endpoint_maxp(desc);

	return fotg210_config_ep(ep, desc);
}
/* Reset the data toggle sequence for endpoint @epnum. */
static void fotg210_reset_tseq(struct fotg210_udc *fotg210, u8 epnum)
{
	struct fotg210_ep *ep = fotg210->ep[epnum];
	void __iomem *reg = fotg210->reg + (ep->dir_in ?
			FOTG210_INEPMPSR(epnum) : FOTG210_OUTEPMPSR(epnum));
	u32 v;

	/*
	 * The controller does not self-clear INOUTEPMPSR_RESET_TSEQ;
	 * software has to pulse the bit manually.
	 */
	v = ioread32(reg);
	iowrite32(v | INOUTEPMPSR_RESET_TSEQ, reg);

	v = ioread32(reg);
	iowrite32(v & ~INOUTEPMPSR_RESET_TSEQ, reg);
}
/*
 * Tear down software state for @ep when it is disabled.
 *
 * Reset the hardware data toggle *before* clearing ->epnum: the
 * previous code zeroed ->epnum first and then passed it to
 * fotg210_reset_tseq(), so the toggle of ep0 was reset instead of
 * the endpoint actually being released.
 */
static int fotg210_ep_release(struct fotg210_ep *ep)
{
	if (!ep->epnum)
		return 0;

	fotg210_reset_tseq(ep->fotg210, ep->epnum);

	ep->epnum = 0;
	ep->stall = 0;
	ep->wedged = 0;

	return 0;
}
/*
 * usb_ep_ops .disable: fail every pending request with -ECONNRESET,
 * then release the endpoint's software state.
 */
static int fotg210_ep_disable(struct usb_ep *_ep)
{
	struct fotg210_ep *ep;
	struct fotg210_request *req;
	unsigned long flags;

	BUG_ON(!_ep);

	ep = container_of(_ep, struct fotg210_ep, ep);

	/*
	 * NOTE(review): the list head is tested and read outside the
	 * lock; the lock only covers each individual completion -
	 * confirm this cannot race with the irq handler.
	 */
	while (!list_empty(&ep->queue)) {
		req = list_entry(ep->queue.next,
			struct fotg210_request, queue);
		spin_lock_irqsave(&ep->fotg210->lock, flags);
		fotg210_done(ep, req, -ECONNRESET);
		spin_unlock_irqrestore(&ep->fotg210->lock, flags);
	}

	return fotg210_ep_release(ep);
}
/* usb_ep_ops .alloc_request: allocate a driver-private request wrapper. */
static struct usb_request *fotg210_ep_alloc_request(struct usb_ep *_ep,
						    gfp_t gfp_flags)
{
	struct fotg210_request *req = kzalloc(sizeof(*req), gfp_flags);

	if (!req)
		return NULL;

	INIT_LIST_HEAD(&req->queue);

	return &req->req;
}
/* usb_ep_ops .free_request: release a request from .alloc_request. */
static void fotg210_ep_free_request(struct usb_ep *_ep,
				    struct usb_request *_req)
{
	kfree(container_of(_req, struct fotg210_request, req));
}
/*
 * Program and start one DMA transfer of @len bytes at bus address @d
 * for @ep. Direction follows ep->dir_in; ep0 uses the CX FIFO.
 */
static void fotg210_enable_dma(struct fotg210_ep *ep,
			       dma_addr_t d, u32 len)
{
	u32 value;
	struct fotg210_udc *fotg210 = ep->fotg210;

	/* set transfer length and direction */
	value = ioread32(fotg210->reg + FOTG210_DMACPSR1);
	value &= ~(DMACPSR1_DMA_LEN(0xFFFF) | DMACPSR1_DMA_TYPE(1));
	value |= DMACPSR1_DMA_LEN(len) | DMACPSR1_DMA_TYPE(ep->dir_in);
	iowrite32(value, fotg210->reg + FOTG210_DMACPSR1);

	/* set device DMA target FIFO number */
	value = ioread32(fotg210->reg + FOTG210_DMATFNR);
	if (ep->epnum)
		value |= DMATFNR_ACC_FN(ep->epnum - 1);
	else
		value |= DMATFNR_ACC_CXF;
	iowrite32(value, fotg210->reg + FOTG210_DMATFNR);

	/* set DMA memory address */
	iowrite32(d, fotg210->reg + FOTG210_DMACPSR2);

	/* enable MDMA_EROR and MDMA_CMPLT interrupt */
	value = ioread32(fotg210->reg + FOTG210_DMISGR2);
	value &= ~(DMISGR2_MDMA_CMPLT | DMISGR2_MDMA_ERROR);
	iowrite32(value, fotg210->reg + FOTG210_DMISGR2);

	/* start DMA */
	value = ioread32(fotg210->reg + FOTG210_DMACPSR1);
	value |= DMACPSR1_DMA_START;
	iowrite32(value, fotg210->reg + FOTG210_DMACPSR1);
}
/* Detach the DMA engine from all FIFOs. */
static void fotg210_disable_dma(struct fotg210_ep *ep)
{
	iowrite32(DMATFNR_DISDMA, ep->fotg210->reg + FOTG210_DMATFNR);
}
/*
 * Busy-wait for DMA completion. On USB reset or DMA error the transfer
 * is aborted and the affected FIFO flushed instead.
 *
 * NOTE(review): there is no timeout here; if the hardware never raises
 * DMA_CMPLT/DMA_ERROR/USBRST this loop spins forever - confirm.
 */
static void fotg210_wait_dma_done(struct fotg210_ep *ep)
{
	u32 value;

	do {
		value = ioread32(ep->fotg210->reg + FOTG210_DISGR2);
		if ((value & DISGR2_USBRST_INT) ||
		    (value & DISGR2_DMA_ERROR))
			goto dma_reset;
	} while (!(value & DISGR2_DMA_CMPLT));

	/* acknowledge the completion interrupt */
	fotg210_ack_int(ep->fotg210, FOTG210_DISGR2, DISGR2_DMA_CMPLT);
	return;

dma_reset:
	/* abort the in-flight DMA transfer */
	value = ioread32(ep->fotg210->reg + FOTG210_DMACPSR1);
	value |= DMACPSR1_DMA_ABORT;
	iowrite32(value, ep->fotg210->reg + FOTG210_DMACPSR1);

	/* reset fifo */
	if (ep->epnum) {
		value = ioread32(ep->fotg210->reg +
				FOTG210_FIBCR(ep->epnum - 1));
		value |= FIBCR_FFRST;
		iowrite32(value, ep->fotg210->reg +
				FOTG210_FIBCR(ep->epnum - 1));
	} else {
		value = ioread32(ep->fotg210->reg + FOTG210_DCFESR);
		value |= DCFESR_CX_CLR;
		iowrite32(value, ep->fotg210->reg + FOTG210_DCFESR);
	}
}
static void fotg210_start_dma(struct fotg210_ep *ep,
struct fotg210_request *req)
{
struct device *dev = &ep->fotg210->gadget.dev;
dma_addr_t d;
u8 *buffer;
u32 length;
if (ep->epnum) {
if (ep->dir_in) {
buffer = req->req.buf;
length = req->req.length;
} else {
buffer = req->req.buf + req->req.actual;
length = ioread32(ep->fotg210->reg +
FOTG210_FIBCR(ep->epnum - 1)) & FIBCR_BCFX;
if (length > req->req.length - req->req.actual)
length = req->req.length - req->req.actual;
}
} else {
buffer = req->req.buf + req->req.actual;
if (req->req.length - req->req.actual > ep->ep.maxpacket)
length = ep->ep.maxpacket;
else
length = req->req.length - req->req.actual;
}
d = dma_map_single(dev, buffer, length,
ep->dir_in ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
if (dma_mapping_error(dev, d)) {
pr_err("dma_mapping_error\n");
return;
}
fotg210_enable_dma(ep, d, length);
/* check if dma is done */
fotg210_wait_dma_done(ep);
fotg210_disable_dma(ep);
/* update actual transfer length */
req->req.actual += length;
dma_unmap_single(dev, d, length, DMA_TO_DEVICE);
}
/*
 * Start a control (ep0) request. Zero-length requests complete
 * immediately; IN data is pushed synchronously via DMA; OUT data is
 * deferred until the CX OUT interrupt (unmasked here) fires.
 */
static void fotg210_ep0_queue(struct fotg210_ep *ep,
			      struct fotg210_request *req)
{
	if (!req->req.length) {
		fotg210_done(ep, req, 0);
		return;
	}
	if (ep->dir_in) { /* if IN */
		fotg210_start_dma(ep, req);
		if (req->req.length == req->req.actual)
			fotg210_done(ep, req, 0);
	} else { /* OUT */
		u32 value = ioread32(ep->fotg210->reg + FOTG210_DMISGR0);

		/* unmask the CX OUT interrupt; data will arrive later */
		value &= ~DMISGR0_MCX_OUT_INT;
		iowrite32(value, ep->fotg210->reg + FOTG210_DMISGR0);
	}
}
/*
 * usb_ep_ops .queue: enqueue @_req. ep0 requests start immediately;
 * for data endpoints the FIFO interrupt is enabled when the queue
 * transitions from empty (and the endpoint is not stalled).
 *
 * Returns 0, or -ESHUTDOWN if the bus speed is not yet known.
 */
static int fotg210_ep_queue(struct usb_ep *_ep, struct usb_request *_req,
			    gfp_t gfp_flags)
{
	struct fotg210_ep *ep;
	struct fotg210_request *req;
	unsigned long flags;
	int request = 0;

	ep = container_of(_ep, struct fotg210_ep, ep);
	req = container_of(_req, struct fotg210_request, req);

	if (ep->fotg210->gadget.speed == USB_SPEED_UNKNOWN)
		return -ESHUTDOWN;

	spin_lock_irqsave(&ep->fotg210->lock, flags);

	/* remember whether the queue was empty before we add to it */
	if (list_empty(&ep->queue))
		request = 1;

	list_add_tail(&req->queue, &ep->queue);

	req->req.actual = 0;
	req->req.status = -EINPROGRESS;

	if (!ep->epnum) /* ep0 */
		fotg210_ep0_queue(ep, req);
	else if (request && !ep->stall)
		fotg210_enable_fifo_int(ep);

	spin_unlock_irqrestore(&ep->fotg210->lock, flags);

	return 0;
}
static int fotg210_ep_dequeue(struct usb_ep *_ep, struct usb_request *_req)
{
struct fotg210_ep *ep;
struct fotg210_request *req;
unsigned long flags;
ep = container_of(_ep, struct fotg210_ep, ep);
req = container_of(_req, struct fotg210_request, req);
spin_lock_irqsave(&ep->fotg210->lock, flags);
if (!list_empty(&ep->queue))
fotg210_done(ep, req, -ECONNRESET);
spin_unlock_irqrestore(&ep->fotg210->lock, flags);
return 0;
}
/*
 * Set the STALL handshake on data endpoint @ep. For IN endpoints the
 * FIFO is drained first so already-queued data is not discarded.
 *
 * NOTE(review): the FIFO-empty poll has no timeout - confirm the
 * hardware always drains.
 */
static void fotg210_set_epnstall(struct fotg210_ep *ep)
{
	struct fotg210_udc *fotg210 = ep->fotg210;
	u32 value;
	void __iomem *reg;

	/* check if IN FIFO is empty before stall */
	if (ep->dir_in) {
		do {
			value = ioread32(fotg210->reg + FOTG210_DCFESR);
		} while (!(value & DCFESR_FIFO_EMPTY(ep->epnum - 1)));
	}

	reg = (ep->dir_in) ?
		fotg210->reg + FOTG210_INEPMPSR(ep->epnum) :
		fotg210->reg + FOTG210_OUTEPMPSR(ep->epnum);
	value = ioread32(reg);
	value |= INOUTEPMPSR_STL_EP;
	iowrite32(value, reg);
}
/* Clear the STALL handshake bit on data endpoint @ep. */
static void fotg210_clear_epnstall(struct fotg210_ep *ep)
{
	struct fotg210_udc *fotg210 = ep->fotg210;
	void __iomem *reg = fotg210->reg + (ep->dir_in ?
			FOTG210_INEPMPSR(ep->epnum) :
			FOTG210_OUTEPMPSR(ep->epnum));

	iowrite32(ioread32(reg) & ~INOUTEPMPSR_STL_EP, reg);
}
/*
 * Common implementation for .set_halt and .set_wedge.
 * @value: nonzero to stall, zero to clear the stall.
 * @wedge: nonzero to also mark the endpoint wedged.
 * Always returns 0.
 */
static int fotg210_set_halt_and_wedge(struct usb_ep *_ep, int value, int wedge)
{
	struct fotg210_ep *ep;
	struct fotg210_udc *fotg210;
	unsigned long flags;

	ep = container_of(_ep, struct fotg210_ep, ep);

	fotg210 = ep->fotg210;

	spin_lock_irqsave(&ep->fotg210->lock, flags);

	if (value) {
		fotg210_set_epnstall(ep);
		ep->stall = 1;
		if (wedge)
			ep->wedged = 1;
	} else {
		/* restart the data toggle before clearing the stall */
		fotg210_reset_tseq(fotg210, ep->epnum);
		fotg210_clear_epnstall(ep);
		ep->stall = 0;
		ep->wedged = 0;
		/* resume servicing any requests queued while stalled */
		if (!list_empty(&ep->queue))
			fotg210_enable_fifo_int(ep);
	}

	spin_unlock_irqrestore(&ep->fotg210->lock, flags);
	return 0;
}
/* usb_ep_ops .set_halt: stall/unstall the endpoint without wedging it. */
static int fotg210_ep_set_halt(struct usb_ep *_ep, int value)
{
	return fotg210_set_halt_and_wedge(_ep, value, 0);
}
/* usb_ep_ops .set_wedge: stall the endpoint and mark it wedged. */
static int fotg210_ep_set_wedge(struct usb_ep *_ep)
{
	return fotg210_set_halt_and_wedge(_ep, 1, 1);
}
/* usb_ep_ops .fifo_flush: no-op; FIFO flushing is not implemented here. */
static void fotg210_ep_fifo_flush(struct usb_ep *_ep)
{
}
/* Endpoint operations exported to the gadget core */
static const struct usb_ep_ops fotg210_ep_ops = {
	.enable = fotg210_ep_enable,
	.disable = fotg210_ep_disable,
	.alloc_request = fotg210_ep_alloc_request,
	.free_request = fotg210_ep_free_request,
	.queue = fotg210_ep_queue,
	.dequeue = fotg210_ep_dequeue,
	.set_halt = fotg210_ep_set_halt,
	.fifo_flush = fotg210_ep_fifo_flush,
	.set_wedge = fotg210_ep_set_wedge,
};
static void fotg210_clear_tx0byte(struct fotg210_udc *fotg210)
{
u32 value = ioread32(fotg210->reg + FOTG210_TX0BYTE);
value &= ~(TX0BYTE_EP1 | TX0BYTE_EP2 | TX0BYTE_EP3
| TX0BYTE_EP4);
iowrite32(value, fotg210->reg + FOTG210_TX0BYTE);
}
static void fotg210_clear_rx0byte(struct fotg210_udc *fotg210)
{
u32 value = ioread32(fotg210->reg + FOTG210_RX0BYTE);
value &= ~(RX0BYTE_EP1 | RX0BYTE_EP2 | RX0BYTE_EP3
| RX0BYTE_EP4);
iowrite32(value, fotg210->reg + FOTG210_RX0BYTE);
}
/* read 8-byte setup packet only */
static void fotg210_rdsetupp(struct fotg210_udc *fotg210,
			     u8 *buffer)
{
	u8 *tmp = buffer;
	u32 data;
	int i;

	/* route PIO access to the control transfer (CX) FIFO */
	iowrite32(DMATFNR_ACC_CXF, fotg210->reg + FOTG210_DMATFNR);

	/*
	 * A SETUP packet is exactly 8 bytes, i.e. two 32-bit reads.
	 * The previous tail-handling switch for lengths not divisible
	 * by four was unreachable dead code (length was fixed at 8)
	 * and has been removed. Bytes are stored in little-endian
	 * order, matching the USB wire format of the packet.
	 */
	for (i = 0; i < 2; i++) {
		data = ioread32(fotg210->reg + FOTG210_CXPORT);
		tmp[0] = data & 0xFF;
		tmp[1] = (data >> 8) & 0xFF;
		tmp[2] = (data >> 16) & 0xFF;
		tmp[3] = (data >> 24) & 0xFF;
		tmp += 4;
	}

	iowrite32(DMATFNR_DISDMA, fotg210->reg + FOTG210_DMATFNR);
}
static void fotg210_set_configuration(struct fotg210_udc *fotg210)
{
u32 value = ioread32(fotg210->reg + FOTG210_DAR);
value |= DAR_AFT_CONF;
iowrite32(value, fotg210->reg + FOTG210_DAR);
}
/*
 * Program the 7-bit USB device address.
 *
 * Fix: clear the previous address field before OR-ing in the new one;
 * the old code only OR'ed, so stale bits from an earlier SET_ADDRESS
 * could corrupt the programmed address.
 */
static void fotg210_set_dev_addr(struct fotg210_udc *fotg210, u32 addr)
{
	u32 value = ioread32(fotg210->reg + FOTG210_DAR);

	value &= ~0x7F;
	value |= (addr & 0x7F);
	iowrite32(value, fotg210->reg + FOTG210_DAR);
}
static void fotg210_set_cxstall(struct fotg210_udc *fotg210)
{
u32 value = ioread32(fotg210->reg + FOTG210_DCFESR);
value |= DCFESR_CX_STL;
iowrite32(value, fotg210->reg + FOTG210_DCFESR);
}
/* Stall ep0 to report an unsupported or malformed control request. */
static void fotg210_request_error(struct fotg210_udc *fotg210)
{
	fotg210_set_cxstall(fotg210);
	pr_err("request error!!\n");
}
static void fotg210_set_address(struct fotg210_udc *fotg210,
struct usb_ctrlrequest *ctrl)
{
if (le16_to_cpu(ctrl->wValue) >= 0x0100) {
fotg210_request_error(fotg210);
} else {
fotg210_set_dev_addr(fotg210, le16_to_cpu(ctrl->wValue));
fotg210_set_cxdone(fotg210);
}
}
static void fotg210_set_feature(struct fotg210_udc *fotg210,
struct usb_ctrlrequest *ctrl)
{
switch (ctrl->bRequestType & USB_RECIP_MASK) {
case USB_RECIP_DEVICE:
fotg210_set_cxdone(fotg210);
break;
case USB_RECIP_INTERFACE:
fotg210_set_cxdone(fotg210);
break;
case USB_RECIP_ENDPOINT: {
u8 epnum;
epnum = le16_to_cpu(ctrl->wIndex) & USB_ENDPOINT_NUMBER_MASK;
if (epnum)
fotg210_set_epnstall(fotg210->ep[epnum]);
else
fotg210_set_cxstall(fotg210);
fotg210_set_cxdone(fotg210);
}
break;
default:
fotg210_request_error(fotg210);
break;
}
}
static void fotg210_clear_feature(struct fotg210_udc *fotg210,
struct usb_ctrlrequest *ctrl)
{
struct fotg210_ep *ep =
fotg210->ep[ctrl->wIndex & USB_ENDPOINT_NUMBER_MASK];
switch (ctrl->bRequestType & USB_RECIP_MASK) {
case USB_RECIP_DEVICE:
fotg210_set_cxdone(fotg210);
break;
case USB_RECIP_INTERFACE:
fotg210_set_cxdone(fotg210);
break;
case USB_RECIP_ENDPOINT:
if (ctrl->wIndex & USB_ENDPOINT_NUMBER_MASK) {
if (ep->wedged) {
fotg210_set_cxdone(fotg210);
break;
}
if (ep->stall)
fotg210_set_halt_and_wedge(&ep->ep, 0, 0);
}
fotg210_set_cxdone(fotg210);
break;
default:
fotg210_request_error(fotg210);
break;
}
}
/* Return 1 if @ep currently has its STALL bit set, else 0. */
static int fotg210_is_epnstall(struct fotg210_ep *ep)
{
	struct fotg210_udc *fotg210 = ep->fotg210;
	void __iomem *reg = fotg210->reg + (ep->dir_in ?
			FOTG210_INEPMPSR(ep->epnum) :
			FOTG210_OUTEPMPSR(ep->epnum));

	return (ioread32(reg) & INOUTEPMPSR_STL_EP) ? 1 : 0;
}
/* For EP0 requests triggered by this driver (currently GET_STATUS response) */
static void fotg210_ep0_complete(struct usb_ep *_ep, struct usb_request *req)
{
	struct fotg210_ep *ep = container_of(_ep, struct fotg210_ep, ep);

	if (req->status || req->actual != req->length)
		dev_warn(&ep->fotg210->gadget.dev,
			 "EP0 request failed: %d\n", req->status);
}
static void fotg210_get_status(struct fotg210_udc *fotg210,
struct usb_ctrlrequest *ctrl)
{
u8 epnum;
switch (ctrl->bRequestType & USB_RECIP_MASK) {
case USB_RECIP_DEVICE:
fotg210->ep0_data = cpu_to_le16(1 << USB_DEVICE_SELF_POWERED);
break;
case USB_RECIP_INTERFACE:
fotg210->ep0_data = cpu_to_le16(0);
break;
case USB_RECIP_ENDPOINT:
epnum = ctrl->wIndex & USB_ENDPOINT_NUMBER_MASK;
if (epnum)
fotg210->ep0_data =
cpu_to_le16(fotg210_is_epnstall(fotg210->ep[epnum])
<< USB_ENDPOINT_HALT);
else
fotg210_request_error(fotg210);
break;
default:
fotg210_request_error(fotg210);
return; /* exit */
}
fotg210->ep0_req->buf = &fotg210->ep0_data;
fotg210->ep0_req->length = 2;
spin_unlock(&fotg210->lock);
fotg210_ep_queue(fotg210->gadget.ep0, fotg210->ep0_req, GFP_ATOMIC);
spin_lock(&fotg210->lock);
}
/*
 * Read and pre-process one SETUP packet.
 *
 * Returns nonzero when the request must also be forwarded to the
 * gadget driver's ->setup() callback, 0 when fully handled here.
 */
static int fotg210_setup_packet(struct fotg210_udc *fotg210,
				struct usb_ctrlrequest *ctrl)
{
	u8 *p = (u8 *)ctrl;
	u8 ret = 0;

	/* fetch the 8 SETUP bytes from the CX FIFO */
	fotg210_rdsetupp(fotg210, p);

	/* remember the direction of the upcoming data/status stage */
	fotg210->ep[0]->dir_in = ctrl->bRequestType & USB_DIR_IN;

	/* first SETUP after enumeration: latch the negotiated bus speed */
	if (fotg210->gadget.speed == USB_SPEED_UNKNOWN) {
		u32 value = ioread32(fotg210->reg + FOTG210_DMCR);
		fotg210->gadget.speed = value & DMCR_HS_EN ?
				USB_SPEED_HIGH : USB_SPEED_FULL;
	}

	/* check request */
	if ((ctrl->bRequestType & USB_TYPE_MASK) == USB_TYPE_STANDARD) {
		switch (ctrl->bRequest) {
		case USB_REQ_GET_STATUS:
			fotg210_get_status(fotg210, ctrl);
			break;
		case USB_REQ_CLEAR_FEATURE:
			fotg210_clear_feature(fotg210, ctrl);
			break;
		case USB_REQ_SET_FEATURE:
			fotg210_set_feature(fotg210, ctrl);
			break;
		case USB_REQ_SET_ADDRESS:
			fotg210_set_address(fotg210, ctrl);
			break;
		case USB_REQ_SET_CONFIGURATION:
			fotg210_set_configuration(fotg210);
			/* the gadget driver still needs to see this one */
			ret = 1;
			break;
		default:
			ret = 1;
			break;
		}
	} else {
		/* class/vendor requests always go to the gadget driver */
		ret = 1;
	}

	return ret;
}
/* CX OUT interrupt: receive the next chunk of an ep0 OUT request. */
static void fotg210_ep0out(struct fotg210_udc *fotg210)
{
	struct fotg210_ep *ep = fotg210->ep[0];

	if (!list_empty(&ep->queue) && !ep->dir_in) {
		struct fotg210_request *req;

		req = list_first_entry(&ep->queue,
			struct fotg210_request, queue);

		if (req->req.length)
			fotg210_start_dma(ep, req);

		/* a short (or final) packet completes the request */
		if ((req->req.length - req->req.actual) < ep->ep.maxpacket)
			fotg210_done(ep, req, 0);
	} else {
		pr_err("%s : empty queue\n", __func__);
	}
}
/* CX IN interrupt: send the next chunk of an ep0 IN request. */
static void fotg210_ep0in(struct fotg210_udc *fotg210)
{
	struct fotg210_ep *ep = fotg210->ep[0];

	if ((!list_empty(&ep->queue)) && (ep->dir_in)) {
		struct fotg210_request *req;

		req = list_entry(ep->queue.next,
				struct fotg210_request, queue);

		if (req->req.length)
			fotg210_start_dma(ep, req);

		if (req->req.actual == req->req.length)
			fotg210_done(ep, req, 0);
	} else {
		/* nothing queued: just finish the control status stage */
		fotg210_set_cxdone(fotg210);
	}
}
/* IN FIFO interrupt: push the head request's data and complete it. */
static void fotg210_in_fifo_handler(struct fotg210_ep *ep)
{
	struct fotg210_request *req = list_first_entry(&ep->queue,
					struct fotg210_request, queue);

	if (req->req.length)
		fotg210_start_dma(ep, req);

	fotg210_done(ep, req, 0);
}
/* OUT/short-packet FIFO interrupt: drain data into the head request. */
static void fotg210_out_fifo_handler(struct fotg210_ep *ep)
{
	struct fotg210_request *req = list_entry(ep->queue.next,
						 struct fotg210_request, queue);
	/* sample DISGR1 to detect a short packet for this FIFO */
	int disgr1 = ioread32(ep->fotg210->reg + FOTG210_DISGR1);

	fotg210_start_dma(ep, req);

	/* Complete the request when it's full or a short packet arrived.
	 * Like other drivers, short_not_ok isn't handled.
	 */
	if (req->req.length == req->req.actual ||
	    (disgr1 & DISGR1_SPK_INT(ep->epnum - 1)))
		fotg210_done(ep, req, 0);
}
/*
 * Top-level interrupt handler.
 * Group 2: bus events (reset/suspend/resume, zero-byte latches, DMA error).
 * Group 0: ep0/control transfer events.
 * Group 1: data endpoint FIFO (IN/OUT/short-packet) events.
 */
static irqreturn_t fotg210_irq(int irq, void *_fotg210)
{
	struct fotg210_udc *fotg210 = _fotg210;
	u32 int_grp = ioread32(fotg210->reg + FOTG210_DIGR);
	u32 int_msk = ioread32(fotg210->reg + FOTG210_DMIGR);

	/* only service unmasked interrupt groups */
	int_grp &= ~int_msk;

	spin_lock(&fotg210->lock);

	if (int_grp & DIGR_INT_G2) {
		void __iomem *reg = fotg210->reg + FOTG210_DISGR2;
		u32 int_grp2 = ioread32(reg);
		u32 int_msk2 = ioread32(fotg210->reg + FOTG210_DMISGR2);

		int_grp2 &= ~int_msk2;

		if (int_grp2 & DISGR2_USBRST_INT) {
			usb_gadget_udc_reset(&fotg210->gadget,
					     fotg210->driver);
			fotg210_ack_int(fotg210, FOTG210_DISGR2, DISGR2_USBRST_INT);
			pr_info("fotg210 udc reset\n");
		}
		if (int_grp2 & DISGR2_SUSP_INT) {
			fotg210_ack_int(fotg210, FOTG210_DISGR2, DISGR2_SUSP_INT);
			pr_info("fotg210 udc suspend\n");
		}
		if (int_grp2 & DISGR2_RESM_INT) {
			fotg210_ack_int(fotg210, FOTG210_DISGR2, DISGR2_RESM_INT);
			pr_info("fotg210 udc resume\n");
		}
		if (int_grp2 & DISGR2_ISO_SEQ_ERR_INT) {
			fotg210_ack_int(fotg210, FOTG210_DISGR2, DISGR2_ISO_SEQ_ERR_INT);
			pr_info("fotg210 iso sequence error\n");
		}
		if (int_grp2 & DISGR2_ISO_SEQ_ABORT_INT) {
			fotg210_ack_int(fotg210, FOTG210_DISGR2, DISGR2_ISO_SEQ_ABORT_INT);
			pr_info("fotg210 iso sequence abort\n");
		}
		if (int_grp2 & DISGR2_TX0BYTE_INT) {
			fotg210_clear_tx0byte(fotg210);
			fotg210_ack_int(fotg210, FOTG210_DISGR2, DISGR2_TX0BYTE_INT);
			pr_info("fotg210 transferred 0 byte\n");
		}
		if (int_grp2 & DISGR2_RX0BYTE_INT) {
			fotg210_clear_rx0byte(fotg210);
			fotg210_ack_int(fotg210, FOTG210_DISGR2, DISGR2_RX0BYTE_INT);
			pr_info("fotg210 received 0 byte\n");
		}
		if (int_grp2 & DISGR2_DMA_ERROR) {
			fotg210_ack_int(fotg210, FOTG210_DISGR2, DISGR2_DMA_ERROR);
		}
	}

	if (int_grp & DIGR_INT_G0) {
		void __iomem *reg = fotg210->reg + FOTG210_DISGR0;
		u32 int_grp0 = ioread32(reg);
		u32 int_msk0 = ioread32(fotg210->reg + FOTG210_DMISGR0);
		struct usb_ctrlrequest ctrl;

		int_grp0 &= ~int_msk0;

		/* the highest priority in this source register */
		if (int_grp0 & DISGR0_CX_COMABT_INT) {
			fotg210_ack_int(fotg210, FOTG210_DISGR0, DISGR0_CX_COMABT_INT);
			pr_info("fotg210 CX command abort\n");
		}
		if (int_grp0 & DISGR0_CX_SETUP_INT) {
			/* the gadget driver's ->setup() runs unlocked */
			if (fotg210_setup_packet(fotg210, &ctrl)) {
				spin_unlock(&fotg210->lock);
				if (fotg210->driver->setup(&fotg210->gadget,
							   &ctrl) < 0)
					fotg210_set_cxstall(fotg210);
				spin_lock(&fotg210->lock);
			}
		}
		if (int_grp0 & DISGR0_CX_COMEND_INT)
			pr_info("fotg210 cmd end\n");

		if (int_grp0 & DISGR0_CX_IN_INT)
			fotg210_ep0in(fotg210);

		if (int_grp0 & DISGR0_CX_OUT_INT)
			fotg210_ep0out(fotg210);

		if (int_grp0 & DISGR0_CX_COMFAIL_INT) {
			fotg210_set_cxstall(fotg210);
			pr_info("fotg210 ep0 fail\n");
		}
	}

	if (int_grp & DIGR_INT_G1) {
		void __iomem *reg = fotg210->reg + FOTG210_DISGR1;
		u32 int_grp1 = ioread32(reg);
		u32 int_msk1 = ioread32(fotg210->reg + FOTG210_DMISGR1);
		int fifo;

		int_grp1 &= ~int_msk1;

		/* dispatch per-FIFO events to their mapped endpoints */
		for (fifo = 0; fifo < FOTG210_MAX_FIFO_NUM; fifo++) {
			if (int_grp1 & DISGR1_IN_INT(fifo))
				fotg210_in_fifo_handler(fotg210->ep[fifo + 1]);

			if ((int_grp1 & DISGR1_OUT_INT(fifo)) ||
			    (int_grp1 & DISGR1_SPK_INT(fifo)))
				fotg210_out_fifo_handler(fotg210->ep[fifo + 1]);
		}
	}

	spin_unlock(&fotg210->lock);

	return IRQ_HANDLED;
}
static void fotg210_disable_unplug(struct fotg210_udc *fotg210)
{
u32 reg = ioread32(fotg210->reg + FOTG210_PHYTMSR);
reg &= ~PHYTMSR_UNPLUG;
iowrite32(reg, fotg210->reg + FOTG210_PHYTMSR);
}
/*
 * usb_gadget_ops .udc_start: bind a gadget driver, optionally attach
 * to the PHY as a peripheral, then enable the chip and its interrupts.
 * Always returns 0 (a failed PHY bind is only logged).
 */
static int fotg210_udc_start(struct usb_gadget *g,
			     struct usb_gadget_driver *driver)
{
	struct fotg210_udc *fotg210 = gadget_to_fotg210(g);
	u32 value;
	int ret;

	/* hook up the driver */
	fotg210->driver = driver;
	fotg210->gadget.dev.of_node = fotg210->dev->of_node;
	fotg210->gadget.speed = USB_SPEED_UNKNOWN;

	dev_info(fotg210->dev, "bound driver %s\n", driver->driver.name);

	/* if a transceiver is present, register as its peripheral */
	if (!IS_ERR_OR_NULL(fotg210->phy)) {
		ret = otg_set_peripheral(fotg210->phy->otg,
					 &fotg210->gadget);
		if (ret)
			dev_err(fotg210->dev, "can't bind to phy\n");
	}

	/* chip enable */
	value = ioread32(fotg210->reg + FOTG210_DMCR);
	value |= DMCR_CHIP_EN;
	iowrite32(value, fotg210->reg + FOTG210_DMCR);

	/* enable device global interrupt */
	value = ioread32(fotg210->reg + FOTG210_DMCR);
	value |= DMCR_GLINT_EN;
	iowrite32(value, fotg210->reg + FOTG210_DMCR);

	return 0;
}
/* Put the controller into a quiet, known state with device IRQs masked. */
static void fotg210_init(struct fotg210_udc *fotg210)
{
	u32 value;

	/* disable global interrupt and set int polarity to active high */
	iowrite32(GMIR_MHC_INT | GMIR_MOTG_INT | GMIR_INT_POLARITY,
		  fotg210->reg + FOTG210_GMIR);

	/* mask interrupts for groups other than 0-2 */
	iowrite32(~(DMIGR_MINT_G0 | DMIGR_MINT_G1 | DMIGR_MINT_G2),
		  fotg210->reg + FOTG210_DMIGR);

	/* udc software reset */
	iowrite32(DMCR_SFRST, fotg210->reg + FOTG210_DMCR);
	/* Better wait a bit, but without a datasheet, no idea how long. */
	usleep_range(100, 200);

	/* disable device global interrupt */
	value = ioread32(fotg210->reg + FOTG210_DMCR);
	value &= ~DMCR_GLINT_EN;
	iowrite32(value, fotg210->reg + FOTG210_DMCR);

	/* enable only grp2 irqs we handle */
	iowrite32(~(DISGR2_DMA_ERROR | DISGR2_RX0BYTE_INT | DISGR2_TX0BYTE_INT
		    | DISGR2_ISO_SEQ_ABORT_INT | DISGR2_ISO_SEQ_ERR_INT
		    | DISGR2_RESM_INT | DISGR2_SUSP_INT | DISGR2_USBRST_INT),
		  fotg210->reg + FOTG210_DMISGR2);

	/* disable all fifo interrupt */
	iowrite32(~(u32)0, fotg210->reg + FOTG210_DMISGR1);

	/* disable cmd end */
	value = ioread32(fotg210->reg + FOTG210_DMISGR0);
	value |= DMISGR0_MCX_COMEND;
	iowrite32(value, fotg210->reg + FOTG210_DMISGR0);
}
/* usb_gadget_ops .udc_stop: unbind the gadget driver. */
static int fotg210_udc_stop(struct usb_gadget *g)
{
	struct fotg210_udc *fotg210 = gadget_to_fotg210(g);
	unsigned long flags;

	/*
	 * NOTE(review): with a PHY present we return here and never
	 * clear ->driver or re-init the hardware - confirm intended.
	 */
	if (!IS_ERR_OR_NULL(fotg210->phy))
		return otg_set_peripheral(fotg210->phy->otg, NULL);

	spin_lock_irqsave(&fotg210->lock, flags);

	/* quiesce the hardware and drop the driver binding */
	fotg210_init(fotg210);
	fotg210->driver = NULL;
	fotg210->gadget.speed = USB_SPEED_UNKNOWN;

	spin_unlock_irqrestore(&fotg210->lock, flags);

	return 0;
}
/**
 * fotg210_vbus_session - Called by external transceiver to enable/disable udc
 * @g: usb gadget
 * @is_active: 0 if should disable UDC VBUS, 1 if should enable
 *
 * Returns 0
 */
static int fotg210_vbus_session(struct usb_gadget *g, int is_active)
{
	struct fotg210_udc *fotg210 = gadget_to_fotg210(g);

	/* Call down to core integration layer to drive or disable VBUS */
	fotg210_vbus(fotg210->fotg, is_active);
	return 0;
}
/* Gadget operations handed to the UDC core */
static const struct usb_gadget_ops fotg210_gadget_ops = {
	.udc_start = fotg210_udc_start,
	.udc_stop = fotg210_udc_stop,
	.vbus_session = fotg210_vbus_session,
};
/**
 * fotg210_phy_event - Called by phy upon VBus event
 * @nb: notifier block
 * @action: phy action, is vbus connect or disconnect
 * @data: the usb_gadget structure in fotg210
 *
 * Called by the USB Phy when a cable connect or disconnect is sensed.
 *
 * Returns NOTIFY_OK or NOTIFY_DONE
 */
static int fotg210_phy_event(struct notifier_block *nb, unsigned long action,
			     void *data)
{
	struct usb_gadget *gadget = data;

	if (!gadget)
		return NOTIFY_DONE;

	if (action == USB_EVENT_VBUS) {
		usb_gadget_vbus_connect(gadget);
		return NOTIFY_OK;
	}

	if (action == USB_EVENT_NONE) {
		usb_gadget_vbus_disconnect(gadget);
		return NOTIFY_OK;
	}

	return NOTIFY_DONE;
}
/* Receives VBUS connect/disconnect notifications from the PHY */
static struct notifier_block fotg210_phy_notifier = {
	.notifier_call = fotg210_phy_event,
};
/* Tear down the UDC in reverse order of probe. */
int fotg210_udc_remove(struct platform_device *pdev)
{
	struct fotg210_udc *fotg210 = platform_get_drvdata(pdev);
	int i;

	usb_del_gadget_udc(&fotg210->gadget);
	if (!IS_ERR_OR_NULL(fotg210->phy)) {
		usb_unregister_notifier(fotg210->phy, &fotg210_phy_notifier);
		usb_put_phy(fotg210->phy);
	}
	/*
	 * NOTE(review): ->reg aliases fotg->base, which is mapped by the
	 * core fotg210 driver (see probe); unmapping it here looks like
	 * it belongs to the owner of that mapping - confirm.
	 */
	iounmap(fotg210->reg);
	free_irq(platform_get_irq(pdev, 0), fotg210);

	fotg210_ep_free_request(&fotg210->ep[0]->ep, fotg210->ep0_req);
	for (i = 0; i < FOTG210_MAX_NUM_EP; i++)
		kfree(fotg210->ep[i]);
	kfree(fotg210);

	return 0;
}
/*
 * Probe the UDC half of the FOTG210 dual-role controller.
 * @fotg carries the register mapping and shared state from the core
 * driver. Returns 0 on success or a negative errno.
 */
int fotg210_udc_probe(struct platform_device *pdev, struct fotg210 *fotg)
{
	struct fotg210_udc *fotg210 = NULL;
	struct device *dev = &pdev->dev;
	int irq;
	int ret = 0;
	int i;

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;

	/* initialize udc */
	fotg210 = kzalloc(sizeof(struct fotg210_udc), GFP_KERNEL);
	if (fotg210 == NULL)
		return -ENOMEM;

	fotg210->dev = dev;
	fotg210->fotg = fotg;

	/* optional PHY; absence is not fatal, only deferral is */
	fotg210->phy = devm_usb_get_phy_by_phandle(dev, "usb-phy", 0);
	if (IS_ERR(fotg210->phy)) {
		ret = PTR_ERR(fotg210->phy);
		if (ret == -EPROBE_DEFER)
			goto err_free;
		dev_info(dev, "no PHY found\n");
		fotg210->phy = NULL;
	} else {
		ret = usb_phy_init(fotg210->phy);
		if (ret)
			goto err_free;
		dev_info(dev, "found and initialized PHY\n");
	}

	ret = -ENOMEM;

	for (i = 0; i < FOTG210_MAX_NUM_EP; i++) {
		fotg210->ep[i] = kzalloc(sizeof(struct fotg210_ep), GFP_KERNEL);
		if (!fotg210->ep[i])
			goto err_alloc;
	}

	/* register window is mapped by the core fotg210 driver */
	fotg210->reg = fotg->base;

	spin_lock_init(&fotg210->lock);

	platform_set_drvdata(pdev, fotg210);

	fotg210->gadget.ops = &fotg210_gadget_ops;
	fotg210->gadget.max_speed = USB_SPEED_HIGH;
	fotg210->gadget.dev.parent = dev;
	fotg210->gadget.dev.dma_mask = dev->dma_mask;
	fotg210->gadget.name = udc_name;

	INIT_LIST_HEAD(&fotg210->gadget.ep_list);

	for (i = 0; i < FOTG210_MAX_NUM_EP; i++) {
		struct fotg210_ep *ep = fotg210->ep[i];

		/* ep0 is not part of the generic endpoint list */
		if (i) {
			INIT_LIST_HEAD(&fotg210->ep[i]->ep.ep_list);
			list_add_tail(&fotg210->ep[i]->ep.ep_list,
				      &fotg210->gadget.ep_list);
		}
		ep->fotg210 = fotg210;
		INIT_LIST_HEAD(&ep->queue);
		ep->ep.name = fotg210_ep_name[i];
		ep->ep.ops = &fotg210_ep_ops;
		usb_ep_set_maxpacket_limit(&ep->ep, (unsigned short) ~0);

		if (i == 0) {
			ep->ep.caps.type_control = true;
		} else {
			ep->ep.caps.type_iso = true;
			ep->ep.caps.type_bulk = true;
			ep->ep.caps.type_int = true;
		}

		ep->ep.caps.dir_in = true;
		ep->ep.caps.dir_out = true;
	}
	/* ep0 maxpacket is fixed at 64 bytes */
	usb_ep_set_maxpacket_limit(&fotg210->ep[0]->ep, 0x40);
	fotg210->gadget.ep0 = &fotg210->ep[0]->ep;
	INIT_LIST_HEAD(&fotg210->gadget.ep0->ep_list);

	/* pre-allocated request for driver-internal ep0 responses */
	fotg210->ep0_req = fotg210_ep_alloc_request(&fotg210->ep[0]->ep,
				GFP_KERNEL);
	if (fotg210->ep0_req == NULL)
		goto err_map;

	fotg210->ep0_req->complete = fotg210_ep0_complete;

	fotg210_init(fotg210);

	fotg210_disable_unplug(fotg210);

	ret = request_irq(irq, fotg210_irq, IRQF_SHARED,
			  udc_name, fotg210);
	if (ret < 0) {
		dev_err_probe(dev, ret, "request_irq error\n");
		goto err_req;
	}

	if (!IS_ERR_OR_NULL(fotg210->phy))
		usb_register_notifier(fotg210->phy, &fotg210_phy_notifier);

	ret = usb_add_gadget_udc(dev, &fotg210->gadget);
	if (ret)
		goto err_add_udc;

	dev_info(dev, "version %s\n", DRIVER_VERSION);

	return 0;

err_add_udc:
	if (!IS_ERR_OR_NULL(fotg210->phy))
		usb_unregister_notifier(fotg210->phy, &fotg210_phy_notifier);
	free_irq(irq, fotg210);

err_req:
	fotg210_ep_free_request(&fotg210->ep[0]->ep, fotg210->ep0_req);

err_map:
	/* NOTE(review): ->reg aliases fotg->base owned by the core - confirm */
	iounmap(fotg210->reg);

err_alloc:
	for (i = 0; i < FOTG210_MAX_NUM_EP; i++)
		kfree(fotg210->ep[i]);

err_free:
	kfree(fotg210);
	return ret;
}
| linux-master | drivers/usb/fotg210/fotg210-udc.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* Central probing code for the FOTG210 dual role driver
* We register one driver for the hardware and then we decide
* whether to proceed with probing the host or the peripheral
* driver.
*/
#include <linux/bitops.h>
#include <linux/clk.h>
#include <linux/device.h>
#include <linux/mfd/syscon.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/usb.h>
#include <linux/usb/otg.h>
#include "fotg210.h"
/* Role Register 0x80 */
#define FOTG210_RR 0x80
#define FOTG210_RR_ID BIT(21) /* 1 = B-device, 0 = A-device */
#define FOTG210_RR_CROLE BIT(20) /* 1 = device, 0 = host */
/*
* Gemini-specific initialization function, only executed on the
* Gemini SoC using the global misc control register.
*
* The gemini USB blocks are connected to either Mini-A (host mode) or
* Mini-B (peripheral mode) plugs. There is no role switch support on the
* Gemini SoC, just either-or.
*/
#define GEMINI_GLOBAL_MISC_CTRL 0x30
#define GEMINI_MISC_USB0_WAKEUP BIT(14)
#define GEMINI_MISC_USB1_WAKEUP BIT(15)
#define GEMINI_MISC_USB0_VBUS_ON BIT(22)
#define GEMINI_MISC_USB1_VBUS_ON BIT(23)
#define GEMINI_MISC_USB0_MINI_B BIT(29)
#define GEMINI_MISC_USB1_MINI_B BIT(30)
/*
 * fotg210_gemini_init() - program the Gemini misc control register for a port
 * @fotg: driver instance state
 * @res: MMIO resource; its physical base address distinguishes USB0 from USB1
 * @mode: role selected in the device tree (host or peripheral)
 *
 * Looks up the "syscon" regmap and sets the VBUS-on (host) or Mini-B
 * (peripheral) bit, plus the wakeup bit when "wakeup-source" is present.
 * Returns 0 on success or a negative errno.
 */
static int fotg210_gemini_init(struct fotg210 *fotg, struct resource *res,
			       enum usb_dr_mode mode)
{
	struct device *dev = fotg->dev;
	struct device_node *np = dev->of_node;
	struct regmap *map;
	bool wakeup;
	u32 mask, val;
	int ret;
	map = syscon_regmap_lookup_by_phandle(np, "syscon");
	if (IS_ERR(map))
		return dev_err_probe(dev, PTR_ERR(map), "no syscon\n");
	fotg->map = map;
	wakeup = of_property_read_bool(np, "wakeup-source");
	/*
	 * Figure out if this is USB0 or USB1 by simply checking the
	 * physical base address.
	 */
	mask = 0;
	if (res->start == 0x69000000) {
		fotg->port = GEMINI_PORT_1;
		mask = GEMINI_MISC_USB1_VBUS_ON | GEMINI_MISC_USB1_MINI_B |
			GEMINI_MISC_USB1_WAKEUP;
		if (mode == USB_DR_MODE_HOST)
			val = GEMINI_MISC_USB1_VBUS_ON;
		else
			val = GEMINI_MISC_USB1_MINI_B;
		if (wakeup)
			val |= GEMINI_MISC_USB1_WAKEUP;
	} else {
		fotg->port = GEMINI_PORT_0;
		mask = GEMINI_MISC_USB0_VBUS_ON | GEMINI_MISC_USB0_MINI_B |
			GEMINI_MISC_USB0_WAKEUP;
		if (mode == USB_DR_MODE_HOST)
			val = GEMINI_MISC_USB0_VBUS_ON;
		else
			val = GEMINI_MISC_USB0_MINI_B;
		if (wakeup)
			val |= GEMINI_MISC_USB0_WAKEUP;
	}
	/* Read-modify-write only the bits belonging to this port */
	ret = regmap_update_bits(map, GEMINI_GLOBAL_MISC_CTRL, mask, val);
	if (ret) {
		dev_err(dev, "failed to initialize Gemini PHY\n");
		return ret;
	}
	dev_info(dev, "initialized Gemini PHY in %s mode\n",
		 (mode == USB_DR_MODE_HOST) ? "host" : "gadget");
	return 0;
}
/**
 * fotg210_vbus() - Called by gadget driver to enable/disable VBUS
 * @fotg: driver instance state
 * @enable: true to enable VBUS, false to disable VBUS
 *
 * Only has an effect on the Gemini SoC, where the VBUS switch is
 * controlled through the global misc control register; on other
 * platforms fotg->port is left at its default and this is a no-op.
 */
void fotg210_vbus(struct fotg210 *fotg, bool enable)
{
	u32 mask;
	u32 val;
	int ret;
	switch (fotg->port) {
	case GEMINI_PORT_0:
		mask = GEMINI_MISC_USB0_VBUS_ON;
		val = enable ? GEMINI_MISC_USB0_VBUS_ON : 0;
		break;
	case GEMINI_PORT_1:
		mask = GEMINI_MISC_USB1_VBUS_ON;
		val = enable ? GEMINI_MISC_USB1_VBUS_ON : 0;
		break;
	default:
		/* Not a Gemini port: nothing to switch */
		return;
	}
	ret = regmap_update_bits(fotg->map, GEMINI_GLOBAL_MISC_CTRL, mask, val);
	if (ret)
		dev_err(fotg->dev, "failed to %s VBUS\n",
			enable ? "enable" : "disable");
	dev_info(fotg->dev, "%s: %s VBUS\n", __func__, enable ? "enable" : "disable");
}
/*
 * Central probe: map the registers, do SoC-specific setup, then hand
 * over to either the UDC (peripheral) or HCD (host) sub-driver based
 * on the dr_mode from the device tree and a sanity check of the
 * hardware role register.
 */
static int fotg210_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	enum usb_dr_mode mode;
	struct fotg210 *fotg;
	u32 val;
	int ret;
	fotg = devm_kzalloc(dev, sizeof(*fotg), GFP_KERNEL);
	if (!fotg)
		return -ENOMEM;
	fotg->dev = dev;
	fotg->base = devm_platform_get_and_ioremap_resource(pdev, 0, &fotg->res);
	if (IS_ERR(fotg->base))
		return PTR_ERR(fotg->base);
	/* Peripheral clock is optional; may legitimately be absent */
	fotg->pclk = devm_clk_get_optional_enabled(dev, "PCLK");
	if (IS_ERR(fotg->pclk))
		return PTR_ERR(fotg->pclk);
	mode = usb_get_dr_mode(dev);
	if (of_device_is_compatible(dev->of_node, "cortina,gemini-usb")) {
		ret = fotg210_gemini_init(fotg, fotg->res, mode);
		if (ret)
			return ret;
	}
	/* Warn (but proceed) if the hardware role disagrees with dr_mode */
	val = readl(fotg->base + FOTG210_RR);
	if (mode == USB_DR_MODE_PERIPHERAL) {
		if (!(val & FOTG210_RR_CROLE))
			dev_err(dev, "block not in device role\n");
		ret = fotg210_udc_probe(pdev, fotg);
	} else {
		if (val & FOTG210_RR_CROLE)
			dev_err(dev, "block not in host role\n");
		ret = fotg210_hcd_probe(pdev, fotg);
	}
	return ret;
}
static void fotg210_remove(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
enum usb_dr_mode mode;
mode = usb_get_dr_mode(dev);
if (mode == USB_DR_MODE_PERIPHERAL)
fotg210_udc_remove(pdev);
else
fotg210_hcd_remove(pdev);
}
#ifdef CONFIG_OF
/* Device-tree match table: both FOTG200 and FOTG210 are handled */
static const struct of_device_id fotg210_of_match[] = {
	{ .compatible = "faraday,fotg200" },
	{ .compatible = "faraday,fotg210" },
	/* TODO: can we also handle FUSB220? */
	{},
};
MODULE_DEVICE_TABLE(of, fotg210_of_match);
#endif
static struct platform_driver fotg210_driver = {
	.driver = {
		.name = "fotg210",
		.of_match_table = of_match_ptr(fotg210_of_match),
	},
	.probe = fotg210_probe,
	.remove_new = fotg210_remove,
};
static int __init fotg210_init(void)
{
	/* One-time HCD bookkeeping (debugfs root etc.) before registering */
	if (IS_ENABLED(CONFIG_USB_FOTG210_HCD) && !usb_disabled())
		fotg210_hcd_init();
	return platform_driver_register(&fotg210_driver);
}
module_init(fotg210_init);
static void __exit fotg210_cleanup(void)
{
	/* Unregister first so no new probes race with HCD teardown */
	platform_driver_unregister(&fotg210_driver);
	if (IS_ENABLED(CONFIG_USB_FOTG210_HCD))
		fotg210_hcd_cleanup();
}
module_exit(fotg210_cleanup);
MODULE_AUTHOR("Yuan-Hsin Chen, Feng-Hsin Chiang");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("FOTG210 Dual Role Controller Driver");
| linux-master | drivers/usb/fotg210/fotg210-core.c |
// SPDX-License-Identifier: GPL-2.0+
/* Faraday FOTG210 EHCI-like driver
*
* Copyright (c) 2013 Faraday Technology Corporation
*
* Author: Yuan-Hsin Chen <[email protected]>
* Feng-Hsin Chiang <[email protected]>
* Po-Yu Chuang <[email protected]>
*
* Most of code borrowed from the Linux-3.7 EHCI driver
*/
#include <linux/module.h>
#include <linux/of.h>
#include <linux/device.h>
#include <linux/dmapool.h>
#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/ioport.h>
#include <linux/sched.h>
#include <linux/vmalloc.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/hrtimer.h>
#include <linux/list.h>
#include <linux/interrupt.h>
#include <linux/usb.h>
#include <linux/usb/hcd.h>
#include <linux/moduleparam.h>
#include <linux/dma-mapping.h>
#include <linux/debugfs.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/platform_device.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <asm/byteorder.h>
#include <asm/irq.h>
#include <asm/unaligned.h>
#include "fotg210.h"
static const char hcd_name[] = "fotg210_hcd";
#undef FOTG210_URB_TRACE
#define FOTG210_STATS
/* magic numbers that can affect system performance */
#define FOTG210_TUNE_CERR 3 /* 0-3 qtd retries; 0 == don't stop */
#define FOTG210_TUNE_RL_HS 4 /* nak throttle; see 4.9 */
#define FOTG210_TUNE_RL_TT 0
#define FOTG210_TUNE_MULT_HS 1 /* 1-3 transactions/uframe; 4.10.3 */
#define FOTG210_TUNE_MULT_TT 1
/* Some drivers think it's safe to schedule isochronous transfers more than 256
* ms into the future (partly as a result of an old bug in the scheduling
* code). In an attempt to avoid trouble, we will use a minimum scheduling
* length of 512 frames instead of 256.
*/
#define FOTG210_TUNE_FLS 1 /* (medium) 512-frame schedule */
/* Initial IRQ latency: faster than hw default */
static int log2_irq_thresh; /* 0 to 6 */
module_param(log2_irq_thresh, int, S_IRUGO);
MODULE_PARM_DESC(log2_irq_thresh, "log2 IRQ latency, 1-64 microframes");
/* initial park setting: slower than hw default */
static unsigned park;
module_param(park, uint, S_IRUGO);
MODULE_PARM_DESC(park, "park setting; 1-3 back-to-back async packets");
/* for link power management(LPM) feature */
static unsigned int hird;
module_param(hird, int, S_IRUGO);
MODULE_PARM_DESC(hird, "host initiated resume duration, +1 for each 75us");
#define INTR_MASK (STS_IAA | STS_FATAL | STS_PCD | STS_ERR | STS_INT)
#include "fotg210-hcd.h"
#define fotg210_dbg(fotg210, fmt, args...) \
dev_dbg(fotg210_to_hcd(fotg210)->self.controller, fmt, ## args)
#define fotg210_err(fotg210, fmt, args...) \
dev_err(fotg210_to_hcd(fotg210)->self.controller, fmt, ## args)
#define fotg210_info(fotg210, fmt, args...) \
dev_info(fotg210_to_hcd(fotg210)->self.controller, fmt, ## args)
#define fotg210_warn(fotg210, fmt, args...) \
dev_warn(fotg210_to_hcd(fotg210)->self.controller, fmt, ## args)
/* check the values in the HCSPARAMS register (host controller _Structural_
 * parameters) see EHCI spec, Table 2-4 for each value
 */
static void dbg_hcs_params(struct fotg210_hcd *fotg210, char *label)
{
	u32 params = fotg210_readl(fotg210, &fotg210->caps->hcs_params);
	fotg210_dbg(fotg210, "%s hcs_params 0x%x ports=%d\n", label, params,
			HCS_N_PORTS(params));
}
/* check the values in the HCCPARAMS register (host controller _Capability_
 * parameters) see EHCI Spec, Table 2-5 for each value
 */
static void dbg_hcc_params(struct fotg210_hcd *fotg210, char *label)
{
	u32 params = fotg210_readl(fotg210, &fotg210->caps->hcc_params);
	fotg210_dbg(fotg210, "%s hcc_params %04x uframes %s%s\n", label,
			params,
			HCC_PGM_FRAMELISTLEN(params) ? "256/512/1024" : "1024",
			HCC_CANPARK(params) ? " park" : "");
}
/* Dump one queue transfer descriptor (qTD): next pointers, token, buffers */
static void __maybe_unused
dbg_qtd(const char *label, struct fotg210_hcd *fotg210, struct fotg210_qtd *qtd)
{
	fotg210_dbg(fotg210, "%s td %p n%08x %08x t%08x p0=%08x\n", label, qtd,
			hc32_to_cpup(fotg210, &qtd->hw_next),
			hc32_to_cpup(fotg210, &qtd->hw_alt_next),
			hc32_to_cpup(fotg210, &qtd->hw_token),
			hc32_to_cpup(fotg210, &qtd->hw_buf[0]));
	/* raw non-zero test: valid regardless of hc endianness */
	if (qtd->hw_buf[1])
		fotg210_dbg(fotg210, " p1=%08x p2=%08x p3=%08x p4=%08x\n",
				hc32_to_cpup(fotg210, &qtd->hw_buf[1]),
				hc32_to_cpup(fotg210, &qtd->hw_buf[2]),
				hc32_to_cpup(fotg210, &qtd->hw_buf[3]),
				hc32_to_cpup(fotg210, &qtd->hw_buf[4]));
}
/* Dump a queue head (QH) plus its qTD overlay area */
static void __maybe_unused
dbg_qh(const char *label, struct fotg210_hcd *fotg210, struct fotg210_qh *qh)
{
	struct fotg210_qh_hw *hw = qh->hw;
	fotg210_dbg(fotg210, "%s qh %p n%08x info %x %x qtd %x\n", label, qh,
			hw->hw_next, hw->hw_info1, hw->hw_info2,
			hw->hw_current);
	dbg_qtd("overlay", fotg210, (struct fotg210_qtd *) &hw->hw_qtd_next);
}
/* Dump an isochronous transfer descriptor (iTD): 8 transactions, 7 buffers */
static void __maybe_unused
dbg_itd(const char *label, struct fotg210_hcd *fotg210, struct fotg210_itd *itd)
{
	fotg210_dbg(fotg210, "%s[%d] itd %p, next %08x, urb %p\n", label,
			itd->frame, itd, hc32_to_cpu(fotg210, itd->hw_next),
			itd->urb);
	fotg210_dbg(fotg210,
			" trans: %08x %08x %08x %08x %08x %08x %08x %08x\n",
			hc32_to_cpu(fotg210, itd->hw_transaction[0]),
			hc32_to_cpu(fotg210, itd->hw_transaction[1]),
			hc32_to_cpu(fotg210, itd->hw_transaction[2]),
			hc32_to_cpu(fotg210, itd->hw_transaction[3]),
			hc32_to_cpu(fotg210, itd->hw_transaction[4]),
			hc32_to_cpu(fotg210, itd->hw_transaction[5]),
			hc32_to_cpu(fotg210, itd->hw_transaction[6]),
			hc32_to_cpu(fotg210, itd->hw_transaction[7]));
	fotg210_dbg(fotg210,
			" buf: %08x %08x %08x %08x %08x %08x %08x\n",
			hc32_to_cpu(fotg210, itd->hw_bufp[0]),
			hc32_to_cpu(fotg210, itd->hw_bufp[1]),
			hc32_to_cpu(fotg210, itd->hw_bufp[2]),
			hc32_to_cpu(fotg210, itd->hw_bufp[3]),
			hc32_to_cpu(fotg210, itd->hw_bufp[4]),
			hc32_to_cpu(fotg210, itd->hw_bufp[5]),
			hc32_to_cpu(fotg210, itd->hw_bufp[6]));
	fotg210_dbg(fotg210, " index: %d %d %d %d %d %d %d %d\n",
			itd->index[0], itd->index[1], itd->index[2],
			itd->index[3], itd->index[4], itd->index[5],
			itd->index[6], itd->index[7]);
}
/* Format the USBSTS register as human-readable flags into buf */
static int __maybe_unused
dbg_status_buf(char *buf, unsigned len, const char *label, u32 status)
{
	return scnprintf(buf, len, "%s%sstatus %04x%s%s%s%s%s%s%s%s%s%s",
			label, label[0] ? " " : "", status,
			(status & STS_ASS) ? " Async" : "",
			(status & STS_PSS) ? " Periodic" : "",
			(status & STS_RECL) ? " Recl" : "",
			(status & STS_HALT) ? " Halt" : "",
			(status & STS_IAA) ? " IAA" : "",
			(status & STS_FATAL) ? " FATAL" : "",
			(status & STS_FLR) ? " FLR" : "",
			(status & STS_PCD) ? " PCD" : "",
			(status & STS_ERR) ? " ERR" : "",
			(status & STS_INT) ? " INT" : "");
}
/* Format the USBINTR (interrupt enable) register as readable flags */
static int __maybe_unused
dbg_intr_buf(char *buf, unsigned len, const char *label, u32 enable)
{
	return scnprintf(buf, len, "%s%sintrenable %02x%s%s%s%s%s%s",
			label, label[0] ? " " : "", enable,
			(enable & STS_IAA) ? " IAA" : "",
			(enable & STS_FATAL) ? " FATAL" : "",
			(enable & STS_FLR) ? " FLR" : "",
			(enable & STS_PCD) ? " PCD" : "",
			(enable & STS_ERR) ? " ERR" : "",
			(enable & STS_INT) ? " INT" : "");
}
static const char *const fls_strings[] = { "1024", "512", "256", "??" };
/* Format the USBCMD register; see EHCI spec section 2.3.1 for the fields */
static int dbg_command_buf(char *buf, unsigned len, const char *label,
		u32 command)
{
	return scnprintf(buf, len,
			"%s%scommand %07x %s=%d ithresh=%d%s%s%s period=%s%s %s",
			label, label[0] ? " " : "", command,
			(command & CMD_PARK) ? " park" : "(park)",
			CMD_PARK_CNT(command),
			(command >> 16) & 0x3f, /* interrupt threshold */
			(command & CMD_IAAD) ? " IAAD" : "",
			(command & CMD_ASE) ? " Async" : "",
			(command & CMD_PSE) ? " Periodic" : "",
			fls_strings[(command >> 2) & 0x3], /* frame list size */
			(command & CMD_RESET) ? " Reset" : "",
			(command & CMD_RUN) ? "RUN" : "HALT");
}
/* Format a PORTSC register value; returns buf for use as a printf arg */
static char *dbg_port_buf(char *buf, unsigned len, const char *label, int port,
		u32 status)
{
	/*
	 * Line state, PORTSC bits [11:10]: SE0, K (low speed), J.
	 * Index 3 is reserved by the spec, hence "?".
	 */
	static const char *const sig_names[] = { "se0", "k", "j", "?" };
	const char *sig = sig_names[(status >> 10) & 3];

	scnprintf(buf, len, "%s%sport:%d status %06x %d sig=%s%s%s%s%s%s%s%s",
			label, label[0] ? " " : "", port, status,
			status >> 25, /*device address */
			sig,
			(status & PORT_RESET) ? " RESET" : "",
			(status & PORT_SUSPEND) ? " SUSPEND" : "",
			(status & PORT_RESUME) ? " RESUME" : "",
			(status & PORT_PEC) ? " PEC" : "",
			(status & PORT_PE) ? " PE" : "",
			(status & PORT_CSC) ? " CSC" : "",
			(status & PORT_CONNECT) ? " CONNECT" : "");
	return buf;
}
/* functions have the "wrong" filename when they're output... */
#define dbg_status(fotg210, label, status) { \
char _buf[80]; \
dbg_status_buf(_buf, sizeof(_buf), label, status); \
fotg210_dbg(fotg210, "%s\n", _buf); \
}
#define dbg_cmd(fotg210, label, command) { \
char _buf[80]; \
dbg_command_buf(_buf, sizeof(_buf), label, command); \
fotg210_dbg(fotg210, "%s\n", _buf); \
}
#define dbg_port(fotg210, label, port, status) { \
char _buf[80]; \
fotg210_dbg(fotg210, "%s\n", \
dbg_port_buf(_buf, sizeof(_buf), label, port, status));\
}
/* troubleshooting help: expose state in debugfs */
static int debug_async_open(struct inode *, struct file *);
static int debug_periodic_open(struct inode *, struct file *);
static int debug_registers_open(struct inode *, struct file *);
static int debug_async_open(struct inode *, struct file *);
static ssize_t debug_output(struct file*, char __user*, size_t, loff_t*);
static int debug_close(struct inode *, struct file *);
static const struct file_operations debug_async_fops = {
.owner = THIS_MODULE,
.open = debug_async_open,
.read = debug_output,
.release = debug_close,
.llseek = default_llseek,
};
static const struct file_operations debug_periodic_fops = {
.owner = THIS_MODULE,
.open = debug_periodic_open,
.read = debug_output,
.release = debug_close,
.llseek = default_llseek,
};
static const struct file_operations debug_registers_fops = {
.owner = THIS_MODULE,
.open = debug_registers_open,
.read = debug_output,
.release = debug_close,
.llseek = default_llseek,
};
static struct dentry *fotg210_debug_root;
struct debug_buffer {
ssize_t (*fill_func)(struct debug_buffer *); /* fill method */
struct usb_bus *bus;
struct mutex mutex; /* protect filling of buffer */
size_t count; /* number of characters filled into buffer */
char *output_buf;
size_t alloc_size;
};
/* Map the QH endpoint-speed field (bits 13:12 of hw_info1) to a letter */
static inline char speed_char(u32 scratch)
{
	u32 eps = scratch & (3 << 12);

	if (eps == QH_FULL_SPEED)
		return 'f';
	if (eps == QH_LOW_SPEED)
		return 'l';
	if (eps == QH_HIGH_SPEED)
		return 'h';
	return '?';
}
/* Summarize a qTD token as one status character for the debugfs dump */
static inline char token_mark(struct fotg210_hcd *fotg210, __hc32 token)
{
	__u32 t = hc32_to_cpu(fotg210, token);

	if (t & QTD_STS_ACTIVE)
		return '*';
	if (t & QTD_STS_HALT)
		return '-';
	/* short reads try to advance through hw_alt_next */
	return IS_SHORT_READ(t) ? '/' : ' ';
}
/*
 * Append a one-line summary of a QH (and one line per qTD on its list)
 * to the caller's buffer; *nextp and *sizep track the write position
 * and remaining space and are updated in place.
 */
static void qh_lines(struct fotg210_hcd *fotg210, struct fotg210_qh *qh,
		char **nextp, unsigned *sizep)
{
	u32 scratch;
	u32 hw_curr;
	struct fotg210_qtd *td;
	unsigned temp;
	unsigned size = *sizep;
	char *next = *nextp;
	char mark;
	__le32 list_end = FOTG210_LIST_END(fotg210);
	struct fotg210_qh_hw *hw = qh->hw;
	if (hw->hw_qtd_next == list_end) /* NEC does this */
		mark = '@';
	else
		mark = token_mark(fotg210, hw->hw_token);
	if (mark == '/') { /* qh_alt_next controls qh advance? */
		if ((hw->hw_alt_next & QTD_MASK(fotg210)) ==
		    fotg210->async->hw->hw_alt_next)
			mark = '#'; /* blocked */
		else if (hw->hw_alt_next == list_end)
			mark = '.'; /* use hw_qtd_next */
		/* else alt_next points to some other qtd */
	}
	scratch = hc32_to_cpup(fotg210, &hw->hw_info1);
	hw_curr = (mark == '*') ? hc32_to_cpup(fotg210, &hw->hw_current) : 0;
	temp = scnprintf(next, size,
			"qh/%p dev%d %cs ep%d %08x %08x(%08x%c %s nak%d)",
			qh, scratch & 0x007f,
			speed_char(scratch),
			(scratch >> 8) & 0x000f,
			scratch, hc32_to_cpup(fotg210, &hw->hw_info2),
			hc32_to_cpup(fotg210, &hw->hw_token), mark,
			(cpu_to_hc32(fotg210, QTD_TOGGLE) & hw->hw_token)
				? "data1" : "data0",
			(hc32_to_cpup(fotg210, &hw->hw_alt_next) >> 1) & 0x0f);
	size -= temp;
	next += temp;
	/* hc may be modifying the list as we read it ... */
	list_for_each_entry(td, &qh->qtd_list, qtd_list) {
		scratch = hc32_to_cpup(fotg210, &td->hw_token);
		mark = ' ';
		if (hw_curr == td->qtd_dma)
			mark = '*';
		else if (hw->hw_qtd_next == cpu_to_hc32(fotg210, td->qtd_dma))
			mark = '+';
		else if (QTD_LENGTH(scratch)) {
			if (td->hw_alt_next == fotg210->async->hw->hw_alt_next)
				mark = '#';
			else if (td->hw_alt_next != list_end)
				mark = '/';
		}
		/* snprintf may report more than fits: clamp below */
		temp = snprintf(next, size,
				"\n\t%p%c%s len=%d %08x urb %p",
				td, mark, ({ char *tmp;
				switch ((scratch>>8)&0x03) {
				case 0:
					tmp = "out";
					break;
				case 1:
					tmp = "in";
					break;
				case 2:
					tmp = "setup";
					break;
				default:
					tmp = "?";
					break;
				} tmp; }),
				(scratch >> 16) & 0x7fff,
				scratch,
				td->urb);
		if (size < temp)
			temp = size;
		size -= temp;
		next += temp;
		if (temp == size)
			goto done;
	}
	temp = snprintf(next, size, "\n");
	if (size < temp)
		temp = size;
	size -= temp;
	next += temp;
done:
	*sizep = size;
	*nextp = next;
}
/* debugfs "async" file: dump the current async schedule into the buffer */
static ssize_t fill_async_buffer(struct debug_buffer *buf)
{
	struct usb_hcd *hcd;
	struct fotg210_hcd *fotg210;
	unsigned long flags;
	unsigned temp, size;
	char *next;
	struct fotg210_qh *qh;
	hcd = bus_to_hcd(buf->bus);
	fotg210 = hcd_to_fotg210(hcd);
	next = buf->output_buf;
	size = buf->alloc_size;
	*next = 0;
	/* dumps a snapshot of the async schedule.
	 * usually empty except for long-term bulk reads, or head.
	 * one QH per line, and TDs we know about
	 */
	spin_lock_irqsave(&fotg210->lock, flags);
	for (qh = fotg210->async->qh_next.qh; size > 0 && qh;
	     qh = qh->qh_next.qh)
		qh_lines(fotg210, qh, &next, &size);
	if (fotg210->async_unlink && size > 0) {
		temp = scnprintf(next, size, "\nunlink =\n");
		size -= temp;
		next += temp;
		for (qh = fotg210->async_unlink; size > 0 && qh;
				qh = qh->unlink_next)
			qh_lines(fotg210, qh, &next, &size);
	}
	spin_unlock_irqrestore(&fotg210->lock, flags);
	/* qh_lines() may clamp output, so measure what actually landed */
	return strlen(buf->output_buf);
}
/* count tds, get ep direction */
static unsigned output_buf_tds_dir(char *buf, struct fotg210_hcd *fotg210,
		struct fotg210_qh_hw *hw, struct fotg210_qh *qh, unsigned size)
{
	u32 scratch = hc32_to_cpup(fotg210, &hw->hw_info1);
	struct fotg210_qtd *qtd;
	char *type = "";
	unsigned temp = 0;
	/* count tds, get ep direction */
	list_for_each_entry(qtd, &qh->qtd_list, qtd_list) {
		temp++;
		/* PID code, token bits [9:8]: 0 = OUT, 1 = IN, 2 = SETUP */
		switch ((hc32_to_cpu(fotg210, qtd->hw_token) >> 8) & 0x03) {
		case 0:
			type = "out";
			continue;
		case 1:
			type = "in";
			continue;
		}
	}
	return scnprintf(buf, size, "(%c%d ep%d%s [%d/%d] q%d p%d)",
			speed_char(scratch), scratch & 0x007f,
			(scratch >> 8) & 0x000f, type, qh->usecs,
			qh->c_usecs, temp, (scratch >> 16) & 0x7ff);
}
#define DBG_SCHED_LIMIT 64
/*
 * debugfs "periodic" file: walk the periodic schedule frame by frame
 * and print each QH/FSTN/iTD encountered.  The "seen" array remembers
 * up to DBG_SCHED_LIMIT QHs so repeated entries print only "..." after
 * their first full appearance.
 */
static ssize_t fill_periodic_buffer(struct debug_buffer *buf)
{
	struct usb_hcd *hcd;
	struct fotg210_hcd *fotg210;
	unsigned long flags;
	union fotg210_shadow p, *seen;
	unsigned temp, size, seen_count;
	char *next;
	unsigned i;
	__hc32 tag;
	/* GFP_ATOMIC: we allocate before taking the spinlock below */
	seen = kmalloc_array(DBG_SCHED_LIMIT, sizeof(*seen), GFP_ATOMIC);
	if (!seen)
		return 0;
	seen_count = 0;
	hcd = bus_to_hcd(buf->bus);
	fotg210 = hcd_to_fotg210(hcd);
	next = buf->output_buf;
	size = buf->alloc_size;
	temp = scnprintf(next, size, "size = %d\n", fotg210->periodic_size);
	size -= temp;
	next += temp;
	/* dump a snapshot of the periodic schedule.
	 * iso changes, interrupt usually doesn't.
	 */
	spin_lock_irqsave(&fotg210->lock, flags);
	for (i = 0; i < fotg210->periodic_size; i++) {
		p = fotg210->pshadow[i];
		if (likely(!p.ptr))
			continue;
		tag = Q_NEXT_TYPE(fotg210, fotg210->periodic[i]);
		temp = scnprintf(next, size, "%4d: ", i);
		size -= temp;
		next += temp;
		do {
			struct fotg210_qh_hw *hw;
			switch (hc32_to_cpu(fotg210, tag)) {
			case Q_TYPE_QH:
				hw = p.qh->hw;
				temp = scnprintf(next, size, " qh%d-%04x/%p",
						p.qh->period,
						hc32_to_cpup(fotg210,
							&hw->hw_info2)
							/* uframe masks */
							& (QH_CMASK | QH_SMASK),
						p.qh);
				size -= temp;
				next += temp;
				/* don't repeat what follows this qh */
				for (temp = 0; temp < seen_count; temp++) {
					if (seen[temp].ptr != p.ptr)
						continue;
					if (p.qh->qh_next.ptr) {
						temp = scnprintf(next, size,
							" ...");
						size -= temp;
						next += temp;
					}
					break;
				}
				/* show more info the first time around */
				if (temp == seen_count) {
					temp = output_buf_tds_dir(next,
							fotg210, hw,
							p.qh, size);
					if (seen_count < DBG_SCHED_LIMIT)
						seen[seen_count++].qh = p.qh;
				} else
					temp = 0;
				tag = Q_NEXT_TYPE(fotg210, hw->hw_next);
				p = p.qh->qh_next;
				break;
			case Q_TYPE_FSTN:
				temp = scnprintf(next, size,
						" fstn-%8x/%p",
						p.fstn->hw_prev, p.fstn);
				tag = Q_NEXT_TYPE(fotg210, p.fstn->hw_next);
				p = p.fstn->fstn_next;
				break;
			case Q_TYPE_ITD:
				temp = scnprintf(next, size,
						" itd/%p", p.itd);
				tag = Q_NEXT_TYPE(fotg210, p.itd->hw_next);
				p = p.itd->itd_next;
				break;
			}
			size -= temp;
			next += temp;
		} while (p.ptr);
		temp = scnprintf(next, size, "\n");
		size -= temp;
		next += temp;
	}
	spin_unlock_irqrestore(&fotg210->lock, flags);
	kfree(seen);
	return buf->alloc_size - size;
}
#undef DBG_SCHED_LIMIT
/* Human-readable name for the current root-hub state */
static const char *rh_state_string(struct fotg210_hcd *fotg210)
{
	if (fotg210->rh_state == FOTG210_RH_HALTED)
		return "halted";
	if (fotg210->rh_state == FOTG210_RH_SUSPENDED)
		return "suspended";
	if (fotg210->rh_state == FOTG210_RH_RUNNING)
		return "running";
	if (fotg210->rh_state == FOTG210_RH_STOPPING)
		return "stopping";
	return "?";
}
/*
 * debugfs "registers" file: dump capability and operational registers,
 * plus driver statistics when FOTG210_STATS is defined.  If the
 * controller is suspended the registers are not touched at all.
 */
static ssize_t fill_registers_buffer(struct debug_buffer *buf)
{
	struct usb_hcd *hcd;
	struct fotg210_hcd *fotg210;
	unsigned long flags;
	unsigned temp, size, i;
	char *next, scratch[80];
	static const char fmt[] = "%*s\n";
	static const char label[] = "";
	hcd = bus_to_hcd(buf->bus);
	fotg210 = hcd_to_fotg210(hcd);
	next = buf->output_buf;
	size = buf->alloc_size;
	spin_lock_irqsave(&fotg210->lock, flags);
	if (!HCD_HW_ACCESSIBLE(hcd)) {
		size = scnprintf(next, size,
				"bus %s, device %s\n"
				"%s\n"
				"SUSPENDED(no register access)\n",
				hcd->self.controller->bus->name,
				dev_name(hcd->self.controller),
				hcd->product_desc);
		goto done;
	}
	/* Capability Registers */
	i = HC_VERSION(fotg210, fotg210_readl(fotg210,
			&fotg210->caps->hc_capbase));
	temp = scnprintf(next, size,
			"bus %s, device %s\n"
			"%s\n"
			"EHCI %x.%02x, rh state %s\n",
			hcd->self.controller->bus->name,
			dev_name(hcd->self.controller),
			hcd->product_desc,
			i >> 8, i & 0x0ff, rh_state_string(fotg210));
	size -= temp;
	next += temp;
	/* FIXME interpret both types of params */
	i = fotg210_readl(fotg210, &fotg210->caps->hcs_params);
	temp = scnprintf(next, size, "structural params 0x%08x\n", i);
	size -= temp;
	next += temp;
	i = fotg210_readl(fotg210, &fotg210->caps->hcc_params);
	temp = scnprintf(next, size, "capability params 0x%08x\n", i);
	size -= temp;
	next += temp;
	/* Operational Registers */
	temp = dbg_status_buf(scratch, sizeof(scratch), label,
			fotg210_readl(fotg210, &fotg210->regs->status));
	temp = scnprintf(next, size, fmt, temp, scratch);
	size -= temp;
	next += temp;
	temp = dbg_command_buf(scratch, sizeof(scratch), label,
			fotg210_readl(fotg210, &fotg210->regs->command));
	temp = scnprintf(next, size, fmt, temp, scratch);
	size -= temp;
	next += temp;
	temp = dbg_intr_buf(scratch, sizeof(scratch), label,
			fotg210_readl(fotg210, &fotg210->regs->intr_enable));
	temp = scnprintf(next, size, fmt, temp, scratch);
	size -= temp;
	next += temp;
	temp = scnprintf(next, size, "uframe %04x\n",
			fotg210_read_frame_index(fotg210));
	size -= temp;
	next += temp;
	if (fotg210->async_unlink) {
		temp = scnprintf(next, size, "async unlink qh %p\n",
				fotg210->async_unlink);
		size -= temp;
		next += temp;
	}
#ifdef FOTG210_STATS
	temp = scnprintf(next, size,
			"irq normal %ld err %ld iaa %ld(lost %ld)\n",
			fotg210->stats.normal, fotg210->stats.error,
			fotg210->stats.iaa, fotg210->stats.lost_iaa);
	size -= temp;
	next += temp;
	temp = scnprintf(next, size, "complete %ld unlink %ld\n",
			fotg210->stats.complete, fotg210->stats.unlink);
	size -= temp;
	next += temp;
#endif
done:
	spin_unlock_irqrestore(&fotg210->lock, flags);
	return buf->alloc_size - size;
}
/* Allocate per-open state for a debugfs file; NULL on allocation failure */
static struct debug_buffer
*alloc_buffer(struct usb_bus *bus, ssize_t (*fill_func)(struct debug_buffer *))
{
	struct debug_buffer *buf = kzalloc(sizeof(*buf), GFP_KERNEL);

	if (!buf)
		return NULL;
	buf->bus = bus;
	buf->fill_func = fill_func;
	mutex_init(&buf->mutex);
	buf->alloc_size = PAGE_SIZE;
	return buf;
}
/*
 * Lazily allocate the output buffer on first read, then run the
 * file-specific fill function; returns 0 on success or a negative errno.
 */
static int fill_buffer(struct debug_buffer *buf)
{
	int filled;

	if (!buf->output_buf) {
		buf->output_buf = vmalloc(buf->alloc_size);
		if (!buf->output_buf)
			return -ENOMEM;
	}
	filled = buf->fill_func(buf);
	if (filled < 0)
		return filled;
	buf->count = filled;
	return 0;
}
/* Shared read() for all the debugfs files: fill once, then stream out */
static ssize_t debug_output(struct file *file, char __user *user_buf,
		size_t len, loff_t *offset)
{
	struct debug_buffer *buf = file->private_data;
	int ret = 0;
	/* mutex serializes concurrent readers filling the same buffer */
	mutex_lock(&buf->mutex);
	if (buf->count == 0) {
		ret = fill_buffer(buf);
		if (ret != 0) {
			mutex_unlock(&buf->mutex);
			goto out;
		}
	}
	mutex_unlock(&buf->mutex);
	ret = simple_read_from_buffer(user_buf, len, offset,
				      buf->output_buf, buf->count);
out:
	return ret;
}
/* Release the state allocated by the matching open() */
static int debug_close(struct inode *inode, struct file *file)
{
	struct debug_buffer *buf = file->private_data;

	if (!buf)
		return 0;
	vfree(buf->output_buf);
	kfree(buf);
	return 0;
}
static int debug_async_open(struct inode *inode, struct file *file)
{
	struct debug_buffer *buf;

	buf = alloc_buffer(inode->i_private, fill_async_buffer);
	if (!buf)
		return -ENOMEM;
	file->private_data = buf;
	return 0;
}
static int debug_periodic_open(struct inode *inode, struct file *file)
{
	struct debug_buffer *buf;
	buf = alloc_buffer(inode->i_private, fill_periodic_buffer);
	if (!buf)
		return -ENOMEM;
	/* periodic dump can be large; use a bigger buffer (more on 64-bit) */
	buf->alloc_size = (sizeof(void *) == 4 ? 6 : 8)*PAGE_SIZE;
	file->private_data = buf;
	return 0;
}
static int debug_registers_open(struct inode *inode, struct file *file)
{
	struct debug_buffer *buf;

	buf = alloc_buffer(inode->i_private, fill_registers_buffer);
	if (!buf)
		return -ENOMEM;
	file->private_data = buf;
	return 0;
}
/* Create per-bus debugfs files under the driver's debugfs root */
static inline void create_debug_files(struct fotg210_hcd *fotg210)
{
	struct usb_bus *bus = &fotg210_to_hcd(fotg210)->self;
	struct dentry *root;
	root = debugfs_create_dir(bus->bus_name, fotg210_debug_root);
	debugfs_create_file("async", S_IRUGO, root, bus, &debug_async_fops);
	debugfs_create_file("periodic", S_IRUGO, root, bus,
			&debug_periodic_fops);
	debugfs_create_file("registers", S_IRUGO, root, bus,
			&debug_registers_fops);
}
/* Remove this bus's debugfs directory and everything below it */
static inline void remove_debug_files(struct fotg210_hcd *fotg210)
{
	struct usb_bus *bus = &fotg210_to_hcd(fotg210)->self;
	debugfs_lookup_and_remove(bus->bus_name, fotg210_debug_root);
}
/* handshake - spin reading hc until handshake completes or fails
 * @ptr: address of hc register to be read
 * @mask: bits to look at in result of read
 * @done: value of those bits when handshake succeeds
 * @usec: timeout in microseconds
 *
 * Returns negative errno, or zero on success
 *
 * Success happens when the "mask" bits have the specified value (hardware
 * handshake done). There are two failure modes: "usec" have passed (major
 * hardware flakeout), or the register reads as all-ones (hardware removed).
 *
 * That last failure should only happen in cases like physical cardbus eject
 * before driver shutdown. But it also seems to be caused by bugs in cardbus
 * bridge shutdown: shutting down the bridge before the devices using it.
 */
static int handshake(struct fotg210_hcd *fotg210, void __iomem *ptr,
		u32 mask, u32 done, int usec)
{
	u32 result;
	int ret;
	ret = readl_poll_timeout_atomic(ptr, result,
			((result & mask) == done ||
			 result == U32_MAX), 1, usec);
	if (result == U32_MAX) /* card removed */
		return -ENODEV;
	return ret;
}
/* Force HC to halt state from unknown (EHCI spec section 2.3).
 * Must be called with interrupts enabled and the lock not held.
 */
static int fotg210_halt(struct fotg210_hcd *fotg210)
{
	u32 temp;
	spin_lock_irq(&fotg210->lock);
	/* disable any irqs left enabled by previous code */
	fotg210_writel(fotg210, 0, &fotg210->regs->intr_enable);
	/*
	 * This routine gets called during probe before fotg210->command
	 * has been initialized, so we can't rely on its value.
	 */
	fotg210->command &= ~CMD_RUN;
	temp = fotg210_readl(fotg210, &fotg210->regs->command);
	temp &= ~(CMD_RUN | CMD_IAAD);
	fotg210_writel(fotg210, temp, &fotg210->regs->command);
	spin_unlock_irq(&fotg210->lock);
	/* wait for any in-flight irq handler before polling for halt */
	synchronize_irq(fotg210_to_hcd(fotg210)->irq);
	return handshake(fotg210, &fotg210->regs->status,
			STS_HALT, STS_HALT, 16 * 125);
}
/* Reset a non-running (STS_HALT == 1) controller.
 * Must be called with interrupts enabled and the lock not held.
 */
static int fotg210_reset(struct fotg210_hcd *fotg210)
{
	int retval;
	u32 command = fotg210_readl(fotg210, &fotg210->regs->command);
	/* If the EHCI debug controller is active, special care must be
	 * taken before and after a host controller reset
	 */
	if (fotg210->debug && !dbgp_reset_prep(fotg210_to_hcd(fotg210)))
		fotg210->debug = NULL;
	command |= CMD_RESET;
	dbg_cmd(fotg210, "reset", command);
	fotg210_writel(fotg210, command, &fotg210->regs->command);
	fotg210->rh_state = FOTG210_RH_HALTED;
	fotg210->next_statechange = jiffies;
	/* hardware clears CMD_RESET when the reset completes */
	retval = handshake(fotg210, &fotg210->regs->command,
			CMD_RESET, 0, 250 * 1000);
	if (retval)
		return retval;
	if (fotg210->debug)
		dbgp_external_startup(fotg210_to_hcd(fotg210));
	fotg210->port_c_suspend = fotg210->suspended_ports =
			fotg210->resuming_ports = 0;
	return retval;
}
/* Idle the controller (turn off the schedules).
 * Must be called with interrupts enabled and the lock not held.
 */
static void fotg210_quiesce(struct fotg210_hcd *fotg210)
{
	u32 temp;
	if (fotg210->rh_state != FOTG210_RH_RUNNING)
		return;
	/* wait for any schedule enables/disables to take effect */
	temp = (fotg210->command << 10) & (STS_ASS | STS_PSS);
	handshake(fotg210, &fotg210->regs->status, STS_ASS | STS_PSS, temp,
			16 * 125);
	/* then disable anything that's still active */
	spin_lock_irq(&fotg210->lock);
	fotg210->command &= ~(CMD_ASE | CMD_PSE);
	fotg210_writel(fotg210, fotg210->command, &fotg210->regs->command);
	spin_unlock_irq(&fotg210->lock);
	/* hardware can take 16 microframes to turn off ... */
	handshake(fotg210, &fotg210->regs->status, STS_ASS | STS_PSS, 0,
			16 * 125);
}
static void end_unlink_async(struct fotg210_hcd *fotg210);
static void unlink_empty_async(struct fotg210_hcd *fotg210);
static void fotg210_work(struct fotg210_hcd *fotg210);
static void start_unlink_intr(struct fotg210_hcd *fotg210,
struct fotg210_qh *qh);
static void end_unlink_intr(struct fotg210_hcd *fotg210, struct fotg210_qh *qh);
/* Set a bit in the USBCMD register */
static void fotg210_set_command_bit(struct fotg210_hcd *fotg210, u32 bit)
{
	u32 cmd = fotg210->command | bit;

	fotg210->command = cmd;
	fotg210_writel(fotg210, cmd, &fotg210->regs->command);
	/* read back to unblock the posted write */
	fotg210_readl(fotg210, &fotg210->regs->command);
}
/* Clear a bit in the USBCMD register */
static void fotg210_clear_command_bit(struct fotg210_hcd *fotg210, u32 bit)
{
	u32 cmd = fotg210->command & ~bit;

	fotg210->command = cmd;
	fotg210_writel(fotg210, cmd, &fotg210->regs->command);
	/* read back to unblock the posted write */
	fotg210_readl(fotg210, &fotg210->regs->command);
}
/* EHCI timer support... Now using hrtimers.
*
* Lots of different events are triggered from fotg210->hrtimer. Whenever
* the timer routine runs, it checks each possible event; events that are
* currently enabled and whose expiration time has passed get handled.
* The set of enabled events is stored as a collection of bitflags in
* fotg210->enabled_hrtimer_events, and they are numbered in order of
* increasing delay values (ranging between 1 ms and 100 ms).
*
* Rather than implementing a sorted list or tree of all pending events,
* we keep track only of the lowest-numbered pending event, in
* fotg210->next_hrtimer_event. Whenever fotg210->hrtimer gets restarted, its
* expiration time is set to the timeout value for this event.
*
* As a result, events might not get handled right away; the actual delay
* could be anywhere up to twice the requested delay. This doesn't
* matter, because none of the events are especially time-critical. The
* ones that matter most all have a delay of 1 ms, so they will be
* handled after 2 ms at most, which is okay. In addition to this, we
* allow for an expiration range of 1 ms.
*/
/* Delay lengths for the hrtimer event types.
 * Keep this list sorted by delay length, in the same order as
 * the event types indexed by enum fotg210_hrtimer_event in fotg210.h.
 *
 * The table is read-only, so let it live in rodata.
 */
static const unsigned event_delays_ns[] = {
	1 * NSEC_PER_MSEC,	/* FOTG210_HRTIMER_POLL_ASS */
	1 * NSEC_PER_MSEC,	/* FOTG210_HRTIMER_POLL_PSS */
	1 * NSEC_PER_MSEC,	/* FOTG210_HRTIMER_POLL_DEAD */
	1125 * NSEC_PER_USEC,	/* FOTG210_HRTIMER_UNLINK_INTR */
	2 * NSEC_PER_MSEC,	/* FOTG210_HRTIMER_FREE_ITDS */
	6 * NSEC_PER_MSEC,	/* FOTG210_HRTIMER_ASYNC_UNLINKS */
	10 * NSEC_PER_MSEC,	/* FOTG210_HRTIMER_IAA_WATCHDOG */
	10 * NSEC_PER_MSEC,	/* FOTG210_HRTIMER_DISABLE_PERIODIC */
	15 * NSEC_PER_MSEC,	/* FOTG210_HRTIMER_DISABLE_ASYNC */
	100 * NSEC_PER_MSEC,	/* FOTG210_HRTIMER_IO_WATCHDOG */
};
/* Enable a pending hrtimer event */
static void fotg210_enable_event(struct fotg210_hcd *fotg210, unsigned event,
		bool resched)
{
	ktime_t *timeout = &fotg210->hr_timeouts[event];
	/* resched == false re-enables the event with its stored expiry */
	if (resched)
		*timeout = ktime_add(ktime_get(), event_delays_ns[event]);
	fotg210->enabled_hrtimer_events |= (1 << event);
	/* Track only the lowest-numbered pending event */
	if (event < fotg210->next_hrtimer_event) {
		fotg210->next_hrtimer_event = event;
		/* the 1 ms slack matches the expiration range noted above */
		hrtimer_start_range_ns(&fotg210->hrtimer, *timeout,
				NSEC_PER_MSEC, HRTIMER_MODE_ABS);
	}
}
/* Poll the STS_ASS status bit; see when it agrees with CMD_ASE */
static void fotg210_poll_ASS(struct fotg210_hcd *fotg210)
{
	unsigned actual, want;
	/* Don't enable anything if the controller isn't running (e.g., died) */
	if (fotg210->rh_state != FOTG210_RH_RUNNING)
		return;
	want = (fotg210->command & CMD_ASE) ? STS_ASS : 0;
	actual = fotg210_readl(fotg210, &fotg210->regs->status) & STS_ASS;
	if (want != actual) {
		/* Poll again later, but give up after about 20 ms */
		if (fotg210->ASS_poll_count++ < 20) {
			fotg210_enable_event(fotg210, FOTG210_HRTIMER_POLL_ASS,
					true);
			return;
		}
		fotg210_dbg(fotg210, "Waited too long for the async schedule status (%x/%x), giving up\n",
				want, actual);
	}
	fotg210->ASS_poll_count = 0;
	/* The status is up-to-date; restart or stop the schedule as needed */
	if (want == 0) {	/* Stopped */
		/* work was queued while the schedule was turning off */
		if (fotg210->async_count > 0)
			fotg210_set_command_bit(fotg210, CMD_ASE);
	} else {		/* Running */
		if (fotg210->async_count == 0) {
			/* Turn off the schedule after a while */
			fotg210_enable_event(fotg210,
					FOTG210_HRTIMER_DISABLE_ASYNC,
					true);
		}
	}
}
/* Turn off the async schedule after a brief delay */
static void fotg210_disable_ASE(struct fotg210_hcd *fotg210)
{
	/* fotg210_poll_ASS() will confirm the schedule actually stopped */
	fotg210_clear_command_bit(fotg210, CMD_ASE);
}
/* Poll the STS_PSS status bit; see when it agrees with CMD_PSE */
static void fotg210_poll_PSS(struct fotg210_hcd *fotg210)
{
	unsigned actual, want;
	/* Don't do anything if the controller isn't running (e.g., died) */
	if (fotg210->rh_state != FOTG210_RH_RUNNING)
		return;
	want = (fotg210->command & CMD_PSE) ? STS_PSS : 0;
	actual = fotg210_readl(fotg210, &fotg210->regs->status) & STS_PSS;
	if (want != actual) {
		/* Poll again later, but give up after about 20 ms */
		if (fotg210->PSS_poll_count++ < 20) {
			fotg210_enable_event(fotg210, FOTG210_HRTIMER_POLL_PSS,
					true);
			return;
		}
		fotg210_dbg(fotg210, "Waited too long for the periodic schedule status (%x/%x), giving up\n",
				want, actual);
	}
	fotg210->PSS_poll_count = 0;
	/* The status is up-to-date; restart or stop the schedule as needed */
	if (want == 0) {	/* Stopped */
		/* work was queued while the schedule was turning off */
		if (fotg210->periodic_count > 0)
			fotg210_set_command_bit(fotg210, CMD_PSE);
	} else {		/* Running */
		if (fotg210->periodic_count == 0) {
			/* Turn off the schedule after a while */
			fotg210_enable_event(fotg210,
					FOTG210_HRTIMER_DISABLE_PERIODIC,
					true);
		}
	}
}
/* Turn off the periodic schedule after a brief delay */
static void fotg210_disable_PSE(struct fotg210_hcd *fotg210)
{
	/* fotg210_poll_PSS() will confirm the schedule actually stopped */
	fotg210_clear_command_bit(fotg210, CMD_PSE);
}
/* Poll the STS_HALT status bit; see when a dead controller stops */
static void fotg210_handle_controller_death(struct fotg210_hcd *fotg210)
{
	if (!(fotg210_readl(fotg210, &fotg210->regs->status) & STS_HALT)) {
		/* Give up after a few milliseconds */
		if (fotg210->died_poll_count++ < 5) {
			/* Try again later */
			fotg210_enable_event(fotg210,
					FOTG210_HRTIMER_POLL_DEAD, true);
			return;
		}
		fotg210_warn(fotg210, "Waited too long for the controller to stop, giving up\n");
	}
	/* Clean up the mess */
	fotg210->rh_state = FOTG210_RH_HALTED;
	/* silence the controller: no more interrupts */
	fotg210_writel(fotg210, 0, &fotg210->regs->intr_enable);
	/* give back all URBs still in flight */
	fotg210_work(fotg210);
	end_unlink_async(fotg210);
	/* Not in process context, so don't try to reset the controller */
}
/* Handle unlinked interrupt QHs once they are gone from the hardware */
static void fotg210_handle_intr_unlinks(struct fotg210_hcd *fotg210)
{
	bool stopped = (fotg210->rh_state < FOTG210_RH_RUNNING);
	/*
	 * Process all the QHs on the intr_unlink list that were added
	 * before the current unlink cycle began.  The list is in
	 * temporal order, so stop when we reach the first entry in the
	 * current cycle.  But if the root hub isn't running then
	 * process all the QHs on the list.
	 */
	/* flag prevents re-entrant list manipulation from end_unlink_intr */
	fotg210->intr_unlinking = true;
	while (fotg210->intr_unlink) {
		struct fotg210_qh *qh = fotg210->intr_unlink;
		if (!stopped && qh->unlink_cycle == fotg210->intr_unlink_cycle)
			break;
		/* pop the head of the singly-linked unlink list */
		fotg210->intr_unlink = qh->unlink_next;
		qh->unlink_next = NULL;
		end_unlink_intr(fotg210, qh);
	}
	/* Handle remaining entries later */
	if (fotg210->intr_unlink) {
		fotg210_enable_event(fotg210, FOTG210_HRTIMER_UNLINK_INTR,
				true);
		++fotg210->intr_unlink_cycle;
	}
	fotg210->intr_unlinking = false;
}
/* Start another free-iTDs/siTDs cycle */
static void start_free_itds(struct fotg210_hcd *fotg210)
{
	if (!(fotg210->enabled_hrtimer_events &
			BIT(FOTG210_HRTIMER_FREE_ITDS))) {
		/*
		 * Remember the current tail of the cached list; everything
		 * up to and including it will be freed once the hardware
		 * has stopped referencing those iTDs.
		 */
		fotg210->last_itd_to_free =
			list_last_entry(&fotg210->cached_itd_list,
					struct fotg210_itd, itd_list);
		fotg210_enable_event(fotg210, FOTG210_HRTIMER_FREE_ITDS, true);
	}
}
/* Wait for controller to stop using old iTDs and siTDs */
static void end_free_itds(struct fotg210_hcd *fotg210)
{
	struct fotg210_itd *itd, *n;
	/* controller stopped: it can't be using anything, free it all */
	if (fotg210->rh_state < FOTG210_RH_RUNNING)
		fotg210->last_itd_to_free = NULL;
	list_for_each_entry_safe(itd, n, &fotg210->cached_itd_list, itd_list) {
		list_del(&itd->itd_list);
		dma_pool_free(fotg210->itd_pool, itd, itd->itd_dma);
		/* stop at the tail recorded by start_free_itds() */
		if (itd == fotg210->last_itd_to_free)
			break;
	}
	/* entries cached after the recorded tail get their own cycle */
	if (!list_empty(&fotg210->cached_itd_list))
		start_free_itds(fotg210);
}
/* Handle lost (or very late) IAA interrupts */
static void fotg210_iaa_watchdog(struct fotg210_hcd *fotg210)
{
	if (fotg210->rh_state != FOTG210_RH_RUNNING)
		return;
	/*
	 * Lost IAA irqs wedge things badly; seen first with a vt8235.
	 * So we need this watchdog, but must protect it against both
	 * (a) SMP races against real IAA firing and retriggering, and
	 * (b) clean HC shutdown, when IAA watchdog was pending.
	 */
	if (fotg210->async_iaa) {
		u32 cmd, status;
		/* If we get here, IAA is *REALLY* late.  It's barely
		 * conceivable that the system is so busy that CMD_IAAD
		 * is still legitimately set, so let's be sure it's
		 * clear before we read STS_IAA.  (The HC should clear
		 * CMD_IAAD when it sets STS_IAA.)
		 */
		cmd = fotg210_readl(fotg210, &fotg210->regs->command);
		/*
		 * If IAA is set here it either legitimately triggered
		 * after the watchdog timer expired (_way_ late, so we'll
		 * still count it as lost) ... or a silicon erratum:
		 * - VIA seems to set IAA without triggering the IRQ;
		 * - IAAD potentially cleared without setting IAA.
		 */
		status = fotg210_readl(fotg210, &fotg210->regs->status);
		if ((status & STS_IAA) || !(cmd & CMD_IAAD)) {
			INCR(fotg210->stats.lost_iaa);
			/* acknowledge the (late or phantom) IAA */
			fotg210_writel(fotg210, STS_IAA,
					&fotg210->regs->status);
		}
		fotg210_dbg(fotg210, "IAA watchdog: status %x cmd %x\n",
				status, cmd);
		/* finish the async unlink the lost IAA was supposed to end */
		end_unlink_async(fotg210);
	}
}
/* Enable the I/O watchdog, if appropriate */
static void turn_on_io_watchdog(struct fotg210_hcd *fotg210)
{
	/* Not needed if the controller isn't running or it's already enabled */
	if (fotg210->rh_state != FOTG210_RH_RUNNING ||
			(fotg210->enabled_hrtimer_events &
			BIT(FOTG210_HRTIMER_IO_WATCHDOG)))
		return;
	/*
	 * Isochronous transfers always need the watchdog.
	 * For other sorts we use it only if the flag is set.
	 */
	if (fotg210->isoc_count > 0 || (fotg210->need_io_watchdog &&
			fotg210->async_count + fotg210->intr_count > 0))
		fotg210_enable_event(fotg210, FOTG210_HRTIMER_IO_WATCHDOG,
				true);
}
/* Handler functions for the hrtimer event types.
 * Keep this array in the same order as the event types indexed by
 * enum fotg210_hrtimer_event in fotg210.h.
 *
 * The table is never modified at runtime, so make the pointers const
 * (and let the whole array live in rodata).
 */
static void (* const event_handlers[])(struct fotg210_hcd *) = {
	fotg210_poll_ASS,			/* FOTG210_HRTIMER_POLL_ASS */
	fotg210_poll_PSS,			/* FOTG210_HRTIMER_POLL_PSS */
	fotg210_handle_controller_death,	/* FOTG210_HRTIMER_POLL_DEAD */
	fotg210_handle_intr_unlinks,	/* FOTG210_HRTIMER_UNLINK_INTR */
	end_free_itds,			/* FOTG210_HRTIMER_FREE_ITDS */
	unlink_empty_async,		/* FOTG210_HRTIMER_ASYNC_UNLINKS */
	fotg210_iaa_watchdog,		/* FOTG210_HRTIMER_IAA_WATCHDOG */
	fotg210_disable_PSE,		/* FOTG210_HRTIMER_DISABLE_PERIODIC */
	fotg210_disable_ASE,		/* FOTG210_HRTIMER_DISABLE_ASYNC */
	fotg210_work,			/* FOTG210_HRTIMER_IO_WATCHDOG */
};
/* Dispatch every enabled, expired hrtimer event; re-arm the rest */
static enum hrtimer_restart fotg210_hrtimer_func(struct hrtimer *t)
{
	struct fotg210_hcd *fotg210 =
		container_of(t, struct fotg210_hcd, hrtimer);
	ktime_t now;
	unsigned long events;
	unsigned long flags;
	unsigned e;
	spin_lock_irqsave(&fotg210->lock, flags);
	/* take a snapshot; handlers may enable new events as we go */
	events = fotg210->enabled_hrtimer_events;
	fotg210->enabled_hrtimer_events = 0;
	fotg210->next_hrtimer_event = FOTG210_HRTIMER_NO_EVENT;
	/*
	 * Check each pending event.  If its time has expired, handle
	 * the event; otherwise re-enable it.
	 */
	now = ktime_get();
	for_each_set_bit(e, &events, FOTG210_HRTIMER_NUM_EVENTS) {
		if (ktime_compare(now, fotg210->hr_timeouts[e]) >= 0)
			event_handlers[e](fotg210);
		else
			/* resched=false: keep the stored expiry time */
			fotg210_enable_event(fotg210, e, false);
	}
	spin_unlock_irqrestore(&fotg210->lock, flags);
	/* re-arming (if needed) was done via fotg210_enable_event() */
	return HRTIMER_NORESTART;
}
#define fotg210_bus_suspend NULL
#define fotg210_bus_resume NULL
/*
 * Log the outcome of a port reset and pass the port status through
 * unchanged.  Called after the reset sequence has finished.
 */
static int check_reset_complete(struct fotg210_hcd *fotg210, int index,
		u32 __iomem *status_reg, int port_status)
{
	/* a disconnected port has nothing to report */
	if (!(port_status & PORT_CONNECT))
		return port_status;

	if (port_status & PORT_PE)
		fotg210_dbg(fotg210, "port %d reset complete, port enabled\n",
				index + 1);
	else
		/* with integrated TT, there's nobody to hand it to! */
		fotg210_dbg(fotg210, "Failed to enable port %d on root hub TT\n",
				index + 1);

	return port_status;
}
/* build "status change" packet (one or two bytes) from HC registers */
static int fotg210_hub_status_data(struct usb_hcd *hcd, char *buf)
{
	struct fotg210_hcd *fotg210 = hcd_to_fotg210(hcd);
	u32 temp, status;
	u32 mask;
	int retval = 1;
	unsigned long flags;
	/* init status to no-changes */
	buf[0] = 0;
	/* Inform the core about resumes-in-progress by returning
	 * a non-zero value even if there are no status changes.
	 */
	status = fotg210->resuming_ports;
	/* connect-change or enable-change triggers a report */
	mask = PORT_CSC | PORT_PEC;
	/* PORT_RESUME from hardware ~= PORT_STAT_C_SUSPEND */
	/* no hub change reports (bit 0) for now (power, ...) */
	/* port N changes (bit N)? */
	spin_lock_irqsave(&fotg210->lock, flags);
	temp = fotg210_readl(fotg210, &fotg210->regs->port_status);
	/*
	 * Return status information even for ports with OWNER set.
	 * Otherwise hub_wq wouldn't see the disconnect event when a
	 * high-speed device is switched over to the companion
	 * controller by the user.
	 */
	/* this controller has a single root-hub port, hence bit 1 only */
	if ((temp & mask) != 0 || test_bit(0, &fotg210->port_c_suspend) ||
			(fotg210->reset_done[0] &&
			time_after_eq(jiffies, fotg210->reset_done[0]))) {
		buf[0] |= 1 << 1;
		status = STS_PCD;
	}
	/* FIXME autosuspend idle root hubs */
	spin_unlock_irqrestore(&fotg210->lock, flags);
	/* non-zero return means "one byte of change data in buf" */
	return status ? retval : 0;
}
/* Fill in the root hub's USB hub descriptor from the HC's parameters */
static void fotg210_hub_descriptor(struct fotg210_hcd *fotg210,
		struct usb_hub_descriptor *desc)
{
	int ports = HCS_N_PORTS(fotg210->hcs_params);
	u16 bitmap_len = 1 + (ports / 8);

	desc->bDescriptorType = USB_DT_HUB;
	desc->bPwrOn2PwrGood = 10;	/* fotg210 1.0, 2.3.9 says 20ms max */
	desc->bHubContrCurrent = 0;
	desc->bNbrPorts = ports;
	desc->bDescLength = 7 + 2 * bitmap_len;

	/* two bitmaps:  ports removable, and usb 1.0 legacy PortPwrCtrlMask */
	memset(&desc->u.hs.DeviceRemovable[0], 0, bitmap_len);
	memset(&desc->u.hs.DeviceRemovable[bitmap_len], 0xff, bitmap_len);

	/* per-port overcurrent reporting; no power switching */
	desc->wHubCharacteristics = cpu_to_le16(HUB_CHAR_INDV_PORT_OCPM |
						HUB_CHAR_NO_LPSM);
}
/*
 * Root-hub control request dispatcher (Clear/Set Hub/Port Feature,
 * GetHubDescriptor, GetHubStatus, GetPortStatus).  Runs with
 * fotg210->lock held, except around quiesce/halt in the TEST case.
 * Returns 0 on success or -EPIPE to make the core "stall" the request.
 */
static int fotg210_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
		u16 wIndex, char *buf, u16 wLength)
{
	struct fotg210_hcd *fotg210 = hcd_to_fotg210(hcd);
	int ports = HCS_N_PORTS(fotg210->hcs_params);
	u32 __iomem *status_reg = &fotg210->regs->port_status;
	u32 temp, temp1, status;
	unsigned long flags;
	int retval = 0;
	unsigned selector;
	/*
	 * FIXME:  support SetPortFeatures USB_PORT_FEAT_INDICATOR.
	 * HCS_INDICATOR may say we can change LEDs to off/amber/green.
	 * (track current state ourselves) ... blink for diagnostics,
	 * power, "this is the one", etc.  EHCI spec supports this.
	 */
	spin_lock_irqsave(&fotg210->lock, flags);
	switch (typeReq) {
	case ClearHubFeature:
		switch (wValue) {
		case C_HUB_LOCAL_POWER:
		case C_HUB_OVER_CURRENT:
			/* no hub-wide feature/status flags */
			break;
		default:
			goto error;
		}
		break;
	case ClearPortFeature:
		if (!wIndex || wIndex > ports)
			goto error;
		/* wIndex is 1-based on the wire, 0-based internally */
		wIndex--;
		temp = fotg210_readl(fotg210, status_reg);
		/* mask write-1-to-clear bits so we don't ack them by accident */
		temp &= ~PORT_RWC_BITS;
		/*
		 * Even if OWNER is set, so the port is owned by the
		 * companion controller, hub_wq needs to be able to clear
		 * the port-change status bits (especially
		 * USB_PORT_STAT_C_CONNECTION).
		 */
		switch (wValue) {
		case USB_PORT_FEAT_ENABLE:
			fotg210_writel(fotg210, temp & ~PORT_PE, status_reg);
			break;
		case USB_PORT_FEAT_C_ENABLE:
			fotg210_writel(fotg210, temp | PORT_PEC, status_reg);
			break;
		case USB_PORT_FEAT_SUSPEND:
			if (temp & PORT_RESET)
				goto error;
			if (!(temp & PORT_SUSPEND))
				break;
			if ((temp & PORT_PE) == 0)
				goto error;
			/* resume signaling for 20 msec */
			fotg210_writel(fotg210, temp | PORT_RESUME, status_reg);
			/* GetPortStatus will finish the resume after this deadline */
			fotg210->reset_done[wIndex] = jiffies
					+ msecs_to_jiffies(USB_RESUME_TIMEOUT);
			break;
		case USB_PORT_FEAT_C_SUSPEND:
			clear_bit(wIndex, &fotg210->port_c_suspend);
			break;
		case USB_PORT_FEAT_C_CONNECTION:
			fotg210_writel(fotg210, temp | PORT_CSC, status_reg);
			break;
		case USB_PORT_FEAT_C_OVER_CURRENT:
			/* overcurrent change lives in the OTG interrupt register */
			fotg210_writel(fotg210, temp | OTGISR_OVC,
					&fotg210->regs->otgisr);
			break;
		case USB_PORT_FEAT_C_RESET:
			/* GetPortStatus clears reset */
			break;
		default:
			goto error;
		}
		/* unblock posted write */
		fotg210_readl(fotg210, &fotg210->regs->command);
		break;
	case GetHubDescriptor:
		fotg210_hub_descriptor(fotg210, (struct usb_hub_descriptor *)
				buf);
		break;
	case GetHubStatus:
		/* no hub-wide feature/status flags */
		memset(buf, 0, 4);
		/*cpu_to_le32s ((u32 *) buf); */
		break;
	case GetPortStatus:
		if (!wIndex || wIndex > ports)
			goto error;
		wIndex--;
		status = 0;
		temp = fotg210_readl(fotg210, status_reg);
		/* wPortChange bits */
		if (temp & PORT_CSC)
			status |= USB_PORT_STAT_C_CONNECTION << 16;
		if (temp & PORT_PEC)
			status |= USB_PORT_STAT_C_ENABLE << 16;
		temp1 = fotg210_readl(fotg210, &fotg210->regs->otgisr);
		if (temp1 & OTGISR_OVC)
			status |= USB_PORT_STAT_C_OVERCURRENT << 16;
		/* whoever resumes must GetPortStatus to complete it!! */
		if (temp & PORT_RESUME) {
			/* Remote Wakeup received? */
			if (!fotg210->reset_done[wIndex]) {
				/* resume signaling for 20 msec */
				fotg210->reset_done[wIndex] = jiffies
						+ msecs_to_jiffies(20);
				/* check the port again */
				mod_timer(&fotg210_to_hcd(fotg210)->rh_timer,
						fotg210->reset_done[wIndex]);
			}
			/* resume completed? */
			else if (time_after_eq(jiffies,
					fotg210->reset_done[wIndex])) {
				clear_bit(wIndex, &fotg210->suspended_ports);
				set_bit(wIndex, &fotg210->port_c_suspend);
				fotg210->reset_done[wIndex] = 0;
				/* stop resume signaling */
				temp = fotg210_readl(fotg210, status_reg);
				fotg210_writel(fotg210, temp &
						~(PORT_RWC_BITS | PORT_RESUME),
						status_reg);
				clear_bit(wIndex, &fotg210->resuming_ports);
				retval = handshake(fotg210, status_reg,
						PORT_RESUME, 0, 2000);/* 2ms */
				if (retval != 0) {
					fotg210_err(fotg210,
							"port %d resume error %d\n",
							wIndex + 1, retval);
					goto error;
				}
				temp &= ~(PORT_SUSPEND|PORT_RESUME|(3<<10));
			}
		}
		/* whoever resets must GetPortStatus to complete it!! */
		if ((temp & PORT_RESET) && time_after_eq(jiffies,
				fotg210->reset_done[wIndex])) {
			status |= USB_PORT_STAT_C_RESET << 16;
			fotg210->reset_done[wIndex] = 0;
			clear_bit(wIndex, &fotg210->resuming_ports);
			/* force reset to complete */
			fotg210_writel(fotg210,
					temp & ~(PORT_RWC_BITS | PORT_RESET),
					status_reg);
			/* REVISIT:  some hardware needs 550+ usec to clear
			 * this bit; seems too long to spin routinely...
			 */
			retval = handshake(fotg210, status_reg,
					PORT_RESET, 0, 1000);
			if (retval != 0) {
				fotg210_err(fotg210, "port %d reset error %d\n",
						wIndex + 1, retval);
				goto error;
			}
			/* see what we found out */
			temp = check_reset_complete(fotg210, wIndex, status_reg,
					fotg210_readl(fotg210, status_reg));
			/* restart schedule */
			fotg210->command |= CMD_RUN;
			fotg210_writel(fotg210, fotg210->command, &fotg210->regs->command);
		}
		if (!(temp & (PORT_RESUME|PORT_RESET))) {
			fotg210->reset_done[wIndex] = 0;
			clear_bit(wIndex, &fotg210->resuming_ports);
		}
		/* transfer dedicated ports to the companion hc */
		if ((temp & PORT_CONNECT) &&
				test_bit(wIndex, &fotg210->companion_ports)) {
			temp &= ~PORT_RWC_BITS;
			fotg210_writel(fotg210, temp, status_reg);
			fotg210_dbg(fotg210, "port %d --> companion\n",
					wIndex + 1);
			temp = fotg210_readl(fotg210, status_reg);
		}
		/*
		 * Even if OWNER is set, there's no harm letting hub_wq
		 * see the wPortStatus values (they should all be 0 except
		 * for PORT_POWER anyway).
		 */
		if (temp & PORT_CONNECT) {
			status |= USB_PORT_STAT_CONNECTION;
			status |= fotg210_port_speed(fotg210, temp);
		}
		if (temp & PORT_PE)
			status |= USB_PORT_STAT_ENABLE;
		/* maybe the port was unsuspended without our knowledge */
		if (temp & (PORT_SUSPEND|PORT_RESUME)) {
			status |= USB_PORT_STAT_SUSPEND;
		} else if (test_bit(wIndex, &fotg210->suspended_ports)) {
			clear_bit(wIndex, &fotg210->suspended_ports);
			clear_bit(wIndex, &fotg210->resuming_ports);
			fotg210->reset_done[wIndex] = 0;
			if (temp & PORT_PE)
				set_bit(wIndex, &fotg210->port_c_suspend);
		}
		temp1 = fotg210_readl(fotg210, &fotg210->regs->otgisr);
		if (temp1 & OTGISR_OVC)
			status |= USB_PORT_STAT_OVERCURRENT;
		if (temp & PORT_RESET)
			status |= USB_PORT_STAT_RESET;
		if (test_bit(wIndex, &fotg210->port_c_suspend))
			status |= USB_PORT_STAT_C_SUSPEND << 16;
		if (status & ~0xffff)	/* only if wPortChange is interesting */
			dbg_port(fotg210, "GetStatus", wIndex + 1, temp);
		put_unaligned_le32(status, buf);
		break;
	case SetHubFeature:
		switch (wValue) {
		case C_HUB_LOCAL_POWER:
		case C_HUB_OVER_CURRENT:
			/* no hub-wide feature/status flags */
			break;
		default:
			goto error;
		}
		break;
	case SetPortFeature:
		/* high byte of wIndex carries the test-mode selector */
		selector = wIndex >> 8;
		wIndex &= 0xff;
		if (!wIndex || wIndex > ports)
			goto error;
		wIndex--;
		temp = fotg210_readl(fotg210, status_reg);
		temp &= ~PORT_RWC_BITS;
		switch (wValue) {
		case USB_PORT_FEAT_SUSPEND:
			if ((temp & PORT_PE) == 0
					|| (temp & PORT_RESET) != 0)
				goto error;
			/* After above check the port must be connected.
			 * Set appropriate bit thus could put phy into low power
			 * mode if we have hostpc feature
			 */
			fotg210_writel(fotg210, temp | PORT_SUSPEND,
					status_reg);
			set_bit(wIndex, &fotg210->suspended_ports);
			break;
		case USB_PORT_FEAT_RESET:
			if (temp & PORT_RESUME)
				goto error;
			/* line status bits may report this as low speed,
			 * which can be fine if this root hub has a
			 * transaction translator built in.
			 */
			fotg210_dbg(fotg210, "port %d reset\n", wIndex + 1);
			temp |= PORT_RESET;
			temp &= ~PORT_PE;
			/*
			 * caller must wait, then call GetPortStatus
			 * usb 2.0 spec says 50 ms resets on root
			 */
			fotg210->reset_done[wIndex] = jiffies
					+ msecs_to_jiffies(50);
			fotg210_writel(fotg210, temp, status_reg);
			break;
		/* For downstream facing ports (these):  one hub port is put
		 * into test mode according to USB2 11.24.2.13, then the hub
		 * must be reset (which for root hub now means rmmod+modprobe,
		 * or else system reboot).  See EHCI 2.3.9 and 4.14 for info
		 * about the EHCI-specific stuff.
		 */
		case USB_PORT_FEAT_TEST:
			if (!selector || selector > 5)
				goto error;
			/* quiesce/halt sleep, so drop the lock around them */
			spin_unlock_irqrestore(&fotg210->lock, flags);
			fotg210_quiesce(fotg210);
			spin_lock_irqsave(&fotg210->lock, flags);
			/* Put all enabled ports into suspend */
			temp = fotg210_readl(fotg210, status_reg) &
				~PORT_RWC_BITS;
			if (temp & PORT_PE)
				fotg210_writel(fotg210, temp | PORT_SUSPEND,
						status_reg);
			spin_unlock_irqrestore(&fotg210->lock, flags);
			fotg210_halt(fotg210);
			spin_lock_irqsave(&fotg210->lock, flags);
			temp = fotg210_readl(fotg210, status_reg);
			temp |= selector << 16;
			fotg210_writel(fotg210, temp, status_reg);
			break;
		default:
			goto error;
		}
		/* unblock posted writes */
		fotg210_readl(fotg210, &fotg210->regs->command);
		break;
	default:
error:
		/* "stall" on error */
		retval = -EPIPE;
	}
	spin_unlock_irqrestore(&fotg210->lock, flags);
	return retval;
}
/* No companion controller exists, so there is nothing to relinquish */
static void __maybe_unused fotg210_relinquish_port(struct usb_hcd *hcd,
		int portnum)
{
}
/* No companion controller exists, so no port is ever handed over */
static int __maybe_unused fotg210_port_handed_over(struct usb_hcd *hcd,
		int portnum)
{
	return 0;
}
/* There's basically three types of memory:
* - data used only by the HCD ... kmalloc is fine
* - async and periodic schedules, shared by HC and HCD ... these
* need to use dma_pool or dma_alloc_coherent
* - driver buffers, read/written by HC ... single shot DMA mapped
*
* There's also "register" data (e.g. PCI or SOC), which is memory mapped.
* No memory seen by this driver is pageable.
*/
/* Allocate the key transfer structures from the previously allocated pool */

/*
 * Initialize a freshly allocated qtd: zero it, record its DMA address,
 * and mark it halted with terminated next-pointers so the hardware
 * ignores it until it is filled in.
 */
static inline void fotg210_qtd_init(struct fotg210_hcd *fotg210,
		struct fotg210_qtd *qtd, dma_addr_t dma)
{
	/* start from a clean slate */
	memset(qtd, 0, sizeof(*qtd));
	INIT_LIST_HEAD(&qtd->qtd_list);
	qtd->qtd_dma = dma;

	/* halted and with no successors, until properly filled in */
	qtd->hw_token = cpu_to_hc32(fotg210, QTD_STS_HALT);
	qtd->hw_next = FOTG210_LIST_END(fotg210);
	qtd->hw_alt_next = FOTG210_LIST_END(fotg210);
}
/* Allocate one qtd from the DMA pool; returns NULL on exhaustion */
static struct fotg210_qtd *fotg210_qtd_alloc(struct fotg210_hcd *fotg210,
		gfp_t flags)
{
	dma_addr_t dma;
	struct fotg210_qtd *qtd;

	qtd = dma_pool_alloc(fotg210->qtd_pool, flags, &dma);
	if (!qtd)
		return NULL;

	fotg210_qtd_init(fotg210, qtd, dma);
	return qtd;
}
/* Return a qtd to the DMA pool it was allocated from */
static inline void fotg210_qtd_free(struct fotg210_hcd *fotg210,
		struct fotg210_qtd *qtd)
{
	dma_pool_free(fotg210->qtd_pool, qtd, qtd->qtd_dma);
}
/* Free a qh (and its dummy qtd); the qh must be idle and unlinked */
static void qh_destroy(struct fotg210_hcd *fotg210, struct fotg210_qh *qh)
{
	/* clean qtds first, and know this is not linked */
	if (!list_empty(&qh->qtd_list) || qh->qh_next.ptr) {
		fotg210_dbg(fotg210, "unused qh not empty!\n");
		BUG();
	}
	/* qh->dummy may be NULL if fotg210_qh_alloc() failed part-way */
	if (qh->dummy)
		fotg210_qtd_free(fotg210, qh->dummy);
	dma_pool_free(fotg210->qh_pool, qh->hw, qh->qh_dma);
	kfree(qh);
}
/*
 * Allocate a qh: the software part from the slab, the hardware part
 * from the qh DMA pool, plus a dummy qtd for safe URB queuing.
 * Returns NULL on any allocation failure (partial allocations freed).
 */
static struct fotg210_qh *fotg210_qh_alloc(struct fotg210_hcd *fotg210,
		gfp_t flags)
{
	struct fotg210_qh *qh;
	dma_addr_t dma;
	/* NOTE(review): uses GFP_ATOMIC rather than the caller's flags —
	 * presumably deliberate (matches the legacy EHCI code); confirm
	 */
	qh = kzalloc(sizeof(*qh), GFP_ATOMIC);
	if (!qh)
		goto done;
	qh->hw = (struct fotg210_qh_hw *)
		dma_pool_zalloc(fotg210->qh_pool, flags, &dma);
	if (!qh->hw)
		goto fail;
	qh->qh_dma = dma;
	INIT_LIST_HEAD(&qh->qtd_list);
	/* dummy td enables safe urb queuing */
	qh->dummy = fotg210_qtd_alloc(fotg210, flags);
	if (qh->dummy == NULL) {
		fotg210_dbg(fotg210, "no dummy td\n");
		goto fail1;
	}
done:
	return qh;
fail1:
	dma_pool_free(fotg210->qh_pool, qh->hw, qh->qh_dma);
fail:
	kfree(qh);
	return NULL;
}
/* The queue heads and transfer descriptors are managed from pools tied
* to each of the "per device" structures.
* This is the initialisation and cleanup code.
*/
/*
 * Free everything fotg210_mem_init() allocated.  Safe to call on a
 * partially initialized HCD: every pointer is checked (or the free
 * routine tolerates NULL) and cleared afterwards.
 */
static void fotg210_mem_cleanup(struct fotg210_hcd *fotg210)
{
	if (fotg210->async)
		qh_destroy(fotg210, fotg210->async);
	fotg210->async = NULL;
	if (fotg210->dummy)
		qh_destroy(fotg210, fotg210->dummy);
	fotg210->dummy = NULL;
	/* DMA consistent memory and pools */
	/* dma_pool_destroy() is a no-op on NULL, so no guards needed */
	dma_pool_destroy(fotg210->qtd_pool);
	fotg210->qtd_pool = NULL;
	dma_pool_destroy(fotg210->qh_pool);
	fotg210->qh_pool = NULL;
	dma_pool_destroy(fotg210->itd_pool);
	fotg210->itd_pool = NULL;
	if (fotg210->periodic)
		dma_free_coherent(fotg210_to_hcd(fotg210)->self.controller,
				fotg210->periodic_size * sizeof(u32),
				fotg210->periodic, fotg210->periodic_dma);
	fotg210->periodic = NULL;
	/* shadow periodic table */
	kfree(fotg210->pshadow);
	fotg210->pshadow = NULL;
}
/* remember to add cleanup code (above) if you add anything here */

/*
 * Allocate all of the HCD's shared memory: the qtd/qh/itd DMA pools,
 * the async list head qh, the hardware periodic frame table, and its
 * software shadow.  Returns 0 or -ENOMEM (after cleaning up whatever
 * was already allocated).
 */
static int fotg210_mem_init(struct fotg210_hcd *fotg210, gfp_t flags)
{
	int i;
	/* QTDs for control/bulk/intr transfers */
	fotg210->qtd_pool = dma_pool_create("fotg210_qtd",
			fotg210_to_hcd(fotg210)->self.controller,
			sizeof(struct fotg210_qtd),
			32 /* byte alignment (for hw parts) */,
			4096 /* can't cross 4K */);
	if (!fotg210->qtd_pool)
		goto fail;
	/* QHs for control/bulk/intr transfers */
	fotg210->qh_pool = dma_pool_create("fotg210_qh",
			fotg210_to_hcd(fotg210)->self.controller,
			sizeof(struct fotg210_qh_hw),
			32 /* byte alignment (for hw parts) */,
			4096 /* can't cross 4K */);
	if (!fotg210->qh_pool)
		goto fail;
	/* the async qh heads the circular async schedule */
	fotg210->async = fotg210_qh_alloc(fotg210, flags);
	if (!fotg210->async)
		goto fail;
	/* ITD for high speed ISO transfers */
	fotg210->itd_pool = dma_pool_create("fotg210_itd",
			fotg210_to_hcd(fotg210)->self.controller,
			sizeof(struct fotg210_itd),
			64 /* byte alignment (for hw parts) */,
			4096 /* can't cross 4K */);
	if (!fotg210->itd_pool)
		goto fail;
	/* Hardware periodic table */
	fotg210->periodic =
		dma_alloc_coherent(fotg210_to_hcd(fotg210)->self.controller,
				fotg210->periodic_size * sizeof(__le32),
				&fotg210->periodic_dma, 0);
	if (fotg210->periodic == NULL)
		goto fail;
	/* every frame slot starts out empty (terminated) */
	for (i = 0; i < fotg210->periodic_size; i++)
		fotg210->periodic[i] = FOTG210_LIST_END(fotg210);
	/* software shadow of hardware table */
	fotg210->pshadow = kcalloc(fotg210->periodic_size, sizeof(void *),
			flags);
	if (fotg210->pshadow != NULL)
		return 0;
fail:
	fotg210_dbg(fotg210, "couldn't init memory\n");
	fotg210_mem_cleanup(fotg210);
	return -ENOMEM;
}
/* EHCI hardware queue manipulation ... the core. QH/QTD manipulation.
*
* Control, bulk, and interrupt traffic all use "qh" lists. They list "qtd"
* entries describing USB transactions, max 16-20kB/entry (with 4kB-aligned
* buffers needed for the larger number). We use one QH per endpoint, queue
* multiple urbs (all three types) per endpoint. URBs may need several qtds.
*
* ISO traffic uses "ISO TD" (itd) records, and (along with
* interrupts) needs careful scheduling. Performance improvements can be
* an ongoing challenge. That's in "ehci-sched.c".
*
* USB 1.1 devices are handled (a) by "companion" OHCI or UHCI root hubs,
* or otherwise through transaction translators (TTs) in USB 2.0 hubs using
* (b) special fields in qh entries or (c) split iso entries. TTs will
* buffer low/full speed data so the host collects it at high speed.
*/
/* fill a qtd, returning how much of the buffer we were able to queue up */
static int qtd_fill(struct fotg210_hcd *fotg210, struct fotg210_qtd *qtd,
		dma_addr_t buf, size_t len, int token, int maxpacket)
{
	int i, count;
	u64 addr = buf;
	/* one buffer entry per 4K ... first might be short or unaligned */
	qtd->hw_buf[0] = cpu_to_hc32(fotg210, (u32)addr);
	qtd->hw_buf_hi[0] = cpu_to_hc32(fotg210, (u32)(addr >> 32));
	count = 0x1000 - (buf & 0x0fff);	/* rest of that page */
	if (likely(len < count))		/* ... iff needed */
		count = len;
	else {
		/* advance to the next 4K page boundary */
		buf +=  0x1000;
		buf &= ~0x0fff;
		/* per-qtd limit: from 16K to 20K (best alignment) */
		for (i = 1; count < len && i < 5; i++) {
			addr = buf;
			qtd->hw_buf[i] = cpu_to_hc32(fotg210, (u32)addr);
			qtd->hw_buf_hi[i] = cpu_to_hc32(fotg210,
					(u32)(addr >> 32));
			buf += 0x1000;
			if ((count + 0x1000) < len)
				count += 0x1000;
			else
				count = len;
		}
		/* short packets may only terminate transfers */
		/* truncate to a whole number of max-size packets */
		if (count != len)
			count -= (count % maxpacket);
	}
	/* total byte count lives in bits 16..30 of the token */
	qtd->hw_token = cpu_to_hc32(fotg210, (count << 16) | token);
	qtd->length = count;
	return count;
}
/*
 * Point the qh's hardware overlay at the given qtd and reset the
 * overlay's transfer state.  The qh must be idle (not visible to the
 * controller) because writes to an active overlay are unsafe.
 */
static inline void qh_update(struct fotg210_hcd *fotg210,
		struct fotg210_qh *qh, struct fotg210_qtd *qtd)
{
	struct fotg210_qh_hw *hw = qh->hw;
	/* writes to an active overlay are unsafe */
	BUG_ON(qh->qh_state != QH_STATE_IDLE);
	hw->hw_qtd_next = QTD_NEXT(fotg210, qtd->qtd_dma);
	hw->hw_alt_next = FOTG210_LIST_END(fotg210);
	/* Except for control endpoints, we make hardware maintain data
	 * toggle (like OHCI) ... here (re)initialize the toggle in the QH,
	 * and set the pseudo-toggle in udev. Only usb_clear_halt() will
	 * ever clear it.
	 */
	if (!(hw->hw_info1 & cpu_to_hc32(fotg210, QH_TOGGLE_CTL))) {
		unsigned is_out, epnum;
		is_out = qh->is_out;
		/* endpoint number lives in bits 8..11 of hw_info1 */
		epnum = (hc32_to_cpup(fotg210, &hw->hw_info1) >> 8) & 0x0f;
		if (unlikely(!usb_gettoggle(qh->dev, epnum, is_out))) {
			hw->hw_token &= ~cpu_to_hc32(fotg210, QTD_TOGGLE);
			usb_settoggle(qh->dev, epnum, is_out, 1);
		}
	}
	/* clear all transfer state except the toggle and ping bits */
	hw->hw_token &= cpu_to_hc32(fotg210, QTD_TOGGLE | QTD_STS_PING);
}
/* if it weren't for a common silicon quirk (writing the dummy into the qh
* overlay, so qh->hw_token wrongly becomes inactive/halted), only fault
* recovery (including urb dequeue) would need software changes to a QH...
*/
/*
 * Refresh the qh's hardware overlay so it points at the first pending
 * qtd (or the dummy qtd when the queue is empty).  Called while the qh
 * is idle, e.g. before relinking it into a schedule.
 */
static void qh_refresh(struct fotg210_hcd *fotg210, struct fotg210_qh *qh)
{
	struct fotg210_qtd *qtd;
	if (list_empty(&qh->qtd_list))
		qtd = qh->dummy;
	else {
		qtd = list_entry(qh->qtd_list.next,
				struct fotg210_qtd, qtd_list);
		/*
		 * first qtd may already be partially processed.
		 * If we come here during unlink, the QH overlay region
		 * might have reference to the just unlinked qtd. The
		 * qtd is updated in qh_completions(). Update the QH
		 * overlay here.
		 */
		if (cpu_to_hc32(fotg210, qtd->qtd_dma) == qh->hw->hw_current) {
			qh->hw->hw_qtd_next = qtd->hw_next;
			/* overlay already current: skip the full qh_update */
			qtd = NULL;
		}
	}
	if (qtd)
		qh_update(fotg210, qh, qtd);
}
static void qh_link_async(struct fotg210_hcd *fotg210, struct fotg210_qh *qh);
/*
 * Completion callback for usb_hub_clear_tt_buffer(): the TT buffer has
 * been cleared, so the qh may be relinked into the async schedule if
 * it still has work pending and the root hub is running.
 */
static void fotg210_clear_tt_buffer_complete(struct usb_hcd *hcd,
		struct usb_host_endpoint *ep)
{
	struct fotg210_hcd *fotg210 = hcd_to_fotg210(hcd);
	struct fotg210_qh *qh = ep->hcpriv;
	unsigned long flags;
	spin_lock_irqsave(&fotg210->lock, flags);
	qh->clearing_tt = 0;
	if (qh->qh_state == QH_STATE_IDLE && !list_empty(&qh->qtd_list)
			&& fotg210->rh_state == FOTG210_RH_RUNNING)
		qh_link_async(fotg210, qh);
	spin_unlock_irqrestore(&fotg210->lock, flags);
}
static void fotg210_clear_tt_buffer(struct fotg210_hcd *fotg210,
		struct fotg210_qh *qh, struct urb *urb, u32 token)
{
	/* If an async split transaction gets an error or is unlinked,
	 * the TT buffer may be left in an indeterminate state.  We
	 * have to clear the TT buffer.
	 *
	 * Note: this routine is never called for Isochronous transfers.
	 */
	/* skip interrupt URBs and qhs with a clear already in flight */
	if (urb->dev->tt && !usb_pipeint(urb->pipe) && !qh->clearing_tt) {
		struct usb_device *tt = urb->dev->tt->hub;
		dev_dbg(&tt->dev,
				"clear tt buffer port %d, a%d ep%d t%08x\n",
				urb->dev->ttport, urb->dev->devnum,
				usb_pipeendpoint(urb->pipe), token);
		/* the root hub's integrated TT needs no clearing */
		if (urb->dev->tt->hub !=
				fotg210_to_hcd(fotg210)->self.root_hub) {
			if (usb_hub_clear_tt_buffer(urb) == 0)
				qh->clearing_tt = 1;
		}
	}
}
/*
 * Accumulate the completed qtd's byte count into the urb and translate
 * its hardware token into an errno-style status.  Returns -EINPROGRESS
 * when the transfer is still fine, or a negative error code.
 */
static int qtd_copy_status(struct fotg210_hcd *fotg210, struct urb *urb,
		size_t length, u32 token)
{
	int status = -EINPROGRESS;
	/* count IN/OUT bytes, not SETUP (even short packets) */
	/* QTD_LENGTH holds the bytes NOT transferred */
	if (likely(QTD_PID(token) != 2))
		urb->actual_length += length - QTD_LENGTH(token);
	/* don't modify error codes */
	if (unlikely(urb->unlinked))
		return status;
	/* force cleanup after short read; not always an error */
	if (unlikely(IS_SHORT_READ(token)))
		status = -EREMOTEIO;
	/* serious "can't proceed" faults reported by the hardware */
	if (token & QTD_STS_HALT) {
		if (token & QTD_STS_BABBLE) {
			/* FIXME "must" disable babbling device's port too */
			status = -EOVERFLOW;
		/* CERR nonzero + halt --> stall */
		} else if (QTD_CERR(token)) {
			status = -EPIPE;
		/* In theory, more than one of the following bits can be set
		 * since they are sticky and the transaction is retried.
		 * Which to test first is rather arbitrary.
		 */
		} else if (token & QTD_STS_MMF) {
			/* fs/ls interrupt xfer missed the complete-split */
			status = -EPROTO;
		} else if (token & QTD_STS_DBE) {
			status = (QTD_PID(token) == 1) /* IN ? */
				? -ENOSR  /* hc couldn't read data */
				: -ECOMM; /* hc couldn't write data */
		} else if (token & QTD_STS_XACT) {
			/* timeout, bad CRC, wrong PID, etc */
			fotg210_dbg(fotg210, "devpath %s ep%d%s 3strikes\n",
					urb->dev->devpath,
					usb_pipeendpoint(urb->pipe),
					usb_pipein(urb->pipe) ? "in" : "out");
			status = -EPROTO;
		} else {	/* unknown */
			status = -EPROTO;
		}
		fotg210_dbg(fotg210,
				"dev%d ep%d%s qtd token %08x --> status %d\n",
				usb_pipedevice(urb->pipe),
				usb_pipeendpoint(urb->pipe),
				usb_pipein(urb->pipe) ? "in" : "out",
				token, status);
	}
	return status;
}
/*
 * Give an urb back to its submitter, mapping internal status values
 * (-EINPROGRESS / -EREMOTEIO) to the zero usbcore expects for success.
 * Temporarily drops fotg210->lock around the giveback, so the completion
 * handler may resubmit and reenter this HCD.
 */
static void fotg210_urb_done(struct fotg210_hcd *fotg210, struct urb *urb,
		int status)
__releases(fotg210->lock)
__acquires(fotg210->lock)
{
	if (likely(urb->hcpriv != NULL)) {
		struct fotg210_qh *qh = (struct fotg210_qh *) urb->hcpriv;

		/* S-mask in a QH means it's an interrupt urb */
		if ((qh->hw->hw_info2 & cpu_to_hc32(fotg210, QH_SMASK)) != 0) {

			/* ... update hc-wide periodic stats (for usbfs) */
			fotg210_to_hcd(fotg210)->self.bandwidth_int_reqs--;
		}
	}

	if (unlikely(urb->unlinked)) {
		INCR(fotg210->stats.unlink);
	} else {
		/* report non-error and short read status as zero */
		if (status == -EINPROGRESS || status == -EREMOTEIO)
			status = 0;
		INCR(fotg210->stats.complete);
	}

#ifdef FOTG210_URB_TRACE
	fotg210_dbg(fotg210,
			"%s %s urb %p ep%d%s status %d len %d/%d\n",
			__func__, urb->dev->devpath, urb,
			usb_pipeendpoint(urb->pipe),
			usb_pipein(urb->pipe) ? "in" : "out",
			status,
			urb->actual_length, urb->transfer_buffer_length);
#endif

	/* complete() can reenter this HCD */
	usb_hcd_unlink_urb_from_ep(fotg210_to_hcd(fotg210), urb);
	spin_unlock(&fotg210->lock);
	usb_hcd_giveback_urb(fotg210_to_hcd(fotg210), urb, status);
	spin_lock(&fotg210->lock);
}
static int qh_schedule(struct fotg210_hcd *fotg210, struct fotg210_qh *qh);
/* Process and free completed qtds for a qh, returning URBs to drivers.
 * Chases up to qh->hw_current.  Returns number of completions called,
 * indicating how much "real" work we did.
 *
 * Runs with qh->qh_state forced to QH_STATE_COMPLETING for the duration;
 * completion callbacks drop fotg210->lock, so the qtd list may change
 * underneath us -- qh->needs_rescan records that a rescan (or an unlink
 * by the caller) is required afterwards.
 */
static unsigned qh_completions(struct fotg210_hcd *fotg210,
		struct fotg210_qh *qh)
{
	struct fotg210_qtd *last, *end = qh->dummy;
	struct fotg210_qtd *qtd, *tmp;
	int last_status;
	int stopped;
	unsigned count = 0;
	u8 state;
	struct fotg210_qh_hw *hw = qh->hw;

	if (unlikely(list_empty(&qh->qtd_list)))
		return count;

	/* completions (or tasks on other cpus) must never clobber HALT
	 * till we've gone through and cleaned everything up, even when
	 * they add urbs to this qh's queue or mark them for unlinking.
	 *
	 * NOTE:  unlinking expects to be done in queue order.
	 *
	 * It's a bug for qh->qh_state to be anything other than
	 * QH_STATE_IDLE, unless our caller is scan_async() or
	 * scan_intr().
	 */
	state = qh->qh_state;
	qh->qh_state = QH_STATE_COMPLETING;
	stopped = (state == QH_STATE_IDLE);

rescan:
	last = NULL;
	last_status = -EINPROGRESS;
	qh->needs_rescan = 0;

	/* remove de-activated QTDs from front of queue.
	 * after faults (including short reads), cleanup this urb
	 * then let the queue advance.
	 * if queue is stopped, handles unlinks.
	 */
	list_for_each_entry_safe(qtd, tmp, &qh->qtd_list, qtd_list) {
		struct urb *urb;
		u32 token = 0;

		urb = qtd->urb;

		/* clean up any state from previous QTD ...*/
		if (last) {
			/* give back the previous urb once we reach a qtd
			 * belonging to a different urb */
			if (likely(last->urb != urb)) {
				fotg210_urb_done(fotg210, last->urb,
						last_status);
				count++;
				last_status = -EINPROGRESS;
			}
			fotg210_qtd_free(fotg210, last);
			last = NULL;
		}

		/* ignore urbs submitted during completions we reported */
		if (qtd == end)
			break;

		/* hardware copies qtd out of qh overlay */
		rmb();
		token = hc32_to_cpu(fotg210, qtd->hw_token);

		/* always clean up qtds the hc de-activated */
retry_xacterr:
		if ((token & QTD_STS_ACTIVE) == 0) {

			/* Report Data Buffer Error: non-fatal but useful */
			if (token & QTD_STS_DBE)
				fotg210_dbg(fotg210,
					"detected DataBufferErr for urb %p ep%d%s len %d, qtd %p [qh %p]\n",
					urb, usb_endpoint_num(&urb->ep->desc),
					usb_endpoint_dir_in(&urb->ep->desc)
						? "in" : "out",
					urb->transfer_buffer_length, qtd, qh);

			/* on STALL, error, and short reads this urb must
			 * complete and all its qtds must be recycled.
			 */
			if ((token & QTD_STS_HALT) != 0) {

				/* retry transaction errors until we
				 * reach the software xacterr limit
				 */
				if ((token & QTD_STS_XACT) &&
						QTD_CERR(token) == 0 &&
						++qh->xacterrs < QH_XACTERR_MAX &&
						!urb->unlinked) {
					fotg210_dbg(fotg210,
						"detected XactErr len %zu/%zu retry %d\n",
						qtd->length - QTD_LENGTH(token),
						qtd->length,
						qh->xacterrs);

					/* reset the token in the qtd and the
					 * qh overlay (which still contains
					 * the qtd) so that we pick up from
					 * where we left off
					 */
					token &= ~QTD_STS_HALT;
					token |= QTD_STS_ACTIVE |
						(FOTG210_TUNE_CERR << 10);
					qtd->hw_token = cpu_to_hc32(fotg210,
							token);
					wmb();
					hw->hw_token = cpu_to_hc32(fotg210,
							token);
					goto retry_xacterr;
				}
				stopped = 1;

			/* magic dummy for some short reads; qh won't advance.
			 * that silicon quirk can kick in with this dummy too.
			 *
			 * other short reads won't stop the queue, including
			 * control transfers (status stage handles that) or
			 * most other single-qtd reads ... the queue stops if
			 * URB_SHORT_NOT_OK was set so the driver submitting
			 * the urbs could clean it up.
			 */
			} else if (IS_SHORT_READ(token) &&
					!(qtd->hw_alt_next &
					FOTG210_LIST_END(fotg210))) {
				stopped = 1;
			}

		/* stop scanning when we reach qtds the hc is using */
		} else if (likely(!stopped
				&& fotg210->rh_state >= FOTG210_RH_RUNNING)) {
			break;

		/* scan the whole queue for unlinks whenever it stops */
		} else {
			stopped = 1;

			/* cancel everything if we halt, suspend, etc */
			if (fotg210->rh_state < FOTG210_RH_RUNNING)
				last_status = -ESHUTDOWN;

			/* this qtd is active; skip it unless a previous qtd
			 * for its urb faulted, or its urb was canceled.
			 */
			else if (last_status == -EINPROGRESS && !urb->unlinked)
				continue;

			/* qh unlinked; token in overlay may be most current */
			if (state == QH_STATE_IDLE &&
					cpu_to_hc32(fotg210, qtd->qtd_dma)
					== hw->hw_current) {
				token = hc32_to_cpu(fotg210, hw->hw_token);

				/* An unlink may leave an incomplete
				 * async transaction in the TT buffer.
				 * We have to clear it.
				 */
				fotg210_clear_tt_buffer(fotg210, qh, urb,
						token);
			}
		}

		/* unless we already know the urb's status, collect qtd status
		 * and update count of bytes transferred.  in common short read
		 * cases with only one data qtd (including control transfers),
		 * queue processing won't halt.  but with two or more qtds (for
		 * example, with a 32 KB transfer), when the first qtd gets a
		 * short read the second must be removed by hand.
		 */
		if (last_status == -EINPROGRESS) {
			last_status = qtd_copy_status(fotg210, urb,
					qtd->length, token);
			if (last_status == -EREMOTEIO &&
					(qtd->hw_alt_next &
					FOTG210_LIST_END(fotg210)))
				last_status = -EINPROGRESS;

			/* As part of low/full-speed endpoint-halt processing
			 * we must clear the TT buffer (11.17.5).
			 */
			if (unlikely(last_status != -EINPROGRESS &&
					last_status != -EREMOTEIO)) {
				/* The TT's in some hubs malfunction when they
				 * receive this request following a STALL (they
				 * stop sending isochronous packets).  Since a
				 * STALL can't leave the TT buffer in a busy
				 * state (if you believe Figures 11-48 - 11-51
				 * in the USB 2.0 spec), we won't clear the TT
				 * buffer in this case.  Strictly speaking this
				 * is a violation of the spec.
				 */
				if (last_status != -EPIPE)
					fotg210_clear_tt_buffer(fotg210, qh,
							urb, token);
			}
		}

		/* if we're removing something not at the queue head,
		 * patch the hardware queue pointer.
		 */
		if (stopped && qtd->qtd_list.prev != &qh->qtd_list) {
			last = list_entry(qtd->qtd_list.prev,
					struct fotg210_qtd, qtd_list);
			last->hw_next = qtd->hw_next;
		}

		/* remove qtd; it's recycled after possible urb completion */
		list_del(&qtd->qtd_list);
		last = qtd;

		/* reinit the xacterr counter for the next qtd */
		qh->xacterrs = 0;
	}

	/* last urb's completion might still need calling */
	if (likely(last != NULL)) {
		fotg210_urb_done(fotg210, last->urb, last_status);
		count++;
		fotg210_qtd_free(fotg210, last);
	}

	/* Do we need to rescan for URBs dequeued during a giveback? */
	if (unlikely(qh->needs_rescan)) {
		/* If the QH is already unlinked, do the rescan now. */
		if (state == QH_STATE_IDLE)
			goto rescan;

		/* Otherwise we have to wait until the QH is fully unlinked.
		 * Our caller will start an unlink if qh->needs_rescan is
		 * set.  But if an unlink has already started, nothing needs
		 * to be done.
		 */
		if (state != QH_STATE_LINKED)
			qh->needs_rescan = 0;
	}

	/* restore original state; caller must unlink or relink */
	qh->qh_state = state;

	/* be sure the hardware's done with the qh before refreshing
	 * it after fault cleanup, or recovering from silicon wrongly
	 * overlaying the dummy qtd (which reduces DMA chatter).
	 */
	if (stopped != 0 || hw->hw_qtd_next == FOTG210_LIST_END(fotg210)) {
		switch (state) {
		case QH_STATE_IDLE:
			qh_refresh(fotg210, qh);
			break;
		case QH_STATE_LINKED:
			/* We won't refresh a QH that's linked (after the HC
			 * stopped the queue).  That avoids a race:
			 *  - HC reads first part of QH;
			 *  - CPU updates that first part and the token;
			 *  - HC reads rest of that QH, including token
			 * Result:  HC gets an inconsistent image, and then
			 * DMAs to/from the wrong memory (corrupting it).
			 *
			 * That should be rare for interrupt transfers,
			 * except maybe high bandwidth ...
			 */

			/* Tell the caller to start an unlink */
			qh->needs_rescan = 1;
			break;
		/* otherwise, unlink already started */
		}
	}

	return count;
}
/* reverse of qh_urb_transaction: free a list of TDs.
* used for cleanup after errors, before HC sees an URB's TDs.
*/
static void qtd_list_free(struct fotg210_hcd *fotg210, struct urb *urb,
struct list_head *head)
{
struct fotg210_qtd *qtd, *temp;
list_for_each_entry_safe(qtd, temp, head, qtd_list) {
list_del(&qtd->qtd_list);
fotg210_qtd_free(fotg210, qtd);
}
}
/* create a list of filled qtds for this URB; won't link into qh.
 *
 * Allocates one qtd per maxpacket-bounded chunk of the transfer (plus a
 * SETUP qtd for control and, when needed, a status/zero-length qtd),
 * fills each and chains them via hw_next.  On allocation failure the
 * partial list is freed and NULL is returned; on success returns @head
 * with the qtds appended.
 */
static struct list_head *qh_urb_transaction(struct fotg210_hcd *fotg210,
		struct urb *urb, struct list_head *head, gfp_t flags)
{
	struct fotg210_qtd *qtd, *qtd_prev;
	dma_addr_t buf;
	int len, this_sg_len, maxpacket;
	int is_input;
	u32 token;
	int i;
	struct scatterlist *sg;

	/*
	 * URBs map to sequences of QTDs:  one logical transaction
	 */
	qtd = fotg210_qtd_alloc(fotg210, flags);
	if (unlikely(!qtd))
		return NULL;
	list_add_tail(&qtd->qtd_list, head);
	qtd->urb = urb;

	token = QTD_STS_ACTIVE;
	token |= (FOTG210_TUNE_CERR << 10);
	/* for split transactions, SplitXState initialized to zero */

	len = urb->transfer_buffer_length;
	is_input = usb_pipein(urb->pipe);
	if (usb_pipecontrol(urb->pipe)) {
		/* SETUP pid */
		qtd_fill(fotg210, qtd, urb->setup_dma,
				sizeof(struct usb_ctrlrequest),
				token | (2 /* "setup" */ << 8), 8);

		/* ... and always at least one more pid */
		token ^= QTD_TOGGLE;
		qtd_prev = qtd;
		qtd = fotg210_qtd_alloc(fotg210, flags);
		if (unlikely(!qtd))
			goto cleanup;
		qtd->urb = urb;
		qtd_prev->hw_next = QTD_NEXT(fotg210, qtd->qtd_dma);
		list_add_tail(&qtd->qtd_list, head);

		/* for zero length DATA stages, STATUS is always IN */
		if (len == 0)
			token |= (1 /* "in" */ << 8);
	}

	/*
	 * data transfer stage:  buffer setup
	 */
	i = urb->num_mapped_sgs;
	if (len > 0 && i > 0) {
		sg = urb->sg;
		buf = sg_dma_address(sg);

		/* urb->transfer_buffer_length may be smaller than the
		 * size of the scatterlist (or vice versa)
		 */
		this_sg_len = min_t(int, sg_dma_len(sg), len);
	} else {
		sg = NULL;
		buf = urb->transfer_dma;
		this_sg_len = len;
	}

	if (is_input)
		token |= (1 /* "in" */ << 8);
	/* else it's already initted to "out" pid (0 << 8) */

	maxpacket = usb_maxpacket(urb->dev, urb->pipe);

	/*
	 * buffer gets wrapped in one or more qtds;
	 * last one may be "short" (including zero len)
	 * and may serve as a control status ack
	 */
	for (;;) {
		int this_qtd_len;

		this_qtd_len = qtd_fill(fotg210, qtd, buf, this_sg_len, token,
				maxpacket);
		this_sg_len -= this_qtd_len;
		len -= this_qtd_len;
		buf += this_qtd_len;

		/*
		 * short reads advance to a "magic" dummy instead of the next
		 * qtd ... that forces the queue to stop, for manual cleanup.
		 * (this will usually be overridden later.)
		 */
		if (is_input)
			qtd->hw_alt_next = fotg210->async->hw->hw_alt_next;

		/* qh makes control packets use qtd toggle; maybe switch it */
		if ((maxpacket & (this_qtd_len + (maxpacket - 1))) == 0)
			token ^= QTD_TOGGLE;

		/* current scatterlist element exhausted: advance or stop */
		if (likely(this_sg_len <= 0)) {
			if (--i <= 0 || len <= 0)
				break;
			sg = sg_next(sg);
			buf = sg_dma_address(sg);
			this_sg_len = min_t(int, sg_dma_len(sg), len);
		}

		qtd_prev = qtd;
		qtd = fotg210_qtd_alloc(fotg210, flags);
		if (unlikely(!qtd))
			goto cleanup;
		qtd->urb = urb;
		qtd_prev->hw_next = QTD_NEXT(fotg210, qtd->qtd_dma);
		list_add_tail(&qtd->qtd_list, head);
	}

	/*
	 * unless the caller requires manual cleanup after short reads,
	 * have the alt_next mechanism keep the queue running after the
	 * last data qtd (the only one, for control and most other cases).
	 */
	if (likely((urb->transfer_flags & URB_SHORT_NOT_OK) == 0 ||
			usb_pipecontrol(urb->pipe)))
		qtd->hw_alt_next = FOTG210_LIST_END(fotg210);

	/*
	 * control requests may need a terminating data "status" ack;
	 * other OUT ones may need a terminating short packet
	 * (zero length).
	 */
	if (likely(urb->transfer_buffer_length != 0)) {
		int one_more = 0;

		if (usb_pipecontrol(urb->pipe)) {
			one_more = 1;
			token ^= 0x0100;	/* "in" <--> "out"  */
			token |= QTD_TOGGLE;	/* force DATA1 */
		} else if (usb_pipeout(urb->pipe)
				&& (urb->transfer_flags & URB_ZERO_PACKET)
				&& !(urb->transfer_buffer_length % maxpacket)) {
			one_more = 1;
		}
		if (one_more) {
			qtd_prev = qtd;
			qtd = fotg210_qtd_alloc(fotg210, flags);
			if (unlikely(!qtd))
				goto cleanup;
			qtd->urb = urb;
			qtd_prev->hw_next = QTD_NEXT(fotg210, qtd->qtd_dma);
			list_add_tail(&qtd->qtd_list, head);

			/* never any data in such packets */
			qtd_fill(fotg210, qtd, 0, 0, token, 0);
		}
	}

	/* by default, enable interrupt on urb completion */
	if (likely(!(urb->transfer_flags & URB_NO_INTERRUPT)))
		qtd->hw_token |= cpu_to_hc32(fotg210, QTD_IOC);
	return head;

cleanup:
	qtd_list_free(fotg210, urb, head);
	return NULL;
}
/* Would be best to create all qh's from config descriptors,
* when each interface/altsetting is established. Unlink
* any previous qh and cancel its urbs first; endpoints are
* implicitly reset then (data toggle too).
* That'd mean updating how usbcore talks to HCDs. (2.7?)
*/
/* Each QH holds a qtd list; a QH is used for everything except iso.
*
* For interrupt urbs, the scheduler must set the microframe scheduling
* mask(s) each time the QH gets scheduled. For highspeed, that's
* just one microframe in the s-mask. For split interrupt transactions
* there are additional complications: c-mask, maybe FSTNs.
*/
/*
 * Allocate and initialize a QH for @urb's endpoint: device/endpoint
 * addressing, maxpacket, speed/TT fields, and (for interrupt pipes) the
 * cached scheduling parameters.  Returns NULL on allocation failure or a
 * bogus maxpacket/speed.
 */
static struct fotg210_qh *qh_make(struct fotg210_hcd *fotg210, struct urb *urb,
		gfp_t flags)
{
	struct fotg210_qh *qh = fotg210_qh_alloc(fotg210, flags);
	struct usb_host_endpoint *ep;
	u32 info1 = 0, info2 = 0;
	int is_input, type;
	int maxp = 0;
	int mult;
	struct usb_tt *tt = urb->dev->tt;
	struct fotg210_qh_hw *hw;

	if (!qh)
		return qh;

	/*
	 * init endpoint/device data for this QH
	 */
	info1 |= usb_pipeendpoint(urb->pipe) << 8;
	info1 |= usb_pipedevice(urb->pipe) << 0;

	is_input = usb_pipein(urb->pipe);
	type = usb_pipetype(urb->pipe);
	ep = usb_pipe_endpoint(urb->dev, urb->pipe);
	maxp = usb_endpoint_maxp(&ep->desc);
	mult = usb_endpoint_maxp_mult(&ep->desc);

	/* 1024 byte maxpacket is a hardware ceiling.  High bandwidth
	 * acts like up to 3KB, but is built from smaller packets.
	 */
	if (maxp > 1024) {
		fotg210_dbg(fotg210, "bogus qh maxpacket %d\n", maxp);
		goto done;
	}

	/* Compute interrupt scheduling parameters just once, and save.
	 * - allowing for high bandwidth, how many nsec/uframe are used?
	 * - split transactions need a second CSPLIT uframe; same question
	 * - splits also need a schedule gap (for full/low speed I/O)
	 * - qh has a polling interval
	 *
	 * For control/bulk requests, the HC or TT handles these.
	 */
	if (type == PIPE_INTERRUPT) {
		qh->usecs = NS_TO_US(usb_calc_bus_time(USB_SPEED_HIGH,
				is_input, 0, mult * maxp));
		qh->start = NO_FRAME;

		if (urb->dev->speed == USB_SPEED_HIGH) {
			qh->c_usecs = 0;
			qh->gap_uf = 0;

			/* urb->interval is in uframes at high speed */
			qh->period = urb->interval >> 3;
			if (qh->period == 0 && urb->interval != 1) {
				/* NOTE interval 2 or 4 uframes could work.
				 * But interval 1 scheduling is simpler, and
				 * includes high bandwidth.
				 */
				urb->interval = 1;
			} else if (qh->period > fotg210->periodic_size) {
				qh->period = fotg210->periodic_size;
				urb->interval = qh->period << 3;
			}
		} else {
			int think_time;

			/* gap is f(FS/LS transfer times) */
			qh->gap_uf = 1 + usb_calc_bus_time(urb->dev->speed,
					is_input, 0, maxp) / (125 * 1000);

			/* FIXME this just approximates SPLIT/CSPLIT times */
			if (is_input) {		/* SPLIT, gap, CSPLIT+DATA */
				qh->c_usecs = qh->usecs + HS_USECS(0);
				qh->usecs = HS_USECS(1);
			} else {		/* SPLIT+DATA, gap, CSPLIT */
				qh->usecs += HS_USECS(1);
				qh->c_usecs = HS_USECS(0);
			}

			think_time = tt ? tt->think_time : 0;
			qh->tt_usecs = NS_TO_US(think_time +
					usb_calc_bus_time(urb->dev->speed,
					is_input, 0, maxp));
			qh->period = urb->interval;
			if (qh->period > fotg210->periodic_size) {
				qh->period = fotg210->periodic_size;
				urb->interval = qh->period;
			}
		}
	}

	/* support for tt scheduling, and access to toggles */
	qh->dev = urb->dev;

	/* using TT? */
	switch (urb->dev->speed) {
	case USB_SPEED_LOW:
		info1 |= QH_LOW_SPEED;
		fallthrough;

	case USB_SPEED_FULL:
		/* EPS 0 means "full" */
		if (type != PIPE_INTERRUPT)
			info1 |= (FOTG210_TUNE_RL_TT << 28);
		if (type == PIPE_CONTROL) {
			info1 |= QH_CONTROL_EP;		/* for TT */
			info1 |= QH_TOGGLE_CTL;		/* toggle from qtd */
		}
		info1 |= maxp << 16;

		info2 |= (FOTG210_TUNE_MULT_TT << 30);

		/* Some Freescale processors have an erratum in which the
		 * port number in the queue head was 0..N-1 instead of 1..N.
		 */
		if (fotg210_has_fsl_portno_bug(fotg210))
			info2 |= (urb->dev->ttport-1) << 23;
		else
			info2 |= urb->dev->ttport << 23;

		/* set the address of the TT; for TDI's integrated
		 * root hub tt, leave it zeroed.
		 */
		if (tt && tt->hub != fotg210_to_hcd(fotg210)->self.root_hub)
			info2 |= tt->hub->devnum << 16;

		/* NOTE:  if (PIPE_INTERRUPT) { scheduler sets c-mask } */

		break;

	case USB_SPEED_HIGH:		/* no TT involved */
		info1 |= QH_HIGH_SPEED;
		if (type == PIPE_CONTROL) {
			info1 |= (FOTG210_TUNE_RL_HS << 28);
			info1 |= 64 << 16;	/* usb2 fixed maxpacket */
			info1 |= QH_TOGGLE_CTL;	/* toggle from qtd */
			info2 |= (FOTG210_TUNE_MULT_HS << 30);
		} else if (type == PIPE_BULK) {
			info1 |= (FOTG210_TUNE_RL_HS << 28);
			/* The USB spec says that high speed bulk endpoints
			 * always use 512 byte maxpacket.  But some device
			 * vendors decided to ignore that, and MSFT is happy
			 * to help them do so.  So now people expect to use
			 * such nonconformant devices with Linux too; sigh.
			 */
			info1 |= maxp << 16;
			info2 |= (FOTG210_TUNE_MULT_HS << 30);
		} else {		/* PIPE_INTERRUPT */
			info1 |= maxp << 16;
			info2 |= mult << 30;
		}
		break;
	default:
		fotg210_dbg(fotg210, "bogus dev %p speed %d\n", urb->dev,
				urb->dev->speed);
done:
		qh_destroy(fotg210, qh);
		return NULL;
	}

	/* NOTE:  if (PIPE_INTERRUPT) { scheduler sets s-mask } */

	/* init as live, toggle clear, advance to dummy */
	qh->qh_state = QH_STATE_IDLE;
	hw = qh->hw;
	hw->hw_info1 = cpu_to_hc32(fotg210, info1);
	hw->hw_info2 = cpu_to_hc32(fotg210, info2);
	qh->is_out = !is_input;
	usb_settoggle(urb->dev, usb_pipeendpoint(urb->pipe), !is_input, 1);
	qh_refresh(fotg210, qh);
	return qh;
}
/*
 * Take a reference on the async schedule; on the 0 -> 1 transition, cancel
 * any pending turn-off timer event and (once ASS allows) start the
 * schedule and the I/O watchdog.
 */
static void enable_async(struct fotg210_hcd *fotg210)
{
	if (fotg210->async_count++ == 0) {
		/* Stop waiting to turn off the async schedule */
		fotg210->enabled_hrtimer_events &=
				~BIT(FOTG210_HRTIMER_DISABLE_ASYNC);

		/* Don't start the schedule until ASS is 0 */
		fotg210_poll_ASS(fotg210);
		turn_on_io_watchdog(fotg210);
	}
}
/*
 * Drop a reference on the async schedule; when the count reaches zero the
 * schedule must already be empty, and it is turned off once ASS permits.
 */
static void disable_async(struct fotg210_hcd *fotg210)
{
	if (--fotg210->async_count == 0) {
		/* The async schedule and async_unlink list are supposed
		 * to be empty */
		WARN_ON(fotg210->async->qh_next.qh || fotg210->async_unlink);

		/* Don't turn off the schedule until ASS is 1 */
		fotg210_poll_ASS(fotg210);
	}
}
/* move qh (and its qtds) onto async queue; maybe enable queue.
 *
 * Splices @qh right after the async head in both the software and the
 * hardware lists (wmb() makes the QH's own fields visible before the HC
 * can reach it).  Caller must hold fotg210->lock; @qh must be idle.
 */
static void qh_link_async(struct fotg210_hcd *fotg210, struct fotg210_qh *qh)
{
	__hc32 dma = QH_NEXT(fotg210, qh->qh_dma);
	struct fotg210_qh *head;

	/* Don't link a QH if there's a Clear-TT-Buffer pending */
	if (unlikely(qh->clearing_tt))
		return;

	WARN_ON(qh->qh_state != QH_STATE_IDLE);

	/* clear halt and/or toggle; and maybe recover from silicon quirk */
	qh_refresh(fotg210, qh);

	/* splice right after start */
	head = fotg210->async;
	qh->qh_next = head->qh_next;
	qh->hw->hw_next = head->hw->hw_next;
	wmb();

	head->qh_next.qh = qh;
	head->hw->hw_next = dma;

	qh->xacterrs = 0;
	qh->qh_state = QH_STATE_LINKED;
	/* qtd completions reported later by interrupt */

	enable_async(fotg210);
}
/* For control/bulk/interrupt, return QH with these TDs appended.
 * Allocates and initializes the QH if necessary.
 * Returns null if it can't allocate a QH it needs to.
 * If the QH has TDs (urbs) already, that's great.
 *
 * *ptr caches the endpoint's QH (urb->ep->hcpriv).  The new qtds are
 * appended by swapping the list's first qtd with the QH's dummy, so the
 * HC never observes a half-built queue (EHCI spec 4.10.2).  Caller must
 * hold fotg210->lock (allocation uses GFP_ATOMIC).
 */
static struct fotg210_qh *qh_append_tds(struct fotg210_hcd *fotg210,
		struct urb *urb, struct list_head *qtd_list,
		int epnum, void **ptr)
{
	struct fotg210_qh *qh = NULL;
	__hc32 qh_addr_mask = cpu_to_hc32(fotg210, 0x7f);

	qh = (struct fotg210_qh *) *ptr;
	if (unlikely(qh == NULL)) {
		/* can't sleep here, we have fotg210->lock... */
		qh = qh_make(fotg210, urb, GFP_ATOMIC);
		*ptr = qh;
	}
	if (likely(qh != NULL)) {
		struct fotg210_qtd *qtd;

		if (unlikely(list_empty(qtd_list)))
			qtd = NULL;
		else
			qtd = list_entry(qtd_list->next, struct fotg210_qtd,
					qtd_list);

		/* control qh may need patching ... */
		if (unlikely(epnum == 0)) {
			/* usb_reset_device() briefly reverts to address 0 */
			if (usb_pipedevice(urb->pipe) == 0)
				qh->hw->hw_info1 &= ~qh_addr_mask;
		}

		/* just one way to queue requests: swap with the dummy qtd.
		 * only hc or qh_refresh() ever modify the overlay.
		 */
		if (likely(qtd != NULL)) {
			struct fotg210_qtd *dummy;
			dma_addr_t dma;
			__hc32 token;

			/* to avoid racing the HC, use the dummy td instead of
			 * the first td of our list (becomes new dummy).  both
			 * tds stay deactivated until we're done, when the
			 * HC is allowed to fetch the old dummy (4.10.2).
			 */
			token = qtd->hw_token;
			qtd->hw_token = HALT_BIT(fotg210);

			dummy = qh->dummy;

			/* copy the first qtd's contents into the old dummy,
			 * keeping the dummy's own DMA address */
			dma = dummy->qtd_dma;
			*dummy = *qtd;
			dummy->qtd_dma = dma;

			list_del(&qtd->qtd_list);
			list_add(&dummy->qtd_list, qtd_list);
			list_splice_tail(qtd_list, &qh->qtd_list);

			/* the displaced first qtd becomes the new dummy */
			fotg210_qtd_init(fotg210, qtd, qtd->qtd_dma);
			qh->dummy = qtd;

			/* hc must see the new dummy at list end */
			dma = qtd->qtd_dma;
			qtd = list_entry(qh->qtd_list.prev,
					struct fotg210_qtd, qtd_list);
			qtd->hw_next = QTD_NEXT(fotg210, dma);

			/* let the hc process these next qtds */
			wmb();
			dummy->hw_token = token;

			urb->hcpriv = qh;
		}
	}
	return qh;
}
/*
 * Submit a control/bulk urb: link it to its endpoint, append its qtds to
 * the endpoint's QH (creating the QH if needed), and link the QH onto the
 * async schedule if it was idle.  Returns 0 or a negative errno; on
 * failure the caller's qtd list is freed here.
 */
static int submit_async(struct fotg210_hcd *fotg210, struct urb *urb,
		struct list_head *qtd_list, gfp_t mem_flags)
{
	int epnum;
	unsigned long flags;
	struct fotg210_qh *qh = NULL;
	int rc;

	epnum = urb->ep->desc.bEndpointAddress;

#ifdef FOTG210_URB_TRACE
	{
		struct fotg210_qtd *qtd;

		qtd = list_entry(qtd_list->next, struct fotg210_qtd, qtd_list);
		fotg210_dbg(fotg210,
				"%s %s urb %p ep%d%s len %d, qtd %p [qh %p]\n",
				__func__, urb->dev->devpath, urb,
				epnum & 0x0f, (epnum & USB_DIR_IN)
					? "in" : "out",
				urb->transfer_buffer_length,
				qtd, urb->ep->hcpriv);
	}
#endif

	spin_lock_irqsave(&fotg210->lock, flags);
	if (unlikely(!HCD_HW_ACCESSIBLE(fotg210_to_hcd(fotg210)))) {
		rc = -ESHUTDOWN;
		goto done;
	}
	rc = usb_hcd_link_urb_to_ep(fotg210_to_hcd(fotg210), urb);
	if (unlikely(rc))
		goto done;

	qh = qh_append_tds(fotg210, urb, qtd_list, epnum, &urb->ep->hcpriv);
	if (unlikely(qh == NULL)) {
		usb_hcd_unlink_urb_from_ep(fotg210_to_hcd(fotg210), urb);
		rc = -ENOMEM;
		goto done;
	}

	/* Control/bulk operations through TTs don't need scheduling,
	 * the HC and TT handle it when the TT has a buffer ready.
	 */
	if (likely(qh->qh_state == QH_STATE_IDLE))
		qh_link_async(fotg210, qh);
done:
	spin_unlock_irqrestore(&fotg210->lock, flags);
	if (unlikely(qh == NULL))
		qtd_list_free(fotg210, urb, qtd_list);
	return rc;
}
/*
 * Remove @qh from the async schedule (hardware and software lists) and
 * append it to the list of QHs waiting for the next IAA cycle.
 * Caller must hold fotg210->lock.
 */
static void single_unlink_async(struct fotg210_hcd *fotg210,
		struct fotg210_qh *qh)
{
	struct fotg210_qh *prev;

	/* Add to the end of the list of QHs waiting for the next IAAD */
	qh->qh_state = QH_STATE_UNLINK;
	if (fotg210->async_unlink)
		fotg210->async_unlink_last->unlink_next = qh;
	else
		fotg210->async_unlink = qh;
	fotg210->async_unlink_last = qh;

	/* Unlink it from the schedule */
	prev = fotg210->async;
	while (prev->qh_next.qh != qh)
		prev = prev->qh_next.qh;

	prev->hw->hw_next = qh->hw->hw_next;
	prev->qh_next = qh->qh_next;
	/* don't leave the scan cursor pointing at the unlinked QH */
	if (fotg210->qh_scan_next == qh)
		fotg210->qh_scan_next = qh->qh_next.qh;
}
/*
 * Move the queued unlinks onto the active IAA list and ask the controller
 * for an Interrupt-on-Async-Advance doorbell; with the controller halted,
 * finish the unlinks immediately instead (unless called from
 * end_unlink_async() itself, which loops rather than recursing).
 */
static void start_iaa_cycle(struct fotg210_hcd *fotg210, bool nested)
{
	/*
	 * Do nothing if an IAA cycle is already running or
	 * if one will be started shortly.
	 */
	if (fotg210->async_iaa || fotg210->async_unlinking)
		return;

	/* Do all the waiting QHs at once */
	fotg210->async_iaa = fotg210->async_unlink;
	fotg210->async_unlink = NULL;

	/* If the controller isn't running, we don't have to wait for it */
	if (unlikely(fotg210->rh_state < FOTG210_RH_RUNNING)) {
		if (!nested)		/* Avoid recursion */
			end_unlink_async(fotg210);

	/* Otherwise start a new IAA cycle */
	} else if (likely(fotg210->rh_state == FOTG210_RH_RUNNING)) {
		/* Make sure the unlinks are all visible to the hardware */
		wmb();

		fotg210_writel(fotg210, fotg210->command | CMD_IAAD,
				&fotg210->regs->command);
		/* posted-write flush before arming the watchdog */
		fotg210_readl(fotg210, &fotg210->regs->command);
		fotg210_enable_event(fotg210, FOTG210_HRTIMER_IAA_WATCHDOG,
				true);
	}
}
/* the async qh for the qtds being unlinked are now gone from the HC */

/*
 * Finish every QH on the IAA list: mark it idle, run its completions,
 * relink it if urbs are still queued and the root hub is running, and
 * drop its async-schedule reference.  May restart to drain unlinks that
 * were queued during the completions while the controller is stopped.
 */
static void end_unlink_async(struct fotg210_hcd *fotg210)
{
	struct fotg210_qh *qh;

	/* Process the idle QHs */
restart:
	fotg210->async_unlinking = true;
	while (fotg210->async_iaa) {
		qh = fotg210->async_iaa;
		fotg210->async_iaa = qh->unlink_next;
		qh->unlink_next = NULL;

		qh->qh_state = QH_STATE_IDLE;
		qh->qh_next.qh = NULL;

		qh_completions(fotg210, qh);
		if (!list_empty(&qh->qtd_list) &&
				fotg210->rh_state == FOTG210_RH_RUNNING)
			qh_link_async(fotg210, qh);
		disable_async(fotg210);
	}
	fotg210->async_unlinking = false;

	/* Start a new IAA cycle if any QHs are waiting for it */
	if (fotg210->async_unlink) {
		start_iaa_cycle(fotg210, true);
		if (unlikely(fotg210->rh_state < FOTG210_RH_RUNNING))
			goto restart;
	}
}
/*
 * Periodic housekeeping: unlink every async QH that has been empty for a
 * full timer cycle; QHs that went empty more recently are deferred to the
 * next FOTG210_HRTIMER_ASYNC_UNLINKS event.
 */
static void unlink_empty_async(struct fotg210_hcd *fotg210)
{
	struct fotg210_qh *qh, *next;
	bool stopped = (fotg210->rh_state < FOTG210_RH_RUNNING);
	bool check_unlinks_later = false;

	/* Unlink all the async QHs that have been empty for a timer cycle */
	next = fotg210->async->qh_next.qh;
	while (next) {
		qh = next;
		next = qh->qh_next.qh;

		if (list_empty(&qh->qtd_list) &&
				qh->qh_state == QH_STATE_LINKED) {
			/* too recent to unlink, unless the HC is stopped */
			if (!stopped && qh->unlink_cycle ==
					fotg210->async_unlink_cycle)
				check_unlinks_later = true;
			else
				single_unlink_async(fotg210, qh);
		}
	}

	/* Start a new IAA cycle if any QHs are waiting for it */
	if (fotg210->async_unlink)
		start_iaa_cycle(fotg210, false);

	/* QHs that haven't been empty for long enough will be handled later */
	if (check_unlinks_later) {
		fotg210_enable_event(fotg210, FOTG210_HRTIMER_ASYNC_UNLINKS,
				true);
		++fotg210->async_unlink_cycle;
	}
}
/* makes sure the async qh will become idle */
/* caller must own fotg210->lock */

static void start_unlink_async(struct fotg210_hcd *fotg210,
		struct fotg210_qh *qh)
{
	switch (qh->qh_state) {
	case QH_STATE_LINKED:
		/* Pull it off the schedule and kick off an IAA cycle */
		single_unlink_async(fotg210, qh);
		start_iaa_cycle(fotg210, false);
		break;
	case QH_STATE_COMPLETING:
		/* We were called during a giveback; qh_completions()
		 * will notice the flag and deal with it.
		 */
		qh->needs_rescan = 1;
		break;
	default:
		/* Not linked: there's nothing we can do */
		break;
	}
}
/*
 * Walk the async schedule, running completions for every QH with queued
 * qtds.  qh_scan_next is the resume cursor: completions drop the lock, so
 * single_unlink_async() keeps it valid if the next QH gets unlinked.
 * QHs left empty are timestamped for later unlinking by the hrtimer.
 */
static void scan_async(struct fotg210_hcd *fotg210)
{
	struct fotg210_qh *qh;
	bool check_unlinks_later = false;

	fotg210->qh_scan_next = fotg210->async->qh_next.qh;
	while (fotg210->qh_scan_next) {
		qh = fotg210->qh_scan_next;
		fotg210->qh_scan_next = qh->qh_next.qh;
rescan:
		/* clean any finished work for this qh */
		if (!list_empty(&qh->qtd_list)) {
			int temp;

			/*
			 * Unlinks could happen here; completion reporting
			 * drops the lock.  That's why fotg210->qh_scan_next
			 * always holds the next qh to scan; if the next qh
			 * gets unlinked then fotg210->qh_scan_next is adjusted
			 * in single_unlink_async().
			 */
			temp = qh_completions(fotg210, qh);
			if (qh->needs_rescan) {
				start_unlink_async(fotg210, qh);
			} else if (list_empty(&qh->qtd_list)
					&& qh->qh_state == QH_STATE_LINKED) {
				qh->unlink_cycle = fotg210->async_unlink_cycle;
				check_unlinks_later = true;
			} else if (temp != 0)
				goto rescan;
		}
	}

	/*
	 * Unlink empty entries, reducing DMA usage as well
	 * as HCD schedule-scanning costs.  Delay for any qh
	 * we just scanned, there's a not-unusual case that it
	 * doesn't stay idle for long.
	 */
	if (check_unlinks_later && fotg210->rh_state == FOTG210_RH_RUNNING &&
			!(fotg210->enabled_hrtimer_events &
			BIT(FOTG210_HRTIMER_ASYNC_UNLINKS))) {
		fotg210_enable_event(fotg210,
				FOTG210_HRTIMER_ASYNC_UNLINKS, true);
		++fotg210->async_unlink_cycle;
	}
}
/* EHCI scheduled transaction support: interrupt, iso, split iso
* These are called "periodic" transactions in the EHCI spec.
*
* Note that for interrupt transfers, the QH/QTD manipulation is shared
* with the "asynchronous" transaction support (control/bulk transfers).
* The only real difference is in how interrupt transfers are scheduled.
*
* For ISO, we make an "iso_stream" head to serve the same role as a QH.
* It keeps track of every ITD (or SITD) that's linked, and holds enough
* pre-calculated schedule data to make appending to the queue be quick.
*/
static int fotg210_get_frame(struct usb_hcd *hcd);
/* periodic_next_shadow - return "next" pointer on shadow list
 * @periodic: host pointer to qh/itd
 * @tag: hardware tag for type of this record
 *
 * The shadow list mirrors the hardware periodic list; each record type
 * keeps its "next" link in a different member of the union.
 */
static union fotg210_shadow *periodic_next_shadow(struct fotg210_hcd *fotg210,
		union fotg210_shadow *periodic, __hc32 tag)
{
	unsigned int type = hc32_to_cpu(fotg210, tag);

	if (type == Q_TYPE_QH)
		return &periodic->qh->qh_next;
	if (type == Q_TYPE_FSTN)
		return &periodic->fstn->fstn_next;
	return &periodic->itd->itd_next;
}
/*
 * Return a pointer to the hardware "next" link of a periodic-list record.
 * For a QH the link lives in the software-managed hw struct; for every
 * other record type the record itself is the hardware-visible part.
 */
static __hc32 *shadow_next_periodic(struct fotg210_hcd *fotg210,
		union fotg210_shadow *periodic, __hc32 tag)
{
	if (hc32_to_cpu(fotg210, tag) == Q_TYPE_QH)
		return &periodic->qh->hw->hw_next;
	return periodic->hw_next;
}
/* caller must hold fotg210->lock */

/*
 * Remove @ptr from one frame's periodic list by walking the parallel
 * shadow and hardware lists to its predecessor and splicing around it.
 * The record's own "next" pointers are left for the caller to fix up.
 */
static void periodic_unlink(struct fotg210_hcd *fotg210, unsigned frame,
		void *ptr)
{
	union fotg210_shadow *prev_p = &fotg210->pshadow[frame];
	__hc32 *hw_p = &fotg210->periodic[frame];
	union fotg210_shadow here = *prev_p;

	/* find predecessor of "ptr"; hw and shadow lists are in sync */
	while (here.ptr && here.ptr != ptr) {
		prev_p = periodic_next_shadow(fotg210, prev_p,
				Q_NEXT_TYPE(fotg210, *hw_p));
		hw_p = shadow_next_periodic(fotg210, &here,
				Q_NEXT_TYPE(fotg210, *hw_p));
		here = *prev_p;
	}
	/* an interrupt entry (at list end) could have been shared */
	if (!here.ptr)
		return;

	/* update shadow and hardware lists ... the old "next" pointers
	 * from ptr may still be in use, the caller updates them.
	 */
	*prev_p = *periodic_next_shadow(fotg210, &here,
			Q_NEXT_TYPE(fotg210, *hw_p));

	*hw_p = *shadow_next_periodic(fotg210, &here,
			Q_NEXT_TYPE(fotg210, *hw_p));
}
/* how many of the uframe's 125 usecs are allocated?
 *
 * Walks frame's shadow list summing the bandwidth already claimed in the
 * given microframe: QH S-mask/C-mask contributions and ITD transactions.
 * (Note the switch deliberately routes FSTNs through "default".)
 */
static unsigned short periodic_usecs(struct fotg210_hcd *fotg210,
		unsigned frame, unsigned uframe)
{
	__hc32 *hw_p = &fotg210->periodic[frame];
	union fotg210_shadow *q = &fotg210->pshadow[frame];
	unsigned usecs = 0;
	struct fotg210_qh_hw *hw;

	while (q->ptr) {
		switch (hc32_to_cpu(fotg210, Q_NEXT_TYPE(fotg210, *hw_p))) {
		case Q_TYPE_QH:
			hw = q->qh->hw;
			/* is it in the S-mask? */
			if (hw->hw_info2 & cpu_to_hc32(fotg210, 1 << uframe))
				usecs += q->qh->usecs;
			/* ... or C-mask? */
			if (hw->hw_info2 & cpu_to_hc32(fotg210,
					1 << (8 + uframe)))
				usecs += q->qh->c_usecs;
			hw_p = &hw->hw_next;
			q = &q->qh->qh_next;
			break;
		/* case Q_TYPE_FSTN: */
		default:
			/* for "save place" FSTNs, count the relevant INTR
			 * bandwidth from the previous frame
			 */
			if (q->fstn->hw_prev != FOTG210_LIST_END(fotg210))
				fotg210_dbg(fotg210, "ignoring FSTN cost ...\n");
			hw_p = &q->fstn->hw_next;
			q = &q->fstn->fstn_next;
			break;
		case Q_TYPE_ITD:
			if (q->itd->hw_transaction[uframe])
				usecs += q->itd->stream->usecs;
			hw_p = &q->itd->hw_next;
			q = &q->itd->itd_next;
			break;
		}
	}
	if (usecs > fotg210->uframe_periodic_max)
		fotg210_err(fotg210, "uframe %d sched overrun: %d usecs\n",
				frame * 8 + uframe, usecs);
	return usecs;
}
/*
 * Do @dev1 and @dev2 sit behind the same transaction translator?
 * For multi-TT hubs each port has its own TT, so the port number
 * must match as well; for single-TT hubs sharing the hub is enough.
 */
static int same_tt(struct usb_device *dev1, struct usb_device *dev2)
{
	/* no TT, or different hubs/TTs: definitely not shared */
	if (!dev1->tt || dev1->tt != dev2->tt)
		return 0;

	return dev1->tt->multi ? (dev1->ttport == dev2->ttport) : 1;
}
/* return true iff the device's transaction translator is available
 * for a periodic transfer starting at the specified frame, using
 * all the uframes in the mask.
 */
static int tt_no_collision(struct fotg210_hcd *fotg210, unsigned period,
		struct usb_device *dev, unsigned frame, u32 uf_mask)
{
	if (period == 0)	/* error */
		return 0;

	/* note bandwidth wastage: split never follows csplit
	 * (different dev or endpoint) until the next uframe.
	 * calling convention doesn't make that distinction.
	 */
	for (; frame < fotg210->periodic_size; frame += period) {
		union fotg210_shadow here;
		__hc32 type;
		struct fotg210_qh_hw *hw;

		here = fotg210->pshadow[frame];
		type = Q_NEXT_TYPE(fotg210, fotg210->periodic[frame]);
		while (here.ptr) {
			switch (hc32_to_cpu(fotg210, type)) {
			case Q_TYPE_ITD:
				type = Q_NEXT_TYPE(fotg210, here.itd->hw_next);
				here = here.itd->itd_next;
				continue;

			case Q_TYPE_QH:
				hw = here.qh->hw;
				if (same_tt(dev, here.qh->dev)) {
					u32 mask;

					mask = hc32_to_cpu(fotg210,
							hw->hw_info2);
					/* "knows" no gap is needed */
					mask |= mask >> 8;
					if (mask & uf_mask)
						break;
				}
				type = Q_NEXT_TYPE(fotg210, hw->hw_next);
				here = here.qh->qh_next;
				continue;

			/* case Q_TYPE_FSTN: */
			default:
				fotg210_dbg(fotg210,
						"periodic frame %d bogus type %d\n",
						frame, type);
			}

			/* NOTE: only the "continue" statements above stay
			 * in this inner loop; a "break" out of the switch
			 * (TT collision) or the fall-through default case
			 * (corrupt schedule) lands here and fails the scan.
			 */
			/* collision or error */
			return 0;
		}
	}

	/* no collision */
	return 1;
}
/*
 * Bump the periodic-schedule refcount; on the 0 -> 1 transition,
 * cancel any pending "turn the schedule off" timer event and ask
 * the controller to start periodic processing.
 */
static void enable_periodic(struct fotg210_hcd *fotg210)
{
	if (fotg210->periodic_count++ != 0)
		return;

	/* Stop waiting to turn off the periodic schedule */
	fotg210->enabled_hrtimer_events &=
		~BIT(FOTG210_HRTIMER_DISABLE_PERIODIC);

	/* Don't start the schedule until PSS is 0 */
	fotg210_poll_PSS(fotg210);
	turn_on_io_watchdog(fotg210);
}
/*
 * Drop the periodic-schedule refcount; only the 1 -> 0 transition
 * actually arranges for the schedule to be switched off.
 */
static void disable_periodic(struct fotg210_hcd *fotg210)
{
	if (--fotg210->periodic_count != 0)
		return;

	/* Don't turn off the schedule until PSS is 1 */
	fotg210_poll_PSS(fotg210);
}
/* periodic schedule slots have iso tds (normal or split) first, then a
 * sparse tree for active interrupt transfers.
 *
 * this just links in a qh; caller guarantees uframe masks are set right.
 * no FSTN support (yet; fotg210 0.96+)
 */
static void qh_link_periodic(struct fotg210_hcd *fotg210, struct fotg210_qh *qh)
{
	unsigned i;
	unsigned period = qh->period;

	dev_dbg(&qh->dev->dev,
			"link qh%d-%04x/%p start %d [%d/%d us]\n", period,
			hc32_to_cpup(fotg210, &qh->hw->hw_info2) &
			(QH_CMASK | QH_SMASK), qh, qh->start, qh->usecs,
			qh->c_usecs);

	/* high bandwidth, or otherwise every microframe */
	if (period == 0)
		period = 1;

	/* the qh is linked into every frame it covers */
	for (i = qh->start; i < fotg210->periodic_size; i += period) {
		union fotg210_shadow *prev = &fotg210->pshadow[i];
		__hc32 *hw_p = &fotg210->periodic[i];
		union fotg210_shadow here = *prev;
		__hc32 type = 0;

		/* skip the iso nodes at list head */
		while (here.ptr) {
			type = Q_NEXT_TYPE(fotg210, *hw_p);
			if (type == cpu_to_hc32(fotg210, Q_TYPE_QH))
				break;
			prev = periodic_next_shadow(fotg210, prev, type);
			hw_p = shadow_next_periodic(fotg210, &here, type);
			here = *prev;
		}

		/* sorting each branch by period (slow-->fast)
		 * enables sharing interior tree nodes
		 */
		while (here.ptr && qh != here.qh) {
			if (qh->period > here.qh->period)
				break;
			prev = &here.qh->qh_next;
			hw_p = &here.qh->hw->hw_next;
			here = *prev;
		}
		/* link in this qh, unless some earlier pass did that */
		if (qh != here.qh) {
			qh->qh_next = here;
			if (here.qh)
				qh->hw->hw_next = *hw_p;
			/* make the qh fully initialized before the HC can
			 * reach it through the frame list
			 */
			wmb();
			prev->qh = qh;
			*hw_p = QH_NEXT(fotg210, qh->qh_dma);
		}
	}
	qh->qh_state = QH_STATE_LINKED;
	qh->xacterrs = 0;

	/* update per-qh bandwidth for usbfs */
	fotg210_to_hcd(fotg210)->self.bandwidth_allocated += qh->period
		? ((qh->usecs + qh->c_usecs) / qh->period)
		: (qh->usecs * 8);

	list_add(&qh->intr_node, &fotg210->intr_qh_list);

	/* maybe enable periodic schedule processing */
	++fotg210->intr_count;
	enable_periodic(fotg210);
}
/*
 * Remove an interrupt qh from every periodic frame it occupies and
 * drop its usbfs bandwidth accounting.  Caller holds fotg210->lock.
 */
static void qh_unlink_periodic(struct fotg210_hcd *fotg210,
		struct fotg210_qh *qh)
{
	unsigned i;
	unsigned period;

	/*
	 * If qh is for a low/full-speed device, simply unlinking it
	 * could interfere with an ongoing split transaction.  To unlink
	 * it safely would require setting the QH_INACTIVATE bit and
	 * waiting at least one frame, as described in EHCI 4.12.2.5.
	 *
	 * We won't bother with any of this.  Instead, we assume that the
	 * only reason for unlinking an interrupt QH while the current URB
	 * is still active is to dequeue all the URBs (flush the whole
	 * endpoint queue).
	 *
	 * If rebalancing the periodic schedule is ever implemented, this
	 * approach will no longer be valid.
	 */

	/* high bandwidth, or otherwise part of every microframe */
	period = qh->period;
	if (!period)
		period = 1;

	for (i = qh->start; i < fotg210->periodic_size; i += period)
		periodic_unlink(fotg210, i, qh);

	/* update per-qh bandwidth for usbfs */
	fotg210_to_hcd(fotg210)->self.bandwidth_allocated -= qh->period
		? ((qh->usecs + qh->c_usecs) / qh->period)
		: (qh->usecs * 8);

	dev_dbg(&qh->dev->dev,
			"unlink qh%d-%04x/%p start %d [%d/%d us]\n",
			qh->period, hc32_to_cpup(fotg210, &qh->hw->hw_info2) &
			(QH_CMASK | QH_SMASK), qh, qh->start, qh->usecs,
			qh->c_usecs);

	/* qh->qh_next still "live" to HC */
	qh->qh_state = QH_STATE_UNLINK;
	qh->qh_next.ptr = NULL;

	/* keep scan_intr()'s cursor valid if it was about to visit us */
	if (fotg210->qh_scan_next == qh)
		fotg210->qh_scan_next = list_entry(qh->intr_node.next,
				struct fotg210_qh, intr_node);
	list_del(&qh->intr_node);
}
/*
 * Begin asynchronously unlinking an interrupt qh: take it off the
 * periodic schedule now, queue it on the intr_unlink list, and let the
 * hrtimer (or an immediate call when the root hub isn't running) finish
 * the job once the controller can no longer be touching it.
 */
static void start_unlink_intr(struct fotg210_hcd *fotg210,
		struct fotg210_qh *qh)
{
	/* If the QH isn't linked then there's nothing we can do
	 * unless we were called during a giveback, in which case
	 * qh_completions() has to deal with it.
	 */
	if (qh->qh_state != QH_STATE_LINKED) {
		if (qh->qh_state == QH_STATE_COMPLETING)
			qh->needs_rescan = 1;
		return;
	}

	qh_unlink_periodic(fotg210, qh);

	/* Make sure the unlinks are visible before starting the timer */
	wmb();

	/*
	 * The EHCI spec doesn't say how long it takes the controller to
	 * stop accessing an unlinked interrupt QH.  The timer delay is
	 * 9 uframes; presumably that will be long enough.
	 */
	qh->unlink_cycle = fotg210->intr_unlink_cycle;

	/* New entries go at the end of the intr_unlink list */
	if (fotg210->intr_unlink)
		fotg210->intr_unlink_last->unlink_next = qh;
	else
		fotg210->intr_unlink = qh;
	fotg210->intr_unlink_last = qh;

	if (fotg210->intr_unlinking)
		;	/* Avoid recursive calls */
	else if (fotg210->rh_state < FOTG210_RH_RUNNING)
		fotg210_handle_intr_unlinks(fotg210);
	else if (fotg210->intr_unlink == qh) {
		/* first entry on the list: arm the unlink timer */
		fotg210_enable_event(fotg210, FOTG210_HRTIMER_UNLINK_INTR,
				true);
		++fotg210->intr_unlink_cycle;
	}
}
/*
 * Finish unlinking an interrupt qh: give back completed qtds, and if
 * more work is already queued on it, put it straight back onto the
 * periodic schedule.
 */
static void end_unlink_intr(struct fotg210_hcd *fotg210, struct fotg210_qh *qh)
{
	struct fotg210_qh_hw *hw = qh->hw;
	int rc;

	qh->qh_state = QH_STATE_IDLE;
	hw->hw_next = FOTG210_LIST_END(fotg210);

	qh_completions(fotg210, qh);

	/* reschedule QH iff another request is queued */
	if (!list_empty(&qh->qtd_list) &&
			fotg210->rh_state == FOTG210_RH_RUNNING) {
		rc = qh_schedule(fotg210, qh);

		/* An error here likely indicates handshake failure
		 * or no space left in the schedule.  Neither fault
		 * should happen often ...
		 *
		 * FIXME kill the now-dysfunctional queued urbs
		 */
		if (rc != 0)
			fotg210_err(fotg210, "can't reschedule qh %p, err %d\n",
					qh, rc);
	}

	/* maybe turn off periodic schedule */
	--fotg210->intr_count;
	disable_periodic(fotg210);
}
/*
 * Return nonzero iff @usecs of periodic bandwidth can be claimed in
 * microframe @uframe of every @period-th frame starting at @frame.
 * A @period of 0 means "every microframe" and makes us check the
 * whole schedule.
 */
static int check_period(struct fotg210_hcd *fotg210, unsigned frame,
		unsigned uframe, unsigned period, unsigned usecs)
{
	int claimed;

	/* complete split running into next frame?
	 * given FSTN support, we could sometimes check...
	 */
	if (uframe >= 8)
		return 0;

	/* convert "usecs we need" to "max already claimed" */
	usecs = fotg210->uframe_periodic_max - usecs;

	/* we "know" 2 and 4 uframe intervals were rejected; so
	 * for period 0, check _every_ microframe in the schedule.
	 */
	if (unlikely(period == 0)) {	/* "wants to run every uframe" */
		do {
			/* NOTE(review): only uframes 0..6 are checked here,
			 * exactly as in the EHCI driver this code derives
			 * from; uframe 7 is presumably reserved — confirm
			 * against EHCI scheduling rules before "fixing".
			 */
			for (uframe = 0; uframe < 7; uframe++) {
				claimed = periodic_usecs(fotg210, frame,
						uframe);
				if (claimed > usecs)
					return 0;
			}
		} while ((frame += 1) < fotg210->periodic_size);

	/* just check the specified uframe, at that period */
	} else {
		do {
			claimed = periodic_usecs(fotg210, frame, uframe);
			if (claimed > usecs)
				return 0;
		} while ((frame += period) < fotg210->periodic_size);
	}

	/* success! */
	return 1;
}
/*
 * Can @qh's interrupt transfer be placed at (@frame, @uframe)?
 * Returns 0 and fills *@c_maskp (the CSPLIT uframe mask, in hc-endian
 * hw_info2 format) on success, -ENOSPC otherwise.  For high-speed qhs
 * (c_usecs == 0) only the SSPLIT/S-mask bandwidth is checked.
 */
static int check_intr_schedule(struct fotg210_hcd *fotg210, unsigned frame,
		unsigned uframe, const struct fotg210_qh *qh, __hc32 *c_maskp)
{
	int retval = -ENOSPC;
	u8 mask = 0;

	if (qh->c_usecs && uframe >= 6)	/* FSTN territory? */
		goto done;

	if (!check_period(fotg210, frame, uframe, qh->period, qh->usecs))
		goto done;
	if (!qh->c_usecs) {
		/* high speed: no split, no C-mask needed */
		retval = 0;
		*c_maskp = 0;
		goto done;
	}

	/* Make sure this tt's buffer is also available for CSPLITs.
	 * We pessimize a bit; probably the typical full speed case
	 * doesn't need the second CSPLIT.
	 *
	 * NOTE:  both SPLIT and CSPLIT could be checked in just
	 * one smart pass...
	 */
	mask = 0x03 << (uframe + qh->gap_uf);
	*c_maskp = cpu_to_hc32(fotg210, mask << 8);

	mask |= 1 << uframe;
	if (tt_no_collision(fotg210, qh->period, qh->dev, frame, mask)) {
		/* check bandwidth for both candidate CSPLIT uframes */
		if (!check_period(fotg210, frame, uframe + qh->gap_uf + 1,
				qh->period, qh->c_usecs))
			goto done;
		if (!check_period(fotg210, frame, uframe + qh->gap_uf,
				qh->period, qh->c_usecs))
			goto done;
		retval = 0;
	}
done:
	return retval;
}
/* "first fit" scheduling policy used the first time through,
 * or when the previous schedule slot can't be re-used.
 */
/*
 * Place @qh on the periodic schedule: try to reuse its previous slot,
 * otherwise probe pseudo-random frames for one with enough bandwidth,
 * then program the S-/C-masks and link the qh in.  Returns 0 or a
 * negative errno (-ENOSPC when no slot fits).
 */
static int qh_schedule(struct fotg210_hcd *fotg210, struct fotg210_qh *qh)
{
	int status;
	unsigned uframe;
	__hc32 c_mask;
	unsigned frame;	/* 0..(qh->period - 1), or NO_FRAME */
	struct fotg210_qh_hw *hw = qh->hw;

	qh_refresh(fotg210, qh);
	hw->hw_next = FOTG210_LIST_END(fotg210);
	frame = qh->start;

	/* reuse the previous schedule slots, if we can */
	if (frame < qh->period) {
		/* recover the uframe from the previously-set S-mask bit */
		uframe = ffs(hc32_to_cpup(fotg210, &hw->hw_info2) & QH_SMASK);
		status = check_intr_schedule(fotg210, frame, --uframe,
				qh, &c_mask);
	} else {
		uframe = 0;
		c_mask = 0;
		status = -ENOSPC;
	}

	/* else scan the schedule to find a group of slots such that all
	 * uframes have enough periodic bandwidth available.
	 */
	if (status) {
		/* "normal" case, uframing flexible except with splits */
		if (qh->period) {
			int i;

			/* random_frame spreads qhs across the schedule */
			for (i = qh->period; status && i > 0; --i) {
				frame = ++fotg210->random_frame % qh->period;
				for (uframe = 0; uframe < 8; uframe++) {
					status = check_intr_schedule(fotg210,
							frame, uframe, qh,
							&c_mask);
					if (status == 0)
						break;
				}
			}

		/* qh->period == 0 means every uframe */
		} else {
			frame = 0;
			status = check_intr_schedule(fotg210, 0, 0, qh,
					&c_mask);
		}
		if (status)
			goto done;
		qh->start = frame;

		/* reset S-frame and (maybe) C-frame masks */
		hw->hw_info2 &= cpu_to_hc32(fotg210, ~(QH_CMASK | QH_SMASK));
		hw->hw_info2 |= qh->period
			? cpu_to_hc32(fotg210, 1 << uframe)
			: cpu_to_hc32(fotg210, QH_SMASK);
		hw->hw_info2 |= c_mask;
	} else
		fotg210_dbg(fotg210, "reused qh %p schedule\n", qh);

	/* stuff into the periodic schedule */
	qh_link_periodic(fotg210, qh);
done:
	return status;
}
/*
 * Submit an interrupt URB: link it to its endpoint, make sure its qh is
 * scheduled (scheduling errors are forced before any qtds are queued),
 * then append the URB's qtds.  Returns 0 or a negative errno; on error
 * the caller's qtd list is freed here.
 */
static int intr_submit(struct fotg210_hcd *fotg210, struct urb *urb,
		struct list_head *qtd_list, gfp_t mem_flags)
{
	unsigned epnum;
	unsigned long flags;
	struct fotg210_qh *qh;
	int status;
	struct list_head empty;

	/* get endpoint and transfer/schedule data */
	epnum = urb->ep->desc.bEndpointAddress;

	spin_lock_irqsave(&fotg210->lock, flags);

	if (unlikely(!HCD_HW_ACCESSIBLE(fotg210_to_hcd(fotg210)))) {
		status = -ESHUTDOWN;
		goto done_not_linked;
	}
	status = usb_hcd_link_urb_to_ep(fotg210_to_hcd(fotg210), urb);
	if (unlikely(status))
		goto done_not_linked;

	/* get qh and force any scheduling errors */
	INIT_LIST_HEAD(&empty);
	qh = qh_append_tds(fotg210, urb, &empty, epnum, &urb->ep->hcpriv);
	if (qh == NULL) {
		status = -ENOMEM;
		goto done;
	}
	if (qh->qh_state == QH_STATE_IDLE) {
		status = qh_schedule(fotg210, qh);
		if (status)
			goto done;
	}

	/* then queue the urb's tds to the qh */
	qh = qh_append_tds(fotg210, urb, qtd_list, epnum, &urb->ep->hcpriv);
	BUG_ON(qh == NULL);

	/* ... update usbfs periodic stats */
	fotg210_to_hcd(fotg210)->self.bandwidth_int_reqs++;

done:
	if (unlikely(status))
		usb_hcd_unlink_urb_from_ep(fotg210_to_hcd(fotg210), urb);
done_not_linked:
	spin_unlock_irqrestore(&fotg210->lock, flags);
	if (status)
		qtd_list_free(fotg210, urb, qtd_list);

	return status;
}
/*
 * Scan every linked interrupt qh for completed work, unlinking qhs
 * that became empty (or were flagged for rescan) along the way.
 */
static void scan_intr(struct fotg210_hcd *fotg210)
{
	struct fotg210_qh *qh;

	list_for_each_entry_safe(qh, fotg210->qh_scan_next,
			&fotg210->intr_qh_list, intr_node) {
rescan:
		/* clean any finished work for this qh */
		if (!list_empty(&qh->qtd_list)) {
			int temp;

			/*
			 * Unlinks could happen here; completion reporting
			 * drops the lock.  That's why fotg210->qh_scan_next
			 * always holds the next qh to scan; if the next qh
			 * gets unlinked then fotg210->qh_scan_next is adjusted
			 * in qh_unlink_periodic().
			 */
			temp = qh_completions(fotg210, qh);
			if (unlikely(qh->needs_rescan ||
					(list_empty(&qh->qtd_list) &&
					qh->qh_state == QH_STATE_LINKED)))
				start_unlink_intr(fotg210, qh);
			else if (temp != 0)
				goto rescan;
		}
	}
}
/* fotg210_iso_stream ops work with both ITD and SITD */

/*
 * Allocate a zeroed iso stream and initialize its list heads; the
 * per-endpoint fields are filled in later by iso_stream_init().
 * Returns NULL on allocation failure.
 */
static struct fotg210_iso_stream *iso_stream_alloc(gfp_t mem_flags)
{
	struct fotg210_iso_stream *stream;

	stream = kzalloc(sizeof(*stream), mem_flags);
	if (stream == NULL)
		return NULL;

	INIT_LIST_HEAD(&stream->td_list);
	INIT_LIST_HEAD(&stream->free_list);
	stream->next_uframe = -1;
	return stream;
}
/*
 * Fill in a freshly allocated iso stream from the endpoint descriptor:
 * precompute the iTD buffer-pointer words (buf0..buf2), the per-uframe
 * usec cost, and the average bandwidth reported through usbfs.
 */
static void iso_stream_init(struct fotg210_hcd *fotg210,
		struct fotg210_iso_stream *stream, struct usb_device *dev,
		int pipe, unsigned interval)
{
	u32 buf1;
	unsigned epnum, maxp;
	int is_input;
	long bandwidth;
	unsigned multi;
	struct usb_host_endpoint *ep;

	/*
	 * this might be a "high bandwidth" highspeed endpoint,
	 * as encoded in the ep descriptor's wMaxPacket field
	 */
	epnum = usb_pipeendpoint(pipe);
	is_input = usb_pipein(pipe) ? USB_DIR_IN : 0;
	ep = usb_pipe_endpoint(dev, pipe);
	maxp = usb_endpoint_maxp(&ep->desc);
	if (is_input)
		buf1 = (1 << 11);
	else
		buf1 = 0;

	multi = usb_endpoint_maxp_mult(&ep->desc);
	/* buf1 carries the per-transaction maxp; the total payload per
	 * uframe (used for timing below) is maxp * multi
	 */
	buf1 |= maxp;
	maxp *= multi;

	stream->buf0 = cpu_to_hc32(fotg210, (epnum << 8) | dev->devnum);
	stream->buf1 = cpu_to_hc32(fotg210, buf1);
	stream->buf2 = cpu_to_hc32(fotg210, multi);

	/* usbfs wants to report the average usecs per frame tied up
	 * when transfers on this endpoint are scheduled ...
	 */
	if (dev->speed == USB_SPEED_FULL) {
		/* full speed intervals are in frames; convert to uframes */
		interval <<= 3;
		stream->usecs = NS_TO_US(usb_calc_bus_time(dev->speed,
				is_input, 1, maxp));
		stream->usecs /= 8;
	} else {
		stream->highspeed = 1;
		stream->usecs = HS_USECS_ISO(maxp);
	}
	bandwidth = stream->usecs * 8;
	bandwidth /= interval;

	stream->bandwidth = bandwidth;
	stream->udev = dev;
	stream->bEndpointAddress = is_input | epnum;
	stream->interval = interval;
	stream->maxp = maxp;
}
/*
 * Find (or lazily create) the iso stream attached to @urb's endpoint.
 * Returns NULL on allocation failure or when the endpoint's hcpriv
 * already holds a (non-iso) QH.
 */
static struct fotg210_iso_stream *iso_stream_find(struct fotg210_hcd *fotg210,
		struct urb *urb)
{
	unsigned epnum;
	struct fotg210_iso_stream *stream;
	struct usb_host_endpoint *ep;
	unsigned long flags;

	epnum = usb_pipeendpoint(urb->pipe);
	if (usb_pipein(urb->pipe))
		ep = urb->dev->ep_in[epnum];
	else
		ep = urb->dev->ep_out[epnum];

	spin_lock_irqsave(&fotg210->lock, flags);
	stream = ep->hcpriv;

	if (unlikely(stream == NULL)) {
		/* GFP_ATOMIC because we hold the spinlock here */
		stream = iso_stream_alloc(GFP_ATOMIC);
		if (likely(stream != NULL)) {
			ep->hcpriv = stream;
			stream->ep = ep;
			iso_stream_init(fotg210, stream, urb->dev, urb->pipe,
					urb->interval);
		}

	/* if dev->ep[epnum] is a QH, hw is set */
	} else if (unlikely(stream->hw != NULL)) {
		fotg210_dbg(fotg210, "dev %s ep%d%s, not iso??\n",
				urb->dev->devpath, epnum,
				usb_pipein(urb->pipe) ? "in" : "out");
		stream = NULL;
	}

	spin_unlock_irqrestore(&fotg210->lock, flags);
	return stream;
}
/* fotg210_iso_sched ops can be ITD-only or SITD-only */

/*
 * Allocate a zeroed iso schedule with room for @packets packet
 * descriptors, initializing its td list.  Returns NULL on failure.
 */
static struct fotg210_iso_sched *iso_sched_alloc(unsigned packets,
		gfp_t mem_flags)
{
	struct fotg210_iso_sched *sched;

	sched = kzalloc(struct_size(sched, packet, packets), mem_flags);
	if (sched != NULL)
		INIT_LIST_HEAD(&sched->td_list);
	return sched;
}
/*
 * Precompute, for each iso packet of @urb, the iTD transaction word and
 * page-aligned buffer pointer that will be patched into hardware iTDs
 * later, and record the total span of the transfer in uframes.
 */
static inline void itd_sched_init(struct fotg210_hcd *fotg210,
		struct fotg210_iso_sched *iso_sched,
		struct fotg210_iso_stream *stream, struct urb *urb)
{
	unsigned i;
	dma_addr_t dma = urb->transfer_dma;

	/* how many uframes are needed for these transfers */
	iso_sched->span = urb->number_of_packets * stream->interval;

	/* figure out per-uframe itd fields that we'll need later
	 * when we fit new itds into the schedule.
	 */
	for (i = 0; i < urb->number_of_packets; i++) {
		struct fotg210_iso_packet *uframe = &iso_sched->packet[i];
		unsigned length;
		dma_addr_t buf;
		u32 trans;

		length = urb->iso_frame_desc[i].length;
		buf = dma + urb->iso_frame_desc[i].offset;

		trans = FOTG210_ISOC_ACTIVE;
		/* low 12 bits: offset within the 4K buffer page */
		trans |= buf & 0x0fff;
		/* interrupt on the last packet, unless suppressed */
		if (unlikely(((i + 1) == urb->number_of_packets))
				&& !(urb->transfer_flags & URB_NO_INTERRUPT))
			trans |= FOTG210_ITD_IOC;
		trans |= length << 16;
		uframe->transaction = cpu_to_hc32(fotg210, trans);

		/* might need to cross a buffer page within a uframe */
		uframe->bufp = (buf & ~(u64)0x0fff);
		buf += length;
		if (unlikely((uframe->bufp != (buf & ~(u64)0x0fff))))
			uframe->cross = 1;
	}
}
static void iso_sched_free(struct fotg210_iso_stream *stream,
struct fotg210_iso_sched *iso_sched)
{
if (!iso_sched)
return;
/* caller must hold fotg210->lock!*/
list_splice(&iso_sched->td_list, &stream->free_list);
kfree(iso_sched);
}
/*
 * Precompute the per-packet schedule for @urb and allocate every iTD it
 * will need, preferring recycled iTDs from the stream's free list (but
 * not ones the hardware may still be using in the current frame).
 * On success the schedule is stashed in urb->hcpriv; returns 0 or
 * -ENOMEM.
 */
static int itd_urb_transaction(struct fotg210_iso_stream *stream,
		struct fotg210_hcd *fotg210, struct urb *urb, gfp_t mem_flags)
{
	struct fotg210_itd *itd;
	dma_addr_t itd_dma;
	int i;
	unsigned num_itds;
	struct fotg210_iso_sched *sched;
	unsigned long flags;

	sched = iso_sched_alloc(urb->number_of_packets, mem_flags);
	if (unlikely(sched == NULL))
		return -ENOMEM;

	itd_sched_init(fotg210, sched, stream, urb);

	/* an iTD covers 8 uframes, so interval < 8 packs several packets
	 * per iTD; otherwise each packet gets its own
	 */
	if (urb->interval < 8)
		num_itds = 1 + (sched->span + 7) / 8;
	else
		num_itds = urb->number_of_packets;

	/* allocate/init ITDs */
	spin_lock_irqsave(&fotg210->lock, flags);
	for (i = 0; i < num_itds; i++) {

		/*
		 * Use iTDs from the free list, but not iTDs that may
		 * still be in use by the hardware.
		 */
		if (likely(!list_empty(&stream->free_list))) {
			itd = list_first_entry(&stream->free_list,
					struct fotg210_itd, itd_list);
			/* free-list head was completed this frame: the HC
			 * may still touch it, so fall back to the dma pool
			 */
			if (itd->frame == fotg210->now_frame)
				goto alloc_itd;
			list_del(&itd->itd_list);
			itd_dma = itd->itd_dma;
		} else {
alloc_itd:
			/* drop the lock: dma_pool_alloc may sleep when
			 * mem_flags allows it
			 */
			spin_unlock_irqrestore(&fotg210->lock, flags);
			itd = dma_pool_alloc(fotg210->itd_pool, mem_flags,
					&itd_dma);
			spin_lock_irqsave(&fotg210->lock, flags);
			if (!itd) {
				iso_sched_free(stream, sched);
				spin_unlock_irqrestore(&fotg210->lock, flags);
				return -ENOMEM;
			}
		}

		memset(itd, 0, sizeof(*itd));
		itd->itd_dma = itd_dma;
		list_add(&itd->itd_list, &sched->td_list);
	}
	spin_unlock_irqrestore(&fotg210->lock, flags);

	/* temporarily store schedule info in hcpriv */
	urb->hcpriv = sched;
	urb->error_count = 0;
	return 0;
}
/*
 * Is there room for @usecs of iso bandwidth in microframe @uframe and
 * every @period-th microframe after it (mod @mod)?  Returns 1 if every
 * affected uframe stays within the periodic budget, else 0.
 */
static inline int itd_slot_ok(struct fotg210_hcd *fotg210, u32 mod, u32 uframe,
		u8 usecs, u32 period)
{
	/* most usecs any single uframe may already have claimed */
	u32 limit = fotg210->uframe_periodic_max - usecs;

	uframe %= period;
	do {
		if (periodic_usecs(fotg210, uframe >> 3, uframe & 0x7) > limit)
			return 0;

		/* we know urb->interval is 2^N uframes */
		uframe += period;
	} while (uframe < mod);

	return 1;
}
/* This scheduler plans almost as far into the future as it has actual
* periodic schedule slots. (Affected by TUNE_FLS, which defaults to
* "as small as possible" to be cache-friendlier.) That limits the size
* transfers you can stream reliably; avoid more than 64 msec per urb.
* Also avoid queue depths of less than fotg210's worst irq latency (affected
* by the per-urb URB_NO_INTERRUPT hint, the log2_irq_thresh module parameter,
* and other factors); or more than about 230 msec total (for portability,
* given FOTG210_TUNE_FLS and the slop). Or, write a smarter scheduler!
*/
#define SCHEDULE_SLOP 80 /* microframes */
/*
 * Pick the starting (micro)frame for @urb on @stream.  An already-active
 * stream continues right after its last scheduled slot (catching up if
 * the host fell behind); an idle stream gets a fresh first-fit search.
 * All arithmetic is modulo the schedule length in uframes (@mod).
 * On failure the schedule in urb->hcpriv is freed here.
 */
static int iso_stream_schedule(struct fotg210_hcd *fotg210, struct urb *urb,
		struct fotg210_iso_stream *stream)
{
	u32 now, next, start, period, span;
	int status;
	unsigned mod = fotg210->periodic_size << 3;
	struct fotg210_iso_sched *sched = urb->hcpriv;

	period = urb->interval;
	span = sched->span;

	if (span > mod - SCHEDULE_SLOP) {
		fotg210_dbg(fotg210, "iso request %p too long\n", urb);
		status = -EFBIG;
		goto fail;
	}

	now = fotg210_read_frame_index(fotg210) & (mod - 1);

	/* Typical case: reuse current schedule, stream is still active.
	 * Hopefully there are no gaps from the host falling behind
	 * (irq delays etc), but if there are we'll take the next
	 * slot in the schedule, implicitly assuming URB_ISO_ASAP.
	 */
	if (likely(!list_empty(&stream->td_list))) {
		u32 excess;

		/* For high speed devices, allow scheduling within the
		 * isochronous scheduling threshold.  For full speed devices
		 * and Intel PCI-based controllers, don't (work around for
		 * Intel ICH9 bug).
		 */
		if (!stream->highspeed && fotg210->fs_i_thresh)
			next = now + fotg210->i_thresh;
		else
			next = now;

		/* Fell behind (by up to twice the slop amount)?
		 * We decide based on the time of the last currently-scheduled
		 * slot, not the time of the next available slot.
		 */
		excess = (stream->next_uframe - period - next) & (mod - 1);
		if (excess >= mod - 2 * SCHEDULE_SLOP)
			start = next + excess - mod + period *
					DIV_ROUND_UP(mod - excess, period);
		else
			start = next + excess + period;
		if (start - now >= mod) {
			fotg210_dbg(fotg210, "request %p would overflow (%d+%d >= %d)\n",
					urb, start - now - period, period,
					mod);
			status = -EFBIG;
			goto fail;
		}
	}

	/* need to schedule; when's the next (u)frame we could start?
	 * this is bigger than fotg210->i_thresh allows; scheduling itself
	 * isn't free, the slop should handle reasonably slow cpus.  it
	 * can also help high bandwidth if the dma and irq loads don't
	 * jump until after the queue is primed.
	 */
	else {
		int done = 0;

		start = SCHEDULE_SLOP + (now & ~0x07);

		/* NOTE:  assumes URB_ISO_ASAP, to limit complexity/bugs */

		/* find a uframe slot with enough bandwidth.
		 * Early uframes are more precious because full-speed
		 * iso IN transfers can't use late uframes,
		 * and therefore they should be allocated last.
		 */
		next = start;
		start += period;
		do {
			start--;
			/* check schedule: enough space? */
			if (itd_slot_ok(fotg210, mod, start,
					stream->usecs, period))
				done = 1;
		} while (start > next && !done);

		/* no room in the schedule */
		if (!done) {
			fotg210_dbg(fotg210, "iso resched full %p (now %d max %d)\n",
					urb, now, now + mod);
			status = -ENOSPC;
			goto fail;
		}
	}

	/* Tried to schedule too far into the future? */
	if (unlikely(start - now + span - period >=
			mod - 2 * SCHEDULE_SLOP)) {
		fotg210_dbg(fotg210, "request %p would overflow (%d+%d >= %d)\n",
				urb, start - now, span - period,
				mod - 2 * SCHEDULE_SLOP);
		status = -EFBIG;
		goto fail;
	}

	stream->next_uframe = start & (mod - 1);

	/* report high speed start in uframes; full speed, in frames */
	urb->start_frame = stream->next_uframe;
	if (!stream->highspeed)
		urb->start_frame >>= 3;

	/* Make sure scan_isoc() sees these */
	if (fotg210->isoc_count == 0)
		fotg210->next_frame = now >> 3;
	return 0;

fail:
	iso_sched_free(stream, sched);
	urb->hcpriv = NULL;
	return status;
}
/*
 * Prepare a (recently zeroed) iTD for use on @stream: set the static
 * buffer-pointer words and mark every uframe slot as unused (-1).
 * All other fields are filled when scheduling.
 */
static inline void itd_init(struct fotg210_hcd *fotg210,
		struct fotg210_iso_stream *stream, struct fotg210_itd *itd)
{
	itd->hw_next = FOTG210_LIST_END(fotg210);
	itd->hw_bufp[0] = stream->buf0;
	itd->hw_bufp[1] = stream->buf1;
	itd->hw_bufp[2] = stream->buf2;

	/* -1 is all-ones in every byte, so memset works for the int array */
	memset(itd->index, -1, sizeof(itd->index));
}
/*
 * Patch iso packet @index of @iso_sched into uframe slot @uframe of
 * @itd: set the transaction word (with the current page selector) and
 * the buffer-pointer words, advancing to the next page when the packet
 * crosses a 4K boundary.
 */
static inline void itd_patch(struct fotg210_hcd *fotg210,
		struct fotg210_itd *itd, struct fotg210_iso_sched *iso_sched,
		unsigned index, u16 uframe)
{
	struct fotg210_iso_packet *uf = &iso_sched->packet[index];
	unsigned pg = itd->pg;

	uframe &= 0x07;
	itd->index[uframe] = index;

	itd->hw_transaction[uframe] = uf->transaction;
	/* bits 14:12 of the transaction word select the buffer page */
	itd->hw_transaction[uframe] |= cpu_to_hc32(fotg210, pg << 12);
	itd->hw_bufp[pg] |= cpu_to_hc32(fotg210, uf->bufp & ~(u32)0);
	itd->hw_bufp_hi[pg] |= cpu_to_hc32(fotg210, (u32)(uf->bufp >> 32));

	/* iso_frame_desc[].offset must be strictly increasing */
	if (unlikely(uf->cross)) {
		u64 bufp = uf->bufp + 4096;

		itd->pg = ++pg;
		itd->hw_bufp[pg] |= cpu_to_hc32(fotg210, bufp & ~(u32)0);
		itd->hw_bufp_hi[pg] |= cpu_to_hc32(fotg210, (u32)(bufp >> 32));
	}
}
/*
 * Link @itd at the head of frame @frame's periodic list, after any iso
 * nodes already there but before the first QH.  The hardware pointer is
 * written last, after a wmb(), so the HC never sees a half-built iTD.
 */
static inline void itd_link(struct fotg210_hcd *fotg210, unsigned frame,
		struct fotg210_itd *itd)
{
	union fotg210_shadow *prev = &fotg210->pshadow[frame];
	__hc32 *hw_p = &fotg210->periodic[frame];
	union fotg210_shadow here = *prev;
	__hc32 type = 0;

	/* skip any iso nodes which might belong to previous microframes */
	while (here.ptr) {
		type = Q_NEXT_TYPE(fotg210, *hw_p);
		if (type == cpu_to_hc32(fotg210, Q_TYPE_QH))
			break;
		prev = periodic_next_shadow(fotg210, prev, type);
		hw_p = shadow_next_periodic(fotg210, &here, type);
		here = *prev;
	}

	itd->itd_next = here;
	itd->hw_next = *hw_p;
	prev->itd = itd;
	itd->frame = frame;
	wmb();
	*hw_p = cpu_to_hc32(fotg210, itd->itd_dma | Q_TYPE_ITD);
}
/* fit urb's itds into the selected schedule slot; activate as needed */
/*
 * Walk the precomputed schedule (urb->hcpriv), patching each iso packet
 * into an iTD uframe slot and linking each finished iTD into its frame.
 * Caller holds fotg210->lock.  The temporary schedule is freed here.
 */
static void itd_link_urb(struct fotg210_hcd *fotg210, struct urb *urb,
		unsigned mod, struct fotg210_iso_stream *stream)
{
	int packet;
	unsigned next_uframe, uframe, frame;
	struct fotg210_iso_sched *iso_sched = urb->hcpriv;
	struct fotg210_itd *itd;

	next_uframe = stream->next_uframe & (mod - 1);

	/* first URB on an idle stream: account its bandwidth */
	if (unlikely(list_empty(&stream->td_list))) {
		fotg210_to_hcd(fotg210)->self.bandwidth_allocated
				+= stream->bandwidth;
		fotg210_dbg(fotg210,
			"schedule devp %s ep%d%s-iso period %d start %d.%d\n",
			urb->dev->devpath, stream->bEndpointAddress & 0x0f,
			(stream->bEndpointAddress & USB_DIR_IN) ? "in" : "out",
			urb->interval,
			next_uframe >> 3, next_uframe & 0x7);
	}

	/* fill iTDs uframe by uframe */
	for (packet = 0, itd = NULL; packet < urb->number_of_packets;) {
		if (itd == NULL) {
			/* ASSERT:  we have all necessary itds */

			/* ASSERT:  no itds for this endpoint in this uframe */

			itd = list_entry(iso_sched->td_list.next,
					struct fotg210_itd, itd_list);
			list_move_tail(&itd->itd_list, &stream->td_list);
			itd->stream = stream;
			itd->urb = urb;
			itd_init(fotg210, stream, itd);
		}

		uframe = next_uframe & 0x07;
		frame = next_uframe >> 3;

		itd_patch(fotg210, itd, iso_sched, packet, uframe);

		next_uframe += stream->interval;
		next_uframe &= mod - 1;
		packet++;

		/* link completed itds into the schedule */
		if (((next_uframe >> 3) != frame)
				|| packet == urb->number_of_packets) {
			itd_link(fotg210, frame & (fotg210->periodic_size - 1),
					itd);
			itd = NULL;
		}
	}
	stream->next_uframe = next_uframe;

	/* don't need that schedule data any more */
	iso_sched_free(stream, iso_sched);
	urb->hcpriv = NULL;

	++fotg210->isoc_count;
	enable_periodic(fotg210);
}
#define ISO_ERRS (FOTG210_ISOC_BUF_ERR | FOTG210_ISOC_BABBLE |\
FOTG210_ISOC_XACTERR)
/* Process and recycle a completed ITD.  Return true iff its urb completed,
 * and hence its completion callback probably added things to the hardware
 * schedule.
 *
 * Note that we carefully avoid recycling this descriptor until after any
 * completion callback runs, so that it won't be reused quickly.  That is,
 * assuming (a) no more than two urbs per frame on this endpoint, and also
 * (b) only this endpoint's completions submit URBs.  It seems some silicon
 * corrupts things if you reuse completed descriptors very quickly...
 */
static bool itd_complete(struct fotg210_hcd *fotg210, struct fotg210_itd *itd)
{
	struct urb *urb = itd->urb;
	struct usb_iso_packet_descriptor *desc;
	u32 t;
	unsigned uframe;
	int urb_index = -1;
	struct fotg210_iso_stream *stream = itd->stream;
	struct usb_device *dev;
	bool retval = false;

	/* for each uframe with a packet */
	for (uframe = 0; uframe < 8; uframe++) {
		if (likely(itd->index[uframe] == -1))
			continue;
		urb_index = itd->index[uframe];
		desc = &urb->iso_frame_desc[urb_index];

		t = hc32_to_cpup(fotg210, &itd->hw_transaction[uframe]);
		itd->hw_transaction[uframe] = 0;

		/* report transfer status */
		if (unlikely(t & ISO_ERRS)) {
			urb->error_count++;
			if (t & FOTG210_ISOC_BUF_ERR)
				desc->status = usb_pipein(urb->pipe)
					? -ENOSR	/* hc couldn't read */
					: -ECOMM;	/* hc couldn't write */
			else if (t & FOTG210_ISOC_BABBLE)
				desc->status = -EOVERFLOW;
			else /* (t & FOTG210_ISOC_XACTERR) */
				desc->status = -EPROTO;

			/* HC need not update length with this error */
			if (!(t & FOTG210_ISOC_BABBLE)) {
				desc->actual_length = FOTG210_ITD_LENGTH(t);
				urb->actual_length += desc->actual_length;
			}
		} else if (likely((t & FOTG210_ISOC_ACTIVE) == 0)) {
			desc->status = 0;
			desc->actual_length = FOTG210_ITD_LENGTH(t);
			urb->actual_length += desc->actual_length;
		} else {
			/* URB was too late */
			desc->status = -EXDEV;
		}
	}

	/* handle completion now?  only when the last packet of the urb
	 * was in this itd
	 */
	if (likely((urb_index + 1) != urb->number_of_packets))
		goto done;

	/* ASSERT: it's really the last itd for this urb
	 * list_for_each_entry (itd, &stream->td_list, itd_list)
	 *	BUG_ON (itd->urb == urb);
	 */

	/* give urb back to the driver; completion often (re)submits */
	dev = urb->dev;
	fotg210_urb_done(fotg210, urb, 0);
	retval = true;
	urb = NULL;

	--fotg210->isoc_count;
	disable_periodic(fotg210);

	if (unlikely(list_is_singular(&stream->td_list))) {
		/* last itd on the stream: release its bandwidth */
		fotg210_to_hcd(fotg210)->self.bandwidth_allocated
				-= stream->bandwidth;
		fotg210_dbg(fotg210,
			"deschedule devp %s ep%d%s-iso\n",
			dev->devpath, stream->bEndpointAddress & 0x0f,
			(stream->bEndpointAddress & USB_DIR_IN) ? "in" : "out");
	}

done:
	itd->urb = NULL;

	/* Add to the end of the free list for later reuse */
	list_move_tail(&itd->itd_list, &stream->free_list);

	/* Recycle the iTDs when the pipeline is empty (ep no longer in use) */
	if (list_empty(&stream->td_list)) {
		list_splice_tail_init(&stream->free_list,
				&fotg210->cached_itd_list);
		start_free_itds(fotg210);
	}

	return retval;
}
/*
 * Top-level iso submit path: find/create the stream, build the iTDs
 * without holding the lock, then (under the lock) pick a schedule slot
 * and link the URB in.  Returns 0 or a negative errno.
 */
static int itd_submit(struct fotg210_hcd *fotg210, struct urb *urb,
		gfp_t mem_flags)
{
	int status = -EINVAL;
	unsigned long flags;
	struct fotg210_iso_stream *stream;

	/* Get iso_stream head */
	stream = iso_stream_find(fotg210, urb);
	if (unlikely(stream == NULL)) {
		fotg210_dbg(fotg210, "can't get iso stream\n");
		return -ENOMEM;
	}
	/* the stream's interval was fixed at first submit; a high-speed
	 * port can't change it afterwards
	 */
	if (unlikely(urb->interval != stream->interval &&
			fotg210_port_speed(fotg210, 0) ==
			USB_PORT_STAT_HIGH_SPEED)) {
		fotg210_dbg(fotg210, "can't change iso interval %d --> %d\n",
				stream->interval, urb->interval);
		goto done;
	}

#ifdef FOTG210_URB_TRACE
	fotg210_dbg(fotg210,
			"%s %s urb %p ep%d%s len %d, %d pkts %d uframes[%p]\n",
			__func__, urb->dev->devpath, urb,
			usb_pipeendpoint(urb->pipe),
			usb_pipein(urb->pipe) ? "in" : "out",
			urb->transfer_buffer_length,
			urb->number_of_packets, urb->interval,
			stream);
#endif

	/* allocate ITDs w/o locking anything */
	status = itd_urb_transaction(stream, fotg210, urb, mem_flags);
	if (unlikely(status < 0)) {
		fotg210_dbg(fotg210, "can't init itds\n");
		goto done;
	}

	/* schedule ... need to lock */
	spin_lock_irqsave(&fotg210->lock, flags);
	if (unlikely(!HCD_HW_ACCESSIBLE(fotg210_to_hcd(fotg210)))) {
		status = -ESHUTDOWN;
		goto done_not_linked;
	}
	status = usb_hcd_link_urb_to_ep(fotg210_to_hcd(fotg210), urb);
	if (unlikely(status))
		goto done_not_linked;
	status = iso_stream_schedule(fotg210, urb, stream);
	if (likely(status == 0))
		itd_link_urb(fotg210, urb, fotg210->periodic_size << 3, stream);
	else
		usb_hcd_unlink_urb_from_ep(fotg210_to_hcd(fotg210), urb);
done_not_linked:
	spin_unlock_irqrestore(&fotg210->lock, flags);
done:
	return status;
}
/*
 * Scan one frame's periodic queue for finished iTDs, unlink and
 * complete them.  Returns -EINVAL to tell the caller to restart the
 * scan when a completion callback may have modified the queue, 0 when
 * the frame was scanned to the end.
 */
static inline int scan_frame_queue(struct fotg210_hcd *fotg210, unsigned frame,
		unsigned now_frame, bool live)
{
	unsigned uf;
	bool modified;
	union fotg210_shadow q, *q_p;
	__hc32 type, *hw_p;

	/* scan each element in frame's queue for completions */
	q_p = &fotg210->pshadow[frame];
	hw_p = &fotg210->periodic[frame];
	q.ptr = q_p->ptr;
	type = Q_NEXT_TYPE(fotg210, *hw_p);
	modified = false;

	while (q.ptr) {
		switch (hc32_to_cpu(fotg210, type)) {
		case Q_TYPE_ITD:
			/* If this ITD is still active, leave it for
			 * later processing ... check the next entry.
			 * No need to check for activity unless the
			 * frame is current.
			 */
			if (frame == now_frame && live) {
				rmb();
				for (uf = 0; uf < 8; uf++) {
					if (q.itd->hw_transaction[uf] &
							ITD_ACTIVE(fotg210))
						break;
				}
				if (uf < 8) {
					/* still active: just advance */
					q_p = &q.itd->itd_next;
					hw_p = &q.itd->hw_next;
					type = Q_NEXT_TYPE(fotg210,
							q.itd->hw_next);
					q = *q_p;
					break;
				}
			}

			/* Take finished ITDs out of the schedule
			 * and process them:  recycle, maybe report
			 * URB completion.  HC won't cache the
			 * pointer for much longer, if at all.
			 */
			*q_p = q.itd->itd_next;
			*hw_p = q.itd->hw_next;
			type = Q_NEXT_TYPE(fotg210, q.itd->hw_next);
			wmb();
			modified = itd_complete(fotg210, q.itd);
			q = *q_p;
			break;
		default:
			fotg210_dbg(fotg210, "corrupt type %d frame %d shadow %p\n",
					type, frame, q.ptr);
			fallthrough;
		case Q_TYPE_QH:
		case Q_TYPE_FSTN:
			/* End of the iTDs and siTDs */
			q.ptr = NULL;
			break;
		}

		/* assume completion callbacks modify the queue */
		if (unlikely(modified && fotg210->isoc_count > 0))
			return -EINVAL;
	}
	return 0;
}
/*
 * scan_isoc - scan the periodic schedule for completed isochronous work
 *
 * Scans frame queues from the last scan point up to the current frame
 * (or, when the controller is not running, over everything that's left),
 * rescanning any frame whose queue was modified by completion callbacks.
 */
static void scan_isoc(struct fotg210_hcd *fotg210)
{
	unsigned uf, now_frame, frame, ret;
	unsigned fmask = fotg210->periodic_size - 1;
	bool live;
	/*
	 * When running, scan from last scan point up to "now"
	 * else clean up by scanning everything that's left.
	 * Touches as few pages as possible:  cache-friendly.
	 */
	if (fotg210->rh_state >= FOTG210_RH_RUNNING) {
		uf = fotg210_read_frame_index(fotg210);
		now_frame = (uf >> 3) & fmask;
		live = true;
	} else {
		now_frame = (fotg210->next_frame - 1) & fmask;
		live = false;
	}
	fotg210->now_frame = now_frame;
	frame = fotg210->next_frame;
	for (;;) {
		/* rescan the frame until its queue is stable */
		ret = 1;
		while (ret != 0)
			ret = scan_frame_queue(fotg210, frame,
					now_frame, live);
		/* Stop when we have reached the current frame */
		if (frame == now_frame)
			break;
		frame = (frame + 1) & fmask;
	}
	fotg210->next_frame = now_frame;
}
/* Display / Set uframe_periodic_max
 */
static ssize_t uframe_periodic_max_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	/* Report the current periodic bandwidth cap (usec per uframe). */
	struct fotg210_hcd *fotg210 =
		hcd_to_fotg210(bus_to_hcd(dev_get_drvdata(dev)));

	return sysfs_emit(buf, "%d\n", fotg210->uframe_periodic_max);
}
/*
 * uframe_periodic_max_store - set the periodic bandwidth cap via sysfs
 *
 * Accepts values in [100, 125) usec/uframe.  Decreasing the cap is only
 * allowed when no microframe currently has more bandwidth allocated than
 * the new limit; increasing is always allowed.  Returns @count on success
 * or a negative errno.
 */
static ssize_t uframe_periodic_max_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	struct fotg210_hcd *fotg210;
	unsigned uframe_periodic_max;
	unsigned frame, uframe;
	unsigned short allocated_max;
	unsigned long flags;
	ssize_t ret;
	fotg210 = hcd_to_fotg210(bus_to_hcd(dev_get_drvdata(dev)));
	ret = kstrtouint(buf, 0, &uframe_periodic_max);
	if (ret)
		return ret;
	/* 125 usec is a full microframe; 100 is the USB 2.0 default (80%) */
	if (uframe_periodic_max < 100 || uframe_periodic_max >= 125) {
		fotg210_info(fotg210, "rejecting invalid request for uframe_periodic_max=%u\n",
				uframe_periodic_max);
		return -EINVAL;
	}
	ret = -EINVAL;
	/*
	 * lock, so that our checking does not race with possible periodic
	 * bandwidth allocation through submitting new urbs.
	 */
	spin_lock_irqsave(&fotg210->lock, flags);
	/*
	 * for request to decrease max periodic bandwidth, we have to check
	 * every microframe in the schedule to see whether the decrease is
	 * possible.
	 */
	if (uframe_periodic_max < fotg210->uframe_periodic_max) {
		allocated_max = 0;
		/* NOTE(review): only uframes 0..6 are checked (matches the
		 * EHCI sysfs code this derives from); confirm uframe 7 can
		 * never carry periodic load.
		 */
		for (frame = 0; frame < fotg210->periodic_size; ++frame)
			for (uframe = 0; uframe < 7; ++uframe)
				allocated_max = max(allocated_max,
						periodic_usecs(fotg210, frame,
						uframe));
		if (allocated_max > uframe_periodic_max) {
			fotg210_info(fotg210,
					"cannot decrease uframe_periodic_max because periodic bandwidth is already allocated (%u > %u)\n",
					allocated_max, uframe_periodic_max);
			goto out_unlock;
		}
	}
	/* increasing is always ok */
	fotg210_info(fotg210,
			"setting max periodic bandwidth to %u%% (== %u usec/uframe)\n",
			100 * uframe_periodic_max/125, uframe_periodic_max);
	if (uframe_periodic_max != 100)
		fotg210_warn(fotg210, "max periodic bandwidth set is non-standard\n");
	fotg210->uframe_periodic_max = uframe_periodic_max;
	ret = count;
out_unlock:
	spin_unlock_irqrestore(&fotg210->lock, flags);
	return ret;
}
/* dev_attr_uframe_periodic_max: RW attribute backed by the two
 * show/store handlers above.
 */
static DEVICE_ATTR_RW(uframe_periodic_max);
static inline int create_sysfs_files(struct fotg210_hcd *fotg210)
{
	/* Expose uframe_periodic_max on the HC's controller device. */
	return device_create_file(fotg210_to_hcd(fotg210)->self.controller,
			&dev_attr_uframe_periodic_max);
}
static inline void remove_sysfs_files(struct fotg210_hcd *fotg210)
{
	/* Undo create_sysfs_files(): drop the uframe_periodic_max attribute. */
	device_remove_file(fotg210_to_hcd(fotg210)->self.controller,
			&dev_attr_uframe_periodic_max);
}
/* On some systems, leaving remote wakeup enabled prevents system shutdown.
* The firmware seems to think that powering off is a wakeup event!
* This routine turns off remote wakeup and everything else, on all ports.
*/
static void fotg210_turn_off_all_ports(struct fotg210_hcd *fotg210)
{
u32 __iomem *status_reg = &fotg210->regs->port_status;
fotg210_writel(fotg210, PORT_RWC_BITS, status_reg);
}
/* Halt HC, turn off all ports, and let the BIOS use the companion controllers.
 * Must be called with interrupts enabled and the lock not held.
 */
static void fotg210_silence_controller(struct fotg210_hcd *fotg210)
{
	/* halt first, unlocked (may sleep/poll), then mark state and
	 * shut the ports down under the lock
	 */
	fotg210_halt(fotg210);
	spin_lock_irq(&fotg210->lock);
	fotg210->rh_state = FOTG210_RH_HALTED;
	fotg210_turn_off_all_ports(fotg210);
	spin_unlock_irq(&fotg210->lock);
}
/* fotg210_shutdown kick in for silicon on any bus (not just pci, etc).
 * This forcibly disables dma and IRQs, helping kexec and other cases
 * where the next system software may expect clean state.
 */
static void fotg210_shutdown(struct usb_hcd *hcd)
{
	struct fotg210_hcd *fotg210 = hcd_to_fotg210(hcd);
	/* flag shutdown and stop scheduling new hrtimer events first */
	spin_lock_irq(&fotg210->lock);
	fotg210->shutdown = true;
	fotg210->rh_state = FOTG210_RH_STOPPING;
	fotg210->enabled_hrtimer_events = 0;
	spin_unlock_irq(&fotg210->lock);
	fotg210_silence_controller(fotg210);
	/* no new events can be queued now, so this cancel is final */
	hrtimer_cancel(&fotg210->hrtimer);
}
/* fotg210_work is called from some interrupts, timers, and so on.
 * it calls driver completion functions, after dropping fotg210->lock.
 */
static void fotg210_work(struct fotg210_hcd *fotg210)
{
	/* another CPU may drop fotg210->lock during a schedule scan while
	 * it reports urb completions.  this flag guards against bogus
	 * attempts at re-entrant schedule scanning.
	 */
	if (fotg210->scanning) {
		fotg210->need_rescan = true;
		return;
	}
	fotg210->scanning = true;
rescan:
	/* clear before scanning so a concurrent request re-triggers us */
	fotg210->need_rescan = false;
	if (fotg210->async_count)
		scan_async(fotg210);
	if (fotg210->intr_count > 0)
		scan_intr(fotg210);
	if (fotg210->isoc_count > 0)
		scan_isoc(fotg210);
	if (fotg210->need_rescan)
		goto rescan;
	fotg210->scanning = false;
	/* the IO watchdog guards against hardware or driver bugs that
	 * misplace IRQs, and should let us run completely without IRQs.
	 * such lossage has been observed on both VT6202 and VT8235.
	 */
	turn_on_io_watchdog(fotg210);
}
/* Called when the fotg210_hcd module is removed.
 * Quiesces and resets the controller, tears down sysfs/debugfs files,
 * frees cached iTDs and all driver-owned memory.
 */
static void fotg210_stop(struct usb_hcd *hcd)
{
	struct fotg210_hcd *fotg210 = hcd_to_fotg210(hcd);
	fotg210_dbg(fotg210, "stop\n");
	/* no more interrupts ... */
	spin_lock_irq(&fotg210->lock);
	fotg210->enabled_hrtimer_events = 0;
	spin_unlock_irq(&fotg210->lock);
	fotg210_quiesce(fotg210);
	fotg210_silence_controller(fotg210);
	fotg210_reset(fotg210);
	hrtimer_cancel(&fotg210->hrtimer);
	remove_sysfs_files(fotg210);
	remove_debug_files(fotg210);
	/* root hub is shut down separately (first, when possible) */
	spin_lock_irq(&fotg210->lock);
	end_free_itds(fotg210);
	spin_unlock_irq(&fotg210->lock);
	fotg210_mem_cleanup(fotg210);
#ifdef FOTG210_STATS
	fotg210_dbg(fotg210, "irq normal %ld err %ld iaa %ld (lost %ld)\n",
			fotg210->stats.normal, fotg210->stats.error,
			fotg210->stats.iaa, fotg210->stats.lost_iaa);
	fotg210_dbg(fotg210, "complete %ld unlink %ld\n",
			fotg210->stats.complete, fotg210->stats.unlink);
#endif
	dbg_status(fotg210, "fotg210_stop completed",
			fotg210_readl(fotg210, &fotg210->regs->status));
}
/* one-time init, only for memory state
 *
 * Sets up locks, the hrtimer, periodic schedule size, the async ring
 * head QH, and the default command-register value.  Does not touch the
 * running controller.  Returns 0 or a negative errno from memory init.
 */
static int hcd_fotg210_init(struct usb_hcd *hcd)
{
	struct fotg210_hcd *fotg210 = hcd_to_fotg210(hcd);
	u32 temp;
	int retval;
	u32 hcc_params;
	struct fotg210_qh_hw *hw;
	spin_lock_init(&fotg210->lock);
	/*
	 * keep io watchdog by default, those good HCDs could turn off it later
	 */
	fotg210->need_io_watchdog = 1;
	hrtimer_init(&fotg210->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
	fotg210->hrtimer.function = fotg210_hrtimer_func;
	fotg210->next_hrtimer_event = FOTG210_HRTIMER_NO_EVENT;
	hcc_params = fotg210_readl(fotg210, &fotg210->caps->hcc_params);
	/*
	 * by default set standard 80% (== 100 usec/uframe) max periodic
	 * bandwidth as required by USB 2.0
	 */
	fotg210->uframe_periodic_max = 100;
	/*
	 * hw default: 1K periodic list heads, one per frame.
	 * periodic_size can shrink by USBCMD update if hcc_params allows.
	 */
	fotg210->periodic_size = DEFAULT_I_TDPS;
	INIT_LIST_HEAD(&fotg210->intr_qh_list);
	INIT_LIST_HEAD(&fotg210->cached_itd_list);
	if (HCC_PGM_FRAMELISTLEN(hcc_params)) {
		/* periodic schedule size can be smaller than default */
		switch (FOTG210_TUNE_FLS) {
		case 0:
			fotg210->periodic_size = 1024;
			break;
		case 1:
			fotg210->periodic_size = 512;
			break;
		case 2:
			fotg210->periodic_size = 256;
			break;
		default:
			/* FOTG210_TUNE_FLS is a compile-time constant, so
			 * any other value is a build configuration bug.
			 */
			BUG();
		}
	}
	retval = fotg210_mem_init(fotg210, GFP_KERNEL);
	if (retval < 0)
		return retval;
	/* controllers may cache some of the periodic schedule ... */
	fotg210->i_thresh = 2;
	/*
	 * dedicate a qh for the async ring head, since we couldn't unlink
	 * a 'real' qh without stopping the async schedule [4.8].  use it
	 * as the 'reclamation list head' too.
	 * its dummy is used in hw_alt_next of many tds, to prevent the qh
	 * from automatically advancing to the next td after short reads.
	 */
	fotg210->async->qh_next.qh = NULL;
	hw = fotg210->async->hw;
	hw->hw_next = QH_NEXT(fotg210, fotg210->async->qh_dma);
	hw->hw_info1 = cpu_to_hc32(fotg210, QH_HEAD);
	hw->hw_token = cpu_to_hc32(fotg210, QTD_STS_HALT);
	hw->hw_qtd_next = FOTG210_LIST_END(fotg210);
	fotg210->async->qh_state = QH_STATE_LINKED;
	hw->hw_alt_next = QTD_NEXT(fotg210, fotg210->async->dummy->qtd_dma);
	/* clear interrupt enables, set irq latency */
	if (log2_irq_thresh < 0 || log2_irq_thresh > 6)
		log2_irq_thresh = 0;
	temp = 1 << (16 + log2_irq_thresh);
	if (HCC_CANPARK(hcc_params)) {
		/* HW default park == 3, on hardware that supports it (like
		 * NVidia and ALI silicon), maximizes throughput on the async
		 * schedule by avoiding QH fetches between transfers.
		 *
		 * With fast usb storage devices and NForce2, "park" seems to
		 * make problems:  throughput reduction (!), data errors...
		 */
		if (park) {
			park = min_t(unsigned, park, 3);
			temp |= CMD_PARK;
			temp |= park << 8;
		}
		fotg210_dbg(fotg210, "park %d\n", park);
	}
	if (HCC_PGM_FRAMELISTLEN(hcc_params)) {
		/* periodic schedule size can be smaller than default */
		temp &= ~(3 << 2);
		temp |= (FOTG210_TUNE_FLS << 2);
	}
	/* cached USBCMD value; written to hardware by fotg210_run() */
	fotg210->command = temp;
	/* Accept arbitrarily long scatter-gather lists */
	if (!hcd->localmem_pool)
		hcd->self.sg_tablesize = ~0;
	return 0;
}
/* start HC running; it's halted, hcd_fotg210_init() has been run (once)
 *
 * Programs the schedule base registers, sets CMD_RUN, takes ownership of
 * the ports, enables interrupts, and creates debug/sysfs files.
 */
static int fotg210_run(struct usb_hcd *hcd)
{
	struct fotg210_hcd *fotg210 = hcd_to_fotg210(hcd);
	u32 temp;
	hcd->uses_new_polling = 1;
	/* EHCI spec section 4.1 */
	fotg210_writel(fotg210, fotg210->periodic_dma,
			&fotg210->regs->frame_list);
	fotg210_writel(fotg210, (u32)fotg210->async->qh_dma,
			&fotg210->regs->async_next);
	/*
	 * hcc_params controls whether fotg210->regs->segment must (!!!)
	 * be used; it constrains QH/ITD/SITD and QTD locations.
	 * dma_pool consistent memory always uses segment zero.
	 * streaming mappings for I/O buffers, like dma_map_single(),
	 * can return segments above 4GB, if the device allows.
	 *
	 * NOTE:  the dma mask is visible through dev->dma_mask, so
	 * drivers can pass this info along ... like NETIF_F_HIGHDMA,
	 * Scsi_Host.highmem_io, and so forth.  It's readonly to all
	 * host side drivers though.
	 */
	fotg210_readl(fotg210, &fotg210->caps->hcc_params);
	/*
	 * Philips, Intel, and maybe others need CMD_RUN before the
	 * root hub will detect new devices (why?); NEC doesn't
	 */
	fotg210->command &= ~(CMD_IAAD|CMD_PSE|CMD_ASE|CMD_RESET);
	fotg210->command |= CMD_RUN;
	fotg210_writel(fotg210, fotg210->command, &fotg210->regs->command);
	dbg_cmd(fotg210, "init", fotg210->command);
	/*
	 * Start, enabling full USB 2.0 functionality ... usb 1.1 devices
	 * are explicitly handed to companion controller(s), so no TT is
	 * involved with the root hub.  (Except where one is integrated,
	 * and there's no companion controller unless maybe for USB OTG.)
	 *
	 * Turning on the CF flag will transfer ownership of all ports
	 * from the companions to the EHCI controller.  If any of the
	 * companions are in the middle of a port reset at the time, it
	 * could cause trouble.  Write-locking ehci_cf_port_reset_rwsem
	 * guarantees that no resets are in progress.  After we set CF,
	 * a short delay lets the hardware catch up; new resets shouldn't
	 * be started before the port switching actions could complete.
	 */
	down_write(&ehci_cf_port_reset_rwsem);
	fotg210->rh_state = FOTG210_RH_RUNNING;
	/* unblock posted writes */
	fotg210_readl(fotg210, &fotg210->regs->command);
	usleep_range(5000, 10000);
	up_write(&ehci_cf_port_reset_rwsem);
	fotg210->last_periodic_enable = ktime_get_real();
	temp = HC_VERSION(fotg210,
			fotg210_readl(fotg210, &fotg210->caps->hc_capbase));
	fotg210_info(fotg210,
			"USB %x.%x started, EHCI %x.%02x\n",
			((fotg210->sbrn & 0xf0) >> 4), (fotg210->sbrn & 0x0f),
			temp >> 8, temp & 0xff);
	fotg210_writel(fotg210, INTR_MASK,
			&fotg210->regs->intr_enable); /* Turn On Interrupts */
	/* GRR this is run-once init(), being done every time the HC starts.
	 * So long as they're part of class devices, we can't do it init()
	 * since the class device isn't created that early.
	 */
	create_debug_files(fotg210);
	create_sysfs_files(fotg210);
	return 0;
}
/*
 * fotg210_setup - the hc_driver->reset hook
 *
 * Locates the operational registers past the capability block, caches the
 * read-only HCS parameters, runs the one-time memory init, then halts and
 * resets the controller.  Returns 0 or a negative errno.
 */
static int fotg210_setup(struct usb_hcd *hcd)
{
	struct fotg210_hcd *fotg210 = hcd_to_fotg210(hcd);
	int retval;
	/* operational registers start HC_LENGTH bytes past the caps */
	fotg210->regs = (void __iomem *)fotg210->caps +
			HC_LENGTH(fotg210,
			fotg210_readl(fotg210, &fotg210->caps->hc_capbase));
	dbg_hcs_params(fotg210, "reset");
	dbg_hcc_params(fotg210, "reset");
	/* cache this readonly data; minimize chip reads */
	fotg210->hcs_params = fotg210_readl(fotg210,
			&fotg210->caps->hcs_params);
	fotg210->sbrn = HCD_USB2;
	/* data structure init */
	retval = hcd_fotg210_init(hcd);
	if (retval)
		return retval;
	retval = fotg210_halt(fotg210);
	if (retval)
		return retval;
	fotg210_reset(fotg210);
	return 0;
}
/*
 * fotg210_irq - top-half interrupt handler
 *
 * Reads and acknowledges the status register, dispatches completion work
 * (via fotg210_work), IAA handling, remote-wakeup port events, and fatal
 * errors.  Returns IRQ_NONE when the interrupt wasn't ours.
 */
static irqreturn_t fotg210_irq(struct usb_hcd *hcd)
{
	struct fotg210_hcd *fotg210 = hcd_to_fotg210(hcd);
	u32 status, masked_status, pcd_status = 0, cmd;
	int bh;
	spin_lock(&fotg210->lock);
	status = fotg210_readl(fotg210, &fotg210->regs->status);
	/* e.g. cardbus physical eject */
	if (status == ~(u32) 0) {
		fotg210_dbg(fotg210, "device removed\n");
		goto dead;
	}
	/*
	 * We don't use STS_FLR, but some controllers don't like it to
	 * remain on, so mask it out along with the other status bits.
	 */
	masked_status = status & (INTR_MASK | STS_FLR);
	/* Shared IRQ? */
	if (!masked_status ||
			unlikely(fotg210->rh_state == FOTG210_RH_HALTED)) {
		spin_unlock(&fotg210->lock);
		return IRQ_NONE;
	}
	/* clear (just) interrupts */
	fotg210_writel(fotg210, masked_status, &fotg210->regs->status);
	cmd = fotg210_readl(fotg210, &fotg210->regs->command);
	bh = 0;
	/* unrequested/ignored: Frame List Rollover */
	dbg_status(fotg210, "irq", status);
	/* INT, ERR, and IAA interrupt rates can be throttled */
	/* normal [4.15.1.2] or error [4.15.1.1] completion */
	if (likely((status & (STS_INT|STS_ERR)) != 0)) {
		if (likely((status & STS_ERR) == 0))
			INCR(fotg210->stats.normal);
		else
			INCR(fotg210->stats.error);
		/* defer completion scanning to fotg210_work() below */
		bh = 1;
	}
	/* complete the unlinking of some qh [4.15.2.3] */
	if (status & STS_IAA) {
		/* Turn off the IAA watchdog */
		fotg210->enabled_hrtimer_events &=
			~BIT(FOTG210_HRTIMER_IAA_WATCHDOG);
		/*
		 * Mild optimization: Allow another IAAD to reset the
		 * hrtimer, if one occurs before the next expiration.
		 * In theory we could always cancel the hrtimer, but
		 * tests show that about half the time it will be reset
		 * for some other event anyway.
		 */
		if (fotg210->next_hrtimer_event == FOTG210_HRTIMER_IAA_WATCHDOG)
			++fotg210->next_hrtimer_event;
		/* guard against (alleged) silicon errata */
		if (cmd & CMD_IAAD)
			fotg210_dbg(fotg210, "IAA with IAAD still set?\n");
		if (fotg210->async_iaa) {
			INCR(fotg210->stats.iaa);
			end_unlink_async(fotg210);
		} else
			fotg210_dbg(fotg210, "IAA with nothing unlinked?\n");
	}
	/* remote wakeup [4.3.1] */
	if (status & STS_PCD) {
		int pstatus;
		u32 __iomem *status_reg = &fotg210->regs->port_status;
		/* kick root hub later */
		pcd_status = status;
		/* resume root hub? */
		if (fotg210->rh_state == FOTG210_RH_SUSPENDED)
			usb_hcd_resume_root_hub(hcd);
		pstatus = fotg210_readl(fotg210, status_reg);
		if (test_bit(0, &fotg210->suspended_ports) &&
				((pstatus & PORT_RESUME) ||
				!(pstatus & PORT_SUSPEND)) &&
				(pstatus & PORT_PE) &&
				fotg210->reset_done[0] == 0) {
			/* start 20 msec resume signaling from this port,
			 * and make hub_wq collect PORT_STAT_C_SUSPEND to
			 * stop that signaling.  Use 5 ms extra for safety,
			 * like usb_port_resume() does.
			 */
			fotg210->reset_done[0] = jiffies + msecs_to_jiffies(25);
			set_bit(0, &fotg210->resuming_ports);
			fotg210_dbg(fotg210, "port 1 remote wakeup\n");
			mod_timer(&hcd->rh_timer, fotg210->reset_done[0]);
		}
	}
	/* PCI errors [4.15.2.4] */
	if (unlikely((status & STS_FATAL) != 0)) {
		fotg210_err(fotg210, "fatal error\n");
		dbg_cmd(fotg210, "fatal", cmd);
		dbg_status(fotg210, "fatal", status);
dead:
		usb_hc_died(hcd);
		/* Don't let the controller do anything more */
		fotg210->shutdown = true;
		fotg210->rh_state = FOTG210_RH_STOPPING;
		fotg210->command &= ~(CMD_RUN | CMD_ASE | CMD_PSE);
		fotg210_writel(fotg210, fotg210->command,
				&fotg210->regs->command);
		fotg210_writel(fotg210, 0, &fotg210->regs->intr_enable);
		fotg210_handle_controller_death(fotg210);
		/* Handle completions when the controller stops */
		bh = 0;
	}
	if (bh)
		fotg210_work(fotg210);
	spin_unlock(&fotg210->lock);
	if (pcd_status)
		usb_hcd_poll_rh_status(hcd);
	return IRQ_HANDLED;
}
/* non-error returns are a promise to giveback() the urb later
 * we drop ownership so next owner (or urb unlink) can get it
 *
 * urb + dev is in hcd.self.controller.urb_list
 * we're queueing TDs onto software and hardware lists
 *
 * hcd-specific init for hcpriv hasn't been done yet
 *
 * NOTE:  control, bulk, and interrupt share the same code to append TDs
 * to a (possibly active) QH, and the same QH scanning code.
 */
static int fotg210_urb_enqueue(struct usb_hcd *hcd, struct urb *urb,
		gfp_t mem_flags)
{
	struct fotg210_hcd *fotg210 = hcd_to_fotg210(hcd);
	struct list_head qtd_list;
	INIT_LIST_HEAD(&qtd_list);
	switch (usb_pipetype(urb->pipe)) {
	case PIPE_CONTROL:
		/* qh_completions() code doesn't handle all the fault cases
		 * in multi-TD control transfers.  Even 1KB is rare anyway.
		 */
		if (urb->transfer_buffer_length > (16 * 1024))
			return -EMSGSIZE;
		fallthrough;
	/* case PIPE_BULK: */
	default:
		/* control and bulk both go through the async schedule */
		if (!qh_urb_transaction(fotg210, urb, &qtd_list, mem_flags))
			return -ENOMEM;
		return submit_async(fotg210, urb, &qtd_list, mem_flags);
	case PIPE_INTERRUPT:
		if (!qh_urb_transaction(fotg210, urb, &qtd_list, mem_flags))
			return -ENOMEM;
		return intr_submit(fotg210, urb, &qtd_list, mem_flags);
	case PIPE_ISOCHRONOUS:
		/* iso uses iTDs, not qTDs; no qtd_list needed */
		return itd_submit(fotg210, urb, mem_flags);
	}
}
/* remove from hardware lists
 * completions normally happen asynchronously
 *
 * For async and interrupt pipes this starts (or notes an in-progress)
 * QH unlink; isochronous URBs are left for the next completion scan.
 */
static int fotg210_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
{
	struct fotg210_hcd *fotg210 = hcd_to_fotg210(hcd);
	struct fotg210_qh *qh;
	unsigned long flags;
	int rc;
	spin_lock_irqsave(&fotg210->lock, flags);
	rc = usb_hcd_check_unlink_urb(hcd, urb, status);
	if (rc)
		goto done;
	switch (usb_pipetype(urb->pipe)) {
	/* case PIPE_CONTROL: */
	/* case PIPE_BULK:*/
	default:
		qh = (struct fotg210_qh *) urb->hcpriv;
		if (!qh)
			break;
		switch (qh->qh_state) {
		case QH_STATE_LINKED:
		case QH_STATE_COMPLETING:
			start_unlink_async(fotg210, qh);
			break;
		case QH_STATE_UNLINK:
		case QH_STATE_UNLINK_WAIT:
			/* already started */
			break;
		case QH_STATE_IDLE:
			/* QH might be waiting for a Clear-TT-Buffer */
			qh_completions(fotg210, qh);
			break;
		}
		break;
	case PIPE_INTERRUPT:
		qh = (struct fotg210_qh *) urb->hcpriv;
		if (!qh)
			break;
		switch (qh->qh_state) {
		case QH_STATE_LINKED:
		case QH_STATE_COMPLETING:
			start_unlink_intr(fotg210, qh);
			break;
		case QH_STATE_IDLE:
			qh_completions(fotg210, qh);
			break;
		default:
			fotg210_dbg(fotg210, "bogus qh %p state %d\n",
					qh, qh->qh_state);
			goto done;
		}
		break;
	case PIPE_ISOCHRONOUS:
		/* itd... */
		/* wait till next completion, do it then. */
		/* completion irqs can wait up to 1024 msec, */
		break;
	}
done:
	spin_unlock_irqrestore(&fotg210->lock, flags);
	return rc;
}
/* bulk qh holds the data toggle
 *
 * Frees the QH (or iso stream) attached to @ep, spinning with the lock
 * dropped until any in-flight unlink or pending work has finished.
 */
static void fotg210_endpoint_disable(struct usb_hcd *hcd,
		struct usb_host_endpoint *ep)
{
	struct fotg210_hcd *fotg210 = hcd_to_fotg210(hcd);
	unsigned long flags;
	struct fotg210_qh *qh, *tmp;
	/* ASSERT:  any requests/urbs are being unlinked */
	/* ASSERT:  nobody can be submitting urbs for this any more */
rescan:
	spin_lock_irqsave(&fotg210->lock, flags);
	qh = ep->hcpriv;
	if (!qh)
		goto done;
	/* endpoints can be iso streams.  for now, we don't
	 * accelerate iso completions ... so spin a while.
	 */
	if (qh->hw == NULL) {
		/* hw == NULL marks an iso stream, not a real QH */
		struct fotg210_iso_stream *stream = ep->hcpriv;
		if (!list_empty(&stream->td_list))
			goto idle_timeout;
		/* BUG_ON(!list_empty(&stream->free_list)); */
		kfree(stream);
		goto done;
	}
	/* controller halted: the QH can be reclaimed immediately */
	if (fotg210->rh_state < FOTG210_RH_RUNNING)
		qh->qh_state = QH_STATE_IDLE;
	switch (qh->qh_state) {
	case QH_STATE_LINKED:
	case QH_STATE_COMPLETING:
		/* check whether the qh is still on the async ring */
		for (tmp = fotg210->async->qh_next.qh;
				tmp && tmp != qh;
				tmp = tmp->qh_next.qh)
			continue;
		/* periodic qh self-unlinks on empty, and a COMPLETING qh
		 * may already be unlinked.
		 */
		if (tmp)
			start_unlink_async(fotg210, qh);
		fallthrough;
	case QH_STATE_UNLINK:		/* wait for hw to finish? */
	case QH_STATE_UNLINK_WAIT:
idle_timeout:
		spin_unlock_irqrestore(&fotg210->lock, flags);
		schedule_timeout_uninterruptible(1);
		goto rescan;
	case QH_STATE_IDLE:		/* fully unlinked */
		if (qh->clearing_tt)
			goto idle_timeout;
		if (list_empty(&qh->qtd_list)) {
			qh_destroy(fotg210, qh);
			break;
		}
		fallthrough;
	default:
		/* caller was supposed to have unlinked any requests;
		 * that's not our job.  just leak this memory.
		 */
		fotg210_err(fotg210, "qh %p (#%02x) state %d%s\n",
				qh, ep->desc.bEndpointAddress, qh->qh_state,
				list_empty(&qh->qtd_list) ? "" : "(has tds)");
		break;
	}
done:
	ep->hcpriv = NULL;
	spin_unlock_irqrestore(&fotg210->lock, flags);
}
/*
 * fotg210_endpoint_reset - reset the data toggle after usb_clear_halt()
 *
 * Only bulk and interrupt endpoints keep toggle state in the QH; other
 * endpoint types are ignored.  An active QH is unlinked so the new toggle
 * takes effect when it is re-linked (qh_refresh()).
 */
static void fotg210_endpoint_reset(struct usb_hcd *hcd,
		struct usb_host_endpoint *ep)
{
	struct fotg210_hcd *fotg210 = hcd_to_fotg210(hcd);
	struct fotg210_qh *qh;
	int eptype = usb_endpoint_type(&ep->desc);
	int epnum = usb_endpoint_num(&ep->desc);
	int is_out = usb_endpoint_dir_out(&ep->desc);
	unsigned long flags;
	if (eptype != USB_ENDPOINT_XFER_BULK && eptype != USB_ENDPOINT_XFER_INT)
		return;
	spin_lock_irqsave(&fotg210->lock, flags);
	qh = ep->hcpriv;
	/* For Bulk and Interrupt endpoints we maintain the toggle state
	 * in the hardware; the toggle bits in udev aren't used at all.
	 * When an endpoint is reset by usb_clear_halt() we must reset
	 * the toggle bit in the QH.
	 */
	if (qh) {
		usb_settoggle(qh->dev, epnum, is_out, 0);
		if (!list_empty(&qh->qtd_list)) {
			WARN_ONCE(1, "clear_halt for a busy endpoint\n");
		} else if (qh->qh_state == QH_STATE_LINKED ||
				qh->qh_state == QH_STATE_COMPLETING) {
			/* The toggle value in the QH can't be updated
			 * while the QH is active.  Unlink it now;
			 * re-linking will call qh_refresh().
			 */
			if (eptype == USB_ENDPOINT_XFER_BULK)
				start_unlink_async(fotg210, qh);
			else
				start_unlink_intr(fotg210, qh);
		}
	}
	spin_unlock_irqrestore(&fotg210->lock, flags);
}
static int fotg210_get_frame(struct usb_hcd *hcd)
{
	struct fotg210_hcd *fotg210 = hcd_to_fotg210(hcd);
	unsigned frame = fotg210_read_frame_index(fotg210) >> 3;

	/* drop the microframe bits, wrap at the periodic schedule size */
	return frame % fotg210->periodic_size;
}
/* The EHCI in ChipIdea HDRC cannot be a separate module or device,
 * because its registers (and irq) are shared between host/gadget/otg
 * functions and in order to facilitate role switching we cannot
 * give the fotg210 driver exclusive access to those.
 */
/* hc_driver ops table handed to usb_create_hcd() in fotg210_hcd_probe() */
static const struct hc_driver fotg210_fotg210_hc_driver = {
	.description = hcd_name,
	.product_desc = "Faraday USB2.0 Host Controller",
	.hcd_priv_size = sizeof(struct fotg210_hcd),
	/*
	 * generic hardware linkage
	 */
	.irq = fotg210_irq,
	.flags = HCD_MEMORY | HCD_DMA | HCD_USB2,
	/*
	 * basic lifecycle operations
	 */
	.reset = hcd_fotg210_init,
	.start = fotg210_run,
	.stop = fotg210_stop,
	.shutdown = fotg210_shutdown,
	/*
	 * managing i/o requests and associated device resources
	 */
	.urb_enqueue = fotg210_urb_enqueue,
	.urb_dequeue = fotg210_urb_dequeue,
	.endpoint_disable = fotg210_endpoint_disable,
	.endpoint_reset = fotg210_endpoint_reset,
	/*
	 * scheduling support
	 */
	.get_frame_number = fotg210_get_frame,
	/*
	 * root hub support
	 */
	.hub_status_data = fotg210_hub_status_data,
	.hub_control = fotg210_hub_control,
	.bus_suspend = fotg210_bus_suspend,
	.bus_resume = fotg210_bus_resume,
	.relinquish_port = fotg210_relinquish_port,
	.port_handed_over = fotg210_port_handed_over,
	.clear_tt_buffer_complete = fotg210_clear_tt_buffer_complete,
};
static void fotg210_init(struct fotg210_hcd *fotg210)
{
	u32 otgcsr;

	/* Mask device/OTG interrupt sources and set interrupt polarity. */
	iowrite32(GMIR_MDEV_INT | GMIR_MOTG_INT | GMIR_INT_POLARITY,
			&fotg210->regs->gmir);
	/* Request the A-bus: clear the drop bit, set the request bit. */
	otgcsr = ioread32(&fotg210->regs->otgcsr);
	otgcsr &= ~OTGCSR_A_BUS_DROP;
	otgcsr |= OTGCSR_A_BUS_REQ;
	iowrite32(otgcsr, &fotg210->regs->otgcsr);
}
/*
 * fotg210_hcd_probe - initialize faraday FOTG210 HCDs
 *
 * Allocates basic resources for this USB host controller, and
 * then invokes the start() method for the HCD associated with it
 * through the hotplug entry's driver_data.
 */
int fotg210_hcd_probe(struct platform_device *pdev, struct fotg210 *fotg)
{
	struct device *dev = &pdev->dev;
	struct usb_hcd *hcd;
	int irq;
	int retval;
	struct fotg210_hcd *fotg210;
	if (usb_disabled())
		return -ENODEV;
	pdev->dev.power.power_state = PMSG_ON;
	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;
	hcd = usb_create_hcd(&fotg210_fotg210_hc_driver, dev,
			dev_name(dev));
	if (!hcd) {
		retval = dev_err_probe(dev, -ENOMEM, "failed to create hcd\n");
		goto fail_create_hcd;
	}
	hcd->has_tt = 1;
	/* MMIO region was mapped by the caller and handed in via @fotg */
	hcd->regs = fotg->base;
	hcd->rsrc_start = fotg->res->start;
	hcd->rsrc_len = resource_size(fotg->res);
	fotg210 = hcd_to_fotg210(hcd);
	fotg210->fotg = fotg;
	fotg210->caps = hcd->regs;
	/* one-time data-structure init, then halt+reset the controller */
	retval = fotg210_setup(hcd);
	if (retval)
		goto failed_put_hcd;
	fotg210_init(fotg210);
	retval = usb_add_hcd(hcd, irq, IRQF_SHARED);
	if (retval) {
		dev_err_probe(dev, retval, "failed to add hcd\n");
		goto failed_put_hcd;
	}
	device_wakeup_enable(hcd->self.controller);
	platform_set_drvdata(pdev, hcd);
	return retval;
failed_put_hcd:
	usb_put_hcd(hcd);
fail_create_hcd:
	return dev_err_probe(dev, retval, "init %s fail\n", dev_name(dev));
}
/*
 * fotg210_hcd_remove - shutdown processing for EHCI HCDs
 * @dev: USB Host Controller being removed
 *
 * Unregisters the HCD created by fotg210_hcd_probe() and drops the
 * final reference to it.  Always returns 0.
 */
int fotg210_hcd_remove(struct platform_device *pdev)
{
	struct usb_hcd *hcd = platform_get_drvdata(pdev);
	usb_remove_hcd(hcd);
	usb_put_hcd(hcd);
	return 0;
}
/*
 * fotg210_hcd_init - module init: register load order and debugfs root
 *
 * Marks the EHCI-class driver as loaded (and warns if UHCI/OHCI loaded
 * first, since EHCI-style controllers must claim ports before the
 * companions), then creates the driver's debugfs directory.
 */
int __init fotg210_hcd_init(void)
{
	if (usb_disabled())
		return -ENODEV;
	set_bit(USB_EHCI_LOADED, &usb_hcds_loaded);
	if (test_bit(USB_UHCI_LOADED, &usb_hcds_loaded) ||
			test_bit(USB_OHCI_LOADED, &usb_hcds_loaded))
		pr_warn("Warning! fotg210_hcd should always be loaded before uhci_hcd and ohci_hcd, not after\n");
	pr_debug("%s: block sizes: qh %zd qtd %zd itd %zd\n",
			hcd_name, sizeof(struct fotg210_qh),
			sizeof(struct fotg210_qtd),
			sizeof(struct fotg210_itd));
	fotg210_debug_root = debugfs_create_dir("fotg210", usb_debug_root);
	return 0;
}
/*
 * fotg210_hcd_cleanup - module exit: undo fotg210_hcd_init()
 *
 * Removes the debugfs directory and clears the EHCI-class loaded flag.
 */
void __exit fotg210_hcd_cleanup(void)
{
	debugfs_remove(fotg210_debug_root);
	clear_bit(USB_EHCI_LOADED, &usb_hcds_loaded);
}
| linux-master | drivers/usb/fotg210/fotg210-hcd.c |
// SPDX-License-Identifier: GPL-2.0
/*
* STMicroelectronics STUSB160x Type-C controller family driver
*
* Copyright (C) 2020, STMicroelectronics
* Author(s): Amelie Delaunay <[email protected]>
*/
#include <linux/bitfield.h>
#include <linux/i2c.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/regmap.h>
#include <linux/regulator/consumer.h>
#include <linux/usb/role.h>
#include <linux/usb/typec.h>
#define STUSB160X_ALERT_STATUS 0x0B /* RC */
#define STUSB160X_ALERT_STATUS_MASK_CTRL 0x0C /* RW */
#define STUSB160X_CC_CONNECTION_STATUS_TRANS 0x0D /* RC */
#define STUSB160X_CC_CONNECTION_STATUS 0x0E /* RO */
#define STUSB160X_MONITORING_STATUS_TRANS 0x0F /* RC */
#define STUSB160X_MONITORING_STATUS 0x10 /* RO */
#define STUSB160X_CC_OPERATION_STATUS 0x11 /* RO */
#define STUSB160X_HW_FAULT_STATUS_TRANS 0x12 /* RC */
#define STUSB160X_HW_FAULT_STATUS 0x13 /* RO */
#define STUSB160X_CC_CAPABILITY_CTRL 0x18 /* RW */
#define STUSB160X_CC_VCONN_SWITCH_CTRL 0x1E /* RW */
#define STUSB160X_VCONN_MONITORING_CTRL 0x20 /* RW */
#define STUSB160X_VBUS_MONITORING_RANGE_CTRL 0x22 /* RW */
#define STUSB160X_RESET_CTRL 0x23 /* RW */
#define STUSB160X_VBUS_DISCHARGE_TIME_CTRL 0x25 /* RW */
#define STUSB160X_VBUS_DISCHARGE_STATUS 0x26 /* RO */
#define STUSB160X_VBUS_ENABLE_STATUS 0x27 /* RO */
#define STUSB160X_CC_POWER_MODE_CTRL 0x28 /* RW */
#define STUSB160X_VBUS_MONITORING_CTRL 0x2E /* RW */
#define STUSB1600_REG_MAX 0x2F /* RO - Reserved */
/* STUSB160X_ALERT_STATUS/STUSB160X_ALERT_STATUS_MASK_CTRL bitfields */
#define STUSB160X_HW_FAULT BIT(4)
#define STUSB160X_MONITORING BIT(5)
#define STUSB160X_CC_CONNECTION BIT(6)
#define STUSB160X_ALL_ALERTS GENMASK(6, 4)
/* STUSB160X_CC_CONNECTION_STATUS_TRANS bitfields */
#define STUSB160X_CC_ATTACH_TRANS BIT(0)
/* STUSB160X_CC_CONNECTION_STATUS bitfields */
#define STUSB160X_CC_ATTACH BIT(0)
#define STUSB160X_CC_VCONN_SUPPLY BIT(1)
#define STUSB160X_CC_DATA_ROLE(s) (!!((s) & BIT(2)))
#define STUSB160X_CC_POWER_ROLE(s) (!!((s) & BIT(3)))
#define STUSB160X_CC_ATTACHED_MODE GENMASK(7, 5)
/* STUSB160X_MONITORING_STATUS_TRANS bitfields */
#define STUSB160X_VCONN_PRESENCE_TRANS BIT(0)
#define STUSB160X_VBUS_PRESENCE_TRANS BIT(1)
#define STUSB160X_VBUS_VSAFE0V_TRANS BIT(2)
#define STUSB160X_VBUS_VALID_TRANS BIT(3)
/* STUSB160X_MONITORING_STATUS bitfields */
#define STUSB160X_VCONN_PRESENCE BIT(0)
#define STUSB160X_VBUS_PRESENCE BIT(1)
#define STUSB160X_VBUS_VSAFE0V BIT(2)
#define STUSB160X_VBUS_VALID BIT(3)
/* STUSB160X_CC_OPERATION_STATUS bitfields */
#define STUSB160X_TYPEC_FSM_STATE GENMASK(4, 0)
#define STUSB160X_SINK_POWER_STATE GENMASK(6, 5)
#define STUSB160X_CC_ATTACHED BIT(7)
/* STUSB160X_HW_FAULT_STATUS_TRANS bitfields */
#define STUSB160X_VCONN_SW_OVP_FAULT_TRANS BIT(0)
#define STUSB160X_VCONN_SW_OCP_FAULT_TRANS BIT(1)
#define STUSB160X_VCONN_SW_RVP_FAULT_TRANS BIT(2)
#define STUSB160X_VPU_VALID_TRANS BIT(4)
#define STUSB160X_VPU_OVP_FAULT_TRANS BIT(5)
#define STUSB160X_THERMAL_FAULT BIT(7)
/* STUSB160X_HW_FAULT_STATUS bitfields */
#define STUSB160X_VCONN_SW_OVP_FAULT_CC2 BIT(0)
#define STUSB160X_VCONN_SW_OVP_FAULT_CC1 BIT(1)
#define STUSB160X_VCONN_SW_OCP_FAULT_CC2 BIT(2)
#define STUSB160X_VCONN_SW_OCP_FAULT_CC1 BIT(3)
#define STUSB160X_VCONN_SW_RVP_FAULT_CC2 BIT(4)
#define STUSB160X_VCONN_SW_RVP_FAULT_CC1 BIT(5)
#define STUSB160X_VPU_VALID BIT(6)
#define STUSB160X_VPU_OVP_FAULT BIT(7)
/* STUSB160X_CC_CAPABILITY_CTRL bitfields */
#define STUSB160X_CC_VCONN_SUPPLY_EN BIT(0)
#define STUSB160X_CC_VCONN_DISCHARGE_EN BIT(4)
#define STUSB160X_CC_CURRENT_ADVERTISED GENMASK(7, 6)
/* STUSB160X_VCONN_SWITCH_CTRL bitfields */
#define STUSB160X_CC_VCONN_SWITCH_ILIM GENMASK(3, 0)
/* STUSB160X_VCONN_MONITORING_CTRL bitfields */
#define STUSB160X_VCONN_UVLO_THRESHOLD BIT(6)
#define STUSB160X_VCONN_MONITORING_EN BIT(7)
/* STUSB160X_VBUS_MONITORING_RANGE_CTRL bitfields */
#define STUSB160X_SHIFT_LOW_VBUS_LIMIT GENMASK(3, 0)
#define STUSB160X_SHIFT_HIGH_VBUS_LIMIT GENMASK(7, 4)
/* STUSB160X_RESET_CTRL bitfields */
#define STUSB160X_SW_RESET_EN BIT(0)
/* STUSB160X_VBUS_DISCHARGE_TIME_CTRL bitfields */
#define STUSBXX02_VBUS_DISCHARGE_TIME_TO_PDO GENMASK(3, 0)
#define STUSB160X_VBUS_DISCHARGE_TIME_TO_0V GENMASK(7, 4)
/* STUSB160X_VBUS_DISCHARGE_STATUS bitfields */
#define STUSB160X_VBUS_DISCHARGE_EN BIT(7)
/* STUSB160X_VBUS_ENABLE_STATUS bitfields */
#define STUSB160X_VBUS_SOURCE_EN BIT(0)
#define STUSB160X_VBUS_SINK_EN BIT(1)
/* STUSB160X_CC_POWER_MODE_CTRL bitfields */
#define STUSB160X_CC_POWER_MODE GENMASK(2, 0)
/* STUSB160X_VBUS_MONITORING_CTRL bitfields */
#define STUSB160X_VDD_UVLO_DISABLE BIT(0)
#define STUSB160X_VBUS_VSAFE0V_THRESHOLD GENMASK(2, 1)
#define STUSB160X_VBUS_RANGE_DISABLE BIT(4)
#define STUSB160X_VDD_OVLO_DISABLE BIT(6)
/* Values programmed into STUSB160X_CC_POWER_MODE (see stusb160x_chip_init). */
enum stusb160x_pwr_mode {
	SOURCE_WITH_ACCESSORY,
	SINK_WITH_ACCESSORY,
	SINK_WITHOUT_ACCESSORY,
	DUAL_WITH_ACCESSORY,
	DUAL_WITH_ACCESSORY_AND_TRY_SRC,
	DUAL_WITH_ACCESSORY_AND_TRY_SNK,
};
/* Attached-partner type decoded from the CC connection status register. */
enum stusb160x_attached_mode {
	NO_DEVICE_ATTACHED,
	SINK_ATTACHED,
	SOURCE_ATTACHED,
	DEBUG_ACCESSORY_ATTACHED,
	AUDIO_ACCESSORY_ATTACHED,
};
/*
 * struct stusb160x - per-device driver state
 * @dev:		underlying I2C client device
 * @regmap:		register map over I2C
 * @vdd_supply:		optional VDD supply (also provides Vbus when source)
 * @vsys_supply:	optional low-power VSYS supply
 * @vconn_supply:	optional VCONN input supply
 * @main_supply:	supply actually powering the chip (vdd or vsys)
 * @port:		registered Type-C port
 * @capability:		Type-C port capabilities
 * @partner:		currently attached partner, NULL when detached
 * @port_type:		configured port power role (SRC/SNK/DRP)
 * @pwr_opmode:		advertised source power operation mode
 * @vbus_on:		true while vdd_supply is enabled to provide Vbus
 * @role_sw:		USB role switch used to propagate the data role
 */
struct stusb160x {
	struct device *dev;
	struct regmap *regmap;
	struct regulator *vdd_supply;
	struct regulator *vsys_supply;
	struct regulator *vconn_supply;
	struct regulator *main_supply;
	struct typec_port *port;
	struct typec_capability capability;
	struct typec_partner *partner;
	enum typec_port_type port_type;
	enum typec_pwr_opmode pwr_opmode;
	bool vbus_on;
	struct usb_role_switch *role_sw;
};
/* regmap callback: only the control registers accept writes. */
static bool stusb160x_reg_writeable(struct device *dev, unsigned int reg)
{
	switch (reg) {
	case STUSB160X_ALERT_STATUS_MASK_CTRL:
	case STUSB160X_CC_CAPABILITY_CTRL:
	case STUSB160X_CC_VCONN_SWITCH_CTRL:
	case STUSB160X_VCONN_MONITORING_CTRL:
	case STUSB160X_VBUS_MONITORING_RANGE_CTRL:
	case STUSB160X_RESET_CTRL:
	case STUSB160X_VBUS_DISCHARGE_TIME_CTRL:
	case STUSB160X_CC_POWER_MODE_CTRL:
	case STUSB160X_VBUS_MONITORING_CTRL:
		return true;
	default:
		return false;
	}
}
/*
 * regmap callback: report which registers may be read.
 * The listed address windows are reserved/undocumented and must not be
 * accessed; everything else is readable.
 */
static bool stusb160x_reg_readable(struct device *dev, unsigned int reg)
{
	bool reserved = reg <= 0x0A ||
			(reg >= 0x14 && reg <= 0x17) ||
			(reg >= 0x19 && reg <= 0x1D) ||
			(reg >= 0x29 && reg <= 0x2D) ||
			reg == 0x1F || reg == 0x21 ||
			reg == 0x24 || reg == 0x2F;

	return !reserved;
}
/* regmap callback: status registers must always be read from hardware. */
static bool stusb160x_reg_volatile(struct device *dev, unsigned int reg)
{
	switch (reg) {
	case STUSB160X_ALERT_STATUS:
	case STUSB160X_CC_CONNECTION_STATUS_TRANS:
	case STUSB160X_CC_CONNECTION_STATUS:
	case STUSB160X_MONITORING_STATUS_TRANS:
	case STUSB160X_MONITORING_STATUS:
	case STUSB160X_CC_OPERATION_STATUS:
	case STUSB160X_HW_FAULT_STATUS_TRANS:
	case STUSB160X_HW_FAULT_STATUS:
	case STUSB160X_VBUS_DISCHARGE_STATUS:
	case STUSB160X_VBUS_ENABLE_STATUS:
		return true;
	default:
		return false;
	}
}
/*
 * regmap callback: registers whose read has side effects (presumably
 * read-to-clear event registers), so regmap must not read them for
 * debugfs/cache purposes.
 */
static bool stusb160x_reg_precious(struct device *dev, unsigned int reg)
{
	switch (reg) {
	case STUSB160X_ALERT_STATUS:
	case STUSB160X_CC_CONNECTION_STATUS_TRANS:
	case STUSB160X_MONITORING_STATUS_TRANS:
	case STUSB160X_HW_FAULT_STATUS_TRANS:
		return true;
	default:
		return false;
	}
}
/* 8-bit register/value regmap with a cache; status regs are volatile/precious. */
static const struct regmap_config stusb1600_regmap_config = {
	.reg_bits = 8,
	.reg_stride = 1,
	.val_bits = 8,
	.max_register = STUSB1600_REG_MAX,
	.writeable_reg = stusb160x_reg_writeable,
	.readable_reg = stusb160x_reg_readable,
	.volatile_reg = stusb160x_reg_volatile,
	.precious_reg = stusb160x_reg_precious,
	.cache_type = REGCACHE_RBTREE,
};
/*
 * Return whether the chip is configured to supply VCONN
 * (STUSB160X_CC_VCONN_SUPPLY_EN bit). On a register read failure the
 * error is logged and false is returned.
 */
static bool stusb160x_get_vconn(struct stusb160x *chip)
{
	u32 regval;
	int err;

	err = regmap_read(chip->regmap, STUSB160X_CC_CAPABILITY_CTRL, &regval);
	if (err) {
		dev_err(chip->dev, "Unable to get Vconn status: %d\n", err);
		return false;
	}

	return FIELD_GET(STUSB160X_CC_VCONN_SUPPLY_EN, regval) != 0;
}
/*
 * Enable or disable VCONN: first the optional external VCONN input
 * supply, then the chip's VCONN monitoring/power path. If updating the
 * monitoring register fails, a just-enabled supply is rolled back.
 */
static int stusb160x_set_vconn(struct stusb160x *chip, bool on)
{
	int ret;
	/* Manage VCONN input supply */
	if (chip->vconn_supply) {
		if (on) {
			ret = regulator_enable(chip->vconn_supply);
			if (ret) {
				dev_err(chip->dev,
					"failed to enable vconn supply: %d\n",
					ret);
				return ret;
			}
		} else {
			regulator_disable(chip->vconn_supply);
		}
	}
	/* Manage VCONN monitoring and power path */
	ret = regmap_update_bits(chip->regmap, STUSB160X_VCONN_MONITORING_CTRL,
				 STUSB160X_VCONN_MONITORING_EN,
				 on ? STUSB160X_VCONN_MONITORING_EN : 0);
	if (ret)
		goto vconn_reg_disable;
	return 0;
vconn_reg_disable:
	/* Undo the supply enable done above on register failure. */
	if (chip->vconn_supply && on)
		regulator_disable(chip->vconn_supply);
	return ret;
}
/*
 * Read the advertised source current capability from the chip and return
 * it as a typec_pwr_opmode. Falls back to TYPEC_PWR_MODE_USB if the
 * register cannot be read.
 */
static enum typec_pwr_opmode stusb160x_get_pwr_opmode(struct stusb160x *chip)
{
	u32 regval;
	int err;

	err = regmap_read(chip->regmap, STUSB160X_CC_CAPABILITY_CTRL, &regval);
	if (err) {
		dev_err(chip->dev, "Unable to get pwr opmode: %d\n", err);
		return TYPEC_PWR_MODE_USB;
	}

	return FIELD_GET(STUSB160X_CC_CURRENT_ADVERTISED, regval);
}
/*
 * Map the attached-mode field of the CC connection status register to a
 * Type-C accessory type (debug/audio), or TYPEC_ACCESSORY_NONE for any
 * non-accessory attachment.
 */
static enum typec_accessory stusb160x_get_accessory(u32 status)
{
	enum stusb160x_attached_mode attached =
		FIELD_GET(STUSB160X_CC_ATTACHED_MODE, status);

	if (attached == DEBUG_ACCESSORY_ATTACHED)
		return TYPEC_ACCESSORY_DEBUG;
	if (attached == AUDIO_ACCESSORY_ATTACHED)
		return TYPEC_ACCESSORY_AUDIO;

	return TYPEC_ACCESSORY_NONE;
}
/* VCONN role from the connection status: supplying VCONN means source. */
static enum typec_role stusb160x_get_vconn_role(u32 status)
{
	return FIELD_GET(STUSB160X_CC_VCONN_SUPPLY, status) ? TYPEC_SOURCE
							    : TYPEC_SINK;
}
/*
 * Propagate the Type-C data role to both the USB role switch and the
 * Type-C class. When detached, the role switch is set to USB_ROLE_NONE
 * regardless of @data_role.
 */
static void stusb160x_set_data_role(struct stusb160x *chip,
				    enum typec_data_role data_role,
				    bool attached)
{
	enum usb_role usb_role;

	if (!attached)
		usb_role = USB_ROLE_NONE;
	else if (data_role == TYPEC_HOST)
		usb_role = USB_ROLE_HOST;
	else
		usb_role = USB_ROLE_DEVICE;

	usb_role_switch_set_role(chip->role_sw, usb_role);
	typec_set_data_role(chip->port, data_role);
}
/*
 * Handle a Type-C attach event: enable the Vbus-providing VDD supply when
 * acting as source, register the partner, and push power/data/VCONN roles
 * and the power operation mode to the Type-C class. On partner
 * registration failure, a just-enabled Vbus supply is disabled again.
 */
static int stusb160x_attach(struct stusb160x *chip, u32 status)
{
	struct typec_partner_desc desc;
	int ret;
	/* We are the source: provide Vbus through the optional VDD supply. */
	if ((STUSB160X_CC_POWER_ROLE(status) == TYPEC_SOURCE) &&
	    chip->vdd_supply) {
		ret = regulator_enable(chip->vdd_supply);
		if (ret) {
			dev_err(chip->dev,
				"Failed to enable Vbus supply: %d\n", ret);
			return ret;
		}
		chip->vbus_on = true;
	}
	desc.usb_pd = false;
	desc.accessory = stusb160x_get_accessory(status);
	desc.identity = NULL;
	chip->partner = typec_register_partner(chip->port, &desc);
	if (IS_ERR(chip->partner)) {
		ret = PTR_ERR(chip->partner);
		goto vbus_disable;
	}
	typec_set_pwr_role(chip->port, STUSB160X_CC_POWER_ROLE(status));
	typec_set_pwr_opmode(chip->port, stusb160x_get_pwr_opmode(chip));
	typec_set_vconn_role(chip->port, stusb160x_get_vconn_role(status));
	stusb160x_set_data_role(chip, STUSB160X_CC_DATA_ROLE(status), true);
	return 0;
vbus_disable:
	if (chip->vbus_on) {
		regulator_disable(chip->vdd_supply);
		chip->vbus_on = false;
	}
	return ret;
}
/*
 * Handle a Type-C detach event: unregister the partner, reset roles and
 * power mode from the current status, and disable the Vbus supply if it
 * was enabled on attach.
 */
static void stusb160x_detach(struct stusb160x *chip, u32 status)
{
	typec_unregister_partner(chip->partner);
	chip->partner = NULL;
	typec_set_pwr_role(chip->port, STUSB160X_CC_POWER_ROLE(status));
	typec_set_pwr_opmode(chip->port, TYPEC_PWR_MODE_USB);
	typec_set_vconn_role(chip->port, stusb160x_get_vconn_role(status));
	stusb160x_set_data_role(chip, STUSB160X_CC_DATA_ROLE(status), false);
	if (chip->vbus_on) {
		regulator_disable(chip->vdd_supply);
		chip->vbus_on = false;
	}
}
/*
 * Threaded IRQ handler. On a CC connection alert, read the transition and
 * status registers and perform attach/detach accordingly. Register-read
 * errors abort the handling silently; IRQ_HANDLED is always returned
 * since there is nothing to retry from here.
 */
static irqreturn_t stusb160x_irq_handler(int irq, void *data)
{
	struct stusb160x *chip = data;
	u32 pending, trans, status;
	int ret;
	ret = regmap_read(chip->regmap, STUSB160X_ALERT_STATUS, &pending);
	if (ret)
		goto err;
	if (pending & STUSB160X_CC_CONNECTION) {
		ret = regmap_read(chip->regmap,
				  STUSB160X_CC_CONNECTION_STATUS_TRANS, &trans);
		if (ret)
			goto err;
		ret = regmap_read(chip->regmap,
				  STUSB160X_CC_CONNECTION_STATUS, &status);
		if (ret)
			goto err;
		/* Only act on an actual attach-state transition. */
		if (trans & STUSB160X_CC_ATTACH_TRANS) {
			if (status & STUSB160X_CC_ATTACH) {
				ret = stusb160x_attach(chip, status);
				if (ret)
					goto err;
			} else {
				stusb160x_detach(chip, status);
			}
		}
	}
err:
	return IRQ_HANDLED;
}
/*
 * Set up interrupt-driven operation: handle an already-attached partner,
 * request the threaded IRQ, and unmask CC connection events. On failure
 * after a successful attach, the partner is unregistered again.
 */
static int stusb160x_irq_init(struct stusb160x *chip, int irq)
{
	u32 status;
	int ret;
	ret = regmap_read(chip->regmap,
			  STUSB160X_CC_CONNECTION_STATUS, &status);
	if (ret)
		return ret;
	/* Partner may already be attached before the IRQ is armed. */
	if (status & STUSB160X_CC_ATTACH) {
		ret = stusb160x_attach(chip, status);
		if (ret)
			dev_err(chip->dev, "attach failed: %d\n", ret);
	}
	ret = devm_request_threaded_irq(chip->dev, irq, NULL,
					stusb160x_irq_handler, IRQF_ONESHOT,
					dev_name(chip->dev), chip);
	if (ret)
		goto partner_unregister;
	/* Unmask CC_CONNECTION events */
	ret = regmap_write_bits(chip->regmap, STUSB160X_ALERT_STATUS_MASK_CTRL,
				STUSB160X_CC_CONNECTION, 0);
	if (ret)
		goto partner_unregister;
	return 0;
partner_unregister:
	if (chip->partner) {
		typec_unregister_partner(chip->partner);
		chip->partner = NULL;
	}
	return ret;
}
/*
 * Program the chip according to the resolved configuration: power mode,
 * advertised source current (skipped for sink-only ports) and VCONN
 * supply, then mask all alerts and drain stale event registers.
 */
static int stusb160x_chip_init(struct stusb160x *chip)
{
	u32 val;
	int ret;
	/* Change the default Type-C power mode */
	if (chip->port_type == TYPEC_PORT_SRC)
		ret = regmap_update_bits(chip->regmap,
					 STUSB160X_CC_POWER_MODE_CTRL,
					 STUSB160X_CC_POWER_MODE,
					 SOURCE_WITH_ACCESSORY);
	else if (chip->port_type == TYPEC_PORT_SNK)
		ret = regmap_update_bits(chip->regmap,
					 STUSB160X_CC_POWER_MODE_CTRL,
					 STUSB160X_CC_POWER_MODE,
					 SINK_WITH_ACCESSORY);
	else /* (chip->port_type == TYPEC_PORT_DRP) */
		ret = regmap_update_bits(chip->regmap,
					 STUSB160X_CC_POWER_MODE_CTRL,
					 STUSB160X_CC_POWER_MODE,
					 DUAL_WITH_ACCESSORY);
	if (ret)
		return ret;
	if (chip->port_type == TYPEC_PORT_SNK)
		goto skip_src;
	/* Change the default Type-C Source power operation mode capability */
	ret = regmap_update_bits(chip->regmap, STUSB160X_CC_CAPABILITY_CTRL,
				 STUSB160X_CC_CURRENT_ADVERTISED,
				 FIELD_PREP(STUSB160X_CC_CURRENT_ADVERTISED,
					    chip->pwr_opmode));
	if (ret)
		return ret;
	/* Manage Type-C Source Vconn supply */
	if (stusb160x_get_vconn(chip)) {
		ret = stusb160x_set_vconn(chip, true);
		if (ret)
			return ret;
	}
skip_src:
	/* Mask all events interrupts - to be unmasked with interrupt support */
	ret = regmap_update_bits(chip->regmap, STUSB160X_ALERT_STATUS_MASK_CTRL,
				 STUSB160X_ALL_ALERTS, STUSB160X_ALL_ALERTS);
	if (ret)
		return ret;
	/* Read status at least once to clear any stale interrupts */
	regmap_read(chip->regmap, STUSB160X_ALERT_STATUS, &val);
	regmap_read(chip->regmap, STUSB160X_CC_CONNECTION_STATUS_TRANS, &val);
	regmap_read(chip->regmap, STUSB160X_MONITORING_STATUS_TRANS, &val);
	regmap_read(chip->regmap, STUSB160X_HW_FAULT_STATUS_TRANS, &val);
	return 0;
}
/*
 * Override the chip-provided configuration with optional firmware
 * (device tree "connector" node) properties: "power-role" and, for
 * non-sink ports, "typec-power-opmode". PD opmode is rejected as the
 * driver does not support Power Delivery.
 */
static int stusb160x_get_fw_caps(struct stusb160x *chip,
				 struct fwnode_handle *fwnode)
{
	const char *cap_str;
	int ret;
	chip->capability.fwnode = fwnode;
	/*
	 * Supported port type can be configured through device tree
	 * else it is read from chip registers in stusb160x_get_caps.
	 */
	ret = fwnode_property_read_string(fwnode, "power-role", &cap_str);
	if (!ret) {
		ret = typec_find_port_power_role(cap_str);
		if (ret < 0)
			return ret;
		chip->port_type = ret;
	}
	chip->capability.type = chip->port_type;
	/* Skip DRP/Source capabilities in case of Sink only */
	if (chip->port_type == TYPEC_PORT_SNK)
		return 0;
	if (chip->port_type == TYPEC_PORT_DRP)
		chip->capability.prefer_role = TYPEC_SINK;
	/*
	 * Supported power operation mode can be configured through device tree
	 * else it is read from chip registers in stusb160x_get_caps.
	 */
	ret = fwnode_property_read_string(fwnode, "typec-power-opmode", &cap_str);
	if (!ret) {
		ret = typec_find_pwr_opmode(cap_str);
		/* Power delivery not yet supported */
		if (ret < 0 || ret == TYPEC_PWR_MODE_PD) {
			dev_err(chip->dev, "bad power operation mode: %d\n", ret);
			return -EINVAL;
		}
		chip->pwr_opmode = ret;
	}
	return 0;
}
/*
 * Read the default port configuration from the chip's power-mode register
 * and translate it into Type-C capabilities (port type, data role,
 * supported accessories) plus the advertised power operation mode.
 */
static int stusb160x_get_caps(struct stusb160x *chip)
{
	enum typec_port_type *type = &chip->capability.type;
	enum typec_port_data *data = &chip->capability.data;
	/* Cursor into the capability.accessory[] array (filled below). */
	enum typec_accessory *accessory = chip->capability.accessory;
	u32 val;
	int ret;
	chip->capability.revision = USB_TYPEC_REV_1_2;
	ret = regmap_read(chip->regmap, STUSB160X_CC_POWER_MODE_CTRL, &val);
	if (ret)
		return ret;
	switch (FIELD_GET(STUSB160X_CC_POWER_MODE, val)) {
	case SOURCE_WITH_ACCESSORY:
		*type = TYPEC_PORT_SRC;
		*data = TYPEC_PORT_DFP;
		*accessory++ = TYPEC_ACCESSORY_AUDIO;
		*accessory++ = TYPEC_ACCESSORY_DEBUG;
		break;
	case SINK_WITH_ACCESSORY:
		*type = TYPEC_PORT_SNK;
		*data = TYPEC_PORT_UFP;
		*accessory++ = TYPEC_ACCESSORY_AUDIO;
		*accessory++ = TYPEC_ACCESSORY_DEBUG;
		break;
	case SINK_WITHOUT_ACCESSORY:
		*type = TYPEC_PORT_SNK;
		*data = TYPEC_PORT_UFP;
		break;
	case DUAL_WITH_ACCESSORY:
	case DUAL_WITH_ACCESSORY_AND_TRY_SRC:
	case DUAL_WITH_ACCESSORY_AND_TRY_SNK:
		*type = TYPEC_PORT_DRP;
		*data = TYPEC_PORT_DRD;
		*accessory++ = TYPEC_ACCESSORY_AUDIO;
		*accessory++ = TYPEC_ACCESSORY_DEBUG;
		break;
	default:
		return -EINVAL;
	}
	chip->port_type = *type;
	chip->pwr_opmode = stusb160x_get_pwr_opmode(chip);
	return 0;
}
/* Match table; .data carries the per-variant regmap configuration. */
static const struct of_device_id stusb160x_of_match[] = {
	{ .compatible = "st,stusb1600", .data = &stusb1600_regmap_config},
	{},
};
MODULE_DEVICE_TABLE(of, stusb160x_of_match);
/*
 * Probe: set up the regmap, grab the optional vsys/vdd/vconn supplies,
 * pick and enable the main chip supply, read the port configuration from
 * the chip and the "connector" fwnode, initialize the chip and register
 * the Type-C port. With an IRQ, attach/detach is handled dynamically;
 * without one, Vbus is enabled statically for source/dual-role ports.
 */
static int stusb160x_probe(struct i2c_client *client)
{
	struct stusb160x *chip;
	const struct of_device_id *match;
	struct regmap_config *regmap_config;
	struct fwnode_handle *fwnode;
	int ret;
	chip = devm_kzalloc(&client->dev, sizeof(struct stusb160x), GFP_KERNEL);
	if (!chip)
		return -ENOMEM;
	i2c_set_clientdata(client, chip);
	match = i2c_of_match_device(stusb160x_of_match, client);
	regmap_config = (struct regmap_config *)match->data;
	chip->regmap = devm_regmap_init_i2c(client, regmap_config);
	if (IS_ERR(chip->regmap)) {
		ret = PTR_ERR(chip->regmap);
		dev_err(&client->dev,
			"Failed to allocate register map:%d\n", ret);
		return ret;
	}
	chip->dev = &client->dev;
	/* All three supplies are optional; -ENODEV means "not provided". */
	chip->vsys_supply = devm_regulator_get_optional(chip->dev, "vsys");
	if (IS_ERR(chip->vsys_supply)) {
		ret = PTR_ERR(chip->vsys_supply);
		if (ret != -ENODEV)
			return ret;
		chip->vsys_supply = NULL;
	}
	chip->vdd_supply = devm_regulator_get_optional(chip->dev, "vdd");
	if (IS_ERR(chip->vdd_supply)) {
		ret = PTR_ERR(chip->vdd_supply);
		if (ret != -ENODEV)
			return ret;
		chip->vdd_supply = NULL;
	}
	chip->vconn_supply = devm_regulator_get_optional(chip->dev, "vconn");
	if (IS_ERR(chip->vconn_supply)) {
		ret = PTR_ERR(chip->vconn_supply);
		if (ret != -ENODEV)
			return ret;
		chip->vconn_supply = NULL;
	}
	fwnode = device_get_named_child_node(chip->dev, "connector");
	if (!fwnode)
		return -ENODEV;
	/*
	 * This fwnode has a "compatible" property, but is never populated as a
	 * struct device. Instead we simply parse it to read the properties.
	 * This it breaks fw_devlink=on. To maintain backward compatibility
	 * with existing DT files, we work around this by deleting any
	 * fwnode_links to/from this fwnode.
	 */
	fw_devlink_purge_absent_suppliers(fwnode);
	/*
	 * When both VDD and VSYS power supplies are present, the low power
	 * supply VSYS is selected when VSYS voltage is above 3.1 V.
	 * Otherwise VDD is selected.
	 */
	if (chip->vdd_supply &&
	    (!chip->vsys_supply ||
	     (regulator_get_voltage(chip->vsys_supply) <= 3100000)))
		chip->main_supply = chip->vdd_supply;
	else
		chip->main_supply = chip->vsys_supply;
	if (chip->main_supply) {
		ret = regulator_enable(chip->main_supply);
		if (ret) {
			dev_err(chip->dev,
				"Failed to enable main supply: %d\n", ret);
			goto fwnode_put;
		}
	}
	/* Get configuration from chip */
	ret = stusb160x_get_caps(chip);
	if (ret) {
		dev_err(chip->dev, "Failed to get port caps: %d\n", ret);
		goto main_reg_disable;
	}
	/* Get optional re-configuration from device tree */
	ret = stusb160x_get_fw_caps(chip, fwnode);
	if (ret) {
		dev_err(chip->dev, "Failed to get connector caps: %d\n", ret);
		goto main_reg_disable;
	}
	ret = stusb160x_chip_init(chip);
	if (ret) {
		dev_err(chip->dev, "Failed to init port: %d\n", ret);
		goto main_reg_disable;
	}
	chip->port = typec_register_port(chip->dev, &chip->capability);
	if (IS_ERR(chip->port)) {
		ret = PTR_ERR(chip->port);
		goto all_reg_disable;
	}
	/*
	 * Default power operation mode initialization: will be updated upon
	 * attach/detach interrupt
	 */
	typec_set_pwr_opmode(chip->port, chip->pwr_opmode);
	if (client->irq) {
		chip->role_sw = fwnode_usb_role_switch_get(fwnode);
		if (IS_ERR(chip->role_sw)) {
			ret = dev_err_probe(chip->dev, PTR_ERR(chip->role_sw),
					    "Failed to get usb role switch\n");
			goto port_unregister;
		}
		ret = stusb160x_irq_init(chip, client->irq);
		if (ret)
			goto role_sw_put;
	} else {
		/*
		 * If Source or Dual power role, need to enable VDD supply
		 * providing Vbus if present. In case of interrupt support,
		 * VDD supply will be dynamically managed upon attach/detach
		 * interrupt.
		 */
		if (chip->port_type != TYPEC_PORT_SNK && chip->vdd_supply) {
			ret = regulator_enable(chip->vdd_supply);
			if (ret) {
				dev_err(chip->dev,
					"Failed to enable VDD supply: %d\n",
					ret);
				goto port_unregister;
			}
			chip->vbus_on = true;
		}
	}
	fwnode_handle_put(fwnode);
	return 0;
role_sw_put:
	if (chip->role_sw)
		usb_role_switch_put(chip->role_sw);
port_unregister:
	typec_unregister_port(chip->port);
all_reg_disable:
	if (stusb160x_get_vconn(chip))
		stusb160x_set_vconn(chip, false);
main_reg_disable:
	if (chip->main_supply)
		regulator_disable(chip->main_supply);
fwnode_put:
	fwnode_handle_put(fwnode);
	return ret;
}
/*
 * Remove: tear down in reverse of probe — partner, Vbus supply, role
 * switch, Type-C port, VCONN, and finally the main chip supply.
 */
static void stusb160x_remove(struct i2c_client *client)
{
	struct stusb160x *chip = i2c_get_clientdata(client);
	if (chip->partner) {
		typec_unregister_partner(chip->partner);
		chip->partner = NULL;
	}
	if (chip->vbus_on)
		regulator_disable(chip->vdd_supply);
	if (chip->role_sw)
		usb_role_switch_put(chip->role_sw);
	typec_unregister_port(chip->port);
	if (stusb160x_get_vconn(chip))
		stusb160x_set_vconn(chip, false);
	if (chip->main_supply)
		regulator_disable(chip->main_supply);
}
/* System suspend: mask all chip interrupts while sleeping. */
static int __maybe_unused stusb160x_suspend(struct device *dev)
{
	struct stusb160x *chip = dev_get_drvdata(dev);
	/* Mask interrupts */
	return regmap_update_bits(chip->regmap,
				  STUSB160X_ALERT_STATUS_MASK_CTRL,
				  STUSB160X_ALL_ALERTS, STUSB160X_ALL_ALERTS);
}
/*
 * System resume: restore cached registers, reconcile attach state that
 * may have changed while suspended, then unmask CC connection events.
 */
static int __maybe_unused stusb160x_resume(struct device *dev)
{
	struct stusb160x *chip = dev_get_drvdata(dev);
	u32 status;
	int ret;
	ret = regcache_sync(chip->regmap);
	if (ret)
		return ret;
	/* Check if attach/detach occurred during low power */
	ret = regmap_read(chip->regmap,
			  STUSB160X_CC_CONNECTION_STATUS, &status);
	if (ret)
		return ret;
	if (chip->partner && !(status & STUSB160X_CC_ATTACH))
		stusb160x_detach(chip, status);
	if (!chip->partner && (status & STUSB160X_CC_ATTACH)) {
		ret = stusb160x_attach(chip, status);
		if (ret)
			dev_err(chip->dev, "attach failed: %d\n", ret);
	}
	/* Unmask interrupts */
	return regmap_write_bits(chip->regmap, STUSB160X_ALERT_STATUS_MASK_CTRL,
				 STUSB160X_CC_CONNECTION, 0);
}
/* PM ops and I2C driver registration. */
static SIMPLE_DEV_PM_OPS(stusb160x_pm_ops, stusb160x_suspend, stusb160x_resume);
static struct i2c_driver stusb160x_driver = {
	.driver = {
		.name = "stusb160x",
		.pm = &stusb160x_pm_ops,
		.of_match_table = stusb160x_of_match,
	},
	.probe = stusb160x_probe,
	.remove = stusb160x_remove,
};
module_i2c_driver(stusb160x_driver);
MODULE_AUTHOR("Amelie Delaunay <[email protected]>");
MODULE_DESCRIPTION("STMicroelectronics STUSB160x Type-C controller driver");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/usb/typec/stusb160x.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Driver for Analogix ANX7411 USB Type-C and PD controller
*
* Copyright(c) 2022, Analogix Semiconductor. All rights reserved.
*
*/
#include <linux/gpio/consumer.h>
#include <linux/i2c.h>
#include <linux/interrupt.h>
#include <linux/iopoll.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/of_graph.h>
#include <linux/of_platform.h>
#include <linux/pm_runtime.h>
#include <linux/regulator/consumer.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/usb/pd.h>
#include <linux/usb/role.h>
#include <linux/usb/tcpci.h>
#include <linux/usb/typec.h>
#include <linux/usb/typec_dp.h>
#include <linux/usb/typec_mux.h>
#include <linux/workqueue.h>
#include <linux/power_supply.h>
#define TCPC_ADDRESS1 0x58
#define TCPC_ADDRESS2 0x56
#define TCPC_ADDRESS3 0x54
#define TCPC_ADDRESS4 0x52
#define SPI_ADDRESS1 0x7e
#define SPI_ADDRESS2 0x6e
#define SPI_ADDRESS3 0x64
#define SPI_ADDRESS4 0x62
/* Pairing of the chip's TCPC I2C address with its matching SPI-access address. */
struct anx7411_i2c_select {
	u8 tcpc_address;
	u8 spi_address;
};
#define VID_ANALOGIX 0x1F29
#define PID_ANALOGIX 0x7411
/* TCPC register define */
#define ANALOG_CTRL_10 0xAA
#define STATUS_LEN 2
#define ALERT_0 0xCB
#define RECEIVED_MSG BIT(7)
#define SOFTWARE_INT BIT(6)
#define MSG_LEN 32
#define HEADER_LEN 2
#define MSG_HEADER 0x00
#define MSG_TYPE 0x01
#define MSG_RAWDATA 0x02
#define MSG_LEN_MASK 0x1F
#define ALERT_1 0xCC
#define INTP_POW_ON BIT(7)
#define INTP_POW_OFF BIT(6)
#define VBUS_THRESHOLD_H 0xDD
#define VBUS_THRESHOLD_L 0xDE
#define FW_CTRL_0 0xF0
#define UNSTRUCT_VDM_EN BIT(0)
#define DELAY_200MS BIT(1)
#define VSAFE0 0
#define VSAFE1 BIT(2)
#define VSAFE2 BIT(3)
#define VSAFE3 (BIT(2) | BIT(3))
#define FRS_EN BIT(7)
#define FW_PARAM 0xF1
#define DONGLE_IOP BIT(0)
#define FW_CTRL_2 0xF7
#define SINK_CTRL_DIS_FLAG BIT(5)
/* SPI register define */
#define OCM_CTRL_0 0x6E
#define OCM_RESET BIT(6)
#define MAX_VOLTAGE 0xAC
#define MAX_POWER 0xAD
#define MIN_POWER 0xAE
#define REQUEST_VOLTAGE 0xAF
#define VOLTAGE_UNIT 100 /* mV per unit */
#define REQUEST_CURRENT 0xB1
#define CURRENT_UNIT 50 /* mA per unit */
#define CMD_SEND_BUF 0xC0
#define CMD_RECV_BUF 0xE0
#define REQ_VOL_20V_IN_100MV 0xC8
#define REQ_CUR_2_25A_IN_50MA 0x2D
#define REQ_CUR_3_25A_IN_50MA 0x41
#define DEF_5V 5000
#define DEF_1_5A 1500
#define LOBYTE(w) ((u8)((w) & 0xFF))
#define HIBYTE(w) ((u8)(((u16)(w) >> 8) & 0xFF))
/* Message types exchanged with the on-chip OCM firmware (see anx7411_parse_cmd). */
enum anx7411_typec_message_type {
	TYPE_SRC_CAP = 0x00,
	TYPE_SNK_CAP = 0x01,
	TYPE_SNK_IDENTITY = 0x02,
	TYPE_SVID = 0x03,
	TYPE_SET_SNK_DP_CAP = 0x08,
	TYPE_PSWAP_REQ = 0x10,
	TYPE_DSWAP_REQ = 0x11,
	TYPE_VDM = 0x14,
	TYPE_OBJ_REQ = 0x16,
	TYPE_DP_ALT_ENTER = 0x19,
	TYPE_DP_DISCOVER_MODES_INFO = 0x27,
	TYPE_GET_DP_CONFIG = 0x29,
	TYPE_DP_CONFIGURE = 0x2A,
	TYPE_GET_DP_DISCOVER_MODES_INFO = 0x2E,
	TYPE_GET_DP_ALT_ENTER = 0x2F,
};
#define FW_CTRL_1 0xB2
#define AUTO_PD_EN BIT(1)
#define TRYSRC_EN BIT(2)
#define TRYSNK_EN BIT(3)
#define FORCE_SEND_RDO BIT(6)
#define FW_VER 0xB4
#define FW_SUBVER 0xB5
#define INT_MASK 0xB6
#define INT_STS 0xB7
#define OCM_BOOT_UP BIT(0)
#define OC_OV_EVENT BIT(1)
#define VCONN_CHANGE BIT(2)
#define VBUS_CHANGE BIT(3)
#define CC_STATUS_CHANGE BIT(4)
#define DATA_ROLE_CHANGE BIT(5)
#define PR_CONSUMER_GOT_POWER BIT(6)
#define HPD_STATUS_CHANGE BIT(7)
#define SYSTEM_STSTUS 0xB8
/* 0: SINK off; 1: SINK on */
#define SINK_STATUS BIT(1)
/* 0: VCONN off; 1: VCONN on*/
#define VCONN_STATUS BIT(2)
/* 0: vbus off; 1: vbus on*/
#define VBUS_STATUS BIT(3)
/* 1: host; 0:device*/
#define DATA_ROLE BIT(5)
/* 0: Chunking; 1: Unchunked*/
#define SUPPORT_UNCHUNKING BIT(6)
/* 0: HPD low; 1: HPD high*/
#define HPD_STATUS BIT(7)
#define DATA_DFP 1
#define DATA_UFP 2
#define POWER_SOURCE 1
#define POWER_SINK 2
#define CC_STATUS 0xB9
#define CC1_RD BIT(0)
#define CC2_RD BIT(4)
#define CC1_RA BIT(1)
#define CC2_RA BIT(5)
#define CC1_RD BIT(0)
#define CC1_RP(cc) (((cc) >> 2) & 0x03)
#define CC2_RP(cc) (((cc) >> 6) & 0x03)
#define PD_REV_INIT 0xBA
#define PD_EXT_MSG_CTRL 0xBB
#define SRC_CAP_EXT_REPLY BIT(0)
#define MANUFACTURER_INFO_REPLY BIT(1)
#define BATTERY_STS_REPLY BIT(2)
#define BATTERY_CAP_REPLY BIT(3)
#define ALERT_REPLY BIT(4)
#define STATUS_REPLY BIT(5)
#define PPS_STATUS_REPLY BIT(6)
#define SNK_CAP_EXT_REPLY BIT(7)
#define NO_CONNECT 0x00
#define USB3_1_CONNECTED 0x01
#define DP_ALT_4LANES 0x02
#define USB3_1_DP_2LANES 0x03
#define CC1_CONNECTED 0x01
#define CC2_CONNECTED 0x02
#define SELECT_PIN_ASSIGMENT_C 0x04
#define SELECT_PIN_ASSIGMENT_D 0x08
#define SELECT_PIN_ASSIGMENT_E 0x10
#define SELECT_PIN_ASSIGMENT_U 0x00
#define REDRIVER_ADDRESS 0x20
#define REDRIVER_OFFSET 0x00
#define DP_SVID 0xFF01
#define VDM_ACK 0x40
#define VDM_CMD_RES 0x00
#define VDM_CMD_DIS_ID 0x01
#define VDM_CMD_DIS_SVID 0x02
#define VDM_CMD_DIS_MOD 0x03
#define VDM_CMD_ENTER_MODE 0x04
#define VDM_CMD_EXIT_MODE 0x05
#define VDM_CMD_ATTENTION 0x06
#define VDM_CMD_GET_STS 0x10
#define VDM_CMD_AND_ACK_MASK 0x5F
#define MAX_ALTMODE 2
#define HAS_SOURCE_CAP BIT(0)
#define HAS_SINK_CAP BIT(1)
#define HAS_SINK_WATT BIT(2)
/* Power-supply online states exported through the psy interface. */
enum anx7411_psy_state {
	/* copy from drivers/usb/typec/tcpm */
	ANX7411_PSY_OFFLINE = 0,
	ANX7411_PSY_FIXED_ONLINE,
	/* private */
	/* PD keep in, but disconnct power to bq25700,
	 * this state can be active when higher capacity adapter plug in,
	 * and change to ONLINE state when higher capacity adapter plug out
	 */
	ANX7411_PSY_HANG = 0xff,
};
/* Type-C state tracked by the driver (roles, altmodes, PDOs, mux/switch). */
struct typec_params {
	int request_current; /* ma */
	int request_voltage; /* mv */
	int cc_connect;
	int cc_orientation_valid;
	int cc_status;
	int data_role;
	int power_role;
	int vconn_role;
	int dp_altmode_enter;
	int cust_altmode_enter;
	struct usb_role_switch *role_sw;
	struct typec_port *port;
	struct typec_partner *partner;
	struct typec_mux_dev *typec_mux;
	struct typec_switch_dev *typec_switch;
	struct typec_altmode *amode[MAX_ALTMODE];
	struct typec_altmode *port_amode[MAX_ALTMODE];
	struct typec_displayport_data data;
	int pin_assignment;
	struct typec_capability caps;
	u32 src_pdo[PDO_MAX_OBJECTS];
	u32 sink_pdo[PDO_MAX_OBJECTS];
	u8 caps_flags;
	u8 src_pdo_nr;
	u8 sink_pdo_nr;
	u8 sink_watt;
	u8 sink_voltage;
};
#define MAX_BUF_LEN 30
/* Wire format of an OCM firmware message: length, type, then payload. */
struct fw_msg {
	u8 msg_len;
	u8 msg_type;
	u8 buf[MAX_BUF_LEN];
} __packed;
/* Per-device driver state: both I2C clients, message buffers, IRQ work, psy. */
struct anx7411_data {
	int fw_version;
	int fw_subversion;
	struct i2c_client *tcpc_client;
	struct i2c_client *spi_client;
	struct fw_msg send_msg;
	struct fw_msg recv_msg;
	struct gpio_desc *intp_gpiod;
	struct fwnode_handle *connector_fwnode;
	struct typec_params typec;
	int intp_irq;
	struct work_struct work;
	struct workqueue_struct *workqueue;
	/* Lock for interrupt work queue */
	struct mutex lock;
	enum anx7411_psy_state psy_online;
	enum power_supply_usb_type usb_type;
	struct power_supply *psy;
	struct power_supply_desc psy_desc;
	struct device *dev;
};
/* Sink identity VDOs (ID header, cert stat, product) sent to the OCM. */
static u8 snk_identity[] = {
	LOBYTE(VID_ANALOGIX), HIBYTE(VID_ANALOGIX), 0x00, 0x82, /* snk_id_hdr */
	0x00, 0x00, 0x00, 0x00, /* snk_cert */
	0x00, 0x00, LOBYTE(PID_ANALOGIX), HIBYTE(PID_ANALOGIX), /* 5snk_ama */
};
/* Sink DP capability payload used for TYPE_SET_SNK_DP_CAP. */
static u8 dp_caps[4] = {0xC6, 0x00, 0x00, 0x00};
/* Thin SMBus accessors; all return the smbus call's result (negative errno on error). */
static int anx7411_reg_read(struct i2c_client *client,
			    u8 reg_addr)
{
	return i2c_smbus_read_byte_data(client, reg_addr);
}
static int anx7411_reg_block_read(struct i2c_client *client,
				  u8 reg_addr, u8 len, u8 *buf)
{
	return i2c_smbus_read_i2c_block_data(client, reg_addr, len, buf);
}
static int anx7411_reg_write(struct i2c_client *client,
			     u8 reg_addr, u8 reg_val)
{
	return i2c_smbus_write_byte_data(client, reg_addr, reg_val);
}
static int anx7411_reg_block_write(struct i2c_client *client,
				   u8 reg_addr, u8 len, u8 *buf)
{
	return i2c_smbus_write_i2c_block_data(client, reg_addr, len, buf);
}
/* Candidate TCPC/SPI address pairs the chip may be strapped to. */
static struct anx7411_i2c_select anx7411_i2c_addr[] = {
	{TCPC_ADDRESS1, SPI_ADDRESS1},
	{TCPC_ADDRESS2, SPI_ADDRESS2},
	{TCPC_ADDRESS3, SPI_ADDRESS3},
	{TCPC_ADDRESS4, SPI_ADDRESS4},
};
/*
 * Refresh the negotiated power contract: read the requested current and
 * voltage from the chip, mark the power supply online, and derive the
 * Type-C power operation mode from the Rp advertisement on the connected
 * CC line (an Rp value of 0 is treated as a PD contract).
 */
static int anx7411_detect_power_mode(struct anx7411_data *ctx)
{
	int ret;
	int mode;
	ret = anx7411_reg_read(ctx->spi_client, REQUEST_CURRENT);
	if (ret < 0)
		return ret;
	ctx->typec.request_current = ret * CURRENT_UNIT; /* 50ma per unit */
	ret = anx7411_reg_read(ctx->spi_client, REQUEST_VOLTAGE);
	if (ret < 0)
		return ret;
	ctx->typec.request_voltage = ret * VOLTAGE_UNIT; /* 100mv per unit */
	if (ctx->psy_online == ANX7411_PSY_OFFLINE) {
		ctx->psy_online = ANX7411_PSY_FIXED_ONLINE;
		ctx->usb_type = POWER_SUPPLY_USB_TYPE_PD;
		power_supply_changed(ctx->psy);
	}
	/* Without a valid orientation the Rp fields cannot be trusted. */
	if (!ctx->typec.cc_orientation_valid)
		return 0;
	if (ctx->typec.cc_connect == CC1_CONNECTED)
		mode = CC1_RP(ctx->typec.cc_status);
	else
		mode = CC2_RP(ctx->typec.cc_status);
	if (mode) {
		/* Rp encoding is 1-based relative to typec_pwr_opmode. */
		typec_set_pwr_opmode(ctx->typec.port, mode - 1);
		return 0;
	}
	typec_set_pwr_opmode(ctx->typec.port, TYPEC_PWR_MODE_PD);
	return 0;
}
static int anx7411_register_partner(struct anx7411_data *ctx,
int pd, int accessory)
{
struct typec_partner_desc desc;
struct typec_partner *partner;
if (ctx->typec.partner)
return 0;
desc.usb_pd = pd;
desc.accessory = accessory;
desc.identity = NULL;
partner = typec_register_partner(ctx->typec.port, &desc);
if (IS_ERR(partner))
return PTR_ERR(partner);
ctx->typec.partner = partner;
return 0;
}
/*
 * Decode the CC status register: detect debug/audio accessories (both CC
 * lines pulled the same way), otherwise register a PD partner and set the
 * plug orientation from whichever CC line carries Rd or Rp.
 */
static int anx7411_detect_cc_orientation(struct anx7411_data *ctx)
{
	struct device *dev = &ctx->spi_client->dev;
	int ret;
	int cc1_rd, cc2_rd;
	int cc1_ra, cc2_ra;
	int cc1_rp, cc2_rp;
	ret = anx7411_reg_read(ctx->spi_client, CC_STATUS);
	if (ret < 0)
		return ret;
	ctx->typec.cc_status = ret;
	cc1_rd = ret & CC1_RD ? 1 : 0;
	cc2_rd = ret & CC2_RD ? 1 : 0;
	cc1_ra = ret & CC1_RA ? 1 : 0;
	cc2_ra = ret & CC2_RA ? 1 : 0;
	cc1_rp = CC1_RP(ret);
	cc2_rp = CC2_RP(ret);
	/* Debug cable, nothing to do */
	if (cc1_rd && cc2_rd) {
		ctx->typec.cc_orientation_valid = 0;
		return anx7411_register_partner(ctx, 0, TYPEC_ACCESSORY_DEBUG);
	}
	/* Both lines Ra: audio accessory. */
	if (cc1_ra && cc2_ra) {
		ctx->typec.cc_orientation_valid = 0;
		return anx7411_register_partner(ctx, 0, TYPEC_ACCESSORY_AUDIO);
	}
	ctx->typec.cc_orientation_valid = 1;
	ret = anx7411_register_partner(ctx, 1, TYPEC_ACCESSORY_NONE);
	if (ret) {
		dev_err(dev, "register partner\n");
		return ret;
	}
	if (cc1_rd || cc1_rp) {
		typec_set_orientation(ctx->typec.port, TYPEC_ORIENTATION_NORMAL);
		ctx->typec.cc_connect = CC1_CONNECTED;
	}
	if (cc2_rd || cc2_rp) {
		typec_set_orientation(ctx->typec.port, TYPEC_ORIENTATION_REVERSE);
		ctx->typec.cc_connect = CC2_CONNECTED;
	}
	return 0;
}
/*
 * Translate a DP pin assignment selected by the OCM into a Type-C mux
 * state and apply it to the port.
 */
static int anx7411_set_mux(struct anx7411_data *ctx, int pin_assignment)
{
	int mode = TYPEC_STATE_SAFE;
	switch (pin_assignment) {
	case SELECT_PIN_ASSIGMENT_U:
		/* default 4 line USB 3.1 */
		mode = TYPEC_STATE_MODAL;
		break;
	case SELECT_PIN_ASSIGMENT_C:
	case SELECT_PIN_ASSIGMENT_E:
		/* 4 line DP */
		mode = TYPEC_STATE_SAFE;
		break;
	case SELECT_PIN_ASSIGMENT_D:
		/* 2 line DP, 2 line USB */
		mode = TYPEC_MODE_USB3;
		break;
	default:
		mode = TYPEC_STATE_SAFE;
		break;
	}
	/* Remember the assignment for later altmode notifications. */
	ctx->typec.pin_assignment = pin_assignment;
	return typec_set_mode(ctx->typec.port, mode);
}
/* Forward the USB role to the role switch; a missing switch is not an error. */
static int anx7411_set_usb_role(struct anx7411_data *ctx, enum usb_role role)
{
	return ctx->typec.role_sw ?
	       usb_role_switch_set_role(ctx->typec.role_sw, role) : 0;
}
/*
 * Read the system status register and propagate the data and VCONN roles
 * to the Type-C class and the USB role switch.
 */
static int anx7411_data_role_detect(struct anx7411_data *ctx)
{
	int ret;
	ret = anx7411_reg_read(ctx->spi_client, SYSTEM_STSTUS);
	if (ret < 0)
		return ret;
	ctx->typec.data_role = (ret & DATA_ROLE) ? TYPEC_HOST : TYPEC_DEVICE;
	ctx->typec.vconn_role = (ret & VCONN_STATUS) ? TYPEC_SOURCE : TYPEC_SINK;
	typec_set_data_role(ctx->typec.port, ctx->typec.data_role);
	typec_set_vconn_role(ctx->typec.port, ctx->typec.vconn_role);
	if (ctx->typec.data_role == TYPEC_HOST)
		return anx7411_set_usb_role(ctx, USB_ROLE_HOST);
	return anx7411_set_usb_role(ctx, USB_ROLE_DEVICE);
}
/*
 * Read the system status register and propagate the power role. When
 * sourcing, reset the recorded contract to the 5V/1.5A defaults.
 */
static int anx7411_power_role_detect(struct anx7411_data *ctx)
{
	int ret;
	ret = anx7411_reg_read(ctx->spi_client, SYSTEM_STSTUS);
	if (ret < 0)
		return ret;
	ctx->typec.power_role = (ret & SINK_STATUS) ? TYPEC_SINK : TYPEC_SOURCE;
	if (ctx->typec.power_role == TYPEC_SOURCE) {
		ctx->typec.request_current = DEF_1_5A;
		ctx->typec.request_voltage = DEF_5V;
	}
	typec_set_pwr_role(ctx->typec.port, ctx->typec.power_role);
	return 0;
}
/*
 * CC status changed: refresh the orientation/partner first, then the
 * negotiated power mode. Both helpers log their own errors; always
 * returns 0.
 */
static int anx7411_cc_status_detect(struct anx7411_data *ctx)
{
	anx7411_detect_cc_orientation(ctx);
	anx7411_detect_power_mode(ctx);
	return 0;
}
/*
 * Unregister every registered partner altmode and clear all altmode
 * bookkeeping (enter flags and the remembered pin assignment).
 */
static void anx7411_partner_unregister_altmode(struct anx7411_data *ctx)
{
	int idx;

	ctx->typec.dp_altmode_enter = 0;
	ctx->typec.cust_altmode_enter = 0;

	for (idx = 0; idx < MAX_ALTMODE; idx++) {
		if (!ctx->typec.amode[idx])
			continue;
		typec_unregister_altmode(ctx->typec.amode[idx]);
		ctx->typec.amode[idx] = NULL;
	}

	ctx->typec.pin_assignment = 0;
}
/*
 * Register a partner altmode (@svid/@vdo) in the first free slot of the
 * amode[] array; mode numbers start at 1. Returns -ENOMEM when all
 * MAX_ALTMODE slots are occupied.
 */
static int anx7411_typec_register_altmode(struct anx7411_data *ctx,
					  int svid, int vdo)
{
	struct device *dev = &ctx->spi_client->dev;
	struct typec_altmode_desc desc;
	int err;
	int i;
	desc.svid = svid;
	desc.vdo = vdo;
	for (i = 0; i < MAX_ALTMODE; i++)
		if (!ctx->typec.amode[i])
			break;
	desc.mode = i + 1; /* start with 1 */
	if (i >= MAX_ALTMODE) {
		dev_err(dev, "no altmode space for registering\n");
		return -ENOMEM;
	}
	ctx->typec.amode[i] = typec_partner_register_altmode(ctx->typec.partner,
							     &desc);
	if (IS_ERR(ctx->typec.amode[i])) {
		dev_err(dev, "failed to register altmode\n");
		err = PTR_ERR(ctx->typec.amode[i]);
		ctx->typec.amode[i] = NULL;
		return err;
	}
	return 0;
}
/* Drop the registered partner, if any. */
static void anx7411_unregister_partner(struct anx7411_data *ctx)
{
	struct typec_partner *partner = ctx->typec.partner;

	if (!partner)
		return;

	typec_unregister_partner(partner);
	ctx->typec.partner = NULL;
}
/*
 * Mark the altmode with @svid active and notify it with the current pin
 * assignment. Also records which kind of altmode (DP vs custom) has been
 * entered.
 */
static int anx7411_update_altmode(struct anx7411_data *ctx, int svid)
{
	int i;
	if (svid == DP_SVID)
		ctx->typec.dp_altmode_enter = 1;
	else
		ctx->typec.cust_altmode_enter = 1;
	for (i = 0; i < MAX_ALTMODE; i++) {
		if (!ctx->typec.amode[i])
			continue;
		if (ctx->typec.amode[i]->svid == svid) {
			typec_altmode_update_active(ctx->typec.amode[i], true);
			typec_altmode_notify(ctx->typec.amode[i],
					     ctx->typec.pin_assignment,
					     &ctx->typec.data);
			break;
		}
	}
	return 0;
}
/*
 * Register (and possibly activate) a partner alternate mode from an OCM
 * message.
 *
 * @dp_altmode: true when @buf is a DP Discover Modes payload (buf[0..3]
 *              holds the little-endian mode VDO); false when @buf is a raw
 *              VDM (buf[0] is the command/ACK byte, buf[2..3] the SVID).
 *
 * For raw VDMs: an Enter Mode ACK only activates the already-registered
 * altmode; a Discover Modes ACK registers the mode (VDO in buf[4..7]) and,
 * if a custom altmode enter is pending, also activates it; anything else
 * is ignored. Does nothing when no partner is registered.
 */
static int anx7411_register_altmode(struct anx7411_data *ctx,
				    bool dp_altmode, u8 *buf)
{
	int ret;
	int svid;
	int mid;
	if (!ctx->typec.partner)
		return 0;
	svid = DP_SVID;
	if (dp_altmode) {
		mid = buf[0] | (buf[1] << 8) | (buf[2] << 16) | (buf[3] << 24);
		return anx7411_typec_register_altmode(ctx, svid, mid);
	}
	svid = (buf[3] << 8) | buf[2];
	/*
	 * Fix: an Enter Mode ACK must match with '==' (was '!='); the
	 * inverted test activated the altmode on every non-Enter-Mode VDM
	 * and let real Enter Mode ACKs fall through to the Discover Modes
	 * handling below.
	 */
	if ((buf[0] & VDM_CMD_AND_ACK_MASK) == (VDM_ACK | VDM_CMD_ENTER_MODE))
		return anx7411_update_altmode(ctx, svid);
	if ((buf[0] & VDM_CMD_AND_ACK_MASK) != (VDM_ACK | VDM_CMD_DIS_MOD))
		return 0;
	mid = buf[4] | (buf[5] << 8) | (buf[6] << 16) | (buf[7] << 24);
	ret = anx7411_typec_register_altmode(ctx, svid, mid);
	if (ctx->typec.cust_altmode_enter)
		ret |= anx7411_update_altmode(ctx, svid);
	return ret;
}
/*
 * Dispatch one firmware message (@type with @len payload bytes in @buf)
 * received from the on-chip microcontroller.  Always returns 0.
 */
static int anx7411_parse_cmd(struct anx7411_data *ctx, u8 type, u8 *buf, u8 len)
{
	struct device *dev = &ctx->spi_client->dev;
	u8 cur_50ma, vol_100mv;

	switch (type) {
	case TYPE_SRC_CAP:
		/* Negotiated contract is read back from firmware registers */
		cur_50ma = anx7411_reg_read(ctx->spi_client, REQUEST_CURRENT);
		vol_100mv = anx7411_reg_read(ctx->spi_client, REQUEST_VOLTAGE);

		ctx->typec.request_voltage = vol_100mv * VOLTAGE_UNIT;
		ctx->typec.request_current = cur_50ma * CURRENT_UNIT;

		ctx->psy_online = ANX7411_PSY_FIXED_ONLINE;
		ctx->usb_type = POWER_SUPPLY_USB_TYPE_PD;
		power_supply_changed(ctx->psy);
		break;
	/* The following message types carry nothing this driver acts on */
	case TYPE_SNK_CAP:
		break;
	case TYPE_SVID:
		break;
	case TYPE_SNK_IDENTITY:
		break;
	case TYPE_GET_DP_ALT_ENTER:
		/* DP alt mode enter success */
		if (buf[0])
			anx7411_update_altmode(ctx, DP_SVID);
		break;
	case TYPE_DP_ALT_ENTER:
		/* Update DP altmode */
		anx7411_update_altmode(ctx, DP_SVID);
		break;
	case TYPE_OBJ_REQ:
		anx7411_detect_power_mode(ctx);
		break;
	case TYPE_DP_CONFIGURE:
		anx7411_set_mux(ctx, buf[1]);
		break;
	case TYPE_DP_DISCOVER_MODES_INFO:
		/* Make sure discover modes valid */
		if (buf[0] | buf[1])
			/* Register DP Altmode */
			anx7411_register_altmode(ctx, 1, buf);
		break;
	case TYPE_VDM:
		/* Register other altmode */
		anx7411_register_altmode(ctx, 0, buf);
		break;
	default:
		dev_err(dev, "ignore message(0x%.02x).\n", type);
		break;
	}

	return 0;
}
/*
 * Sum of @len bytes of @buf, modulo 256 (u8 wrap-around).
 * @dev is accepted for signature compatibility but unused.
 */
static u8 checksum(struct device *dev, u8 *buf, u8 len)
{
	u8 sum = 0;

	while (len--)
		sum += *buf++;

	return sum;
}
/* Read the control/length byte of the firmware send mailbox. */
static int anx7411_read_msg_ctrl_status(struct i2c_client *client)
{
	return anx7411_reg_read(client, CMD_SEND_BUF);
}
/*
 * Poll until the firmware send mailbox reads back 0 (free) or the read
 * itself fails; polls every 2ms, gives up after 300ms.
 */
static int anx7411_wait_msg_empty(struct i2c_client *client)
{
	int val;

	return readx_poll_timeout(anx7411_read_msg_ctrl_status,
				  client, val, (val < 0) || (val == 0),
				  2000, 2000 * 150);
}
/*
 * Queue one message to the firmware mailbox.  Layout in CMD_SEND_BUF:
 * msg_len, msg_type, payload, then a 2's-complement checksum byte so the
 * whole message sums to zero.  The length byte is written last —
 * presumably that hands the buffer over to the firmware (TODO confirm).
 *
 * NOTE(review): @buf may be NULL when @size is 0 (callers do this);
 * memcpy() is then called with length 0.
 */
static int anx7411_send_msg(struct anx7411_data *ctx, u8 type, u8 *buf, u8 size)
{
	struct device *dev = &ctx->spi_client->dev;
	struct fw_msg *msg = &ctx->send_msg;
	u8 crc;
	int ret;

	size = min_t(u8, size, (u8)MAX_BUF_LEN);
	memcpy(msg->buf, buf, size);
	msg->msg_type = type;
	/* msg len equals buffer length + msg_type */
	msg->msg_len = size + 1;

	/* Do CRC check for all buffer data and msg_len and msg_type */
	crc = checksum(dev, (u8 *)msg, size + HEADER_LEN);
	msg->buf[size] = 0 - crc; /* makes the total sum to zero */

	ret = anx7411_wait_msg_empty(ctx->spi_client);
	if (ret)
		return ret;

	ret = anx7411_reg_block_write(ctx->spi_client,
				      CMD_SEND_BUF + 1, size + HEADER_LEN,
				      &msg->msg_type);
	ret |= anx7411_reg_write(ctx->spi_client, CMD_SEND_BUF,
				 msg->msg_len);
	return ret;
}
/*
 * Fetch one message from the firmware receive mailbox, verify its
 * checksum and hand it to anx7411_parse_cmd().
 *
 * Returns 0 when there is nothing to process (a failed read is treated
 * as power-standby, not an error), -ERANGE on checksum mismatch, or the
 * parse result.
 */
static int anx7411_process_cmd(struct anx7411_data *ctx)
{
	struct device *dev = &ctx->spi_client->dev;
	struct fw_msg *msg = &ctx->recv_msg;
	u8 len;
	u8 crc;
	int ret;

	/* Read message from firmware */
	ret = anx7411_reg_block_read(ctx->spi_client, CMD_RECV_BUF,
				     MSG_LEN, (u8 *)msg);
	if (ret < 0)
		return 0;

	if (!msg->msg_len)
		return 0;

	/* Writing 0 presumably releases the mailbox back to the firmware */
	ret = anx7411_reg_write(ctx->spi_client, CMD_RECV_BUF, 0);
	if (ret)
		return ret;

	len = msg->msg_len & MSG_LEN_MASK;

	/* All bytes including the trailing checksum byte must sum to zero */
	crc = checksum(dev, (u8 *)msg, len + HEADER_LEN);
	if (crc) {
		dev_err(dev, "message error crc(0x%.02x)\n", crc);
		return -ERANGE;
	}

	/* len counts msg_type + payload; the payload alone is len - 1 */
	return anx7411_parse_cmd(ctx, msg->msg_type, msg->buf, len - 1);
}
/*
 * Convert @nr host-endian PDOs from @pdo into little-endian @payload
 * words for the firmware.  @type names the capability set
 * ("source"/"sink") for diagnostics.
 */
static void anx7411_translate_payload(struct device *dev, __le32 *payload,
				      u32 *pdo, int nr, const char *type)
{
	int i;

	if (nr > PDO_MAX_OBJECTS) {
		/* Use the previously unused @type so the log says which set */
		dev_err(dev, "%s nr(%d) exceed PDO_MAX_OBJECTS(%d)\n",
			type, nr, PDO_MAX_OBJECTS);

		return;
	}

	for (i = 0; i < nr; i++)
		payload[i] = cpu_to_le32(pdo[i]);
}
/*
 * Push the driver's configuration into the freshly booted firmware:
 * PD revision, control flags, VBUS thresholds, then any source/sink
 * capabilities parsed from the device tree.  Finally caches the
 * firmware version.  Returns 0 on success or a register-write error.
 */
static int anx7411_config(struct anx7411_data *ctx)
{
	struct device *dev = &ctx->spi_client->dev;
	struct typec_params *typecp = &ctx->typec;
	__le32 payload[PDO_MAX_OBJECTS];
	int ret;

	/* Config PD FW work under PD 2.0 */
	ret = anx7411_reg_write(ctx->spi_client, PD_REV_INIT, PD_REV20);
	ret |= anx7411_reg_write(ctx->tcpc_client, FW_CTRL_0,
				 UNSTRUCT_VDM_EN | DELAY_200MS |
				 VSAFE1 | FRS_EN);
	ret |= anx7411_reg_write(ctx->spi_client, FW_CTRL_1,
				 AUTO_PD_EN | FORCE_SEND_RDO);

	/* Set VBUS current threshold */
	ret |= anx7411_reg_write(ctx->tcpc_client, VBUS_THRESHOLD_H, 0xff);
	ret |= anx7411_reg_write(ctx->tcpc_client, VBUS_THRESHOLD_L, 0x03);

	/* Fix dongle compatible issue */
	ret |= anx7411_reg_write(ctx->tcpc_client, FW_PARAM,
				 anx7411_reg_read(ctx->tcpc_client, FW_PARAM) |
				 DONGLE_IOP);
	/* Unmask all interrupts and enable PD extended messages */
	ret |= anx7411_reg_write(ctx->spi_client, INT_MASK, 0);
	ret |= anx7411_reg_write(ctx->spi_client, PD_EXT_MSG_CTRL, 0xFF);
	if (ret)
		return ret;

	if (typecp->caps_flags & HAS_SOURCE_CAP) {
		anx7411_translate_payload(dev, payload, typecp->src_pdo,
					  typecp->src_pdo_nr, "source");
		anx7411_send_msg(ctx, TYPE_SRC_CAP, (u8 *)&payload,
				 typecp->src_pdo_nr * 4);
		anx7411_send_msg(ctx, TYPE_SNK_IDENTITY, snk_identity,
				 sizeof(snk_identity));
		anx7411_send_msg(ctx, TYPE_SET_SNK_DP_CAP, dp_caps,
				 sizeof(dp_caps));
	}

	if (typecp->caps_flags & HAS_SINK_CAP) {
		anx7411_translate_payload(dev, payload, typecp->sink_pdo,
					  typecp->sink_pdo_nr, "sink");
		anx7411_send_msg(ctx, TYPE_SNK_CAP, (u8 *)&payload,
				 typecp->sink_pdo_nr * 4);
	}

	if (typecp->caps_flags & HAS_SINK_WATT) {
		if (typecp->sink_watt) {
			ret |= anx7411_reg_write(ctx->spi_client, MAX_POWER,
						 typecp->sink_watt);
			/* Set min power to 1W */
			ret |= anx7411_reg_write(ctx->spi_client, MIN_POWER, 2);
		}
		if (typecp->sink_voltage)
			ret |= anx7411_reg_write(ctx->spi_client, MAX_VOLTAGE,
						 typecp->sink_voltage);
		if (ret)
			return ret;
	}

	/* No DT capabilities: give the firmware a moment before reading back */
	if (!typecp->caps_flags)
		usleep_range(5000, 6000);

	ctx->fw_version = anx7411_reg_read(ctx->spi_client, FW_VER);
	ctx->fw_subversion = anx7411_reg_read(ctx->spi_client, FW_SUBVER);

	return 0;
}
/*
 * Put the chip into low-power standby: reset the OCM, restore Rd + DRP
 * toggling on both CC lines, then command the TCPC to look for a
 * connection and idle its I2C interface.
 */
static void anx7411_chip_standby(struct anx7411_data *ctx)
{
	int ret;
	u8 cc1, cc2;
	struct device *dev = &ctx->spi_client->dev;

	ret = anx7411_reg_write(ctx->spi_client, OCM_CTRL_0,
				anx7411_reg_read(ctx->spi_client, OCM_CTRL_0) |
				OCM_RESET);
	ret |= anx7411_reg_write(ctx->tcpc_client, ANALOG_CTRL_10, 0x80);

	/* Set TCPC to RD and DRP enable */
	cc1 = TCPC_ROLE_CTRL_CC_RD << TCPC_ROLE_CTRL_CC1_SHIFT;
	cc2 = TCPC_ROLE_CTRL_CC_RD << TCPC_ROLE_CTRL_CC2_SHIFT;
	ret |= anx7411_reg_write(ctx->tcpc_client, TCPC_ROLE_CTRL,
				 TCPC_ROLE_CTRL_DRP | cc1 | cc2);

	/* Send DRP toggle command */
	ret |= anx7411_reg_write(ctx->tcpc_client, TCPC_COMMAND,
				 TCPC_CMD_LOOK4CONNECTION);

	/* Send TCPC enter standby command */
	ret |= anx7411_reg_write(ctx->tcpc_client,
				 TCPC_COMMAND, TCPC_CMD_I2C_IDLE);
	if (ret)
		dev_err(dev, "Chip standby failed\n");
}
/*
 * Interrupt bottom half: read and clear the firmware interrupt and TCPC
 * alert registers, then react to power-off, OCM boot-up, received
 * mailbox messages and role/CC status changes.  Serialized against the
 * runtime-PM callbacks by ctx->lock.
 */
static void anx7411_work_func(struct work_struct *work)
{
	int ret;
	u8 buf[STATUS_LEN];
	u8 int_change; /* Interrupt change */
	u8 int_status; /* Firmware status update */
	u8 alert0, alert1; /* Interrupt alert source */
	struct anx7411_data *ctx = container_of(work, struct anx7411_data, work);
	struct device *dev = &ctx->spi_client->dev;

	mutex_lock(&ctx->lock);

	/* Read interrupt change status */
	ret = anx7411_reg_block_read(ctx->spi_client, INT_STS, STATUS_LEN, buf);
	if (ret < 0) {
		/* Power standby mode, just return */
		goto unlock;
	}
	int_change = buf[0];
	int_status = buf[1];

	/* Read alert register */
	ret = anx7411_reg_block_read(ctx->tcpc_client, ALERT_0, STATUS_LEN, buf);
	if (ret < 0)
		goto unlock;
	alert0 = buf[0];
	alert1 = buf[1];

	/* Clear interrupt and alert status */
	ret = anx7411_reg_write(ctx->spi_client, INT_STS, 0);
	ret |= anx7411_reg_write(ctx->tcpc_client, ALERT_0, alert0);
	ret |= anx7411_reg_write(ctx->tcpc_client, ALERT_1, alert1);
	if (ret)
		goto unlock;

	if (alert1 & INTP_POW_OFF) {
		/* Partner went away: tear down state and enter standby */
		anx7411_partner_unregister_altmode(ctx);
		if (anx7411_set_usb_role(ctx, USB_ROLE_NONE))
			dev_err(dev, "Set usb role\n");
		anx7411_unregister_partner(ctx);
		ctx->psy_online = ANX7411_PSY_OFFLINE;
		ctx->usb_type = POWER_SUPPLY_USB_TYPE_C;
		ctx->typec.request_voltage = 0;
		ctx->typec.request_current = 0;
		power_supply_changed(ctx->psy);
		anx7411_chip_standby(ctx);
		goto unlock;
	}

	if ((alert0 & SOFTWARE_INT) && (int_change & OCM_BOOT_UP)) {
		/* Firmware (re)booted: push configuration and sync roles */
		if (anx7411_config(ctx))
			dev_err(dev, "Config failed\n");
		if (anx7411_data_role_detect(ctx))
			dev_err(dev, "set PD data role\n");
		if (anx7411_power_role_detect(ctx))
			dev_err(dev, "set PD power role\n");
		anx7411_set_mux(ctx, SELECT_PIN_ASSIGMENT_C);
	}

	if (alert0 & RECEIVED_MSG)
		anx7411_process_cmd(ctx);

	/* Re-sync cached data role with the firmware-reported status */
	ret = (int_status & DATA_ROLE) ? TYPEC_HOST : TYPEC_DEVICE;
	if (ctx->typec.data_role != ret)
		if (anx7411_data_role_detect(ctx))
			dev_err(dev, "set PD data role\n");

	/* Re-sync cached power role likewise */
	ret = (int_status & SINK_STATUS) ? TYPEC_SINK : TYPEC_SOURCE;
	if (ctx->typec.power_role != ret)
		if (anx7411_power_role_detect(ctx))
			dev_err(dev, "set PD power role\n");

	if ((alert0 & SOFTWARE_INT) && (int_change & CC_STATUS_CHANGE))
		anx7411_cc_status_detect(ctx);

unlock:
	mutex_unlock(&ctx->lock);
}
/* Hard IRQ handler: defer all register access to the work queue. */
static irqreturn_t anx7411_intr_isr(int irq, void *data)
{
	struct anx7411_data *ctx = data;

	queue_work(ctx->workqueue, &ctx->work);

	return IRQ_HANDLED;
}
static int anx7411_register_i2c_dummy_clients(struct anx7411_data *ctx,
struct i2c_client *client)
{
int i;
u8 spi_addr;
for (i = 0; i < ARRAY_SIZE(anx7411_i2c_addr); i++) {
if (client->addr == (anx7411_i2c_addr[i].tcpc_address >> 1)) {
spi_addr = anx7411_i2c_addr[i].spi_address >> 1;
ctx->spi_client = i2c_new_dummy_device(client->adapter,
spi_addr);
if (!IS_ERR(ctx->spi_client))
return 0;
}
}
dev_err(&client->dev, "unable to get SPI slave\n");
return -ENOMEM;
}
/* Unregister and clear every port alternate mode in @adev. */
static void anx7411_port_unregister_altmodes(struct typec_altmode **adev)
{
	int idx;

	for (idx = 0; idx < MAX_ALTMODE; idx++) {
		if (!adev[idx])
			continue;

		typec_unregister_altmode(adev[idx]);
		adev[idx] = NULL;
	}
}
static int anx7411_usb_mux_set(struct typec_mux_dev *mux,
struct typec_mux_state *state)
{
struct anx7411_data *ctx = typec_mux_get_drvdata(mux);
struct device *dev = &ctx->spi_client->dev;
int has_dp;
has_dp = (state->alt && state->alt->svid == USB_TYPEC_DP_SID &&
state->alt->mode == USB_TYPEC_DP_MODE);
if (!has_dp)
dev_err(dev, "dp altmode not register\n");
return 0;
}
/* typec orientation-switch callback: intentionally a no-op. */
static int anx7411_usb_set_orientation(struct typec_switch_dev *sw,
				       enum typec_orientation orientation)
{
	/* No need set */
	return 0;
}
/*
 * Register the typec orientation switch described by @fwnode.
 * NOTE(review): on failure ctx->typec.typec_switch keeps the ERR_PTR;
 * anx7411_unregister_switch() only checks for NULL — confirm callers
 * never reach it in that state.
 */
static int anx7411_register_switch(struct anx7411_data *ctx,
				   struct device *dev,
				   struct fwnode_handle *fwnode)
{
	struct typec_switch_desc sw_desc = { };

	sw_desc.fwnode = fwnode;
	sw_desc.drvdata = ctx;
	sw_desc.name = fwnode_get_name(fwnode);
	sw_desc.set = anx7411_usb_set_orientation;

	ctx->typec.typec_switch = typec_switch_register(dev, &sw_desc);
	if (IS_ERR(ctx->typec.typec_switch)) {
		dev_err(dev, "switch register failed\n");
		return PTR_ERR(ctx->typec.typec_switch);
	}

	return 0;
}
/*
 * Register the typec mode-switch (mux) described by @fwnode.
 * NOTE(review): on failure ctx->typec.typec_mux keeps the ERR_PTR;
 * anx7411_unregister_mux() only checks for NULL — confirm the probe
 * error path cannot call it in that state.
 */
static int anx7411_register_mux(struct anx7411_data *ctx,
				struct device *dev,
				struct fwnode_handle *fwnode)
{
	struct typec_mux_desc mux_desc = { };

	mux_desc.fwnode = fwnode;
	mux_desc.drvdata = ctx;
	mux_desc.name = fwnode_get_name(fwnode);
	mux_desc.set = anx7411_usb_mux_set;

	ctx->typec.typec_mux = typec_mux_register(dev, &mux_desc);
	if (IS_ERR(ctx->typec.typec_mux)) {
		dev_err(dev, "mux register failed\n");
		return PTR_ERR(ctx->typec.typec_mux);
	}

	return 0;
}
/* Unregister the typec mux, if one was registered. */
static void anx7411_unregister_mux(struct anx7411_data *ctx)
{
	if (!ctx->typec.typec_mux)
		return;

	typec_mux_unregister(ctx->typec.typec_mux);
	ctx->typec.typec_mux = NULL;
}
/* Unregister the typec orientation switch, if one was registered. */
static void anx7411_unregister_switch(struct anx7411_data *ctx)
{
	if (!ctx->typec.typec_switch)
		return;

	typec_switch_unregister(ctx->typec.typec_switch);
	ctx->typec.typec_switch = NULL;
}
/*
 * Register the orientation switch and mode switch (mux) from the
 * "orientation_switch" / "mode_switch" OF child nodes.  Absence of the
 * orientation node disables both (returns 0).
 *
 * Fix: of_get_child_by_name() returns a node with an elevated refcount;
 * drop it with of_node_put() once the registration has consumed it.
 */
static int anx7411_typec_switch_probe(struct anx7411_data *ctx,
				      struct device *dev)
{
	int ret;
	struct device_node *node;

	node = of_get_child_by_name(dev->of_node, "orientation_switch");
	if (!node)
		return 0;

	ret = anx7411_register_switch(ctx, dev, &node->fwnode);
	of_node_put(node);
	if (ret) {
		dev_err(dev, "failed register switch");
		return ret;
	}

	node = of_get_child_by_name(dev->of_node, "mode_switch");
	if (!node) {
		dev_err(dev, "no typec mux exist");
		ret = -ENODEV;
		goto unregister_switch;
	}

	ret = anx7411_register_mux(ctx, dev, &node->fwnode);
	of_node_put(node);
	if (ret) {
		dev_err(dev, "failed register mode switch");
		ret = -ENODEV;
		goto unregister_switch;
	}

	return 0;

unregister_switch:
	anx7411_unregister_switch(ctx);

	return ret;
}
static int anx7411_typec_port_probe(struct anx7411_data *ctx,
struct device *dev)
{
struct typec_capability *cap = &ctx->typec.caps;
struct typec_params *typecp = &ctx->typec;
struct fwnode_handle *fwnode;
const char *buf;
int ret, i;
fwnode = device_get_named_child_node(dev, "connector");
if (!fwnode)
return -EINVAL;
ret = fwnode_property_read_string(fwnode, "power-role", &buf);
if (ret) {
dev_err(dev, "power-role not found: %d\n", ret);
return ret;
}
ret = typec_find_port_power_role(buf);
if (ret < 0)
return ret;
cap->type = ret;
ret = fwnode_property_read_string(fwnode, "data-role", &buf);
if (ret) {
dev_err(dev, "data-role not found: %d\n", ret);
return ret;
}
ret = typec_find_port_data_role(buf);
if (ret < 0)
return ret;
cap->data = ret;
ret = fwnode_property_read_string(fwnode, "try-power-role", &buf);
if (ret) {
dev_err(dev, "try-power-role not found: %d\n", ret);
return ret;
}
ret = typec_find_power_role(buf);
if (ret < 0)
return ret;
cap->prefer_role = ret;
/* Get source pdos */
ret = fwnode_property_count_u32(fwnode, "source-pdos");
if (ret > 0) {
typecp->src_pdo_nr = min_t(u8, ret, PDO_MAX_OBJECTS);
ret = fwnode_property_read_u32_array(fwnode, "source-pdos",
typecp->src_pdo,
typecp->src_pdo_nr);
if (ret < 0) {
dev_err(dev, "source cap validate failed: %d\n", ret);
return -EINVAL;
}
typecp->caps_flags |= HAS_SOURCE_CAP;
}
ret = fwnode_property_count_u32(fwnode, "sink-pdos");
if (ret > 0) {
typecp->sink_pdo_nr = min_t(u8, ret, PDO_MAX_OBJECTS);
ret = fwnode_property_read_u32_array(fwnode, "sink-pdos",
typecp->sink_pdo,
typecp->sink_pdo_nr);
if (ret < 0) {
dev_err(dev, "sink cap validate failed: %d\n", ret);
return -EINVAL;
}
for (i = 0; i < typecp->sink_pdo_nr; i++) {
ret = 0;
switch (pdo_type(typecp->sink_pdo[i])) {
case PDO_TYPE_FIXED:
ret = pdo_fixed_voltage(typecp->sink_pdo[i]);
break;
case PDO_TYPE_BATT:
case PDO_TYPE_VAR:
ret = pdo_max_voltage(typecp->sink_pdo[i]);
break;
case PDO_TYPE_APDO:
default:
ret = 0;
break;
}
/* 100mv per unit */
typecp->sink_voltage = max(5000, ret) / 100;
}
typecp->caps_flags |= HAS_SINK_CAP;
}
if (!fwnode_property_read_u32(fwnode, "op-sink-microwatt", &ret)) {
typecp->sink_watt = ret / 500000; /* 500mw per unit */
typecp->caps_flags |= HAS_SINK_WATT;
}
cap->fwnode = fwnode;
ctx->typec.role_sw = usb_role_switch_get(dev);
if (IS_ERR(ctx->typec.role_sw)) {
dev_err(dev, "USB role switch not found.\n");
ctx->typec.role_sw = NULL;
}
ctx->typec.port = typec_register_port(dev, cap);
if (IS_ERR(ctx->typec.port)) {
ret = PTR_ERR(ctx->typec.port);
ctx->typec.port = NULL;
dev_err(dev, "Failed to register type c port %d\n", ret);
return ret;
}
typec_port_register_altmodes(ctx->typec.port, NULL, ctx,
ctx->typec.port_amode,
MAX_ALTMODE);
return 0;
}
/*
 * Probe/resume helper: if the firmware responds (something is attached),
 * clear stale interrupt/alert status, re-sync CC, role and mux state,
 * and query the DP alternate-mode status.  Returns 0 when nothing is
 * attached (the FW_VER read fails) or on full success.
 */
static int anx7411_typec_check_connection(struct anx7411_data *ctx)
{
	int ret;

	ret = anx7411_reg_read(ctx->spi_client, FW_VER);
	if (ret < 0)
		return 0; /* No device attached in typec port */

	/* Clear interrupt and alert status */
	ret = anx7411_reg_write(ctx->spi_client, INT_STS, 0);
	ret |= anx7411_reg_write(ctx->tcpc_client, ALERT_0, 0xFF);
	ret |= anx7411_reg_write(ctx->tcpc_client, ALERT_1, 0xFF);
	if (ret)
		return ret;

	ret = anx7411_cc_status_detect(ctx);
	ret |= anx7411_power_role_detect(ctx);
	ret |= anx7411_data_role_detect(ctx);
	ret |= anx7411_set_mux(ctx, SELECT_PIN_ASSIGMENT_C);
	if (ret)
		return ret;

	ret = anx7411_send_msg(ctx, TYPE_GET_DP_ALT_ENTER, NULL, 0);
	ret |= anx7411_send_msg(ctx, TYPE_GET_DP_DISCOVER_MODES_INFO, NULL, 0);

	return ret;
}
/* Runtime suspend: forget the partner; it is re-detected on resume. */
static int __maybe_unused anx7411_runtime_pm_suspend(struct device *dev)
{
	struct anx7411_data *ctx = dev_get_drvdata(dev);

	mutex_lock(&ctx->lock);

	anx7411_partner_unregister_altmode(ctx);

	/* anx7411_unregister_partner() is a no-op when no partner exists */
	anx7411_unregister_partner(ctx);

	mutex_unlock(&ctx->lock);

	return 0;
}
/* Runtime resume: re-detect a possibly still-connected PD partner. */
static int __maybe_unused anx7411_runtime_pm_resume(struct device *dev)
{
	struct anx7411_data *ctx = dev_get_drvdata(dev);

	mutex_lock(&ctx->lock);
	/* Detect PD connection */
	if (anx7411_typec_check_connection(ctx))
		dev_err(dev, "check connection");
	mutex_unlock(&ctx->lock);

	return 0;
}
/*
 * System sleep is implemented by forcing the runtime-PM callbacks,
 * which tear down and re-detect the partner connection.
 */
static const struct dev_pm_ops anx7411_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
				pm_runtime_force_resume)
	SET_RUNTIME_PM_OPS(anx7411_runtime_pm_suspend,
			   anx7411_runtime_pm_resume, NULL)
};
/*
 * Fallback IRQ discovery: map the "interrupt" GPIO to an IRQ number.
 * Failures are only logged; ctx->intp_irq stays 0 (or negative).
 * NOTE(review): a gpiod_to_irq() error leaves intp_irq negative, which
 * passes the "!plat->intp_irq" check in probe — confirm intended.
 */
static void anx7411_get_gpio_irq(struct anx7411_data *ctx)
{
	struct device *dev = &ctx->tcpc_client->dev;

	ctx->intp_gpiod = devm_gpiod_get_optional(dev, "interrupt", GPIOD_IN);
	if (IS_ERR_OR_NULL(ctx->intp_gpiod)) {
		dev_err(dev, "no interrupt gpio property\n");
		return;
	}

	ctx->intp_irq = gpiod_to_irq(ctx->intp_gpiod);
	if (ctx->intp_irq < 0)
		dev_err(dev, "failed to get GPIO IRQ\n");
}
/* USB types this supply can report (see anx7411_psy_get_prop()) */
static enum power_supply_usb_type anx7411_psy_usb_types[] = {
	POWER_SUPPLY_USB_TYPE_C,
	POWER_SUPPLY_USB_TYPE_PD,
	POWER_SUPPLY_USB_TYPE_PD_PPS,
};

/* Properties exposed by the port power supply */
static enum power_supply_property anx7411_psy_props[] = {
	POWER_SUPPLY_PROP_USB_TYPE,
	POWER_SUPPLY_PROP_ONLINE,
	POWER_SUPPLY_PROP_VOLTAGE_MIN,
	POWER_SUPPLY_PROP_VOLTAGE_MAX,
	POWER_SUPPLY_PROP_VOLTAGE_NOW,
	POWER_SUPPLY_PROP_CURRENT_MAX,
	POWER_SUPPLY_PROP_CURRENT_NOW,
};
/*
 * Write callback for the power supply: only "online" is settable.
 * A change notification is sent even when the write is rejected,
 * mirroring the original behavior.
 */
static int anx7411_psy_set_prop(struct power_supply *psy,
				enum power_supply_property psp,
				const union power_supply_propval *val)
{
	struct anx7411_data *ctx = power_supply_get_drvdata(psy);
	int ret;

	if (psp != POWER_SUPPLY_PROP_ONLINE) {
		ret = -EINVAL;
	} else {
		ctx->psy_online = val->intval;
		ret = 0;
	}

	power_supply_changed(ctx->psy);

	return ret;
}
/* Only the "online" property may be written from user space. */
static int anx7411_psy_prop_writeable(struct power_supply *psy,
				      enum power_supply_property psp)
{
	return psp == POWER_SUPPLY_PROP_ONLINE;
}
/*
 * Report supply state.  Voltage/current mirror the negotiated contract
 * (cached in ctx->typec, presumably in mV/mA — hence the *1000 to
 * uV/uA; TODO confirm units) and read as 0 while offline.
 */
static int anx7411_psy_get_prop(struct power_supply *psy,
				enum power_supply_property psp,
				union power_supply_propval *val)
{
	struct anx7411_data *ctx = power_supply_get_drvdata(psy);
	int ret = 0;

	switch (psp) {
	case POWER_SUPPLY_PROP_USB_TYPE:
		val->intval = ctx->usb_type;
		break;
	case POWER_SUPPLY_PROP_ONLINE:
		val->intval = ctx->psy_online;
		break;
	case POWER_SUPPLY_PROP_VOLTAGE_NOW:
	case POWER_SUPPLY_PROP_VOLTAGE_MIN:
	case POWER_SUPPLY_PROP_VOLTAGE_MAX:
		val->intval = (ctx->psy_online) ?
			ctx->typec.request_voltage * 1000 : 0;
		break;
	case POWER_SUPPLY_PROP_CURRENT_NOW:
	case POWER_SUPPLY_PROP_CURRENT_MAX:
		val->intval = (ctx->psy_online) ?
			ctx->typec.request_current * 1000 : 0;
		break;
	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}
/*
 * Register the per-port power supply ("anx7411-source-psy-<dev>").
 * Returns 0 on success or the registration error.
 */
static int anx7411_psy_register(struct anx7411_data *ctx)
{
	struct power_supply_desc *psy_desc = &ctx->psy_desc;
	struct power_supply_config psy_cfg = {};
	char *psy_name;

	psy_name = devm_kasprintf(ctx->dev, GFP_KERNEL, "anx7411-source-psy-%s",
				  dev_name(ctx->dev));
	if (!psy_name)
		return -ENOMEM;

	psy_desc->name = psy_name;
	psy_desc->type = POWER_SUPPLY_TYPE_USB;
	psy_desc->usb_types = anx7411_psy_usb_types;
	psy_desc->num_usb_types = ARRAY_SIZE(anx7411_psy_usb_types);
	psy_desc->properties = anx7411_psy_props;
	psy_desc->num_properties = ARRAY_SIZE(anx7411_psy_props);

	psy_desc->get_property = anx7411_psy_get_prop;
	psy_desc->set_property = anx7411_psy_set_prop;
	psy_desc->property_is_writeable = anx7411_psy_prop_writeable;

	/* Until a PD contract exists, report plain Type-C */
	ctx->usb_type = POWER_SUPPLY_USB_TYPE_C;
	ctx->psy = devm_power_supply_register(ctx->dev, psy_desc, &psy_cfg);

	if (IS_ERR(ctx->psy))
		dev_warn(ctx->dev, "unable to register psy\n");

	return PTR_ERR_OR_ZERO(ctx->psy);
}
/*
 * Probe: reserve the paired register I2C address, register typec
 * switch/mux, the typec port and the power supply, set up the
 * interrupt work, then check for an already-attached partner.
 */
static int anx7411_i2c_probe(struct i2c_client *client)
{
	struct anx7411_data *plat;
	struct device *dev = &client->dev;
	int ret;

	if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_I2C_BLOCK))
		return -ENODEV;

	plat = devm_kzalloc(dev, sizeof(*plat), GFP_KERNEL);
	if (!plat)
		return -ENOMEM;

	plat->tcpc_client = client;
	i2c_set_clientdata(client, plat);

	mutex_init(&plat->lock);

	ret = anx7411_register_i2c_dummy_clients(plat, client);
	if (ret) {
		dev_err(dev, "fail to reserve I2C bus\n");
		return ret;
	}

	ret = anx7411_typec_switch_probe(plat, dev);
	if (ret) {
		dev_err(dev, "fail to probe typec switch\n");
		goto free_i2c_dummy;
	}

	ret = anx7411_typec_port_probe(plat, dev);
	if (ret) {
		dev_err(dev, "fail to probe typec property.\n");
		ret = -ENODEV;
		goto free_typec_switch;
	}

	/* Prefer the client IRQ; fall back to an "interrupt" GPIO */
	plat->intp_irq = client->irq;
	if (!client->irq)
		anx7411_get_gpio_irq(plat);

	if (!plat->intp_irq) {
		dev_err(dev, "fail to get interrupt IRQ\n");
		ret = -EINVAL;
		goto free_typec_port;
	}

	plat->dev = dev;
	plat->psy_online = ANX7411_PSY_OFFLINE;
	ret = anx7411_psy_register(plat);
	if (ret) {
		dev_err(dev, "register psy\n");
		goto free_typec_port;
	}

	INIT_WORK(&plat->work, anx7411_work_func);
	plat->workqueue = alloc_workqueue("anx7411_work",
					 WQ_FREEZABLE |
					 WQ_MEM_RECLAIM,
					 1);
	if (!plat->workqueue) {
		dev_err(dev, "fail to create work queue\n");
		ret = -ENOMEM;
		goto free_typec_port;
	}

	ret = devm_request_threaded_irq(dev, plat->intp_irq,
					NULL, anx7411_intr_isr,
					IRQF_TRIGGER_FALLING |
					IRQF_ONESHOT,
					"anx7411-intp", plat);
	if (ret) {
		dev_err(dev, "fail to request irq\n");
		goto free_wq;
	}

	/* Handle a partner already attached at boot */
	if (anx7411_typec_check_connection(plat))
		dev_err(dev, "check status\n");

	pm_runtime_enable(dev);

	return 0;

free_wq:
	destroy_workqueue(plat->workqueue);

free_typec_port:
	typec_unregister_port(plat->typec.port);
	anx7411_port_unregister_altmodes(plat->typec.port_amode);

free_typec_switch:
	anx7411_unregister_switch(plat);
	anx7411_unregister_mux(plat);

free_i2c_dummy:
	i2c_unregister_device(plat->spi_client);

	return ret;
}
/*
 * Remove: tear down everything probe set up.
 * NOTE(review): pm_runtime_enable() from probe is not balanced by a
 * pm_runtime_disable() here, and the work item relies on
 * destroy_workqueue() draining it — confirm both are intended.
 */
static void anx7411_i2c_remove(struct i2c_client *client)
{
	struct anx7411_data *plat = i2c_get_clientdata(client);

	anx7411_partner_unregister_altmode(plat);
	anx7411_unregister_partner(plat);

	if (plat->workqueue)
		destroy_workqueue(plat->workqueue);

	if (plat->spi_client)
		i2c_unregister_device(plat->spi_client);

	if (plat->typec.role_sw)
		usb_role_switch_put(plat->typec.role_sw);

	anx7411_unregister_mux(plat);
	anx7411_unregister_switch(plat);

	if (plat->typec.port)
		typec_unregister_port(plat->typec.port);

	anx7411_port_unregister_altmodes(plat->typec.port_amode);
}
/* I2C ID table (also drives module autoloading via the device table) */
static const struct i2c_device_id anx7411_id[] = {
	{"anx7411", 0},
	{}
};

MODULE_DEVICE_TABLE(i2c, anx7411_id);

/*
 * NOTE(review): there is no MODULE_DEVICE_TABLE(of, anx_match_table),
 * so OF-based module autoloading will not work — confirm intended.
 */
static const struct of_device_id anx_match_table[] = {
	{.compatible = "analogix,anx7411",},
	{},
};

static struct i2c_driver anx7411_driver = {
	.driver = {
		.name = "anx7411",
		.of_match_table = anx_match_table,
		.pm = &anx7411_pm_ops,
	},
	.probe = anx7411_i2c_probe,
	.remove = anx7411_i2c_remove,

	.id_table = anx7411_id,
};

module_i2c_driver(anx7411_driver);

MODULE_DESCRIPTION("Anx7411 USB Type-C PD driver");
MODULE_AUTHOR("Xin Ji <[email protected]>");
MODULE_LICENSE("GPL");
MODULE_VERSION("0.1.5");
| linux-master | drivers/usb/typec/anx7411.c |
// SPDX-License-Identifier: GPL-2.0
/*
* USB Type-C Connector Class
*
* Copyright (C) 2017, Intel Corporation
* Author: Heikki Krogerus <[email protected]>
*/
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/property.h>
#include <linux/slab.h>
#include <linux/usb/pd_vdo.h>
#include <linux/usb/typec_mux.h>
#include <linux/usb/typec_retimer.h>
#include "bus.h"
#include "class.h"
#include "pd.h"
/* ID allocator for typec ports (consumers are outside this chunk) */
static DEFINE_IDA(typec_index_ida);

/* The "typec" device class, exposed under /sys/class/typec */
struct class typec_class = {
	.name = "typec",
};

/* ------------------------------------------------------------------------- */
/* Common attributes */

static const char * const typec_accessory_modes[] = {
	[TYPEC_ACCESSORY_NONE]		= "none",
	[TYPEC_ACCESSORY_AUDIO]		= "analog_audio",
	[TYPEC_ACCESSORY_DEBUG]		= "debug",
};

/* Product types defined in USB PD Specification R3.0 V2.0 */
/* Arrays are sized 8 — presumably covering a 3-bit product-type field
 * (TODO confirm); unset entries read back as NULL. */
static const char * const product_type_ufp[8] = {
	[IDH_PTYPE_NOT_UFP]		= "not_ufp",
	[IDH_PTYPE_HUB]			= "hub",
	[IDH_PTYPE_PERIPH]		= "peripheral",
	[IDH_PTYPE_PSD]			= "psd",
	[IDH_PTYPE_AMA]			= "ama",
};

static const char * const product_type_dfp[8] = {
	[IDH_PTYPE_NOT_DFP]		= "not_dfp",
	[IDH_PTYPE_DFP_HUB]		= "hub",
	[IDH_PTYPE_DFP_HOST]		= "host",
	[IDH_PTYPE_DFP_PB]		= "power_brick",
};

static const char * const product_type_cable[8] = {
	[IDH_PTYPE_NOT_CABLE]		= "not_cable",
	[IDH_PTYPE_PCABLE]		= "passive",
	[IDH_PTYPE_ACABLE]		= "active",
	[IDH_PTYPE_VPD]			= "vpd",
};
static struct usb_pd_identity *get_pd_identity(struct device *dev)
{
if (is_typec_partner(dev)) {
struct typec_partner *partner = to_typec_partner(dev);
return partner->identity;
} else if (is_typec_cable(dev)) {
struct typec_cable *cable = to_typec_cable(dev);
return cable->identity;
}
return NULL;
}
/*
 * Map a partner/cable to its USB PD product-type string.  For partners
 * the ID header interpretation depends on the local data role (host
 * sees the UFP table, device the DFP table).  Cables without a
 * discovered identity fall back to their active/passive flag.
 * Returns NULL when no type can be determined.
 */
static const char *get_pd_product_type(struct device *dev)
{
	struct typec_port *port = to_typec_port(dev->parent);
	struct usb_pd_identity *id = get_pd_identity(dev);
	const char *ptype = NULL;

	if (is_typec_partner(dev)) {
		if (!id)
			return NULL;

		if (port->data_role == TYPEC_HOST)
			ptype = product_type_ufp[PD_IDH_PTYPE(id->id_header)];
		else
			ptype = product_type_dfp[PD_IDH_DFP_PTYPE(id->id_header)];
	} else if (is_typec_cable(dev)) {
		if (id)
			ptype = product_type_cable[PD_IDH_PTYPE(id->id_header)];
		else
			ptype = to_typec_cable(dev)->active ?
				product_type_cable[IDH_PTYPE_ACABLE] :
				product_type_cable[IDH_PTYPE_PCABLE];
	}

	return ptype;
}
/* "identity/id_header": raw PD ID header VDO in hex. */
static ssize_t id_header_show(struct device *dev, struct device_attribute *attr,
			      char *buf)
{
	struct usb_pd_identity *id = get_pd_identity(dev);

	/* sysfs_emit() instead of sprintf(), per sysfs conventions */
	return sysfs_emit(buf, "0x%08x\n", id->id_header);
}
static DEVICE_ATTR_RO(id_header);
/* "identity/cert_stat": raw Cert Stat VDO in hex. */
static ssize_t cert_stat_show(struct device *dev, struct device_attribute *attr,
			      char *buf)
{
	struct usb_pd_identity *id = get_pd_identity(dev);

	/* sysfs_emit() instead of sprintf(), per sysfs conventions */
	return sysfs_emit(buf, "0x%08x\n", id->cert_stat);
}
static DEVICE_ATTR_RO(cert_stat);
/* "identity/product": raw Product VDO in hex. */
static ssize_t product_show(struct device *dev, struct device_attribute *attr,
			    char *buf)
{
	struct usb_pd_identity *id = get_pd_identity(dev);

	/* sysfs_emit() instead of sprintf(), per sysfs conventions */
	return sysfs_emit(buf, "0x%08x\n", id->product);
}
static DEVICE_ATTR_RO(product);
/* "identity/product_type_vdo1": first product-type VDO in hex. */
static ssize_t product_type_vdo1_show(struct device *dev, struct device_attribute *attr,
				      char *buf)
{
	struct usb_pd_identity *id = get_pd_identity(dev);

	return sysfs_emit(buf, "0x%08x\n", id->vdo[0]);
}
static DEVICE_ATTR_RO(product_type_vdo1);
/* "identity/product_type_vdo2": second product-type VDO in hex. */
static ssize_t product_type_vdo2_show(struct device *dev, struct device_attribute *attr,
				      char *buf)
{
	struct usb_pd_identity *id = get_pd_identity(dev);

	return sysfs_emit(buf, "0x%08x\n", id->vdo[1]);
}
static DEVICE_ATTR_RO(product_type_vdo2);
/* "identity/product_type_vdo3": third product-type VDO in hex. */
static ssize_t product_type_vdo3_show(struct device *dev, struct device_attribute *attr,
				      char *buf)
{
	struct usb_pd_identity *id = get_pd_identity(dev);

	return sysfs_emit(buf, "0x%08x\n", id->vdo[2]);
}
static DEVICE_ATTR_RO(product_type_vdo3);
/* Attributes collected into the "identity" sysfs group */
static struct attribute *usb_pd_id_attrs[] = {
	&dev_attr_id_header.attr,
	&dev_attr_cert_stat.attr,
	&dev_attr_product.attr,
	&dev_attr_product_type_vdo1.attr,
	&dev_attr_product_type_vdo2.attr,
	&dev_attr_product_type_vdo3.attr,
	NULL
};

static const struct attribute_group usb_pd_id_group = {
	.name = "identity",
	.attrs = usb_pd_id_attrs,
};

static const struct attribute_group *usb_pd_id_groups[] = {
	&usb_pd_id_group,
	NULL,
};
/*
 * Notify user space that the product type is (newly) known: poke the
 * "type" attribute and send a KOBJ_CHANGE uevent carrying
 * PRODUCT_TYPE=<type>.  Silently does nothing if the type is unknown
 * or the env string allocation fails.
 */
static void typec_product_type_notify(struct device *dev)
{
	char *envp[2] = { };
	const char *ptype;

	ptype = get_pd_product_type(dev);
	if (!ptype)
		return;

	sysfs_notify(&dev->kobj, NULL, "type");

	envp[0] = kasprintf(GFP_KERNEL, "PRODUCT_TYPE=%s", ptype);
	if (!envp[0])
		return;

	kobject_uevent_env(&dev->kobj, KOBJ_CHANGE, envp);
	kfree(envp[0]);
}
/*
 * Poke every attribute in the "identity" group after a discover
 * identity completes, then notify about the derived product type.
 */
static void typec_report_identity(struct device *dev)
{
	static const char * const attrs[] = {
		"id_header", "cert_stat", "product",
		"product_type_vdo1", "product_type_vdo2", "product_type_vdo3",
	};
	int i;

	for (i = 0; i < ARRAY_SIZE(attrs); i++)
		sysfs_notify(&dev->kobj, "identity", attrs[i]);

	typec_product_type_notify(dev);
}
/* "type" attribute: PD product type string; reads empty when unknown. */
static ssize_t
type_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	const char *ptype;

	ptype = get_pd_product_type(dev);
	if (!ptype)
		return 0;

	return sysfs_emit(buf, "%s\n", ptype);
}
static DEVICE_ATTR_RO(type);
/*
 * Forward declaration: the attribute is shared by several device types;
 * the show() body is defined elsewhere in this file.
 */
static ssize_t usb_power_delivery_revision_show(struct device *dev,
						struct device_attribute *attr,
						char *buf);
static DEVICE_ATTR_RO(usb_power_delivery_revision);
/* ------------------------------------------------------------------------- */
/* Alternate Modes */
/*
 * device_find_child() match callback: true when @dev is an altmode with
 * the SVID and mode index given in @data.
 */
static int altmode_match(struct device *dev, void *data)
{
	struct typec_device_id *id = data;
	struct typec_altmode *adev;

	if (!is_typec_altmode(dev))
		return 0;

	adev = to_typec_altmode(dev);

	return adev->svid == id->svid && adev->mode == id->mode;
}
/*
 * Bind a partner/plug alternate mode to the port alternate mode with
 * the same SVID and mode index, in both directions.  The device
 * reference taken by device_find_child() is kept while the binding
 * exists and released in typec_altmode_put_partner().
 */
static void typec_altmode_set_partner(struct altmode *altmode)
{
	struct typec_altmode *adev = &altmode->adev;
	struct typec_device_id id = { adev->svid, adev->mode, };
	struct typec_port *port = typec_altmode2port(adev);
	struct altmode *partner;
	struct device *dev;

	dev = device_find_child(&port->dev, &id, altmode_match);
	if (!dev)
		return;

	/* Bind the port alt mode to the partner/plug alt mode. */
	partner = to_altmode(to_typec_altmode(dev));
	altmode->partner = partner;

	/* Bind the partner/plug alt mode to the port alt mode. */
	if (is_typec_plug(adev->dev.parent)) {
		struct typec_plug *plug = to_typec_plug(adev->dev.parent);

		partner->plug[plug->index] = altmode;
	} else {
		partner->partner = altmode;
	}
}
/*
 * Undo typec_altmode_set_partner(): clear the back-pointer in the bound
 * port alternate mode and drop the device reference taken there.
 *
 * NOTE(review): the plug/partner distinction here inspects the parent
 * of the bound *port* altmode (partner->adev), not of the altmode being
 * torn down — upstream reworked this area to fix plug altmode release;
 * confirm this variant is the intended one.
 */
static void typec_altmode_put_partner(struct altmode *altmode)
{
	struct altmode *partner = altmode->partner;
	struct typec_altmode *adev;

	if (!partner)
		return;

	adev = &partner->adev;

	if (is_typec_plug(adev->dev.parent)) {
		struct typec_plug *plug = to_typec_plug(adev->dev.parent);

		partner->plug[plug->index] = NULL;
	} else {
		partner->partner = NULL;
	}

	put_device(&adev->dev);
}
/**
 * typec_altmode_update_active - Report Enter/Exit mode
 * @adev: Handle to the alternate mode
 * @active: True when the mode has been entered
 *
 * If a partner or cable plug executes Enter/Exit Mode command successfully, the
 * drivers use this routine to report the updated state of the mode.
 */
void typec_altmode_update_active(struct typec_altmode *adev, bool active)
{
	char dir[6]; /* "modeN" — NOTE(review): truncates for mode > 9 */

	if (adev->active == active)
		return;

	/*
	 * Pin the altmode driver module while a partner/plug mode is
	 * active so it cannot be unloaded mid-mode.
	 */
	if (!is_typec_port(adev->dev.parent) && adev->dev.driver) {
		if (!active)
			module_put(adev->dev.driver->owner);
		else
			WARN_ON(!try_module_get(adev->dev.driver->owner));
	}

	adev->active = active;
	snprintf(dir, sizeof(dir), "mode%d", adev->mode);
	/* Notify both the per-mode group and the device-level attribute */
	sysfs_notify(&adev->dev.kobj, dir, "active");
	sysfs_notify(&adev->dev.kobj, NULL, "active");
	kobject_uevent(&adev->dev.kobj, KOBJ_CHANGE);
}
EXPORT_SYMBOL_GPL(typec_altmode_update_active);
/**
* typec_altmode2port - Alternate Mode to USB Type-C port
* @alt: The Alternate Mode
*
* Returns handle to the port that a cable plug or partner with @alt is
* connected to.
*/
struct typec_port *typec_altmode2port(struct typec_altmode *alt)
{
if (is_typec_plug(alt->dev.parent))
return to_typec_port(alt->dev.parent->parent->parent);
if (is_typec_partner(alt->dev.parent))
return to_typec_port(alt->dev.parent->parent);
if (is_typec_port(alt->dev.parent))
return to_typec_port(alt->dev.parent);
return NULL;
}
EXPORT_SYMBOL_GPL(typec_altmode2port);
/* "vdo" attribute: raw mode VDO in hex. */
static ssize_t
vdo_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct typec_altmode *alt = to_typec_altmode(dev);

	/* sysfs_emit() instead of sprintf(), per sysfs conventions */
	return sysfs_emit(buf, "0x%08x\n", alt->vdo);
}
static DEVICE_ATTR_RO(vdo);
/* "description" attribute: optional human-readable name, empty if unset. */
static ssize_t
description_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct typec_altmode *alt = to_typec_altmode(dev);

	/* sysfs_emit() instead of sprintf(), per sysfs conventions */
	return sysfs_emit(buf, "%s\n", alt->desc ? alt->desc : "");
}
static DEVICE_ATTR_RO(description);
/* "active" attribute: "yes"/"no" — whether the mode is entered. */
static ssize_t
active_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct typec_altmode *alt = to_typec_altmode(dev);

	/* sysfs_emit() instead of sprintf(), per sysfs conventions */
	return sysfs_emit(buf, "%s\n", alt->active ? "yes" : "no");
}
/*
 * Enter/exit the mode from sysfs.  For port altmodes this also forces a
 * still-active partner mode out first; for partner altmodes entry is
 * refused while the port has the mode disabled.  The actual transition
 * is delegated to the altmode driver's activate() callback, if any.
 */
static ssize_t active_store(struct device *dev, struct device_attribute *attr,
			    const char *buf, size_t size)
{
	struct typec_altmode *adev = to_typec_altmode(dev);
	struct altmode *altmode = to_altmode(adev);
	bool enter;
	int ret;

	ret = kstrtobool(buf, &enter);
	if (ret)
		return ret;

	/* Already in the requested state: nothing to do */
	if (adev->active == enter)
		return size;

	if (is_typec_port(adev->dev.parent)) {
		typec_altmode_update_active(adev, enter);

		/* Make sure that the partner exits the mode before disabling */
		if (altmode->partner && !enter && altmode->partner->adev.active)
			typec_altmode_exit(&altmode->partner->adev);
	} else if (altmode->partner) {
		if (enter && !altmode->partner->adev.active) {
			dev_warn(dev, "port has the mode disabled\n");
			return -EPERM;
		}
	}

	/* Note: If there is no driver, the mode will not be entered */
	if (adev->ops && adev->ops->activate) {
		ret = adev->ops->activate(adev, enter);
		if (ret)
			return ret;
	}

	return size;
}
static DEVICE_ATTR_RW(active);
/* sysfs: power roles (source/sink) in which a port alt mode is supported. */
static ssize_t
supported_roles_show(struct device *dev, struct device_attribute *attr,
		     char *buf)
{
	struct altmode *alt = to_altmode(to_typec_altmode(dev));
	ssize_t ret;

	switch (alt->roles) {
	case TYPEC_PORT_SRC:
		ret = sysfs_emit(buf, "source\n");
		break;
	case TYPEC_PORT_SNK:
		ret = sysfs_emit(buf, "sink\n");
		break;
	case TYPEC_PORT_DRP:
	default:
		/* Dual-role (and anything unexpected) supports both. */
		ret = sysfs_emit(buf, "source sink\n");
		break;
	}

	return ret;
}
static DEVICE_ATTR_RO(supported_roles);
/* sysfs: index of the mode within its SVID (Discover Modes ordering). */
static ssize_t
mode_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct typec_altmode *adev = to_typec_altmode(dev);

	/* sysfs_emit() is PAGE_SIZE-bounded, preferred over sprintf() here. */
	return sysfs_emit(buf, "%u\n", adev->mode);
}
static DEVICE_ATTR_RO(mode);
/* sysfs: Standard or Vendor ID of the mode, 4 hex digits. */
static ssize_t
svid_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct typec_altmode *adev = to_typec_altmode(dev);

	/* sysfs_emit() is PAGE_SIZE-bounded, preferred over sprintf() here. */
	return sysfs_emit(buf, "%04x\n", adev->svid);
}
static DEVICE_ATTR_RO(svid);
/* Default sysfs attributes present on every alternate mode device. */
static struct attribute *typec_altmode_attrs[] = {
	&dev_attr_active.attr,
	&dev_attr_mode.attr,
	&dev_attr_svid.attr,
	&dev_attr_vdo.attr,
	NULL
};

/*
 * "active" degrades to read-only (0444) when there is no ops->activate
 * callback to carry out the entry/exit request.
 */
static umode_t typec_altmode_attr_is_visible(struct kobject *kobj,
					     struct attribute *attr, int n)
{
	struct typec_altmode *adev = to_typec_altmode(kobj_to_dev(kobj));

	if (attr == &dev_attr_active.attr)
		if (!adev->ops || !adev->ops->activate)
			return 0444;

	return attr->mode;
}

static const struct attribute_group typec_altmode_group = {
	.is_visible = typec_altmode_attr_is_visible,
	.attrs = typec_altmode_attrs,
};

static const struct attribute_group *typec_altmode_groups[] = {
	&typec_altmode_group,
	NULL
};
/*
 * Allocate a unique alternate mode id scoped to the parent device
 * (partner, plug or port). Returns the id or a negative errno.
 */
static int altmode_id_get(struct device *dev)
{
	struct ida *ids;

	if (is_typec_partner(dev))
		ids = &to_typec_partner(dev)->mode_ids;
	else if (is_typec_plug(dev))
		ids = &to_typec_plug(dev)->mode_ids;
	else
		ids = &to_typec_port(dev)->mode_ids;

	return ida_simple_get(ids, 0, 0, GFP_KERNEL);
}

/* Release an id previously allocated with altmode_id_get(). */
static void altmode_id_remove(struct device *dev, int id)
{
	struct ida *ids;

	if (is_typec_partner(dev))
		ids = &to_typec_partner(dev)->mode_ids;
	else if (is_typec_plug(dev))
		ids = &to_typec_plug(dev)->mode_ids;
	else
		ids = &to_typec_port(dev)->mode_ids;

	ida_simple_remove(ids, id);
}
/*
 * Device release callback: drops the partner link, returns the mode id to
 * the parent's IDA and frees the altmode. Runs when the last reference to
 * the device is put.
 */
static void typec_altmode_release(struct device *dev)
{
	struct altmode *alt = to_altmode(to_typec_altmode(dev));

	typec_altmode_put_partner(alt);

	altmode_id_remove(alt->adev.dev.parent, alt->id);
	kfree(alt);
}

const struct device_type typec_altmode_dev_type = {
	.name = "typec_alternate_mode",
	.groups = typec_altmode_groups,
	.release = typec_altmode_release,
};
/*
 * Common registration helper for port, partner and plug alternate modes.
 * Returns the new mode's handle or ERR_PTR on failure.
 */
static struct typec_altmode *
typec_register_altmode(struct device *parent,
		       const struct typec_altmode_desc *desc)
{
	bool is_port = is_typec_port(parent);
	struct altmode *alt;
	int id;
	int ret;

	/*
	 * altmode_id_get() returns a negative errno on failure. The old
	 * code stored the result in an unsigned int and never checked it,
	 * so an allocation failure produced a bogus huge id.
	 */
	id = altmode_id_get(parent);
	if (id < 0)
		return ERR_PTR(id);

	alt = kzalloc(sizeof(*alt), GFP_KERNEL);
	if (!alt) {
		altmode_id_remove(parent, id);
		return ERR_PTR(-ENOMEM);
	}

	alt->adev.svid = desc->svid;
	alt->adev.mode = desc->mode;
	alt->adev.vdo = desc->vdo;
	alt->roles = desc->roles;
	alt->id = id;

	alt->attrs[0] = &dev_attr_vdo.attr;
	alt->attrs[1] = &dev_attr_description.attr;
	alt->attrs[2] = &dev_attr_active.attr;

	if (is_port) {
		alt->attrs[3] = &dev_attr_supported_roles.attr;
		alt->adev.active = true; /* Enabled by default */
	}

	sprintf(alt->group_name, "mode%d", desc->mode);
	alt->group.name = alt->group_name;
	alt->group.attrs = alt->attrs;
	alt->groups[0] = &alt->group;

	alt->adev.dev.parent = parent;
	alt->adev.dev.groups = alt->groups;
	alt->adev.dev.type = &typec_altmode_dev_type;
	dev_set_name(&alt->adev.dev, "%s.%u", dev_name(parent), id);

	/* Link partners and plugs with the ports */
	if (!is_port)
		typec_altmode_set_partner(alt);

	/* The partners are bind to drivers */
	if (is_typec_partner(parent))
		alt->adev.dev.bus = &typec_bus;

	/* Plug alt modes need a class to generate udev events. */
	if (is_typec_plug(parent))
		alt->adev.dev.class = &typec_class;

	ret = device_register(&alt->adev.dev);
	if (ret) {
		dev_err(parent, "failed to register alternate mode (%d)\n",
			ret);
		/* put_device() triggers typec_altmode_release() cleanup. */
		put_device(&alt->adev.dev);
		return ERR_PTR(ret);
	}

	return &alt->adev;
}
/**
 * typec_unregister_altmode - Unregister Alternate Mode
 * @adev: The alternate mode to be unregistered
 *
 * Unregister device created with typec_partner_register_altmode(),
 * typec_plug_register_altmode() or typec_port_register_altmode().
 */
void typec_unregister_altmode(struct typec_altmode *adev)
{
	if (IS_ERR_OR_NULL(adev))
		return;
	/* Drop the retimer and mux references taken at registration time. */
	typec_retimer_put(to_altmode(adev)->retimer);
	typec_mux_put(to_altmode(adev)->mux);
	device_unregister(&adev->dev);
}
EXPORT_SYMBOL_GPL(typec_unregister_altmode);
/* ------------------------------------------------------------------------- */
/* Type-C Partners */
/* sysfs: accessory mode (audio/debug/none) the partner was attached in. */
static ssize_t accessory_mode_show(struct device *dev,
				   struct device_attribute *attr,
				   char *buf)
{
	struct typec_partner *p = to_typec_partner(dev);

	/* sysfs_emit() is PAGE_SIZE-bounded, preferred over sprintf() here. */
	return sysfs_emit(buf, "%s\n", typec_accessory_modes[p->accessory]);
}
static DEVICE_ATTR_RO(accessory_mode);
/* sysfs: whether the partner is USB Power Delivery capable. */
static ssize_t supports_usb_power_delivery_show(struct device *dev,
						struct device_attribute *attr,
						char *buf)
{
	struct typec_partner *p = to_typec_partner(dev);

	/* sysfs_emit() is PAGE_SIZE-bounded, preferred over sprintf() here. */
	return sysfs_emit(buf, "%s\n", p->usb_pd ? "yes" : "no");
}
static DEVICE_ATTR_RO(supports_usb_power_delivery);
static ssize_t number_of_alternate_modes_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct typec_partner *partner;
struct typec_plug *plug;
int num_altmodes;
if (is_typec_partner(dev)) {
partner = to_typec_partner(dev);
num_altmodes = partner->num_altmodes;
} else if (is_typec_plug(dev)) {
plug = to_typec_plug(dev);
num_altmodes = plug->num_altmodes;
} else {
return 0;
}
return sysfs_emit(buf, "%d\n", num_altmodes);
}
static DEVICE_ATTR_RO(number_of_alternate_modes);
/* sysfs attributes exposed on partner devices. */
static struct attribute *typec_partner_attrs[] = {
	&dev_attr_accessory_mode.attr,
	&dev_attr_supports_usb_power_delivery.attr,
	&dev_attr_number_of_alternate_modes.attr,
	&dev_attr_type.attr,
	&dev_attr_usb_power_delivery_revision.attr,
	NULL
};

/*
 * Hide number_of_alternate_modes until a valid (non-negative) count is
 * set, and hide the product type until identity data is available.
 */
static umode_t typec_partner_attr_is_visible(struct kobject *kobj, struct attribute *attr, int n)
{
	struct typec_partner *partner = to_typec_partner(kobj_to_dev(kobj));

	if (attr == &dev_attr_number_of_alternate_modes.attr) {
		if (partner->num_altmodes < 0)
			return 0;
	}

	if (attr == &dev_attr_type.attr)
		if (!get_pd_product_type(kobj_to_dev(kobj)))
			return 0;

	return attr->mode;
}

static const struct attribute_group typec_partner_group = {
	.is_visible = typec_partner_attr_is_visible,
	.attrs = typec_partner_attrs
};

static const struct attribute_group *typec_partner_groups[] = {
	&typec_partner_group,
	NULL
};
/* Device release callback: tears down the altmode id pool and frees. */
static void typec_partner_release(struct device *dev)
{
	struct typec_partner *partner = to_typec_partner(dev);

	ida_destroy(&partner->mode_ids);
	kfree(partner);
}

const struct device_type typec_partner_dev_type = {
	.name = "typec_partner",
	.groups = typec_partner_groups,
	.release = typec_partner_release,
};
/**
 * typec_partner_set_identity - Report result from Discover Identity command
 * @partner: The partner updated identity values
 *
 * This routine is used to report that the result of Discover Identity USB power
 * delivery command has become available.
 *
 * Returns 0 on success, -EINVAL if no identity buffer was supplied at
 * registration time.
 */
int typec_partner_set_identity(struct typec_partner *partner)
{
	if (!partner->identity)
		return -EINVAL;

	typec_report_identity(&partner->dev);
	return 0;
}
EXPORT_SYMBOL_GPL(typec_partner_set_identity);
/**
 * typec_partner_set_pd_revision - Set the PD revision supported by the partner
 * @partner: The partner to be updated.
 * @pd_revision: USB Power Delivery Specification Revision supported by partner
 *
 * This routine is used to report that the PD revision of the port partner has
 * become available.
 */
void typec_partner_set_pd_revision(struct typec_partner *partner, u16 pd_revision)
{
	if (partner->pd_revision == pd_revision)
		return;

	partner->pd_revision = pd_revision;
	sysfs_notify(&partner->dev.kobj, NULL, "usb_power_delivery_revision");
	/* A non-zero revision implies the partner supports USB PD. */
	if (pd_revision != 0 && !partner->usb_pd) {
		partner->usb_pd = 1;
		sysfs_notify(&partner->dev.kobj, NULL,
			     "supports_usb_power_delivery");
	}
	kobject_uevent(&partner->dev.kobj, KOBJ_CHANGE);
}
EXPORT_SYMBOL_GPL(typec_partner_set_pd_revision);
/**
 * typec_partner_set_usb_power_delivery - Declare USB Power Delivery Contract.
 * @partner: The partner device.
 * @pd: The USB PD instance.
 *
 * This routine can be used to declare USB Power Delivery Contract with @partner
 * by linking @partner to @pd which contains the objects that were used during the
 * negotiation of the contract.
 *
 * If @pd is NULL, the link is removed and the contract with @partner has ended.
 */
int typec_partner_set_usb_power_delivery(struct typec_partner *partner,
					 struct usb_power_delivery *pd)
{
	int ret;

	/* No-op when the partner is invalid or the link is unchanged. */
	if (IS_ERR_OR_NULL(partner) || partner->pd == pd)
		return 0;

	if (pd) {
		ret = usb_power_delivery_link_device(pd, &partner->dev);
		if (ret)
			return ret;
	} else {
		usb_power_delivery_unlink_device(partner->pd, &partner->dev);
	}

	partner->pd = pd;

	return 0;
}
EXPORT_SYMBOL_GPL(typec_partner_set_usb_power_delivery);
/**
 * typec_partner_set_num_altmodes - Set the number of available partner altmodes
 * @partner: The partner to be updated.
 * @num_altmodes: The number of altmodes we want to specify as available.
 *
 * This routine is used to report the number of alternate modes supported by the
 * partner. This value is *not* enforced in alternate mode registration routines.
 *
 * @partner.num_altmodes is set to -1 on partner registration, denoting that
 * a valid value has not been set for it yet.
 *
 * Returns 0 on success or negative error number on failure.
 */
int typec_partner_set_num_altmodes(struct typec_partner *partner, int num_altmodes)
{
	int ret;

	if (num_altmodes < 0)
		return -EINVAL;

	partner->num_altmodes = num_altmodes;
	/* Re-evaluate attribute visibility now that the count is valid. */
	ret = sysfs_update_group(&partner->dev.kobj, &typec_partner_group);
	if (ret < 0)
		return ret;

	sysfs_notify(&partner->dev.kobj, NULL, "number_of_alternate_modes");
	kobject_uevent(&partner->dev.kobj, KOBJ_CHANGE);

	return 0;
}
EXPORT_SYMBOL_GPL(typec_partner_set_num_altmodes);
/**
 * typec_partner_register_altmode - Register USB Type-C Partner Alternate Mode
 * @partner: USB Type-C Partner that supports the alternate mode
 * @desc: Description of the alternate mode
 *
 * This routine is used to register each alternate mode individually that
 * @partner has listed in response to Discover SVIDs command. The modes for a
 * SVID listed in response to Discover Modes command need to be listed in an
 * array in @desc.
 *
 * Returns handle to the alternate mode on success or ERR_PTR on failure.
 */
struct typec_altmode *
typec_partner_register_altmode(struct typec_partner *partner,
			       const struct typec_altmode_desc *desc)
{
	/* Thin wrapper: the partner device becomes the mode's parent. */
	return typec_register_altmode(&partner->dev, desc);
}
EXPORT_SYMBOL_GPL(typec_partner_register_altmode);
/**
 * typec_partner_set_svdm_version - Set negotiated Structured VDM (SVDM) Version
 * @partner: USB Type-C Partner that supports SVDM
 * @svdm_version: Negotiated SVDM Version
 *
 * This routine is used to save the negotiated SVDM Version.
 */
void typec_partner_set_svdm_version(struct typec_partner *partner,
				    enum usb_pd_svdm_ver svdm_version)
{
	partner->svdm_version = svdm_version;
}
EXPORT_SYMBOL_GPL(typec_partner_set_svdm_version);
/**
 * typec_partner_usb_power_delivery_register - Register Type-C partner USB Power Delivery Support
 * @partner: Type-C partner device.
 * @desc: Description of the USB PD contract.
 *
 * This routine is a wrapper around usb_power_delivery_register(). It registers
 * USB Power Delivery Capabilities for a Type-C partner device. Specifically,
 * it sets the Type-C partner device as a parent for the resulting USB Power Delivery object.
 *
 * Returns handle to struct usb_power_delivery or ERR_PTR.
 */
struct usb_power_delivery *
typec_partner_usb_power_delivery_register(struct typec_partner *partner,
					  struct usb_power_delivery_desc *desc)
{
	return usb_power_delivery_register(&partner->dev, desc);
}
EXPORT_SYMBOL_GPL(typec_partner_usb_power_delivery_register);
/**
 * typec_register_partner - Register a USB Type-C Partner
 * @port: The USB Type-C Port the partner is connected to
 * @desc: Description of the partner
 *
 * Registers a device for USB Type-C Partner described in @desc.
 *
 * Returns handle to the partner on success or ERR_PTR on failure.
 */
struct typec_partner *typec_register_partner(struct typec_port *port,
					     struct typec_partner_desc *desc)
{
	struct typec_partner *partner;
	int ret;

	partner = kzalloc(sizeof(*partner), GFP_KERNEL);
	if (!partner)
		return ERR_PTR(-ENOMEM);

	ida_init(&partner->mode_ids);
	partner->usb_pd = desc->usb_pd;
	partner->accessory = desc->accessory;
	/* -1 marks "count not reported yet"; keeps the sysfs file hidden. */
	partner->num_altmodes = -1;
	partner->pd_revision = desc->pd_revision;
	partner->svdm_version = port->cap->svdm_version;

	if (desc->identity) {
		/*
		 * Creating directory for the identity only if the driver is
		 * able to provide data to it.
		 */
		partner->dev.groups = usb_pd_id_groups;
		partner->identity = desc->identity;
	}

	partner->dev.class = &typec_class;
	partner->dev.parent = &port->dev;
	partner->dev.type = &typec_partner_dev_type;
	dev_set_name(&partner->dev, "%s-partner", dev_name(&port->dev));

	ret = device_register(&partner->dev);
	if (ret) {
		dev_err(&port->dev, "failed to register partner (%d)\n", ret);
		/* put_device() runs typec_partner_release() for cleanup. */
		put_device(&partner->dev);
		return ERR_PTR(ret);
	}

	return partner;
}
EXPORT_SYMBOL_GPL(typec_register_partner);
/**
 * typec_unregister_partner - Unregister a USB Type-C Partner
 * @partner: The partner to be unregistered
 *
 * Unregister device created with typec_register_partner().
 */
void typec_unregister_partner(struct typec_partner *partner)
{
	if (!IS_ERR_OR_NULL(partner))
		device_unregister(&partner->dev);
}
EXPORT_SYMBOL_GPL(typec_unregister_partner);
/* ------------------------------------------------------------------------- */
/* Type-C Cable Plugs */
/* Device release callback: tears down the altmode id pool and frees. */
static void typec_plug_release(struct device *dev)
{
	struct typec_plug *plug = to_typec_plug(dev);

	ida_destroy(&plug->mode_ids);
	kfree(plug);
}
/* sysfs attributes exposed on cable plug devices. */
static struct attribute *typec_plug_attrs[] = {
	&dev_attr_number_of_alternate_modes.attr,
	NULL
};

/* Hide the altmode count until a valid (non-negative) value is set. */
static umode_t typec_plug_attr_is_visible(struct kobject *kobj, struct attribute *attr, int n)
{
	struct typec_plug *plug = to_typec_plug(kobj_to_dev(kobj));

	if (attr == &dev_attr_number_of_alternate_modes.attr) {
		if (plug->num_altmodes < 0)
			return 0;
	}

	return attr->mode;
}

static const struct attribute_group typec_plug_group = {
	.is_visible = typec_plug_attr_is_visible,
	.attrs = typec_plug_attrs
};

static const struct attribute_group *typec_plug_groups[] = {
	&typec_plug_group,
	NULL
};

const struct device_type typec_plug_dev_type = {
	.name = "typec_plug",
	.groups = typec_plug_groups,
	.release = typec_plug_release,
};
/**
 * typec_plug_set_num_altmodes - Set the number of available plug altmodes
 * @plug: The plug to be updated.
 * @num_altmodes: The number of altmodes we want to specify as available.
 *
 * This routine is used to report the number of alternate modes supported by the
 * plug. This value is *not* enforced in alternate mode registration routines.
 *
 * @plug.num_altmodes is set to -1 on plug registration, denoting that
 * a valid value has not been set for it yet.
 *
 * Returns 0 on success or negative error number on failure.
 */
int typec_plug_set_num_altmodes(struct typec_plug *plug, int num_altmodes)
{
	int ret;

	if (num_altmodes < 0)
		return -EINVAL;

	plug->num_altmodes = num_altmodes;
	/* Re-evaluate attribute visibility now that the count is valid. */
	ret = sysfs_update_group(&plug->dev.kobj, &typec_plug_group);
	if (ret < 0)
		return ret;

	sysfs_notify(&plug->dev.kobj, NULL, "number_of_alternate_modes");
	kobject_uevent(&plug->dev.kobj, KOBJ_CHANGE);

	return 0;
}
EXPORT_SYMBOL_GPL(typec_plug_set_num_altmodes);
/**
 * typec_plug_register_altmode - Register USB Type-C Cable Plug Alternate Mode
 * @plug: USB Type-C Cable Plug that supports the alternate mode
 * @desc: Description of the alternate mode
 *
 * This routine is used to register each alternate mode individually that @plug
 * has listed in response to Discover SVIDs command. The modes for a SVID that
 * the plug lists in response to Discover Modes command need to be listed in an
 * array in @desc.
 *
 * Returns handle to the alternate mode on success or ERR_PTR on failure.
 */
struct typec_altmode *
typec_plug_register_altmode(struct typec_plug *plug,
			    const struct typec_altmode_desc *desc)
{
	/* Thin wrapper: the plug device becomes the mode's parent. */
	return typec_register_altmode(&plug->dev, desc);
}
EXPORT_SYMBOL_GPL(typec_plug_register_altmode);
/**
 * typec_register_plug - Register a USB Type-C Cable Plug
 * @cable: USB Type-C Cable with the plug
 * @desc: Description of the cable plug
 *
 * Registers a device for USB Type-C Cable Plug described in @desc. A USB Type-C
 * Cable Plug represents a plug with electronics in it that can response to USB
 * Power Delivery SOP Prime or SOP Double Prime packages.
 *
 * Returns handle to the cable plug on success or ERR_PTR on failure.
 */
struct typec_plug *typec_register_plug(struct typec_cable *cable,
				       struct typec_plug_desc *desc)
{
	struct typec_plug *plug;
	int ret;

	plug = kzalloc(sizeof(*plug), GFP_KERNEL);
	if (!plug)
		return ERR_PTR(-ENOMEM);

	ida_init(&plug->mode_ids);
	/* -1 marks "count not reported yet"; keeps the sysfs file hidden. */
	plug->num_altmodes = -1;
	plug->index = desc->index;
	plug->dev.class = &typec_class;
	plug->dev.parent = &cable->dev;
	plug->dev.type = &typec_plug_dev_type;
	/*
	 * Format the name in one step instead of going through a fixed
	 * 8-byte "plugN" stack buffer and an unchecked sprintf().
	 */
	dev_set_name(&plug->dev, "%s-plug%d", dev_name(cable->dev.parent),
		     desc->index);

	ret = device_register(&plug->dev);
	if (ret) {
		dev_err(&cable->dev, "failed to register plug (%d)\n", ret);
		/* put_device() runs typec_plug_release() for cleanup. */
		put_device(&plug->dev);
		return ERR_PTR(ret);
	}

	return plug;
}
EXPORT_SYMBOL_GPL(typec_register_plug);
/**
 * typec_unregister_plug - Unregister a USB Type-C Cable Plug
 * @plug: The cable plug to be unregistered
 *
 * Unregister device created with typec_register_plug().
 */
void typec_unregister_plug(struct typec_plug *plug)
{
	if (!IS_ERR_OR_NULL(plug))
		device_unregister(&plug->dev);
}
EXPORT_SYMBOL_GPL(typec_unregister_plug);
/* Type-C Cables */
/* Maps enum typec_plug_type to the strings shown in sysfs. */
static const char * const typec_plug_types[] = {
	[USB_PLUG_NONE]		= "unknown",
	[USB_PLUG_TYPE_A]	= "type-a",
	[USB_PLUG_TYPE_B]	= "type-b",
	[USB_PLUG_TYPE_C]	= "type-c",
	[USB_PLUG_CAPTIVE]	= "captive",
};

/* sysfs: plug type of the cable. */
static ssize_t plug_type_show(struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	struct typec_cable *cable = to_typec_cable(dev);

	/* sysfs_emit() is PAGE_SIZE-bounded, preferred over sprintf() here. */
	return sysfs_emit(buf, "%s\n", typec_plug_types[cable->type]);
}
static DEVICE_ATTR_RO(plug_type);
/* sysfs attributes exposed on cable devices. */
static struct attribute *typec_cable_attrs[] = {
	&dev_attr_type.attr,
	&dev_attr_plug_type.attr,
	&dev_attr_usb_power_delivery_revision.attr,
	NULL
};
ATTRIBUTE_GROUPS(typec_cable);

/* Device release callback for cable devices. */
static void typec_cable_release(struct device *dev)
{
	struct typec_cable *cable = to_typec_cable(dev);

	kfree(cable);
}

const struct device_type typec_cable_dev_type = {
	.name = "typec_cable",
	.groups = typec_cable_groups,
	.release = typec_cable_release,
};

/* device_find_child() match callback: select the cable child of a port. */
static int cable_match(struct device *dev, void *data)
{
	return is_typec_cable(dev);
}
/**
 * typec_cable_get - Get a reference to the USB Type-C cable
 * @port: The USB Type-C Port the cable is connected to
 *
 * The caller must decrement the reference count with typec_cable_put() after
 * use.
 *
 * Returns the cable, or NULL if no cable device is registered on @port.
 */
struct typec_cable *typec_cable_get(struct typec_port *port)
{
	struct device *dev;

	/* device_find_child() takes a reference on the returned device. */
	dev = device_find_child(&port->dev, NULL, cable_match);
	if (!dev)
		return NULL;

	return to_typec_cable(dev);
}
EXPORT_SYMBOL_GPL(typec_cable_get);
/**
 * typec_cable_put - Decrement the reference count on USB Type-C cable
 * @cable: The USB Type-C cable
 */
void typec_cable_put(struct typec_cable *cable)
{
	put_device(&cable->dev);
}
EXPORT_SYMBOL_GPL(typec_cable_put);
/**
 * typec_cable_is_active - Check is the USB Type-C cable active or passive
 * @cable: The USB Type-C Cable
 *
 * Return 1 if the cable is active or 0 if it's passive.
 */
int typec_cable_is_active(struct typec_cable *cable)
{
	return cable->active;
}
EXPORT_SYMBOL_GPL(typec_cable_is_active);
/**
 * typec_cable_set_identity - Report result from Discover Identity command
 * @cable: The cable updated identity values
 *
 * This routine is used to report that the result of Discover Identity USB power
 * delivery command has become available.
 *
 * Returns 0 on success, -EINVAL if no identity buffer was supplied at
 * registration time.
 */
int typec_cable_set_identity(struct typec_cable *cable)
{
	if (!cable->identity)
		return -EINVAL;

	typec_report_identity(&cable->dev);
	return 0;
}
EXPORT_SYMBOL_GPL(typec_cable_set_identity);
/**
 * typec_register_cable - Register a USB Type-C Cable
 * @port: The USB Type-C Port the cable is connected to
 * @desc: Description of the cable
 *
 * Registers a device for USB Type-C Cable described in @desc. The cable will be
 * parent for the optional cable plug devises.
 *
 * Returns handle to the cable on success or ERR_PTR on failure.
 */
struct typec_cable *typec_register_cable(struct typec_port *port,
					 struct typec_cable_desc *desc)
{
	struct typec_cable *cable;
	int ret;

	cable = kzalloc(sizeof(*cable), GFP_KERNEL);
	if (!cable)
		return ERR_PTR(-ENOMEM);

	cable->type = desc->type;
	cable->active = desc->active;
	cable->pd_revision = desc->pd_revision;

	if (desc->identity) {
		/*
		 * Creating directory for the identity only if the driver is
		 * able to provide data to it.
		 */
		cable->dev.groups = usb_pd_id_groups;
		cable->identity = desc->identity;
	}

	cable->dev.class = &typec_class;
	cable->dev.parent = &port->dev;
	cable->dev.type = &typec_cable_dev_type;
	dev_set_name(&cable->dev, "%s-cable", dev_name(&port->dev));

	ret = device_register(&cable->dev);
	if (ret) {
		dev_err(&port->dev, "failed to register cable (%d)\n", ret);
		/* put_device() runs typec_cable_release() for cleanup. */
		put_device(&cable->dev);
		return ERR_PTR(ret);
	}

	return cable;
}
EXPORT_SYMBOL_GPL(typec_register_cable);
/**
 * typec_unregister_cable - Unregister a USB Type-C Cable
 * @cable: The cable to be unregistered
 *
 * Unregister device created with typec_register_cable().
 */
void typec_unregister_cable(struct typec_cable *cable)
{
	if (!IS_ERR_OR_NULL(cable))
		device_unregister(&cable->dev);
}
EXPORT_SYMBOL_GPL(typec_unregister_cable);
/* ------------------------------------------------------------------------- */
/* USB Type-C ports */
/**
 * typec_port_set_usb_power_delivery - Assign USB PD for port.
 * @port: USB Type-C port.
 * @pd: USB PD instance.
 *
 * This routine can be used to set the USB Power Delivery Capabilities for @port
 * that it will advertise to the partner.
 *
 * If @pd is NULL, the assignment is removed.
 */
int typec_port_set_usb_power_delivery(struct typec_port *port, struct usb_power_delivery *pd)
{
	int ret;

	/* No-op when the port is invalid or the link is unchanged. */
	if (IS_ERR_OR_NULL(port) || port->pd == pd)
		return 0;

	if (pd) {
		ret = usb_power_delivery_link_device(pd, &port->dev);
		if (ret)
			return ret;
	} else {
		usb_power_delivery_unlink_device(port->pd, &port->dev);
	}

	port->pd = pd;

	return 0;
}
EXPORT_SYMBOL_GPL(typec_port_set_usb_power_delivery);
/*
 * sysfs: select one of the port's USB PD capability sets by name.
 * A sysfs store must return the number of bytes consumed on success;
 * the old code returned pd_set()'s 0, which makes userspace write()
 * report no progress and retry forever.
 */
static ssize_t select_usb_power_delivery_store(struct device *dev,
					       struct device_attribute *attr,
					       const char *buf, size_t size)
{
	struct typec_port *port = to_typec_port(dev);
	struct usb_power_delivery *pd;
	int ret;

	if (!port->ops || !port->ops->pd_set)
		return -EOPNOTSUPP;

	pd = usb_power_delivery_find(buf);
	if (!pd)
		return -EINVAL;

	/* NOTE(review): usb_power_delivery_find() looks like it takes a
	 * reference; confirm whether a put is needed after pd_set(). */
	ret = port->ops->pd_set(port, pd);

	return ret ? ret : size;
}
/*
 * sysfs: list the port's selectable USB PD capability sets, marking the
 * currently active one with brackets.
 */
static ssize_t select_usb_power_delivery_show(struct device *dev,
					      struct device_attribute *attr, char *buf)
{
	struct typec_port *port = to_typec_port(dev);
	struct usb_power_delivery **pds;
	int i, ret = 0;

	if (!port->ops || !port->ops->pd_get)
		return -EOPNOTSUPP;

	pds = port->ops->pd_get(port);
	if (!pds)
		return 0;

	for (i = 0; pds[i]; i++) {
		if (pds[i] == port->pd)
			ret += sysfs_emit_at(buf, ret, "[%s] ", dev_name(&pds[i]->dev));
		else
			ret += sysfs_emit_at(buf, ret, "%s ", dev_name(&pds[i]->dev));
	}

	/*
	 * An empty (NULL-terminated at index 0) list previously fell
	 * through to buf[-1] below - guard against the underflow.
	 */
	if (!ret)
		return 0;

	/* Replace the trailing space with a newline. */
	buf[ret - 1] = '\n';

	return ret;
}
static DEVICE_ATTR_RW(select_usb_power_delivery);
/* sysfs attributes in the port's USB PD group. */
static struct attribute *port_attrs[] = {
	&dev_attr_select_usb_power_delivery.attr,
	NULL
};

/*
 * Hide the PD selection file entirely without pd_get() support; degrade
 * it to read-only (0444) without pd_set().
 */
static umode_t port_attr_is_visible(struct kobject *kobj, struct attribute *attr, int n)
{
	struct typec_port *port = to_typec_port(kobj_to_dev(kobj));

	if (!port->pd || !port->ops || !port->ops->pd_get)
		return 0;
	if (!port->ops->pd_set)
		return 0444;

	return attr->mode;
}

static const struct attribute_group pd_group = {
	.is_visible = port_attr_is_visible,
	.attrs = port_attrs,
};
/* Enum-to-string tables for the sysfs role/orientation interfaces. */
static const char * const typec_orientations[] = {
	[TYPEC_ORIENTATION_NONE]	= "unknown",
	[TYPEC_ORIENTATION_NORMAL]	= "normal",
	[TYPEC_ORIENTATION_REVERSE]	= "reverse",
};

static const char * const typec_roles[] = {
	[TYPEC_SINK]	= "sink",
	[TYPEC_SOURCE]	= "source",
};

static const char * const typec_data_roles[] = {
	[TYPEC_DEVICE]	= "device",
	[TYPEC_HOST]	= "host",
};

static const char * const typec_port_power_roles[] = {
	[TYPEC_PORT_SRC] = "source",
	[TYPEC_PORT_SNK] = "sink",
	[TYPEC_PORT_DRP] = "dual",
};

static const char * const typec_port_data_roles[] = {
	[TYPEC_PORT_DFP] = "host",
	[TYPEC_PORT_UFP] = "device",
	[TYPEC_PORT_DRD] = "dual",
};

/* DRP port_type listings; brackets mark the currently selected type. */
static const char * const typec_port_types_drp[] = {
	[TYPEC_PORT_SRC] = "dual [source] sink",
	[TYPEC_PORT_SNK] = "dual source [sink]",
	[TYPEC_PORT_DRP] = "[dual] source sink",
};
/* sysfs: set the preferred role for Try.SRC/Try.SNK on a DRP port. */
static ssize_t
preferred_role_store(struct device *dev, struct device_attribute *attr,
		     const char *buf, size_t size)
{
	struct typec_port *port = to_typec_port(dev);
	int role;
	int ret;

	if (port->cap->type != TYPEC_PORT_DRP) {
		dev_dbg(dev, "Preferred role only supported with DRP ports\n");
		return -EOPNOTSUPP;
	}

	if (!port->ops || !port->ops->try_role) {
		dev_dbg(dev, "Setting preferred role not supported\n");
		return -EOPNOTSUPP;
	}

	role = sysfs_match_string(typec_roles, buf);
	if (role < 0) {
		/* "none" clears the preference. */
		if (sysfs_streq(buf, "none"))
			role = TYPEC_NO_PREFERRED_ROLE;
		else
			return -EINVAL;
	}

	ret = port->ops->try_role(port, role);
	if (ret)
		return ret;

	port->prefer_role = role;
	return size;
}

static ssize_t
preferred_role_show(struct device *dev, struct device_attribute *attr,
		    char *buf)
{
	struct typec_port *port = to_typec_port(dev);

	/* Empty output for fixed-role ports or when no preference is set. */
	if (port->cap->type != TYPEC_PORT_DRP)
		return 0;

	if (port->prefer_role < 0)
		return 0;

	/* sysfs_emit() is PAGE_SIZE-bounded, preferred over sprintf() here. */
	return sysfs_emit(buf, "%s\n", typec_roles[port->prefer_role]);
}
static DEVICE_ATTR_RW(preferred_role);
/* sysfs: request a data role swap ("host"/"device") on a DRD port. */
static ssize_t data_role_store(struct device *dev,
			       struct device_attribute *attr,
			       const char *buf, size_t size)
{
	struct typec_port *port = to_typec_port(dev);
	int ret;

	if (!port->ops || !port->ops->dr_set) {
		dev_dbg(dev, "data role swapping not supported\n");
		return -EOPNOTSUPP;
	}

	ret = sysfs_match_string(typec_data_roles, buf);
	if (ret < 0)
		return ret;

	/* Serialize against concurrent port_type changes. */
	mutex_lock(&port->port_type_lock);
	if (port->cap->data != TYPEC_PORT_DRD) {
		ret = -EOPNOTSUPP;
		goto unlock_and_ret;
	}

	ret = port->ops->dr_set(port, ret);
	if (ret)
		goto unlock_and_ret;

	ret = size;
unlock_and_ret:
	mutex_unlock(&port->port_type_lock);
	return ret;
}

static ssize_t data_role_show(struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	struct typec_port *port = to_typec_port(dev);

	/* Brackets mark the current role; DRD ports list both options. */
	if (port->cap->data == TYPEC_PORT_DRD)
		return sysfs_emit(buf, "%s\n", port->data_role == TYPEC_HOST ?
				  "[host] device" : "host [device]");

	return sysfs_emit(buf, "[%s]\n", typec_data_roles[port->data_role]);
}
static DEVICE_ATTR_RW(data_role);
/* sysfs: request a power role swap ("source"/"sink"); needs a PD contract. */
static ssize_t power_role_store(struct device *dev,
				struct device_attribute *attr,
				const char *buf, size_t size)
{
	struct typec_port *port = to_typec_port(dev);
	int ret;

	if (!port->ops || !port->ops->pr_set) {
		dev_dbg(dev, "power role swapping not supported\n");
		return -EOPNOTSUPP;
	}

	/* PR_Swap is a USB PD operation; reject without a PD contract. */
	if (port->pwr_opmode != TYPEC_PWR_MODE_PD) {
		dev_dbg(dev, "partner unable to swap power role\n");
		return -EIO;
	}

	ret = sysfs_match_string(typec_roles, buf);
	if (ret < 0)
		return ret;

	/* Serialize against concurrent port_type changes. */
	mutex_lock(&port->port_type_lock);
	if (port->port_type != TYPEC_PORT_DRP) {
		dev_dbg(dev, "port type fixed at \"%s\"",
			typec_port_power_roles[port->port_type]);
		ret = -EOPNOTSUPP;
		goto unlock_and_ret;
	}

	ret = port->ops->pr_set(port, ret);
	if (ret)
		goto unlock_and_ret;

	ret = size;
unlock_and_ret:
	mutex_unlock(&port->port_type_lock);
	return ret;
}

static ssize_t power_role_show(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	struct typec_port *port = to_typec_port(dev);

	/* Brackets mark the current role; DRP ports list both options. */
	if (port->cap->type == TYPEC_PORT_DRP)
		return sysfs_emit(buf, "%s\n", port->pwr_role == TYPEC_SOURCE ?
				  "[source] sink" : "source [sink]");

	return sysfs_emit(buf, "[%s]\n", typec_roles[port->pwr_role]);
}
static DEVICE_ATTR_RW(power_role);
/* sysfs: restrict a DRP port to source/sink/dual operation. */
static ssize_t
port_type_store(struct device *dev, struct device_attribute *attr,
		const char *buf, size_t size)
{
	struct typec_port *port = to_typec_port(dev);
	int ret;
	enum typec_port_type type;

	if (port->cap->type != TYPEC_PORT_DRP ||
	    !port->ops || !port->ops->port_type_set) {
		dev_dbg(dev, "changing port type not supported\n");
		return -EOPNOTSUPP;
	}

	ret = sysfs_match_string(typec_port_power_roles, buf);
	if (ret < 0)
		return ret;

	type = ret;
	mutex_lock(&port->port_type_lock);

	/* Already the requested type - succeed without calling the driver. */
	if (port->port_type == type) {
		ret = size;
		goto unlock_and_ret;
	}

	ret = port->ops->port_type_set(port, type);
	if (ret)
		goto unlock_and_ret;

	port->port_type = type;
	ret = size;

unlock_and_ret:
	mutex_unlock(&port->port_type_lock);
	return ret;
}

static ssize_t
port_type_show(struct device *dev, struct device_attribute *attr,
	       char *buf)
{
	struct typec_port *port = to_typec_port(dev);

	/* DRP ports list all options with the current one bracketed. */
	if (port->cap->type == TYPEC_PORT_DRP)
		return sysfs_emit(buf, "%s\n",
				  typec_port_types_drp[port->port_type]);

	return sysfs_emit(buf, "[%s]\n", typec_port_power_roles[port->cap->type]);
}
static DEVICE_ATTR_RW(port_type);
/* Maps enum typec_pwr_opmode to the strings shown in sysfs. */
static const char * const typec_pwr_opmodes[] = {
	[TYPEC_PWR_MODE_USB]	= "default",
	[TYPEC_PWR_MODE_1_5A]	= "1.5A",
	[TYPEC_PWR_MODE_3_0A]	= "3.0A",
	[TYPEC_PWR_MODE_PD]	= "usb_power_delivery",
};

/* sysfs: current power level (USB default, 1.5A, 3.0A or PD). */
static ssize_t power_operation_mode_show(struct device *dev,
					 struct device_attribute *attr,
					 char *buf)
{
	struct typec_port *port = to_typec_port(dev);

	/* sysfs_emit() is PAGE_SIZE-bounded, preferred over sprintf() here. */
	return sysfs_emit(buf, "%s\n", typec_pwr_opmodes[port->pwr_opmode]);
}
static DEVICE_ATTR_RO(power_operation_mode);
/* sysfs: request a VCONN swap; only meaningful with USB PD support. */
static ssize_t vconn_source_store(struct device *dev,
				  struct device_attribute *attr,
				  const char *buf, size_t size)
{
	struct typec_port *port = to_typec_port(dev);
	bool source;
	int ret;

	if (!port->cap->pd_revision) {
		dev_dbg(dev, "VCONN swap depends on USB Power Delivery\n");
		return -EOPNOTSUPP;
	}

	if (!port->ops || !port->ops->vconn_set) {
		dev_dbg(dev, "VCONN swapping not supported\n");
		return -EOPNOTSUPP;
	}

	ret = kstrtobool(buf, &source);
	if (ret)
		return ret;

	/* true -> TYPEC_SOURCE, false -> TYPEC_SINK. */
	ret = port->ops->vconn_set(port, (enum typec_role)source);
	if (ret)
		return ret;

	return size;
}

static ssize_t vconn_source_show(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	struct typec_port *port = to_typec_port(dev);

	/* sysfs_emit() is PAGE_SIZE-bounded, preferred over sprintf() here. */
	return sysfs_emit(buf, "%s\n",
			  port->vconn_role == TYPEC_SOURCE ? "yes" : "no");
}
static DEVICE_ATTR_RW(vconn_source);
/* sysfs: space-separated list of accessory modes the port supports. */
static ssize_t supported_accessory_modes_show(struct device *dev,
					      struct device_attribute *attr,
					      char *buf)
{
	struct typec_port *port = to_typec_port(dev);
	ssize_t ret = 0;
	int i;

	for (i = 0; i < ARRAY_SIZE(port->cap->accessory); i++) {
		if (port->cap->accessory[i])
			/* sysfs_emit_at() bounds the writes, unlike sprintf(). */
			ret += sysfs_emit_at(buf, ret, "%s ",
					     typec_accessory_modes[port->cap->accessory[i]]);
	}

	if (!ret)
		return sysfs_emit(buf, "none\n");

	/* Replace the trailing space with a newline. */
	buf[ret - 1] = '\n';

	return ret;
}
static DEVICE_ATTR_RO(supported_accessory_modes);
/* sysfs: USB Type-C spec revision in BCD, shown as "major.minor". */
static ssize_t usb_typec_revision_show(struct device *dev,
				       struct device_attribute *attr,
				       char *buf)
{
	struct typec_port *port = to_typec_port(dev);
	u16 rev = port->cap->revision;

	/* sysfs_emit() is PAGE_SIZE-bounded, preferred over sprintf() here. */
	return sysfs_emit(buf, "%d.%d\n", (rev >> 8) & 0xff, (rev >> 4) & 0xf);
}
static DEVICE_ATTR_RO(usb_typec_revision);
/*
 * sysfs: USB PD spec revision in BCD, shown as "major.minor". Shared by
 * port, partner and cable devices; anything else reads as "0.0".
 */
static ssize_t usb_power_delivery_revision_show(struct device *dev,
						struct device_attribute *attr,
						char *buf)
{
	u16 rev = 0;

	if (is_typec_partner(dev)) {
		struct typec_partner *partner = to_typec_partner(dev);

		rev = partner->pd_revision;
	} else if (is_typec_cable(dev)) {
		struct typec_cable *cable = to_typec_cable(dev);

		rev = cable->pd_revision;
	} else if (is_typec_port(dev)) {
		struct typec_port *p = to_typec_port(dev);

		rev = p->cap->pd_revision;
	}
	return sysfs_emit(buf, "%d.%d\n", (rev >> 8) & 0xff, (rev >> 4) & 0xf);
}
/*
 * orientation (sysfs, show) - current cable plug orientation of the port.
 *
 * Uses sysfs_emit() instead of sprintf() as required for sysfs show()
 * callbacks.
 */
static ssize_t orientation_show(struct device *dev,
				struct device_attribute *attr,
				char *buf)
{
	struct typec_port *port = to_typec_port(dev);

	return sysfs_emit(buf, "%s\n", typec_orientations[port->orientation]);
}
static DEVICE_ATTR_RO(orientation);
/* Attributes exposed by every Type-C port device. */
static struct attribute *typec_attrs[] = {
	&dev_attr_data_role.attr,
	&dev_attr_power_operation_mode.attr,
	&dev_attr_power_role.attr,
	&dev_attr_preferred_role.attr,
	&dev_attr_supported_accessory_modes.attr,
	&dev_attr_usb_power_delivery_revision.attr,
	&dev_attr_usb_typec_revision.attr,
	&dev_attr_vconn_source.attr,
	&dev_attr_port_type.attr,
	&dev_attr_orientation.attr,
	NULL,
};
/*
 * Demote attributes to read-only (0444), or hide them entirely (0),
 * when the port capabilities or the low level operations cannot
 * support changing them.
 */
static umode_t typec_attr_is_visible(struct kobject *kobj,
				     struct attribute *attr, int n)
{
	struct typec_port *port = to_typec_port(kobj_to_dev(kobj));
	if (attr == &dev_attr_data_role.attr) {
		/* Data role swap needs a DRD port and a dr_set() op. */
		if (port->cap->data != TYPEC_PORT_DRD ||
		    !port->ops || !port->ops->dr_set)
			return 0444;
	} else if (attr == &dev_attr_power_role.attr) {
		/* Power role swap needs a DRP port and a pr_set() op. */
		if (port->cap->type != TYPEC_PORT_DRP ||
		    !port->ops || !port->ops->pr_set)
			return 0444;
	} else if (attr == &dev_attr_vconn_source.attr) {
		/* VCONN swap requires USB Power Delivery. */
		if (!port->cap->pd_revision ||
		    !port->ops || !port->ops->vconn_set)
			return 0444;
	} else if (attr == &dev_attr_preferred_role.attr) {
		if (port->cap->type != TYPEC_PORT_DRP ||
		    !port->ops || !port->ops->try_role)
			return 0444;
	} else if (attr == &dev_attr_port_type.attr) {
		if (!port->ops || !port->ops->port_type_set)
			return 0;
		if (port->cap->type != TYPEC_PORT_DRP)
			return 0444;
	} else if (attr == &dev_attr_orientation.attr) {
		/* Only orientation aware ports expose the orientation. */
		if (port->cap->orientation_aware)
			return 0444;
		return 0;
	}
	return attr->mode;
}
static const struct attribute_group typec_group = {
	.is_visible = typec_attr_is_visible,
	.attrs = typec_attrs,
};
/* Attribute groups of the port device: generic + USB PD attributes. */
static const struct attribute_group *typec_groups[] = {
	&typec_group,
	&pd_group,
	NULL
};
/* Add TYPEC_PORT=<name> to every uevent emitted for a port device. */
static int typec_uevent(const struct device *dev, struct kobj_uevent_env *env)
{
	int err = add_uevent_var(env, "TYPEC_PORT=%s", dev_name(dev));

	if (err)
		dev_err(dev, "failed to add uevent TYPEC_PORT\n");

	return err;
}
/* Device release callback: frees everything a port device holds. */
static void typec_release(struct device *dev)
{
	struct typec_port *port = to_typec_port(dev);
	/* Return the "portN" index to the pool. */
	ida_simple_remove(&typec_index_ida, port->id);
	ida_destroy(&port->mode_ids);
	/* Drop the references taken in typec_register_port(). */
	typec_switch_put(port->sw);
	typec_mux_put(port->mux);
	typec_retimer_put(port->retimer);
	kfree(port->cap);
	kfree(port);
}
const struct device_type typec_port_dev_type = {
	.name = "typec_port",
	.groups = typec_groups,
	.uevent = typec_uevent,
	.release = typec_release,
};
/* --------------------------------------- */
/* Driver callbacks to report role updates */
/* device_find_child() filter: matches the partner device of a port. */
static int partner_match(struct device *dev, void *data)
{
	return is_typec_partner(dev);
}
/**
 * typec_set_data_role - Report data role change
 * @port: The USB Type-C Port where the role was changed
 * @role: The new data role
 *
 * This routine is used by the port drivers to report data role changes.
 */
void typec_set_data_role(struct typec_port *port, enum typec_data_role role)
{
	struct device *partner_dev;
	if (port->data_role == role)
		return;
	port->data_role = role;
	sysfs_notify(&port->dev.kobj, NULL, "data_role");
	kobject_uevent(&port->dev.kobj, KOBJ_CHANGE);
	/* The reported partner product type may depend on the data role. */
	partner_dev = device_find_child(&port->dev, NULL, partner_match);
	if (!partner_dev)
		return;
	if (to_typec_partner(partner_dev)->identity)
		typec_product_type_notify(partner_dev);
	put_device(partner_dev);
}
EXPORT_SYMBOL_GPL(typec_set_data_role);
/**
 * typec_set_pwr_role - Report power role change
 * @port: The USB Type-C Port where the role was changed
 * @role: The new power role
 *
 * This routine is used by the port drivers to report power role changes.
 */
void typec_set_pwr_role(struct typec_port *port, enum typec_role role)
{
	if (port->pwr_role == role)
		return;
	port->pwr_role = role;
	sysfs_notify(&port->dev.kobj, NULL, "power_role");
	kobject_uevent(&port->dev.kobj, KOBJ_CHANGE);
}
EXPORT_SYMBOL_GPL(typec_set_pwr_role);
/**
 * typec_set_vconn_role - Report VCONN source change
 * @port: The USB Type-C Port which VCONN role changed
 * @role: Source when @port is sourcing VCONN, or Sink when it's not
 *
 * This routine is used by the port drivers to report when the VCONN
 * source changes.
 */
void typec_set_vconn_role(struct typec_port *port, enum typec_role role)
{
	if (port->vconn_role == role)
		return;
	port->vconn_role = role;
	sysfs_notify(&port->dev.kobj, NULL, "vconn_source");
	kobject_uevent(&port->dev.kobj, KOBJ_CHANGE);
}
EXPORT_SYMBOL_GPL(typec_set_vconn_role);
/**
 * typec_set_pwr_opmode - Report changed power operation mode
 * @port: The USB Type-C Port where the mode was changed
 * @opmode: New power operation mode
 *
 * This routine is used by the port drivers to report changed power operation
 * mode in @port. The modes are USB (default), 1.5A, 3.0A as defined in USB
 * Type-C specification, and "USB Power Delivery" when the power levels are
 * negotiated with methods defined in USB Power Delivery specification.
 */
void typec_set_pwr_opmode(struct typec_port *port,
			  enum typec_pwr_opmode opmode)
{
	struct device *partner_dev;
	if (port->pwr_opmode == opmode)
		return;
	port->pwr_opmode = opmode;
	sysfs_notify(&port->dev.kobj, NULL, "power_operation_mode");
	kobject_uevent(&port->dev.kobj, KOBJ_CHANGE);
	partner_dev = device_find_child(&port->dev, NULL, partner_match);
	if (partner_dev) {
		struct typec_partner *partner = to_typec_partner(partner_dev);
		/* A PD contract implies the partner supports USB PD. */
		if (opmode == TYPEC_PWR_MODE_PD && !partner->usb_pd) {
			partner->usb_pd = 1;
			sysfs_notify(&partner_dev->kobj, NULL,
				     "supports_usb_power_delivery");
			kobject_uevent(&partner_dev->kobj, KOBJ_CHANGE);
		}
		put_device(partner_dev);
	}
}
EXPORT_SYMBOL_GPL(typec_set_pwr_opmode);
/**
 * typec_find_pwr_opmode - Get the typec power operation mode capability
 * @name: power operation mode string
 *
 * This routine is used to find the typec_pwr_opmode by its string @name.
 *
 * Returns typec_pwr_opmode if success, otherwise negative error code.
 */
int typec_find_pwr_opmode(const char *name)
{
	/* match_string() returns the array index of @name, or -EINVAL. */
	return match_string(typec_pwr_opmodes,
			    ARRAY_SIZE(typec_pwr_opmodes), name);
}
EXPORT_SYMBOL_GPL(typec_find_pwr_opmode);
/**
 * typec_find_orientation - Convert orientation string to enum typec_orientation
 * @name: Orientation string
 *
 * This routine is used to find the typec_orientation by its string name @name.
 *
 * Returns the orientation value on success, otherwise negative error code.
 */
int typec_find_orientation(const char *name)
{
	return match_string(typec_orientations, ARRAY_SIZE(typec_orientations),
			    name);
}
EXPORT_SYMBOL_GPL(typec_find_orientation);
/**
 * typec_find_port_power_role - Get the typec port power capability
 * @name: port power capability string
 *
 * This routine is used to find the typec_port_type by its string name.
 *
 * Returns typec_port_type if success, otherwise negative error code.
 */
int typec_find_port_power_role(const char *name)
{
	return match_string(typec_port_power_roles,
			    ARRAY_SIZE(typec_port_power_roles), name);
}
EXPORT_SYMBOL_GPL(typec_find_port_power_role);
/**
 * typec_find_power_role - Find the typec one specific power role
 * @name: power role string
 *
 * This routine is used to find the typec_role by its string name.
 *
 * Returns typec_role if success, otherwise negative error code.
 */
int typec_find_power_role(const char *name)
{
	return match_string(typec_roles, ARRAY_SIZE(typec_roles), name);
}
EXPORT_SYMBOL_GPL(typec_find_power_role);
/**
 * typec_find_port_data_role - Get the typec port data capability
 * @name: port data capability string
 *
 * This routine is used to find the typec_port_data by its string name.
 *
 * Returns typec_port_data if success, otherwise negative error code.
 */
int typec_find_port_data_role(const char *name)
{
	return match_string(typec_port_data_roles,
			    ARRAY_SIZE(typec_port_data_roles), name);
}
EXPORT_SYMBOL_GPL(typec_find_port_data_role);
/* ------------------------------------------ */
/* API for Multiplexer/DeMultiplexer Switches */
/**
 * typec_set_orientation - Set USB Type-C cable plug orientation
 * @port: USB Type-C Port
 * @orientation: USB Type-C cable plug orientation
 *
 * Set cable plug orientation for @port.
 */
int typec_set_orientation(struct typec_port *port,
			  enum typec_orientation orientation)
{
	int ret;
	/* Program the orientation switch first; only record on success. */
	ret = typec_switch_set(port->sw, orientation);
	if (ret)
		return ret;
	port->orientation = orientation;
	sysfs_notify(&port->dev.kobj, NULL, "orientation");
	kobject_uevent(&port->dev.kobj, KOBJ_CHANGE);
	return 0;
}
EXPORT_SYMBOL_GPL(typec_set_orientation);
/**
 * typec_get_orientation - Get USB Type-C cable plug orientation
 * @port: USB Type-C Port
 *
 * Get current cable plug orientation for @port.
 */
enum typec_orientation typec_get_orientation(struct typec_port *port)
{
	return port->orientation;
}
EXPORT_SYMBOL_GPL(typec_get_orientation);
/**
* typec_set_mode - Set mode of operation for USB Type-C connector
* @port: USB Type-C connector
* @mode: Accessory Mode, USB Operation or Safe State
*
* Configure @port for Accessory Mode @mode. This function will configure the
* muxes needed for @mode.
*/
int typec_set_mode(struct typec_port *port, int mode)
{
struct typec_mux_state state = { };
state.mode = mode;
return typec_mux_set(port->mux, &state);
}
EXPORT_SYMBOL_GPL(typec_set_mode);
/* --------------------------------------- */
/**
 * typec_get_negotiated_svdm_version - Get negotiated SVDM Version
 * @port: USB Type-C Port.
 *
 * Get the negotiated SVDM Version. The Version is set to the port default
 * value stored in typec_capability on partner registration, and updated after
 * a successful Discover Identity if the negotiated value is less than the
 * default value.
 *
 * Returns usb_pd_svdm_ver if the partner has been registered otherwise -ENODEV.
 */
int typec_get_negotiated_svdm_version(struct typec_port *port)
{
	enum usb_pd_svdm_ver svdm_version;
	struct device *partner_dev;
	partner_dev = device_find_child(&port->dev, NULL, partner_match);
	if (!partner_dev)
		return -ENODEV;
	/* Copy the value out before dropping the partner reference. */
	svdm_version = to_typec_partner(partner_dev)->svdm_version;
	put_device(partner_dev);
	return svdm_version;
}
EXPORT_SYMBOL_GPL(typec_get_negotiated_svdm_version);
/**
 * typec_get_drvdata - Return private driver data pointer
 * @port: USB Type-C port
 */
void *typec_get_drvdata(struct typec_port *port)
{
	return dev_get_drvdata(&port->dev);
}
EXPORT_SYMBOL_GPL(typec_get_drvdata);
/**
 * typec_get_fw_cap - Fill in a typec_capability from firmware properties
 * @cap: The capability structure to populate
 * @fwnode: fwnode of the USB Type-C connector
 *
 * Parses the mandatory "power-role" property and the optional "data-role"
 * and "try-power-role" properties of @fwnode into @cap.
 *
 * Returns 0 on success, otherwise a negative error code from property
 * reading or string matching.
 */
int typec_get_fw_cap(struct typec_capability *cap,
		     struct fwnode_handle *fwnode)
{
	const char *cap_str;
	int ret;
	cap->fwnode = fwnode;
	/* "power-role" is mandatory. */
	ret = fwnode_property_read_string(fwnode, "power-role", &cap_str);
	if (ret < 0)
		return ret;
	ret = typec_find_port_power_role(cap_str);
	if (ret < 0)
		return ret;
	cap->type = ret;
	/* USB data support is optional */
	ret = fwnode_property_read_string(fwnode, "data-role", &cap_str);
	if (ret == 0) {
		ret = typec_find_port_data_role(cap_str);
		if (ret < 0)
			return ret;
		cap->data = ret;
	}
	/* Get the preferred power role for a DRP */
	if (cap->type == TYPEC_PORT_DRP) {
		cap->prefer_role = TYPEC_NO_PREFERRED_ROLE;
		ret = fwnode_property_read_string(fwnode, "try-power-role", &cap_str);
		if (ret == 0) {
			ret = typec_find_power_role(cap_str);
			if (ret < 0)
				return ret;
			cap->prefer_role = ret;
		}
	}
	return 0;
}
EXPORT_SYMBOL_GPL(typec_get_fw_cap);
/**
 * typec_port_register_altmode - Register USB Type-C Port Alternate Mode
 * @port: USB Type-C Port that supports the alternate mode
 * @desc: Description of the alternate mode
 *
 * This routine is used to register an alternate mode that @port is capable of
 * supporting.
 *
 * Returns handle to the alternate mode on success or ERR_PTR on failure.
 */
struct typec_altmode *
typec_port_register_altmode(struct typec_port *port,
			    const struct typec_altmode_desc *desc)
{
	struct typec_altmode *adev;
	struct typec_mux *mux;
	struct typec_retimer *retimer;
	/* Take mux and retimer references for this alternate mode. */
	mux = typec_mux_get(&port->dev);
	if (IS_ERR(mux))
		return ERR_CAST(mux);
	retimer = typec_retimer_get(&port->dev);
	if (IS_ERR(retimer)) {
		typec_mux_put(mux);
		return ERR_CAST(retimer);
	}
	adev = typec_register_altmode(&port->dev, desc);
	if (IS_ERR(adev)) {
		/* Registration failed: release both references. */
		typec_retimer_put(retimer);
		typec_mux_put(mux);
	} else {
		/* Ownership of the references moves to the altmode. */
		to_altmode(adev)->mux = mux;
		to_altmode(adev)->retimer = retimer;
	}
	return adev;
}
EXPORT_SYMBOL_GPL(typec_port_register_altmode);
/*
 * typec_port_register_altmodes - Register altmodes described in firmware
 * @port: USB Type-C Port
 * @ops: Operations assigned to every registered altmode
 * @drvdata: Driver data assigned to every registered altmode
 * @altmodes: Array the registered handles are stored in
 * @n: Capacity of @altmodes
 *
 * Walks the "altmodes" firmware child node of the port and registers one
 * alternate mode per child that carries valid "svid" and "vdo" properties.
 * Children with missing properties, or ones that do not fit in @altmodes,
 * are skipped with an error message.
 */
void typec_port_register_altmodes(struct typec_port *port,
	const struct typec_altmode_ops *ops, void *drvdata,
	struct typec_altmode **altmodes, size_t n)
{
	struct fwnode_handle *altmodes_node, *child;
	struct typec_altmode *alt;
	size_t index = 0;
	u32 svid, vdo;
	int ret;

	altmodes_node = device_get_named_child_node(&port->dev, "altmodes");
	if (!altmodes_node)
		return; /* No altmodes specified */

	fwnode_for_each_child_node(altmodes_node, child) {
		/*
		 * Zero-initialize per child: typec_altmode_desc has more
		 * fields (e.g. roles) than are filled in below, and the
		 * old single stack variable left them uninitialized.
		 */
		struct typec_altmode_desc desc = { };

		ret = fwnode_property_read_u32(child, "svid", &svid);
		if (ret) {
			dev_err(&port->dev, "Error reading svid for altmode %s\n",
				fwnode_get_name(child));
			continue;
		}
		ret = fwnode_property_read_u32(child, "vdo", &vdo);
		if (ret) {
			dev_err(&port->dev, "Error reading vdo for altmode %s\n",
				fwnode_get_name(child));
			continue;
		}
		if (index >= n) {
			dev_err(&port->dev, "Error not enough space for altmode %s\n",
				fwnode_get_name(child));
			continue;
		}
		desc.svid = svid;
		desc.vdo = vdo;
		desc.mode = index + 1;
		alt = typec_port_register_altmode(port, &desc);
		if (IS_ERR(alt)) {
			dev_err(&port->dev, "Error registering altmode %s\n",
				fwnode_get_name(child));
			continue;
		}
		alt->ops = ops;
		typec_altmode_set_drvdata(alt, drvdata);
		altmodes[index] = alt;
		index++;
	}
	/*
	 * device_get_named_child_node() returns the node with an elevated
	 * reference count; the original code never dropped it.
	 */
	fwnode_handle_put(altmodes_node);
}
EXPORT_SYMBOL_GPL(typec_port_register_altmodes);
/**
 * typec_register_port - Register a USB Type-C Port
 * @parent: Parent device
 * @cap: Description of the port
 *
 * Registers a device for USB Type-C Port described in @cap.
 *
 * Returns handle to the port on success or ERR_PTR on failure.
 */
struct typec_port *typec_register_port(struct device *parent,
				       const struct typec_capability *cap)
{
	struct typec_port *port;
	int ret;
	int id;
	port = kzalloc(sizeof(*port), GFP_KERNEL);
	if (!port)
		return ERR_PTR(-ENOMEM);
	/* Reserve a unique index for the "portN" device name. */
	id = ida_simple_get(&typec_index_ida, 0, 0, GFP_KERNEL);
	if (id < 0) {
		kfree(port);
		return ERR_PTR(id);
	}
	/* Derive the initial power and VCONN roles from the port type. */
	switch (cap->type) {
	case TYPEC_PORT_SRC:
		port->pwr_role = TYPEC_SOURCE;
		port->vconn_role = TYPEC_SOURCE;
		break;
	case TYPEC_PORT_SNK:
		port->pwr_role = TYPEC_SINK;
		port->vconn_role = TYPEC_SINK;
		break;
	case TYPEC_PORT_DRP:
		if (cap->prefer_role != TYPEC_NO_PREFERRED_ROLE)
			port->pwr_role = cap->prefer_role;
		else
			port->pwr_role = TYPEC_SINK;
		break;
	}
	/* Derive the initial data role from the data capability. */
	switch (cap->data) {
	case TYPEC_PORT_DFP:
		port->data_role = TYPEC_HOST;
		break;
	case TYPEC_PORT_UFP:
		port->data_role = TYPEC_DEVICE;
		break;
	case TYPEC_PORT_DRD:
		if (cap->prefer_role == TYPEC_SOURCE)
			port->data_role = TYPEC_HOST;
		else
			port->data_role = TYPEC_DEVICE;
		break;
	}
	ida_init(&port->mode_ids);
	mutex_init(&port->port_type_lock);
	port->id = id;
	port->ops = cap->ops;
	port->port_type = cap->type;
	port->prefer_role = cap->prefer_role;
	device_initialize(&port->dev);
	port->dev.class = &typec_class;
	port->dev.parent = parent;
	port->dev.fwnode = cap->fwnode;
	port->dev.type = &typec_port_dev_type;
	dev_set_name(&port->dev, "port%d", id);
	dev_set_drvdata(&port->dev, cap->driver_data);
	/*
	 * From here on error unwinding goes through put_device(), which
	 * ends up in typec_release() and frees id, cap copy and port.
	 */
	port->cap = kmemdup(cap, sizeof(*cap), GFP_KERNEL);
	if (!port->cap) {
		put_device(&port->dev);
		return ERR_PTR(-ENOMEM);
	}
	/* Orientation switch, mux and retimer references for the port. */
	port->sw = typec_switch_get(&port->dev);
	if (IS_ERR(port->sw)) {
		ret = PTR_ERR(port->sw);
		put_device(&port->dev);
		return ERR_PTR(ret);
	}
	port->mux = typec_mux_get(&port->dev);
	if (IS_ERR(port->mux)) {
		ret = PTR_ERR(port->mux);
		put_device(&port->dev);
		return ERR_PTR(ret);
	}
	port->retimer = typec_retimer_get(&port->dev);
	if (IS_ERR(port->retimer)) {
		ret = PTR_ERR(port->retimer);
		put_device(&port->dev);
		return ERR_PTR(ret);
	}
	port->pd = cap->pd;
	ret = device_add(&port->dev);
	if (ret) {
		dev_err(parent, "failed to register port (%d)\n", ret);
		put_device(&port->dev);
		return ERR_PTR(ret);
	}
	ret = usb_power_delivery_link_device(port->pd, &port->dev);
	if (ret) {
		dev_err(&port->dev, "failed to link pd\n");
		device_unregister(&port->dev);
		return ERR_PTR(ret);
	}
	/* Symlink failures are reported but not fatal. */
	ret = typec_link_ports(port);
	if (ret)
		dev_warn(&port->dev, "failed to create symlinks (%d)\n", ret);
	return port;
}
EXPORT_SYMBOL_GPL(typec_register_port);
/**
 * typec_unregister_port - Unregister a USB Type-C Port
 * @port: The port to be unregistered
 *
 * Unregister device created with typec_register_port().
 */
void typec_unregister_port(struct typec_port *port)
{
	/* NULL/ERR_PTR tolerant so drivers can call this unconditionally. */
	if (!IS_ERR_OR_NULL(port)) {
		typec_unlink_ports(port);
		typec_port_set_usb_power_delivery(port, NULL);
		device_unregister(&port->dev);
	}
}
EXPORT_SYMBOL_GPL(typec_unregister_port);
/*
 * Register the Type-C bus and classes. Unwinding on failure is in the
 * exact reverse order of registration.
 */
static int __init typec_init(void)
{
	int ret;
	ret = bus_register(&typec_bus);
	if (ret)
		return ret;
	ret = class_register(&typec_mux_class);
	if (ret)
		goto err_unregister_bus;
	ret = class_register(&retimer_class);
	if (ret)
		goto err_unregister_mux_class;
	ret = class_register(&typec_class);
	if (ret)
		goto err_unregister_retimer_class;
	ret = usb_power_delivery_init();
	if (ret)
		goto err_unregister_class;
	return 0;
err_unregister_class:
	class_unregister(&typec_class);
err_unregister_retimer_class:
	class_unregister(&retimer_class);
err_unregister_mux_class:
	class_unregister(&typec_mux_class);
err_unregister_bus:
	bus_unregister(&typec_bus);
	return ret;
}
subsys_initcall(typec_init);
/* Module unload: tear everything down in reverse order of typec_init(). */
static void __exit typec_exit(void)
{
	usb_power_delivery_exit();
	class_unregister(&typec_class);
	ida_destroy(&typec_index_ida);
	bus_unregister(&typec_bus);
	class_unregister(&typec_mux_class);
	class_unregister(&retimer_class);
}
module_exit(typec_exit);
MODULE_AUTHOR("Heikki Krogerus <[email protected]>");
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("USB Type-C Connector Class");
| linux-master | drivers/usb/typec/class.c |
// SPDX-License-Identifier: GPL-2.0
/*
* USB Type-C Connector Class Port Mapping Utility
*
* Copyright (C) 2021, Intel Corporation
* Author: Heikki Krogerus <[email protected]>
*/
#include <linux/acpi.h>
#include <linux/component.h>
#include "class.h"
/* Component master bind/unbind: simply (un)bind all matched components. */
static int typec_aggregate_bind(struct device *dev)
{
	return component_bind_all(dev, NULL);
}
static void typec_aggregate_unbind(struct device *dev)
{
	component_unbind_all(dev, NULL);
}
static const struct component_master_ops typec_aggregate_ops = {
	.bind = typec_aggregate_bind,
	.unbind = typec_aggregate_unbind,
};
/* Context passed through the acpi_bus_for_each_dev() walk below. */
struct each_port_arg {
	struct typec_port *port;
	struct component_match *match;
};
/* Component compare callback: match a device by its fwnode. */
static int typec_port_compare(struct device *dev, void *fwnode)
{
	return device_match_fwnode(dev, fwnode);
}
/*
 * Collect every ACPI device that shares its physical location (_PLD CRC)
 * with the connector into the component match list. Always returns 0 so
 * the bus walk visits every device.
 */
static int typec_port_match(struct device *dev, void *data)
{
	struct acpi_device *adev = to_acpi_device(dev);
	struct each_port_arg *arg = data;
	struct acpi_device *con_adev;
	con_adev = ACPI_COMPANION(&arg->port->dev);
	/* Skip the connector itself. */
	if (con_adev == adev)
		return 0;
	if (con_adev->pld_crc == adev->pld_crc)
		component_match_add(&arg->port->dev, &arg->match, typec_port_compare,
				    acpi_fwnode_handle(adev));
	return 0;
}
/*
 * typec_link_ports - Bind devices sharing the connector's location
 * @con: The Type-C connector (port)
 *
 * On ACPI systems, find every device with the same _PLD as the connector
 * and register a component master so they get bound to the connector.
 * Returns 0 when there is nothing to do (no ACPI companion or no match).
 */
int typec_link_ports(struct typec_port *con)
{
	struct each_port_arg arg = { .port = con, .match = NULL };
	if (!has_acpi_companion(&con->dev))
		return 0;
	acpi_bus_for_each_dev(typec_port_match, &arg);
	if (!arg.match)
		return 0;
	/*
	 * REVISIT: Now each connector can have only a single component master.
	 * So far only the USB ports connected to the USB Type-C connector share
	 * the _PLD with it, but if there one day is something else (like maybe
	 * the DisplayPort ACPI device object) that also shares the _PLD with
	 * the connector, every one of those needs to have its own component
	 * master, because each different type of component needs to be bind to
	 * the connector independently of the other components. That requires
	 * improvements to the component framework. Right now you can only have
	 * one master per device.
	 */
	return component_master_add_with_match(&con->dev, &typec_aggregate_ops, arg.match);
}
/* Undo typec_link_ports() when the connector goes away. */
void typec_unlink_ports(struct typec_port *con)
{
	if (has_acpi_companion(&con->dev))
		component_master_del(&con->dev, &typec_aggregate_ops);
}
| linux-master | drivers/usb/typec/port-mapper.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* TI HD3SS3220 Type-C DRP Port Controller Driver
*
* Copyright (C) 2019 Renesas Electronics Corp.
*/
#include <linux/module.h>
#include <linux/i2c.h>
#include <linux/usb/role.h>
#include <linux/irqreturn.h>
#include <linux/interrupt.h>
#include <linux/regmap.h>
#include <linux/slab.h>
#include <linux/usb/typec.h>
#include <linux/delay.h>
#include <linux/workqueue.h>
#define HD3SS3220_REG_CN_STAT_CTRL 0x09
#define HD3SS3220_REG_GEN_CTRL 0x0A
#define HD3SS3220_REG_DEV_REV 0xA0
/* Register HD3SS3220_REG_CN_STAT_CTRL*/
#define HD3SS3220_REG_CN_STAT_CTRL_ATTACHED_STATE_MASK (BIT(7) | BIT(6))
#define HD3SS3220_REG_CN_STAT_CTRL_AS_DFP BIT(6)
#define HD3SS3220_REG_CN_STAT_CTRL_AS_UFP BIT(7)
#define HD3SS3220_REG_CN_STAT_CTRL_TO_ACCESSORY (BIT(7) | BIT(6))
#define HD3SS3220_REG_CN_STAT_CTRL_INT_STATUS BIT(4)
/* Register HD3SS3220_REG_GEN_CTRL*/
#define HD3SS3220_REG_GEN_CTRL_SRC_PREF_MASK (BIT(2) | BIT(1))
#define HD3SS3220_REG_GEN_CTRL_SRC_PREF_DRP_DEFAULT 0x00
#define HD3SS3220_REG_GEN_CTRL_SRC_PREF_DRP_TRY_SNK BIT(1)
#define HD3SS3220_REG_GEN_CTRL_SRC_PREF_DRP_TRY_SRC (BIT(2) | BIT(1))
/* Per I2C client driver state. */
struct hd3ss3220 {
	struct device *dev;
	struct regmap *regmap;
	struct usb_role_switch *role_sw;
	struct typec_port *port;
	struct delayed_work output_poll_work;	/* used only in polling mode */
	enum usb_role role_state;		/* last role pushed upstream */
	bool poll;				/* no IRQ wired up; poll instead */
};
/* Program the DRP source preference (default / Try.SNK / Try.SRC). */
static int hd3ss3220_set_source_pref(struct hd3ss3220 *hd3ss3220, int src_pref)
{
	return regmap_update_bits(hd3ss3220->regmap, HD3SS3220_REG_GEN_CTRL,
				  HD3SS3220_REG_GEN_CTRL_SRC_PREF_MASK,
				  src_pref);
}
/*
 * Read CN_STAT_CTRL and translate the attached state to the USB role
 * framework: attached.DFP -> host, attached.UFP -> device, else none.
 *
 * NOTE(review): on regmap_read() failure the negative errno is returned
 * through enum usb_role and ends up being handed to
 * usb_role_switch_set_role() by the callers -- confirm this is intended.
 */
static enum usb_role hd3ss3220_get_attached_state(struct hd3ss3220 *hd3ss3220)
{
	unsigned int reg_val;
	enum usb_role attached_state;
	int ret;
	ret = regmap_read(hd3ss3220->regmap, HD3SS3220_REG_CN_STAT_CTRL,
			  &reg_val);
	if (ret < 0)
		return ret;
	switch (reg_val & HD3SS3220_REG_CN_STAT_CTRL_ATTACHED_STATE_MASK) {
	case HD3SS3220_REG_CN_STAT_CTRL_AS_DFP:
		attached_state = USB_ROLE_HOST;
		break;
	case HD3SS3220_REG_CN_STAT_CTRL_AS_UFP:
		attached_state = USB_ROLE_DEVICE;
		break;
	default:
		attached_state = USB_ROLE_NONE;
		break;
	}
	return attached_state;
}
/* typec_operations: handle a data role change request (e.g. from sysfs). */
static int hd3ss3220_dr_set(struct typec_port *port, enum typec_data_role role)
{
	struct hd3ss3220 *hd3ss3220 = typec_get_drvdata(port);
	enum usb_role role_val;
	int pref, ret = 0;
	/* Map the requested role to a matching DRP try preference. */
	if (role == TYPEC_HOST) {
		role_val = USB_ROLE_HOST;
		pref = HD3SS3220_REG_GEN_CTRL_SRC_PREF_DRP_TRY_SRC;
	} else {
		role_val = USB_ROLE_DEVICE;
		pref = HD3SS3220_REG_GEN_CTRL_SRC_PREF_DRP_TRY_SNK;
	}
	ret = hd3ss3220_set_source_pref(hd3ss3220, pref);
	/* Short delay after updating the preference -- presumably to let
	 * the chip settle; confirm against the HD3SS3220 datasheet. */
	usleep_range(10, 100);
	/* The new role is propagated even if setting the preference failed. */
	usb_role_switch_set_role(hd3ss3220->role_sw, role_val);
	typec_set_data_role(hd3ss3220->port, role);
	return ret;
}
static const struct typec_operations hd3ss3220_ops = {
	.dr_set = hd3ss3220_dr_set
};
/* Sync the chip's current attached state to the role switch and typec class. */
static void hd3ss3220_set_role(struct hd3ss3220 *hd3ss3220)
{
	enum usb_role role_state = hd3ss3220_get_attached_state(hd3ss3220);
	usb_role_switch_set_role(hd3ss3220->role_sw, role_state);
	/* Nothing attached: restore the default DRP source preference. */
	if (role_state == USB_ROLE_NONE)
		hd3ss3220_set_source_pref(hd3ss3220,
					  HD3SS3220_REG_GEN_CTRL_SRC_PREF_DRP_DEFAULT);
	switch (role_state) {
	case USB_ROLE_HOST:
		typec_set_data_role(hd3ss3220->port, TYPEC_HOST);
		break;
	case USB_ROLE_DEVICE:
		typec_set_data_role(hd3ss3220->port, TYPEC_DEVICE);
		break;
	default:
		break;
	}
	/* Remember the last reported role for the polling comparison. */
	hd3ss3220->role_state = role_state;
}
/*
 * Polling fallback when no interrupt line is available: check the
 * attached state roughly once a second and report changes.
 */
static void output_poll_execute(struct work_struct *work)
{
	struct delayed_work *delayed_work = to_delayed_work(work);
	struct hd3ss3220 *hd3ss3220 = container_of(delayed_work,
						   struct hd3ss3220,
						   output_poll_work);
	enum usb_role role_state = hd3ss3220_get_attached_state(hd3ss3220);
	if (hd3ss3220->role_state != role_state)
		hd3ss3220_set_role(hd3ss3220);
	/* Re-arm for the next poll interval. */
	schedule_delayed_work(&hd3ss3220->output_poll_work, HZ);
}
/* Handle a connection state change interrupt and acknowledge it. */
static irqreturn_t hd3ss3220_irq(struct hd3ss3220 *hd3ss3220)
{
	int err;
	hd3ss3220_set_role(hd3ss3220);
	/* Write INT_STATUS back to itself to clear the pending interrupt. */
	err = regmap_write_bits(hd3ss3220->regmap, HD3SS3220_REG_CN_STAT_CTRL,
				HD3SS3220_REG_CN_STAT_CTRL_INT_STATUS,
				HD3SS3220_REG_CN_STAT_CTRL_INT_STATUS);
	if (err < 0)
		return IRQ_NONE;
	return IRQ_HANDLED;
}
/* Threaded IRQ handler; dev_id was registered as &client->dev in probe. */
static irqreturn_t hd3ss3220_irq_handler(int irq, void *data)
{
	struct i2c_client *client = to_i2c_client(data);
	struct hd3ss3220 *hd3ss3220 = i2c_get_clientdata(client);
	return hd3ss3220_irq(hd3ss3220);
}
/* 8-bit registers; DEV_REV (0xA0) is read via SMBus, outside the regmap. */
static const struct regmap_config config = {
	.reg_bits = 8,
	.val_bits = 8,
	.max_register = 0x0A,
};
/*
 * Probe: set up regmap, find the USB role switch through the connector
 * fwnode (either a "connector" child node or an OF graph endpoint),
 * register the Type-C port, clear any stale interrupt and then either
 * request the IRQ or fall back to 1 Hz polling.
 */
static int hd3ss3220_probe(struct i2c_client *client)
{
	struct typec_capability typec_cap = { };
	struct hd3ss3220 *hd3ss3220;
	struct fwnode_handle *connector, *ep;
	int ret;
	unsigned int data;
	hd3ss3220 = devm_kzalloc(&client->dev, sizeof(struct hd3ss3220),
				 GFP_KERNEL);
	if (!hd3ss3220)
		return -ENOMEM;
	i2c_set_clientdata(client, hd3ss3220);
	hd3ss3220->dev = &client->dev;
	hd3ss3220->regmap = devm_regmap_init_i2c(client, &config);
	if (IS_ERR(hd3ss3220->regmap))
		return PTR_ERR(hd3ss3220->regmap);
	hd3ss3220_set_source_pref(hd3ss3220,
				  HD3SS3220_REG_GEN_CTRL_SRC_PREF_DRP_DEFAULT);
	/* For backward compatibility check the connector child node first */
	connector = device_get_named_child_node(hd3ss3220->dev, "connector");
	if (connector) {
		hd3ss3220->role_sw = fwnode_usb_role_switch_get(connector);
	} else {
		/* Fall back to the OF graph: connector is the remote port parent. */
		ep = fwnode_graph_get_next_endpoint(dev_fwnode(hd3ss3220->dev), NULL);
		if (!ep)
			return -ENODEV;
		connector = fwnode_graph_get_remote_port_parent(ep);
		fwnode_handle_put(ep);
		if (!connector)
			return -ENODEV;
		hd3ss3220->role_sw = usb_role_switch_get(hd3ss3220->dev);
	}
	if (IS_ERR(hd3ss3220->role_sw)) {
		ret = PTR_ERR(hd3ss3220->role_sw);
		goto err_put_fwnode;
	}
	typec_cap.prefer_role = TYPEC_NO_PREFERRED_ROLE;
	typec_cap.driver_data = hd3ss3220;
	typec_cap.type = TYPEC_PORT_DRP;
	typec_cap.data = TYPEC_PORT_DRD;
	typec_cap.ops = &hd3ss3220_ops;
	typec_cap.fwnode = connector;
	hd3ss3220->port = typec_register_port(&client->dev, &typec_cap);
	if (IS_ERR(hd3ss3220->port)) {
		ret = PTR_ERR(hd3ss3220->port);
		goto err_put_role;
	}
	/* Report the initial attached state. */
	hd3ss3220_set_role(hd3ss3220);
	/* Clear any interrupt that was already pending at probe time. */
	ret = regmap_read(hd3ss3220->regmap, HD3SS3220_REG_CN_STAT_CTRL, &data);
	if (ret < 0)
		goto err_unreg_port;
	if (data & HD3SS3220_REG_CN_STAT_CTRL_INT_STATUS) {
		ret = regmap_write(hd3ss3220->regmap,
				   HD3SS3220_REG_CN_STAT_CTRL,
				   data | HD3SS3220_REG_CN_STAT_CTRL_INT_STATUS);
		if (ret < 0)
			goto err_unreg_port;
	}
	if (client->irq > 0) {
		ret = devm_request_threaded_irq(&client->dev, client->irq, NULL,
						hd3ss3220_irq_handler,
						IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
						"hd3ss3220", &client->dev);
		if (ret)
			goto err_unreg_port;
	} else {
		/* No IRQ wired up: poll the attached state instead. */
		INIT_DELAYED_WORK(&hd3ss3220->output_poll_work, output_poll_execute);
		hd3ss3220->poll = true;
	}
	/* DEV_REV is above max_register, so read it via plain SMBus. */
	ret = i2c_smbus_read_byte_data(client, HD3SS3220_REG_DEV_REV);
	if (ret < 0)
		goto err_unreg_port;
	fwnode_handle_put(connector);
	if (hd3ss3220->poll)
		schedule_delayed_work(&hd3ss3220->output_poll_work, HZ);
	dev_info(&client->dev, "probed revision=0x%x\n", ret);
	return 0;
err_unreg_port:
	typec_unregister_port(hd3ss3220->port);
err_put_role:
	usb_role_switch_put(hd3ss3220->role_sw);
err_put_fwnode:
	fwnode_handle_put(connector);
	return ret;
}
/* Remove: stop polling (if active) and release port and role switch. */
static void hd3ss3220_remove(struct i2c_client *client)
{
	struct hd3ss3220 *hd3ss3220 = i2c_get_clientdata(client);
	/* Make sure the poll work cannot run after the port is gone. */
	if (hd3ss3220->poll)
		cancel_delayed_work_sync(&hd3ss3220->output_poll_work);
	typec_unregister_port(hd3ss3220->port);
	usb_role_switch_put(hd3ss3220->role_sw);
}
static const struct of_device_id dev_ids[] = {
	{ .compatible = "ti,hd3ss3220"},
	{}
};
MODULE_DEVICE_TABLE(of, dev_ids);
static struct i2c_driver hd3ss3220_driver = {
	.driver = {
		.name = "hd3ss3220",
		.of_match_table = dev_ids,
	},
	.probe = hd3ss3220_probe,
	.remove = hd3ss3220_remove,
};
module_i2c_driver(hd3ss3220_driver);
MODULE_AUTHOR("Biju Das <[email protected]>");
MODULE_DESCRIPTION("TI HD3SS3220 DRP Port Controller Driver");
MODULE_LICENSE("GPL");
| linux-master | drivers/usb/typec/hd3ss3220.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Bus for USB Type-C Alternate Modes
*
* Copyright (C) 2018 Intel Corporation
* Author: Heikki Krogerus <[email protected]>
*/
#include <linux/usb/pd_vdo.h>
#include "bus.h"
#include "class.h"
#include "mux.h"
#include "retimer.h"
static inline int
typec_altmode_set_retimer(struct altmode *alt, unsigned long conf, void *data)
{
struct typec_retimer_state state;
if (!alt->retimer)
return 0;
state.alt = &alt->adev;
state.mode = conf;
state.data = data;
return typec_retimer_set(alt->retimer, &state);
}
static inline int
typec_altmode_set_mux(struct altmode *alt, unsigned long conf, void *data)
{
struct typec_mux_state state;
if (!alt->mux)
return 0;
state.alt = &alt->adev;
state.mode = conf;
state.data = data;
return typec_mux_set(alt->mux, &state);
}
/* Wrapper to set various Type-C port switches together. */
static inline int
typec_altmode_set_switches(struct altmode *alt, unsigned long conf, void *data)
{
	int ret;
	/* Retimer first, then mux. */
	ret = typec_altmode_set_retimer(alt, conf, data);
	if (ret)
		return ret;
	return typec_altmode_set_mux(alt, conf, data);
}
/* Resolve the port side altmode of @adev and program its switches. */
static int typec_altmode_set_state(struct typec_altmode *adev,
				   unsigned long conf, void *data)
{
	bool is_port = is_typec_port(adev->dev.parent);
	struct altmode *port_altmode;
	/* The switches always belong to the port side alternate mode. */
	port_altmode = is_port ? to_altmode(adev) : to_altmode(adev)->partner;
	return typec_altmode_set_switches(port_altmode, conf, data);
}
/* -------------------------------------------------------------------------- */
/* Common API */
/**
 * typec_altmode_notify - Communication between the OS and alternate mode driver
 * @adev: Handle to the alternate mode
 * @conf: Alternate mode specific configuration value
 * @data: Alternate mode specific data
 *
 * The primary purpose for this function is to allow the alternate mode drivers
 * to tell which pin configuration has been negotiated with the partner. That
 * information will then be used for example to configure the muxes.
 * Communication to the other direction is also possible, and low level device
 * drivers can also send notifications to the alternate mode drivers. The actual
 * communication will be specific for every SVID.
 */
int typec_altmode_notify(struct typec_altmode *adev,
			 unsigned long conf, void *data)
{
	bool is_port;
	struct altmode *altmode;
	struct altmode *partner;
	int ret;
	if (!adev)
		return 0;
	altmode = to_altmode(adev);
	if (!altmode->partner)
		return -ENODEV;
	is_port = is_typec_port(adev->dev.parent);
	partner = altmode->partner;
	/* The switches hang off the port side alternate mode. */
	ret = typec_altmode_set_switches(is_port ? altmode : partner, conf, data);
	if (ret)
		return ret;
	/* Forward the notification to the other side, if it has a handler. */
	if (partner->adev.ops && partner->adev.ops->notify)
		return partner->adev.ops->notify(&partner->adev, conf, data);
	return 0;
}
EXPORT_SYMBOL_GPL(typec_altmode_notify);
/**
 * typec_altmode_enter - Enter Mode
 * @adev: The alternate mode
 * @vdo: VDO for the Enter Mode command
 *
 * The alternate mode drivers use this function to enter mode. The port drivers
 * use this to inform the alternate mode drivers that the partner has initiated
 * Enter Mode command. If the alternate mode does not require VDO, @vdo must be
 * NULL.
 */
int typec_altmode_enter(struct typec_altmode *adev, u32 *vdo)
{
	struct altmode *partner;
	struct typec_altmode *pdev;
	int ret;

	/*
	 * Validate @adev before touching it: the partner pointer used to be
	 * resolved before the NULL check. Also guard against a missing
	 * partner, returning -ENODEV like typec_altmode_vdm() does.
	 */
	if (!adev || adev->active)
		return 0;

	partner = to_altmode(adev)->partner;
	if (!partner)
		return -ENODEV;

	pdev = &partner->adev;

	if (!pdev->ops || !pdev->ops->enter)
		return -EOPNOTSUPP;

	/* The port side alternate mode must itself be active (entered). */
	if (is_typec_port(pdev->dev.parent) && !pdev->active)
		return -EPERM;

	/* Moving to USB Safe State */
	ret = typec_altmode_set_state(adev, TYPEC_STATE_SAFE, NULL);
	if (ret)
		return ret;

	/* Enter Mode */
	return pdev->ops->enter(pdev, vdo);
}
EXPORT_SYMBOL_GPL(typec_altmode_enter);
/**
* typec_altmode_exit - Exit Mode
* @adev: The alternate mode
*
* The partner of @adev has initiated Exit Mode command.
*/
int typec_altmode_exit(struct typec_altmode *adev)
{
struct altmode *partner = to_altmode(adev)->partner;
struct typec_altmode *pdev = &partner->adev;
int ret;
if (!adev || !adev->active)
return 0;
if (!pdev->ops || !pdev->ops->exit)
return -EOPNOTSUPP;
/* Moving to USB Safe State */
ret = typec_altmode_set_state(adev, TYPEC_STATE_SAFE, NULL);
if (ret)
return ret;
/* Exit Mode command */
return pdev->ops->exit(pdev);
}
EXPORT_SYMBOL_GPL(typec_altmode_exit);
/**
 * typec_altmode_attention - Attention command
 * @adev: The alternate mode
 * @vdo: VDO for the Attention command
 *
 * Notifies the partner of @adev about Attention command.
 */
int typec_altmode_attention(struct typec_altmode *adev, u32 vdo)
{
	struct altmode *partner = to_altmode(adev)->partner;
	struct typec_altmode *pdev;
	if (!partner)
		return -ENODEV;
	pdev = &partner->adev;
	/* Attention is best effort: no handler is not an error. */
	if (pdev->ops && pdev->ops->attention)
		pdev->ops->attention(pdev, vdo);
	return 0;
}
EXPORT_SYMBOL_GPL(typec_altmode_attention);
/**
 * typec_altmode_vdm - Send Vendor Defined Messages (VDM) to the partner
 * @adev: Alternate mode handle
 * @header: VDM Header
 * @vdo: Array of Vendor Defined Data Objects
 * @count: Number of Data Objects
 *
 * The alternate mode drivers use this function for SVID specific communication
 * with the partner. The port drivers use it to deliver the Structured VDMs
 * received from the partners to the alternate mode drivers.
 */
int typec_altmode_vdm(struct typec_altmode *adev,
		      const u32 header, const u32 *vdo, int count)
{
	struct altmode *alt;
	struct typec_altmode *pdev;

	if (!adev)
		return 0;

	alt = to_altmode(adev);
	if (!alt->partner)
		return -ENODEV;

	pdev = &alt->partner->adev;
	if (!pdev->ops || !pdev->ops->vdm)
		return -EOPNOTSUPP;

	/* Forward the message to the partner side. */
	return pdev->ops->vdm(pdev, header, vdo, count);
}
EXPORT_SYMBOL_GPL(typec_altmode_vdm);
const struct typec_altmode *
typec_altmode_get_partner(struct typec_altmode *adev)
{
if (!adev || !to_altmode(adev)->partner)
return NULL;
return &to_altmode(adev)->partner->adev;
}
EXPORT_SYMBOL_GPL(typec_altmode_get_partner);
/* -------------------------------------------------------------------------- */
/* API for the alternate mode drivers */
/**
 * typec_altmode_get_plug - Find cable plug alternate mode
 * @adev: Handle to partner alternate mode
 * @index: Cable plug index
 *
 * Increment reference count for cable plug alternate mode device. Returns
 * handle to the cable plug alternate mode, or NULL if none is found.
 */
struct typec_altmode *typec_altmode_get_plug(struct typec_altmode *adev,
					     enum typec_plug_index index)
{
	struct altmode *port;

	/* Guard against a NULL handle or a partner-less altmode. */
	if (!adev)
		return NULL;

	port = to_altmode(adev)->partner;
	if (!port || !port->plug[index])
		return NULL;

	/* Take a reference; released with typec_altmode_put_plug(). */
	get_device(&port->plug[index]->adev.dev);

	return &port->plug[index]->adev;
}
EXPORT_SYMBOL_GPL(typec_altmode_get_plug);
/**
 * typec_altmode_put_plug - Decrement cable plug alternate mode reference count
 * @plug: Handle to the cable plug alternate mode
 */
void typec_altmode_put_plug(struct typec_altmode *plug)
{
	if (!plug)
		return;

	put_device(&plug->dev);
}
EXPORT_SYMBOL_GPL(typec_altmode_put_plug);
int __typec_altmode_register_driver(struct typec_altmode_driver *drv,
struct module *module)
{
if (!drv->probe)
return -EINVAL;
drv->driver.owner = module;
drv->driver.bus = &typec_bus;
return driver_register(&drv->driver);
}
EXPORT_SYMBOL_GPL(__typec_altmode_register_driver);
/**
 * typec_altmode_unregister_driver - Unregister an alternate mode driver
 * @drv: The driver that was registered with typec_altmode_register_driver()
 */
void typec_altmode_unregister_driver(struct typec_altmode_driver *drv)
{
driver_unregister(&drv->driver);
}
EXPORT_SYMBOL_GPL(typec_altmode_unregister_driver);
/* -------------------------------------------------------------------------- */
/* API for the port drivers */
/**
 * typec_match_altmode - Match SVID and mode to an array of alternate modes
 * @altmodes: Array of alternate modes
 * @n: Number of elements in the array, or -1 for NULL terminated arrays
 * @svid: Standard or Vendor ID to match with
 * @mode: Mode to match with
 *
 * Return pointer to an alternate mode with SVID matching @svid, or NULL when no
 * match is found.
 */
struct typec_altmode *typec_match_altmode(struct typec_altmode **altmodes,
					  size_t n, u16 svid, u8 mode)
{
	/*
	 * Use size_t for the index to match @n: @n may legitimately be
	 * (size_t)-1 for NULL-terminated arrays, and a signed index would
	 * trigger a signed/unsigned comparison.
	 */
	size_t i;

	for (i = 0; i < n; i++) {
		if (!altmodes[i])
			break;
		if (altmodes[i]->svid == svid && altmodes[i]->mode == mode)
			return altmodes[i];
	}

	return NULL;
}
EXPORT_SYMBOL_GPL(typec_match_altmode);
/* -------------------------------------------------------------------------- */
/* sysfs "description" attribute: the human readable altmode description. */
static ssize_t
description_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct typec_altmode *alt = to_typec_altmode(dev);

	/* sysfs_emit() is the required, bounds-checked helper for show(). */
	return sysfs_emit(buf, "%s\n", alt->desc ? alt->desc : "");
}
static DEVICE_ATTR_RO(description);
/* Default sysfs attributes exposed by every altmode device on the bus. */
static struct attribute *typec_attrs[] = {
&dev_attr_description.attr,
NULL
};
ATTRIBUTE_GROUPS(typec);
/* Bus match: does @driver's id table cover @dev's SVID and mode? */
static int typec_match(struct device *dev, struct device_driver *driver)
{
	struct typec_altmode *altmode = to_typec_altmode(dev);
	struct typec_altmode_driver *drv = to_altmode_driver(driver);
	const struct typec_device_id *id;

	/* The id table is terminated by an entry with svid == 0. */
	for (id = drv->id_table; id->svid; id++) {
		if (id->svid != altmode->svid)
			continue;
		if (id->mode == TYPEC_ANY_MODE || id->mode == altmode->mode)
			return 1;
	}

	return 0;
}
/* Bus uevent: publish SVID, MODE and a MODALIAS for module autoloading. */
static int typec_uevent(const struct device *dev, struct kobj_uevent_env *env)
{
	const struct typec_altmode *altmode = to_typec_altmode(dev);
	int ret;

	ret = add_uevent_var(env, "SVID=%04X", altmode->svid);
	if (ret)
		return -ENOMEM;

	ret = add_uevent_var(env, "MODE=%u", altmode->mode);
	if (ret)
		return -ENOMEM;

	return add_uevent_var(env, "MODALIAS=typec:id%04Xm%02X",
			      altmode->svid, altmode->mode);
}
/* Create the "port"/"partner" sysfs cross-links between the two altmodes. */
static int typec_altmode_create_links(struct altmode *alt)
{
	struct device *dev = &alt->adev.dev;
	struct device *port_dev = &alt->partner->adev.dev;
	int ret;

	/* altmode -> port */
	ret = sysfs_create_link(&dev->kobj, &port_dev->kobj, "port");
	if (ret)
		return ret;

	/* port -> altmode; roll back the first link on failure. */
	ret = sysfs_create_link(&port_dev->kobj, &dev->kobj, "partner");
	if (ret)
		sysfs_remove_link(&dev->kobj, "port");

	return ret;
}
static void typec_altmode_remove_links(struct altmode *alt)
{
sysfs_remove_link(&alt->partner->adev.dev.kobj, "partner");
sysfs_remove_link(&alt->adev.dev.kobj, "port");
}
/* Bus probe: set up sysfs links, then hand the device to the driver. */
static int typec_probe(struct device *dev)
{
	struct typec_altmode *adev = to_typec_altmode(dev);
	struct typec_altmode_driver *drv = to_altmode_driver(dev->driver);
	struct altmode *altmode = to_altmode(adev);
	int ret;

	/* Fail if the port does not support the alternate mode. */
	if (!altmode->partner)
		return -ENODEV;

	ret = typec_altmode_create_links(altmode);
	if (ret) {
		dev_warn(dev, "failed to create symlinks\n");
		return ret;
	}

	ret = drv->probe(adev);
	if (ret)
		typec_altmode_remove_links(altmode);

	return ret;
}
/*
 * Bus remove: tear down the sysfs links, let the driver clean up, and make
 * sure the mux is left in USB Safe State if the mode was still active.
 */
static void typec_remove(struct device *dev)
{
struct typec_altmode_driver *drv = to_altmode_driver(dev->driver);
struct typec_altmode *adev = to_typec_altmode(dev);
struct altmode *altmode = to_altmode(adev);
typec_altmode_remove_links(altmode);
if (drv->remove)
drv->remove(to_typec_altmode(dev));
/* The driver should have deactivated the mode; force a safe state if not. */
if (adev->active) {
WARN_ON(typec_altmode_set_state(adev, TYPEC_STATE_SAFE, NULL));
typec_altmode_update_active(adev, false);
}
adev->desc = NULL;
adev->ops = NULL;
}
/* The Type-C bus type, binding alternate mode devices to altmode drivers. */
const struct bus_type typec_bus = {
.name = "typec",
.dev_groups = typec_groups,
.match = typec_match,
.uevent = typec_uevent,
.probe = typec_probe,
.remove = typec_remove,
};
| linux-master | drivers/usb/typec/bus.c |
// SPDX-License-Identifier: GPL-2.0
/*
* USB Type-C Multiplexer/DeMultiplexer Switch support
*
* Copyright (C) 2018 Intel Corporation
* Author: Heikki Krogerus <[email protected]>
* Hans de Goede <[email protected]>
*/
#include <linux/device.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/property.h>
#include <linux/slab.h>
#include "class.h"
#include "mux.h"
/* Maximum number of backing devices one switch/mux handle can aggregate. */
#define TYPEC_MUX_MAX_DEVS 3
/* Aggregate handle over all orientation switch devices of one connector. */
struct typec_switch {
struct typec_switch_dev *sw_devs[TYPEC_MUX_MAX_DEVS];
unsigned int num_sw_devs;
};
/* class_find_device() match: only orientation switch devices qualify. */
static int switch_fwnode_match(struct device *dev, const void *fwnode)
{
	if (!is_typec_switch_dev(dev))
		return 0;

	return device_match_fwnode(dev, fwnode);
}
/* Connection-graph callback: resolve @fwnode to a registered switch device. */
static void *typec_switch_match(const struct fwnode_handle *fwnode,
const char *id, void *data)
{
struct device *dev;
/*
 * Device graph (OF graph) does not give any means to identify the
 * device type or the device class of the remote port parent that @fwnode
 * represents, so in order to identify the type or the class of @fwnode
 * an additional device property is needed. With typec switches the
 * property is named "orientation-switch" (@id). The value of the device
 * property is ignored.
 */
if (id && !fwnode_property_present(fwnode, id))
return NULL;
/*
 * At this point we are sure that @fwnode is a typec switch in all
 * cases. If the switch hasn't yet been registered for some reason, the
 * function "defers probe" for now.
 */
dev = class_find_device(&typec_mux_class, NULL, fwnode,
switch_fwnode_match);
/* class_find_device() took a device reference on success. */
return dev ? to_typec_switch_dev(dev) : ERR_PTR(-EPROBE_DEFER);
}
/**
 * fwnode_typec_switch_get - Find USB Type-C orientation switch
 * @fwnode: The caller device node
 *
 * Finds a switch linked with @dev. Returns a reference to the switch on
 * success, NULL if no matching connection was found, or
 * ERR_PTR(-EPROBE_DEFER) when a connection was found but the switch
 * has not been enumerated yet.
 */
struct typec_switch *fwnode_typec_switch_get(struct fwnode_handle *fwnode)
{
struct typec_switch_dev *sw_devs[TYPEC_MUX_MAX_DEVS];
struct typec_switch *sw;
int count;
int err;
int i;
sw = kzalloc(sizeof(*sw), GFP_KERNEL);
if (!sw)
return ERR_PTR(-ENOMEM);
count = fwnode_connection_find_matches(fwnode, "orientation-switch", NULL,
typec_switch_match,
(void **)sw_devs,
ARRAY_SIZE(sw_devs));
if (count <= 0) {
kfree(sw);
return NULL;
}
/* First pass: defer if any matched switch is not registered yet. */
for (i = 0; i < count; i++) {
if (IS_ERR(sw_devs[i])) {
err = PTR_ERR(sw_devs[i]);
goto put_sw_devs;
}
}
/* Second pass: pin the providers' modules and keep the device refs. */
for (i = 0; i < count; i++) {
WARN_ON(!try_module_get(sw_devs[i]->dev.parent->driver->owner));
sw->sw_devs[i] = sw_devs[i];
}
sw->num_sw_devs = count;
return sw;
put_sw_devs:
/* Drop the device references the match callback took. */
for (i = 0; i < count; i++) {
if (!IS_ERR(sw_devs[i]))
put_device(&sw_devs[i]->dev);
}
kfree(sw);
return ERR_PTR(err);
}
EXPORT_SYMBOL_GPL(fwnode_typec_switch_get);
/**
 * typec_switch_put - Release USB Type-C orientation switch
 * @sw: USB Type-C orientation switch
 *
 * Decrement reference count for @sw.
 */
void typec_switch_put(struct typec_switch *sw)
{
	unsigned int i;

	if (IS_ERR_OR_NULL(sw))
		return;

	for (i = 0; i < sw->num_sw_devs; i++) {
		struct typec_switch_dev *sw_dev = sw->sw_devs[i];

		/* Drop the module and device references taken at get time. */
		module_put(sw_dev->dev.parent->driver->owner);
		put_device(&sw_dev->dev);
	}

	kfree(sw);
}
EXPORT_SYMBOL_GPL(typec_switch_put);
/* Device release: frees the typec_switch_dev allocated at register time. */
static void typec_switch_release(struct device *dev)
{
kfree(to_typec_switch_dev(dev));
}
const struct device_type typec_switch_dev_type = {
.name = "orientation_switch",
.release = typec_switch_release,
};
/**
 * typec_switch_register - Register USB Type-C orientation switch
 * @parent: Parent device
 * @desc: Orientation switch description
 *
 * This function registers a switch that can be used for routing the correct
 * data pairs depending on the cable plug orientation from the USB Type-C
 * connector to the USB controllers. USB Type-C plugs can be inserted
 * right-side-up or upside-down.
 */
struct typec_switch_dev *
typec_switch_register(struct device *parent,
const struct typec_switch_desc *desc)
{
struct typec_switch_dev *sw_dev;
int ret;
/* The set() callback is the switch's whole purpose; it is mandatory. */
if (!desc || !desc->set)
return ERR_PTR(-EINVAL);
sw_dev = kzalloc(sizeof(*sw_dev), GFP_KERNEL);
if (!sw_dev)
return ERR_PTR(-ENOMEM);
sw_dev->set = desc->set;
device_initialize(&sw_dev->dev);
sw_dev->dev.parent = parent;
sw_dev->dev.fwnode = desc->fwnode;
sw_dev->dev.class = &typec_mux_class;
sw_dev->dev.type = &typec_switch_dev_type;
sw_dev->dev.driver_data = desc->drvdata;
/* Fall back to the parent's name when the description has none. */
ret = dev_set_name(&sw_dev->dev, "%s-switch", desc->name ? desc->name : dev_name(parent));
if (ret) {
put_device(&sw_dev->dev);
return ERR_PTR(ret);
}
ret = device_add(&sw_dev->dev);
if (ret) {
dev_err(parent, "failed to register switch (%d)\n", ret);
put_device(&sw_dev->dev);
return ERR_PTR(ret);
}
return sw_dev;
}
EXPORT_SYMBOL_GPL(typec_switch_register);
/* Apply @orientation to every switch device in @sw; stop on first error. */
int typec_switch_set(struct typec_switch *sw,
		     enum typec_orientation orientation)
{
	unsigned int i;
	int ret;

	/* A NULL/error handle means "no switch"; treat as success. */
	if (IS_ERR_OR_NULL(sw))
		return 0;

	for (i = 0; i < sw->num_sw_devs; i++) {
		struct typec_switch_dev *sw_dev = sw->sw_devs[i];

		ret = sw_dev->set(sw_dev, orientation);
		if (ret)
			return ret;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(typec_switch_set);
/**
 * typec_switch_unregister - Unregister USB Type-C orientation switch
 * @sw_dev: USB Type-C orientation switch
 *
 * Unregister switch that was registered with typec_switch_register().
 */
void typec_switch_unregister(struct typec_switch_dev *sw_dev)
{
	if (IS_ERR_OR_NULL(sw_dev))
		return;

	device_unregister(&sw_dev->dev);
}
EXPORT_SYMBOL_GPL(typec_switch_unregister);
/* Store driver-private data on the switch device. */
void typec_switch_set_drvdata(struct typec_switch_dev *sw_dev, void *data)
{
dev_set_drvdata(&sw_dev->dev, data);
}
EXPORT_SYMBOL_GPL(typec_switch_set_drvdata);
/* Retrieve the driver-private data stored with typec_switch_set_drvdata(). */
void *typec_switch_get_drvdata(struct typec_switch_dev *sw_dev)
{
return dev_get_drvdata(&sw_dev->dev);
}
EXPORT_SYMBOL_GPL(typec_switch_get_drvdata);
/* ------------------------------------------------------------------------- */
/* Aggregate handle over all mode-switch (mux) devices of one connector. */
struct typec_mux {
struct typec_mux_dev *mux_devs[TYPEC_MUX_MAX_DEVS];
unsigned int num_mux_devs;
};
/* class_find_device() match: only mode-switch (mux) devices qualify. */
static int mux_fwnode_match(struct device *dev, const void *fwnode)
{
	if (!is_typec_mux_dev(dev))
		return 0;

	return device_match_fwnode(dev, fwnode);
}
/* Connection-graph callback: resolve @fwnode to a registered mux device. */
static void *typec_mux_match(const struct fwnode_handle *fwnode,
const char *id, void *data)
{
struct device *dev;
/*
 * Device graph (OF graph) does not give any means to identify the
 * device type or the device class of the remote port parent that @fwnode
 * represents, so in order to identify the type or the class of @fwnode
 * an additional device property is needed. With typec muxes the
 * property is named "mode-switch" (@id). The value of the device
 * property is ignored.
 */
if (id && !fwnode_property_present(fwnode, id))
return NULL;
/* Defer probe if the mux device has not been registered yet. */
dev = class_find_device(&typec_mux_class, NULL, fwnode,
mux_fwnode_match);
return dev ? to_typec_mux_dev(dev) : ERR_PTR(-EPROBE_DEFER);
}
/**
 * fwnode_typec_mux_get - Find USB Type-C Multiplexer
 * @fwnode: The caller device node
 *
 * Finds a mux linked to the caller. This function is primarily meant for the
 * Type-C drivers. Returns a reference to the mux on success, NULL if no
 * matching connection was found, or ERR_PTR(-EPROBE_DEFER) when a connection
 * was found but the mux has not been enumerated yet.
 */
struct typec_mux *fwnode_typec_mux_get(struct fwnode_handle *fwnode)
{
struct typec_mux_dev *mux_devs[TYPEC_MUX_MAX_DEVS];
struct typec_mux *mux;
int count;
int err;
int i;
mux = kzalloc(sizeof(*mux), GFP_KERNEL);
if (!mux)
return ERR_PTR(-ENOMEM);
count = fwnode_connection_find_matches(fwnode, "mode-switch",
NULL, typec_mux_match,
(void **)mux_devs,
ARRAY_SIZE(mux_devs));
if (count <= 0) {
kfree(mux);
return NULL;
}
/* First pass: defer if any matched mux is not registered yet. */
for (i = 0; i < count; i++) {
if (IS_ERR(mux_devs[i])) {
err = PTR_ERR(mux_devs[i]);
goto put_mux_devs;
}
}
/* Second pass: pin the providers' modules and keep the device refs. */
for (i = 0; i < count; i++) {
WARN_ON(!try_module_get(mux_devs[i]->dev.parent->driver->owner));
mux->mux_devs[i] = mux_devs[i];
}
mux->num_mux_devs = count;
return mux;
put_mux_devs:
/* Drop the device references the match callback took. */
for (i = 0; i < count; i++) {
if (!IS_ERR(mux_devs[i]))
put_device(&mux_devs[i]->dev);
}
kfree(mux);
return ERR_PTR(err);
}
EXPORT_SYMBOL_GPL(fwnode_typec_mux_get);
/**
 * typec_mux_put - Release handle to a Multiplexer
 * @mux: USB Type-C Connector Multiplexer/DeMultiplexer
 *
 * Decrements reference count for @mux.
 */
void typec_mux_put(struct typec_mux *mux)
{
	unsigned int i;

	if (IS_ERR_OR_NULL(mux))
		return;

	for (i = 0; i < mux->num_mux_devs; i++) {
		struct typec_mux_dev *mux_dev = mux->mux_devs[i];

		/* Drop the module and device references taken at get time. */
		module_put(mux_dev->dev.parent->driver->owner);
		put_device(&mux_dev->dev);
	}

	kfree(mux);
}
EXPORT_SYMBOL_GPL(typec_mux_put);
/* Apply @state to every mux device in @mux; stop on first error. */
int typec_mux_set(struct typec_mux *mux, struct typec_mux_state *state)
{
	unsigned int i;
	int ret;

	/* A NULL/error handle means "no mux"; treat as success. */
	if (IS_ERR_OR_NULL(mux))
		return 0;

	for (i = 0; i < mux->num_mux_devs; i++) {
		struct typec_mux_dev *mux_dev = mux->mux_devs[i];

		ret = mux_dev->set(mux_dev, state);
		if (ret)
			return ret;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(typec_mux_set);
/* Device release: frees the typec_mux_dev allocated at register time. */
static void typec_mux_release(struct device *dev)
{
kfree(to_typec_mux_dev(dev));
}
const struct device_type typec_mux_dev_type = {
.name = "mode_switch",
.release = typec_mux_release,
};
/**
 * typec_mux_register - Register Multiplexer routing USB Type-C pins
 * @parent: Parent device
 * @desc: Multiplexer description
 *
 * USB Type-C connectors can be used for alternate modes of operation besides
 * USB when Accessory/Alternate Modes are supported. With some of those modes,
 * the pins on the connector need to be reconfigured. This function registers
 * multiplexer switches routing the pins on the connector.
 */
struct typec_mux_dev *
typec_mux_register(struct device *parent, const struct typec_mux_desc *desc)
{
struct typec_mux_dev *mux_dev;
int ret;
/* The set() callback is the mux's whole purpose; it is mandatory. */
if (!desc || !desc->set)
return ERR_PTR(-EINVAL);
mux_dev = kzalloc(sizeof(*mux_dev), GFP_KERNEL);
if (!mux_dev)
return ERR_PTR(-ENOMEM);
mux_dev->set = desc->set;
device_initialize(&mux_dev->dev);
mux_dev->dev.parent = parent;
mux_dev->dev.fwnode = desc->fwnode;
mux_dev->dev.class = &typec_mux_class;
mux_dev->dev.type = &typec_mux_dev_type;
mux_dev->dev.driver_data = desc->drvdata;
/* Fall back to the parent's name when the description has none. */
ret = dev_set_name(&mux_dev->dev, "%s-mux", desc->name ? desc->name : dev_name(parent));
if (ret) {
put_device(&mux_dev->dev);
return ERR_PTR(ret);
}
ret = device_add(&mux_dev->dev);
if (ret) {
dev_err(parent, "failed to register mux (%d)\n", ret);
put_device(&mux_dev->dev);
return ERR_PTR(ret);
}
return mux_dev;
}
EXPORT_SYMBOL_GPL(typec_mux_register);
/**
 * typec_mux_unregister - Unregister Multiplexer Switch
 * @mux_dev: USB Type-C Connector Multiplexer/DeMultiplexer
 *
 * Unregister mux that was registered with typec_mux_register().
 */
void typec_mux_unregister(struct typec_mux_dev *mux_dev)
{
	if (IS_ERR_OR_NULL(mux_dev))
		return;

	device_unregister(&mux_dev->dev);
}
EXPORT_SYMBOL_GPL(typec_mux_unregister);
/* Store driver-private data on the mux device. */
void typec_mux_set_drvdata(struct typec_mux_dev *mux_dev, void *data)
{
dev_set_drvdata(&mux_dev->dev, data);
}
EXPORT_SYMBOL_GPL(typec_mux_set_drvdata);
/* Retrieve the driver-private data stored with typec_mux_set_drvdata(). */
void *typec_mux_get_drvdata(struct typec_mux_dev *mux_dev)
{
return dev_get_drvdata(&mux_dev->dev);
}
EXPORT_SYMBOL_GPL(typec_mux_get_drvdata);
/* Common device class for both orientation switch and mode switch devices. */
struct class typec_mux_class = {
.name = "typec_mux",
};
| linux-master | drivers/usb/typec/mux.c |
// SPDX-License-Identifier: GPL-2.0
#include <linux/bitfield.h>
#include <linux/completion.h>
#include <linux/i2c.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/power_supply.h>
#include <linux/regmap.h>
#include <linux/usb/pd.h>
#include <linux/usb/role.h>
#include <linux/usb/typec.h>
#define RT1719_REG_TXCTRL1 0x03
#define RT1719_REG_TXCTRL2 0x04
#define RT1719_REG_POLICYINFO 0x0E
#define RT1719_REG_SRCPDO1 0x11
#define RT1719_REG_MASKS 0x2D
#define RT1719_REG_EVENTS 0x33
#define RT1719_REG_STATS 0x37
#define RT1719_REG_PSELINFO 0x3C
#define RT1719_REG_USBSETINFO 0x3E
#define RT1719_REG_VENID 0x82
#define RT1719_UNIQUE_PID 0x1719
#define RT1719_REQDRSWAP_MASK BIT(7)
#define RT1719_EVALMODE_MASK BIT(4)
#define RT1719_REQSRCPDO_MASK GENMASK(2, 0)
#define RT1719_TXSPDOREQ_MASK BIT(7)
#define RT1719_INT_DRSW_ACCEPT BIT(23)
#define RT1719_INT_RX_SRCCAP BIT(21)
#define RT1719_INT_VBUS_DCT BIT(6)
#define RT1719_INT_VBUS_PRESENT BIT(5)
#define RT1719_INT_PE_SNK_RDY BIT(2)
#define RT1719_CC1_STAT GENMASK(9, 8)
#define RT1719_CC2_STAT GENMASK(11, 10)
#define RT1719_POLARITY_MASK BIT(23)
#define RT1719_DATAROLE_MASK BIT(22)
#define RT1719_PDSPECREV_MASK GENMASK(21, 20)
#define RT1719_SPDOSEL_MASK GENMASK(18, 16)
#define RT1719_SPDONUM_MASK GENMASK(15, 13)
#define RT1719_ATTACH_VBUS BIT(12)
#define RT1719_ATTACH_DBG BIT(10)
#define RT1719_ATTACH_SNK BIT(9)
#define RT1719_ATTACHDEV_MASK (RT1719_ATTACH_VBUS | RT1719_ATTACH_DBG | \
RT1719_ATTACH_SNK)
#define RT1719_PE_EXP_CONTRACT BIT(2)
#define RT1719_PSEL_SUPPORT BIT(15)
#define RT1719_TBLSEL_MASK BIT(6)
#define RT1719_LATPSEL_MASK GENMASK(5, 0)
#define RT1719_USBINFO_MASK GENMASK(1, 0)
#define RT1719_USB_DFPUFP 3
#define RT1719_MAX_SRCPDO 7
/* CC line pull advertisement decoded from the status register. */
enum {
SNK_PWR_OPEN = 0,
SNK_PWR_DEF,
SNK_PWR_1P5A,
SNK_PWR_3A
};
/* Values of the PD specification revision field in the policy info. */
enum {
USBPD_SPECREV_1_0 = 0,
USBPD_SPECREV_2_0,
USBPD_SPECREV_3_0
};
/* Fixed sink capability voltage slots supported by the chip. */
enum rt1719_snkcap {
RT1719_SNKCAP_5V = 0,
RT1719_SNKCAP_9V,
RT1719_SNKCAP_12V,
RT1719_SNKCAP_15V,
RT1719_SNKCAP_20V,
RT1719_MAX_SNKCAP
};
/* One entry of the power-selection (PSEL) capability table. */
struct rt1719_psel_cap {
u8 lomask;
u8 himask;
u32 milliwatt;
u32 milliamp;
};
/* Per-device driver state. */
struct rt1719_data {
struct device *dev;
struct regmap *regmap;
struct typec_port *port;
struct usb_role_switch *role_sw;
struct power_supply *psy;
struct typec_partner *partner;
struct power_supply_desc psy_desc;
struct usb_pd_identity partner_ident;
struct typec_partner_desc partner_desc;
struct completion req_completion; /* completed on PE_SNK_RDY events */
enum power_supply_usb_type usb_type;
bool attached;
bool pd_capable; /* true once an explicit PD contract exists */
bool drswap_support;
u32 voltage; /* uV */
u32 req_voltage; /* mV, requested via the power-supply interface */
u32 max_current; /* uA */
u32 op_current; /* uA */
u32 spdos[RT1719_MAX_SRCPDO]; /* cached source capability PDOs */
u16 snkcaps[RT1719_MAX_SNKCAP];
int spdo_num;
int spdo_sel; /* 1-based index of the selected source PDO, 0 = none */
u32 conn_info; /* last POLICYINFO register snapshot */
u16 conn_stat; /* last STATS register snapshot */
};
/* USB types the power-supply device may report. */
static const enum power_supply_usb_type rt1719_psy_usb_types[] = {
POWER_SUPPLY_USB_TYPE_C,
POWER_SUPPLY_USB_TYPE_PD,
POWER_SUPPLY_USB_TYPE_PD_PPS
};
/* Properties exposed through the power-supply class. */
static const enum power_supply_property rt1719_psy_properties[] = {
POWER_SUPPLY_PROP_ONLINE,
POWER_SUPPLY_PROP_USB_TYPE,
POWER_SUPPLY_PROP_VOLTAGE_NOW,
POWER_SUPPLY_PROP_CURRENT_MAX,
POWER_SUPPLY_PROP_CURRENT_NOW
};
/* Read a 16-bit little-endian register into host order. */
static int rt1719_read16(struct rt1719_data *data, unsigned int reg, u16 *val)
{
	__le16 raw;
	int ret;

	ret = regmap_raw_read(data->regmap, reg, &raw, sizeof(raw));
	if (ret)
		return ret;

	*val = le16_to_cpu(raw);
	return 0;
}
/* Read a 32-bit little-endian register into host order. */
static int rt1719_read32(struct rt1719_data *data, unsigned int reg, u32 *val)
{
	__le32 raw;
	int ret;

	ret = regmap_raw_read(data->regmap, reg, &raw, sizeof(raw));
	if (ret)
		return ret;

	*val = le32_to_cpu(raw);
	return 0;
}
/* Write a 32-bit host-order value to a little-endian register. */
static int rt1719_write32(struct rt1719_data *data, unsigned int reg, u32 val)
{
	__le32 raw = cpu_to_le32(val);

	return regmap_raw_write(data->regmap, reg, &raw, sizeof(raw));
}
/*
 * Derive the Type-C power operation mode from the connection info and the
 * decoded CC line status. For an attached sink only the CC line selected by
 * the polarity bit is relevant; for a debug accessory both lines are checked.
 */
static enum typec_pwr_opmode rt1719_get_pwr_opmode(u32 conn, u16 stat)
{
u16 cc1, cc2, cc_stat;
cc1 = FIELD_GET(RT1719_CC1_STAT, stat);
cc2 = FIELD_GET(RT1719_CC2_STAT, stat);
if (conn & RT1719_ATTACH_SNK) {
/* Polarity bit selects which CC line carries the advertisement. */
if (conn & RT1719_POLARITY_MASK)
cc_stat = cc2;
else
cc_stat = cc1;
switch (cc_stat) {
case SNK_PWR_3A:
return TYPEC_PWR_MODE_3_0A;
case SNK_PWR_1P5A:
return TYPEC_PWR_MODE_1_5A;
}
} else if (conn & RT1719_ATTACH_DBG) {
if ((cc1 == SNK_PWR_1P5A && cc2 == SNK_PWR_DEF) ||
(cc1 == SNK_PWR_DEF && cc2 == SNK_PWR_1P5A))
return TYPEC_PWR_MODE_1_5A;
else if ((cc1 == SNK_PWR_3A && cc2 == SNK_PWR_DEF) ||
(cc1 == SNK_PWR_DEF && cc2 == SNK_PWR_3A))
return TYPEC_PWR_MODE_3_0A;
}
/* Default USB power when no higher advertisement is present. */
return TYPEC_PWR_MODE_USB;
}
/* Decode the current data role from the cached connection info. */
static enum typec_data_role rt1719_get_data_role(u32 conn)
{
	return (conn & RT1719_DATAROLE_MASK) ? TYPEC_HOST : TYPEC_DEVICE;
}
/* Propagate the data role to both the USB role switch and the typec port. */
static void rt1719_set_data_role(struct rt1719_data *data,
				 enum typec_data_role data_role,
				 bool attached)
{
	enum usb_role usb_role;

	if (!attached)
		usb_role = USB_ROLE_NONE;
	else if (data_role == TYPEC_HOST)
		usb_role = USB_ROLE_HOST;
	else
		usb_role = USB_ROLE_DEVICE;

	usb_role_switch_set_role(data->role_sw, usb_role);
	typec_set_data_role(data->port, data_role);
}
static void rt1719_update_data_role(struct rt1719_data *data)
{
if (!data->attached)
return;
rt1719_set_data_role(data, rt1719_get_data_role(data->conn_info), true);
}
/* Register (or re-register) the typec partner with the current PD info. */
static void rt1719_register_partner(struct rt1719_data *data)
{
u16 spec_rev = 0;
if (data->pd_capable) {
u32 rev;
/* Map the register revision field to the BCD form typec expects. */
rev = FIELD_GET(RT1719_PDSPECREV_MASK, data->conn_info);
switch (rev) {
case USBPD_SPECREV_3_0:
spec_rev = 0x0300;
break;
case USBPD_SPECREV_2_0:
spec_rev = 0x0200;
break;
default:
spec_rev = 0x0100;
break;
}
}
/* Just to prevent multiple times attach */
if (data->partner)
typec_unregister_partner(data->partner);
memset(&data->partner_ident, 0, sizeof(data->partner_ident));
data->partner_desc.usb_pd = data->pd_capable;
data->partner_desc.pd_revision = spec_rev;
if (data->conn_info & RT1719_ATTACH_DBG)
data->partner_desc.accessory = TYPEC_ACCESSORY_DEBUG;
else
data->partner_desc.accessory = TYPEC_ACCESSORY_NONE;
data->partner = typec_register_partner(data->port, &data->partner_desc);
}
/* Handle an attach event: publish opmode, data role, partner and power. */
static void rt1719_attach(struct rt1719_data *data)
{
enum typec_pwr_opmode pwr_opmode;
enum typec_data_role data_role;
/* Defaults before PD negotiation: vSafe5V at 500 mA. */
u32 volt = 5000, curr = 500;
if (!(data->conn_info & RT1719_ATTACHDEV_MASK))
return;
pwr_opmode = rt1719_get_pwr_opmode(data->conn_info, data->conn_stat);
data_role = rt1719_get_data_role(data->conn_info);
typec_set_pwr_opmode(data->port, pwr_opmode);
rt1719_set_data_role(data, data_role, true);
if (data->conn_info & RT1719_ATTACH_SNK)
rt1719_register_partner(data);
/* Raise the current limit when CC advertises more than default USB. */
if (pwr_opmode == TYPEC_PWR_MODE_3_0A)
curr = 3000;
else if (pwr_opmode == TYPEC_PWR_MODE_1_5A)
curr = 1500;
/* Convert mV/mA to the uV/uA units of the power-supply class. */
data->voltage = volt * 1000;
data->max_current = data->op_current = curr * 1000;
data->attached = true;
power_supply_changed(data->psy);
}
/* Handle a detach event: drop the partner and reset all cached state. */
static void rt1719_detach(struct rt1719_data *data)
{
/* Ignore if not attached, or if something is still attached. */
if (!data->attached || (data->conn_info & RT1719_ATTACHDEV_MASK))
return;
typec_unregister_partner(data->partner);
data->partner = NULL;
typec_set_pwr_opmode(data->port, TYPEC_PWR_MODE_USB);
rt1719_set_data_role(data, TYPEC_DEVICE, false);
/* Clear the cached source PDOs (count in 32-bit words). */
memset32(data->spdos, 0, RT1719_MAX_SRCPDO);
data->spdo_num = 0;
data->voltage = data->max_current = data->op_current = 0;
data->attached = data->pd_capable = false;
data->usb_type = POWER_SUPPLY_USB_TYPE_C;
power_supply_changed(data->psy);
}
/* Refresh voltage/current limits from the currently selected source PDO. */
static void rt1719_update_operating_status(struct rt1719_data *data)
{
enum power_supply_usb_type usb_type = POWER_SUPPLY_USB_TYPE_PD;
u32 voltage, max_current, op_current;
int i, snk_sel;
/* Any APDO in the source capabilities means PPS is available. */
for (i = 0; i < data->spdo_num; i++) {
u32 pdo = data->spdos[i];
enum pd_pdo_type type = pdo_type(pdo);
if (type == PDO_TYPE_APDO) {
usb_type = POWER_SUPPLY_USB_TYPE_PD_PPS;
break;
}
}
/* SPDOSEL is 1-based; 0 means no PDO has been selected yet. */
data->spdo_sel = FIELD_GET(RT1719_SPDOSEL_MASK, data->conn_info);
if (data->spdo_sel <= 0)
return;
data->usb_type = usb_type;
voltage = pdo_fixed_voltage(data->spdos[data->spdo_sel - 1]);
max_current = pdo_max_current(data->spdos[data->spdo_sel - 1]);
switch (voltage) {
case 5000:
snk_sel = RT1719_SNKCAP_5V;
break;
case 9000:
snk_sel = RT1719_SNKCAP_9V;
break;
case 12000:
snk_sel = RT1719_SNKCAP_12V;
break;
case 15000:
snk_sel = RT1719_SNKCAP_15V;
break;
case 20000:
snk_sel = RT1719_SNKCAP_20V;
break;
default:
return;
}
/* Operating current is limited by both source PDO and our sink cap. */
op_current = min(max_current, pdo_max_current(data->snkcaps[snk_sel]));
/* convert mV/mA to uV/uA */
data->voltage = voltage * 1000;
data->max_current = max_current * 1000;
data->op_current = op_current * 1000;
power_supply_changed(data->psy);
}
/* A PD contract exists: switch the port to PD mode and refresh status. */
static void rt1719_update_pwr_opmode(struct rt1719_data *data)
{
if (!data->attached)
return;
if (!data->pd_capable) {
/* First explicit contract: re-register the partner as PD capable. */
data->pd_capable = true;
typec_set_pwr_opmode(data->port, TYPEC_PWR_MODE_PD);
rt1719_register_partner(data);
}
rt1719_update_operating_status(data);
}
/* Cache the partner's source capability PDOs from the chip registers. */
static void rt1719_update_source_pdos(struct rt1719_data *data)
{
int spdo_num = FIELD_GET(RT1719_SPDONUM_MASK, data->conn_info);
__le32 src_pdos[RT1719_MAX_SRCPDO] = { };
int i, ret;
if (!data->attached)
return;
/* PDO registers are consecutive 32-bit little-endian words. */
ret = regmap_raw_read(data->regmap, RT1719_REG_SRCPDO1, src_pdos,
sizeof(__le32) * spdo_num);
if (ret)
return;
data->spdo_num = spdo_num;
for (i = 0; i < spdo_num; i++)
data->spdos[i] = le32_to_cpu(src_pdos[i]);
}
/*
 * typec dr_set operation: request a PD Data Role Swap and wait for the
 * policy engine to reach SNK_READY again before reporting the outcome.
 */
static int rt1719_dr_set(struct typec_port *port, enum typec_data_role role)
{
struct rt1719_data *data = typec_get_drvdata(port);
enum typec_data_role cur_role;
int ret;
if (!data->attached || !data->pd_capable || !data->drswap_support)
return -EOPNOTSUPP;
/* The partner's first fixed PDO must advertise data swap support. */
if (data->spdo_num > 0 && !(data->spdos[0] & PDO_FIXED_DATA_SWAP))
return -EINVAL;
cur_role = rt1719_get_data_role(data->conn_info);
if (cur_role == role)
return 0;
/* Trigger the DR_SWAP request in hardware. */
ret = regmap_update_bits(data->regmap, RT1719_REG_TXCTRL1,
RT1719_REQDRSWAP_MASK, RT1719_REQDRSWAP_MASK);
if (ret)
return ret;
/* req_completion is completed by the IRQ handler on PE_SNK_RDY. */
reinit_completion(&data->req_completion);
ret = wait_for_completion_timeout(&data->req_completion,
msecs_to_jiffies(400));
if (ret == 0)
return -ETIMEDOUT;
/* Verify the swap actually took effect. */
cur_role = rt1719_get_data_role(data->conn_info);
if (cur_role != role)
return -EAGAIN;
rt1719_set_data_role(data, role, true);
return 0;
}
/* typec port operations; only data role swap is supported. */
static const struct typec_operations rt1719_port_ops = {
.dr_set = rt1719_dr_set,
};
/*
 * Request a new fixed PD voltage (data->req_voltage, in mV). Selects a
 * matching source PDO, triggers the request in hardware, and waits for the
 * policy engine to signal a new contract.
 */
static int rt1719_usbpd_request_voltage(struct rt1719_data *data)
{
u32 src_voltage;
int snk_sel, src_sel = -1;
int i, ret;
if (!data->attached || !data->pd_capable || data->spdo_sel <= 0)
return -EINVAL;
/* Nothing to do if the contract already matches the request. */
src_voltage = pdo_fixed_voltage(data->spdos[data->spdo_sel - 1]);
if (src_voltage == data->req_voltage)
return 0;
switch (data->req_voltage) {
case 5000:
snk_sel = RT1719_SNKCAP_5V;
break;
case 9000:
snk_sel = RT1719_SNKCAP_9V;
break;
case 12000:
snk_sel = RT1719_SNKCAP_12V;
break;
case 15000:
snk_sel = RT1719_SNKCAP_15V;
break;
case 20000:
snk_sel = RT1719_SNKCAP_20V;
break;
default:
return -EINVAL;
}
/* Our own sink capability table must allow this voltage. */
if (!(data->snkcaps[snk_sel] & RT1719_PSEL_SUPPORT))
return -EINVAL;
/* Find a fixed source PDO offering the requested voltage. */
for (i = 0; i < data->spdo_num; i++) {
enum pd_pdo_type type = pdo_type(data->spdos[i]);
if (type != PDO_TYPE_FIXED)
continue;
src_voltage = pdo_fixed_voltage(data->spdos[i]);
if (src_voltage == data->req_voltage) {
src_sel = i;
break;
}
}
if (src_sel == -1)
return -EOPNOTSUPP;
/* Program the 1-based PDO index and fire the request. */
ret = regmap_update_bits(data->regmap, RT1719_REG_TXCTRL1,
RT1719_EVALMODE_MASK | RT1719_REQSRCPDO_MASK,
RT1719_EVALMODE_MASK | (src_sel + 1));
ret |= regmap_update_bits(data->regmap, RT1719_REG_TXCTRL2,
RT1719_TXSPDOREQ_MASK, RT1719_TXSPDOREQ_MASK);
if (ret)
return ret;
/* req_completion is completed by the IRQ handler on PE_SNK_RDY. */
reinit_completion(&data->req_completion);
ret = wait_for_completion_timeout(&data->req_completion,
msecs_to_jiffies(400));
if (!ret)
return -ETIMEDOUT;
return 0;
}
/* power-supply set_property: only VOLTAGE_NOW (a PD voltage request). */
static int rt1719_psy_set_property(struct power_supply *psy,
				   enum power_supply_property psp,
				   const union power_supply_propval *val)
{
	struct rt1719_data *data = power_supply_get_drvdata(psy);

	if (psp != POWER_SUPPLY_PROP_VOLTAGE_NOW)
		return -EINVAL;

	/* The value arrives in uV; the request path works in mV. */
	data->req_voltage = val->intval / 1000;
	return rt1719_usbpd_request_voltage(data);
}
/* power-supply get_property: report the cached connection/power state. */
static int rt1719_psy_get_property(struct power_supply *psy,
				   enum power_supply_property psp,
				   union power_supply_propval *val)
{
	struct rt1719_data *data = power_supply_get_drvdata(psy);

	switch (psp) {
	case POWER_SUPPLY_PROP_ONLINE:
		val->intval = data->attached ? 1 : 0;
		return 0;
	case POWER_SUPPLY_PROP_USB_TYPE:
		val->intval = data->usb_type;
		return 0;
	case POWER_SUPPLY_PROP_VOLTAGE_NOW:
		val->intval = data->voltage;
		return 0;
	case POWER_SUPPLY_PROP_CURRENT_MAX:
		val->intval = data->max_current;
		return 0;
	case POWER_SUPPLY_PROP_CURRENT_NOW:
		val->intval = data->op_current;
		return 0;
	default:
		return -EINVAL;
	}
}
/* Only the requested voltage may be written from userspace. */
static int rt1719_psy_property_is_writeable(struct power_supply *psy,
					    enum power_supply_property psp)
{
	return psp == POWER_SUPPLY_PROP_VOLTAGE_NOW;
}
/* Register the device-managed power-supply that mirrors the PD sink state. */
static int devm_rt1719_psy_register(struct rt1719_data *data)
{
struct power_supply_config psy_cfg = { };
char *psy_name;
psy_cfg.fwnode = dev_fwnode(data->dev);
psy_cfg.drv_data = data;
/* Unique name per I2C device instance. */
psy_name = devm_kasprintf(data->dev, GFP_KERNEL, "rt1719-source-psy-%s",
dev_name(data->dev));
if (!psy_name)
return -ENOMEM;
data->psy_desc.name = psy_name;
data->psy_desc.type = POWER_SUPPLY_TYPE_USB;
data->psy_desc.usb_types = rt1719_psy_usb_types;
data->psy_desc.num_usb_types = ARRAY_SIZE(rt1719_psy_usb_types);
data->psy_desc.properties = rt1719_psy_properties;
data->psy_desc.num_properties = ARRAY_SIZE(rt1719_psy_properties);
data->psy_desc.get_property = rt1719_psy_get_property;
data->psy_desc.set_property = rt1719_psy_set_property;
data->psy_desc.property_is_writeable = rt1719_psy_property_is_writeable;
data->usb_type = POWER_SUPPLY_USB_TYPE_C;
data->psy = devm_power_supply_register(data->dev, &data->psy_desc,
&psy_cfg);
return PTR_ERR_OR_ZERO(data->psy);
}
/* Threaded IRQ handler: snapshot state, dispatch events, then ack them. */
static irqreturn_t rt1719_irq_handler(int irq, void *priv)
{
struct rt1719_data *data = priv;
u32 events, conn_info;
u16 conn_stat;
int ret;
/* Read events and the current connection snapshot together. */
ret = rt1719_read32(data, RT1719_REG_EVENTS, &events);
ret |= rt1719_read32(data, RT1719_REG_POLICYINFO, &conn_info);
ret |= rt1719_read16(data, RT1719_REG_STATS, &conn_stat);
if (ret)
return IRQ_NONE;
data->conn_info = conn_info;
data->conn_stat = conn_stat;
/* Only the events we actually handle below. */
events &= (RT1719_INT_DRSW_ACCEPT | RT1719_INT_RX_SRCCAP |
RT1719_INT_VBUS_PRESENT | RT1719_INT_VBUS_DCT |
RT1719_INT_PE_SNK_RDY);
if (events & RT1719_INT_DRSW_ACCEPT)
rt1719_update_data_role(data);
if (events & RT1719_INT_VBUS_PRESENT)
rt1719_attach(data);
if (events & RT1719_INT_VBUS_DCT)
rt1719_detach(data);
if (events & RT1719_INT_RX_SRCCAP)
rt1719_update_source_pdos(data);
if (events & RT1719_INT_PE_SNK_RDY) {
/* Wake any pending dr_set / voltage request waiter. */
complete(&data->req_completion);
rt1719_update_pwr_opmode(data);
}
/* Write 1 to clear already handled events */
rt1719_write32(data, RT1719_REG_EVENTS, events);
return IRQ_HANDLED;
}
/* Unmask the events the handler processes and request the threaded IRQ. */
static int rt1719_irq_init(struct rt1719_data *data)
{
	struct i2c_client *i2c = to_i2c_client(data->dev);
	const u32 irq_enable = RT1719_INT_DRSW_ACCEPT | RT1719_INT_RX_SRCCAP |
			       RT1719_INT_VBUS_DCT | RT1719_INT_VBUS_PRESENT |
			       RT1719_INT_PE_SNK_RDY;
	int ret;

	ret = rt1719_write32(data, RT1719_REG_MASKS, irq_enable);
	if (ret) {
		dev_err(&i2c->dev, "Failed to config irq enable\n");
		return ret;
	}

	return devm_request_threaded_irq(&i2c->dev, i2c->irq, NULL,
					 rt1719_irq_handler, IRQF_ONESHOT,
					 dev_name(&i2c->dev), data);
}
/*
 * Synchronize driver state with a partner that was already attached before
 * probe: read connection state, clear stale events, and replay the attach /
 * explicit-contract handling.
 */
static int rt1719_init_attach_state(struct rt1719_data *data)
{
	u32 conn_info, irq_clear;
	u16 conn_stat;
	int ret;

	irq_clear = RT1719_INT_DRSW_ACCEPT | RT1719_INT_RX_SRCCAP |
		    RT1719_INT_VBUS_DCT | RT1719_INT_VBUS_PRESENT |
		    RT1719_INT_PE_SNK_RDY;

	/* Error codes are negative, so OR-ing results catches any failure. */
	ret = rt1719_read32(data, RT1719_REG_POLICYINFO, &conn_info);
	ret |= rt1719_read16(data, RT1719_REG_STATS, &conn_stat);
	ret |= rt1719_write32(data, RT1719_REG_EVENTS, irq_clear);
	if (ret)
		return ret;

	data->conn_info = conn_info;
	data->conn_stat = conn_stat;

	if (conn_info & RT1719_ATTACHDEV_MASK)
		rt1719_attach(data);

	/* An explicit contract already exists: fetch PDOs and power mode. */
	if (conn_info & RT1719_PE_EXP_CONTRACT) {
		rt1719_update_source_pdos(data);
		rt1719_update_pwr_opmode(data);
	}

	return 0;
}
#define RT1719_PSEL_CAPINFO(_lomask, _milliwatt, _himask, _milliamp) { \
.lomask = _lomask, \
.milliwatt = _milliwatt, \
.himask = _himask, \
.milliamp = _milliamp, \
}
/*
 * PSEL strap decoding table, indexed by the latched PSEL value.
 * 'lomask'/'milliwatt' describe the wattage-based budget and
 * 'himask'/'milliamp' the current-based one; which pair applies at
 * runtime is chosen by TBLSEL (see rt1719_gen_snkcap()). The masks are
 * bitmaps over the enum rt1719_snkcap voltage slots.
 */
static const struct rt1719_psel_cap rt1719_psel_caps[] = {
	RT1719_PSEL_CAPINFO(0x18, 75000, 0x10, 5000),
	RT1719_PSEL_CAPINFO(0x18, 60000, 0x10, 4500),
	RT1719_PSEL_CAPINFO(0x18, 45000, 0x10, 4000),
	RT1719_PSEL_CAPINFO(0x18, 30000, 0x10, 3500),
	RT1719_PSEL_CAPINFO(0x18, 25000, 0x10, 3000),
	RT1719_PSEL_CAPINFO(0x18, 20000, 0x10, 2500),
	RT1719_PSEL_CAPINFO(0x18, 15000, 0x10, 2000),
	RT1719_PSEL_CAPINFO(0x18, 10000, 0x10, 1000),
	RT1719_PSEL_CAPINFO(0x1C, 60000, 0x1F, 5000),
	RT1719_PSEL_CAPINFO(0x1C, 45000, 0x1F, 4500),
	RT1719_PSEL_CAPINFO(0x1C, 30000, 0x1F, 4000),
	RT1719_PSEL_CAPINFO(0x1C, 24000, 0x1F, 3500),
	RT1719_PSEL_CAPINFO(0x1C, 15000, 0x1F, 3000),
	RT1719_PSEL_CAPINFO(0x1C, 10000, 0x1F, 2500),
	RT1719_PSEL_CAPINFO(0x0C, 60000, 0x1F, 2000),
	RT1719_PSEL_CAPINFO(0x0C, 45000, 0x1F, 1000),
	RT1719_PSEL_CAPINFO(0x0C, 36000, 0x08, 5000),
	RT1719_PSEL_CAPINFO(0x0C, 30000, 0x08, 4500),
	RT1719_PSEL_CAPINFO(0x0C, 24000, 0x08, 4000),
	RT1719_PSEL_CAPINFO(0x0C, 15000, 0x08, 3500),
	RT1719_PSEL_CAPINFO(0x0C, 10000, 0x08, 3000),
	RT1719_PSEL_CAPINFO(0x1E, 45000, 0x08, 2500),
	RT1719_PSEL_CAPINFO(0x1E, 36000, 0x08, 2000),
	RT1719_PSEL_CAPINFO(0x1E, 27000, 0x08, 1500),
	RT1719_PSEL_CAPINFO(0x1E, 20000, 0x08, 1000),
	RT1719_PSEL_CAPINFO(0x1E, 15000, 0x0F, 5000),
	RT1719_PSEL_CAPINFO(0x1E, 9000, 0x0F, 4500),
	RT1719_PSEL_CAPINFO(0x0E, 45000, 0x0F, 4000),
	RT1719_PSEL_CAPINFO(0x0E, 36000, 0x0F, 3500),
	RT1719_PSEL_CAPINFO(0x0E, 27000, 0x0F, 3000),
	RT1719_PSEL_CAPINFO(0x0E, 20000, 0x0F, 2500),
	RT1719_PSEL_CAPINFO(0x0E, 15000, 0x0F, 2000),
	RT1719_PSEL_CAPINFO(0x0E, 9000, 0x0F, 1500),
	RT1719_PSEL_CAPINFO(0x06, 45000, 0x0F, 1000),
	RT1719_PSEL_CAPINFO(0x06, 36000, 0x0F, 500),
	RT1719_PSEL_CAPINFO(0x06, 27000, 0x04, 5000),
	RT1719_PSEL_CAPINFO(0x06, 24000, 0x04, 4500),
	RT1719_PSEL_CAPINFO(0x06, 18000, 0x04, 4000),
	RT1719_PSEL_CAPINFO(0x06, 12000, 0x04, 3500),
	RT1719_PSEL_CAPINFO(0x06, 9000, 0x04, 3000),
	RT1719_PSEL_CAPINFO(0x1F, 25000, 0x04, 2500),
	RT1719_PSEL_CAPINFO(0x1F, 20000, 0x04, 2000),
	RT1719_PSEL_CAPINFO(0x1F, 15000, 0x04, 1500),
	RT1719_PSEL_CAPINFO(0x1F, 10000, 0x04, 1000),
	RT1719_PSEL_CAPINFO(0x1F, 7500, 0x07, 5000),
	RT1719_PSEL_CAPINFO(0x0F, 25000, 0x07, 4500),
	RT1719_PSEL_CAPINFO(0x0F, 20000, 0x07, 4000),
	RT1719_PSEL_CAPINFO(0x0F, 15000, 0x07, 3500),
	RT1719_PSEL_CAPINFO(0x0F, 10000, 0x07, 3000),
	RT1719_PSEL_CAPINFO(0x0F, 7500, 0x07, 2500),
	RT1719_PSEL_CAPINFO(0x07, 25000, 0x07, 2000),
	RT1719_PSEL_CAPINFO(0x07, 20000, 0x07, 1500),
	RT1719_PSEL_CAPINFO(0x07, 15000, 0x07, 1000),
	RT1719_PSEL_CAPINFO(0x07, 10000, 0x07, 500),
	RT1719_PSEL_CAPINFO(0x07, 7500, 0x03, 5000),
	RT1719_PSEL_CAPINFO(0x03, 25000, 0x03, 4500),
	RT1719_PSEL_CAPINFO(0x03, 20000, 0x03, 4000),
	RT1719_PSEL_CAPINFO(0x03, 15000, 0x03, 3500),
	RT1719_PSEL_CAPINFO(0x03, 10000, 0x03, 3000),
	RT1719_PSEL_CAPINFO(0x03, 7500, 0x03, 2500),
	RT1719_PSEL_CAPINFO(0x01, 15000, 0x03, 2000),
	RT1719_PSEL_CAPINFO(0x01, 10000, 0x03, 1500),
	RT1719_PSEL_CAPINFO(0x01, 7500, 0x03, 1000),
	RT1719_PSEL_CAPINFO(0x01, 2500, 0x03, 500)
};
/*
 * Build one sink-capability word from a current-based PSEL entry.
 * Returns 0 when the selected voltage slot is not covered by the entry.
 */
static u16 rt1719_gen_snkcap_by_current(const struct rt1719_psel_cap *psel_cap,
					enum rt1719_snkcap capsel)
{
	if (!(psel_cap->himask & BIT(capsel)))
		return 0;

	/* Current is encoded in 10mA units alongside the support flag. */
	return RT1719_PSEL_SUPPORT | (psel_cap->milliamp / 10);
}
/*
 * Build one sink-capability word from a wattage-based PSEL entry.
 * Returns 0 when the selected voltage slot is not covered by the entry.
 */
static u16 rt1719_gen_snkcap_by_watt(const struct rt1719_psel_cap *psel_cap,
				     enum rt1719_snkcap capsel)
{
	/*
	 * Nominal voltage (in volts) of each sink capability slot.
	 * static const: the table is read-only, so avoid rebuilding it on
	 * the stack for every call.
	 */
	static const u32 volt_div[RT1719_MAX_SNKCAP] = { 5, 9, 12, 15, 20 };
	u16 cap = RT1719_PSEL_SUPPORT;

	if (!(psel_cap->lomask & BIT(capsel)))
		return 0;

	/* mW budget -> mA at the slot voltage, clamped to 5A, 10mA units. */
	cap |= min(psel_cap->milliwatt / volt_div[capsel], (u32)5000) / 10;
	return cap;
}
/*
 * Decode one sink capability slot from the PSEL strap info. TBLSEL chooses
 * whether the selected table entry is current- or wattage-based.
 */
static u16 rt1719_gen_snkcap(unsigned int pselinfo, enum rt1719_snkcap capsel)
{
	const struct rt1719_psel_cap *entry;
	int psel = FIELD_GET(RT1719_LATPSEL_MASK, pselinfo);

	entry = &rt1719_psel_caps[psel];

	if (pselinfo & RT1719_TBLSEL_MASK)
		return rt1719_gen_snkcap_by_current(entry, capsel);

	return rt1719_gen_snkcap_by_watt(entry, capsel);
}
/*
 * Read the strap configuration once at probe time: expand the PSEL info
 * into per-voltage sink capabilities and note data-role-swap support.
 */
static int rt1719_get_caps(struct rt1719_data *data)
{
	unsigned int pselinfo, usbinfo;
	int i, ret;

	/* Error codes are negative, so OR-ing results catches any failure. */
	ret = regmap_read(data->regmap, RT1719_REG_PSELINFO, &pselinfo);
	ret |= regmap_read(data->regmap, RT1719_REG_USBSETINFO, &usbinfo);
	if (ret)
		return ret;

	for (i = 0; i < RT1719_MAX_SNKCAP; i++)
		data->snkcaps[i] = rt1719_gen_snkcap(pselinfo, i);

	/* Data role swap is only possible when strapped as DFP+UFP capable. */
	usbinfo = FIELD_GET(RT1719_USBINFO_MASK, usbinfo);
	if (usbinfo == RT1719_USB_DFPUFP)
		data->drswap_support = true;

	return 0;
}
/*
 * Probe-time sanity check: verify the chip responds and reports the
 * expected product ID. Returns 0 on success, -ENODEV on ID mismatch.
 */
static int rt1719_check_exist(struct rt1719_data *data)
{
	u16 devid;
	int ret;

	ret = rt1719_read16(data, RT1719_REG_VENID, &devid);
	if (ret)
		return ret;

	if (devid != RT1719_UNIQUE_PID) {
		dev_err(data->dev, "Incorrect PID 0x%04x\n", devid);
		return -ENODEV;
	}

	return 0;
}
static const struct regmap_config rt1719_regmap_config = {
.reg_bits = 8,
.val_bits = 8,
.max_register = 0xff,
};
/*
 * Probe: verify the chip, read its strapped capabilities, and register the
 * usb-role switch consumer, power supply and typec port before enabling
 * interrupts.
 *
 * Fix: the original error paths after typec_register_port() succeeded
 * jumped to err_role_put and leaked the registered typec port; unwind it
 * via the new err_port_unreg label.
 */
static int rt1719_probe(struct i2c_client *i2c)
{
	struct rt1719_data *data;
	struct fwnode_handle *fwnode;
	struct typec_capability typec_cap = { };
	int ret;

	data = devm_kzalloc(&i2c->dev, sizeof(*data), GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	data->dev = &i2c->dev;
	init_completion(&data->req_completion);

	data->regmap = devm_regmap_init_i2c(i2c, &rt1719_regmap_config);
	if (IS_ERR(data->regmap)) {
		ret = PTR_ERR(data->regmap);
		dev_err(&i2c->dev, "Failed to init regmap (%d)\n", ret);
		return ret;
	}

	ret = rt1719_check_exist(data);
	if (ret)
		return ret;

	ret = rt1719_get_caps(data);
	if (ret)
		return ret;

	fwnode = device_get_named_child_node(&i2c->dev, "connector");
	if (!fwnode)
		return -ENODEV;

	data->role_sw = fwnode_usb_role_switch_get(fwnode);
	if (IS_ERR(data->role_sw)) {
		ret = PTR_ERR(data->role_sw);
		dev_err(&i2c->dev, "Failed to get usb role switch (%d)\n", ret);
		goto err_fwnode_put;
	}

	ret = devm_rt1719_psy_register(data);
	if (ret) {
		dev_err(&i2c->dev, "Failed to register psy (%d)\n", ret);
		goto err_role_put;
	}

	typec_cap.revision = USB_TYPEC_REV_1_2;
	typec_cap.pd_revision = 0x300;	/* USB-PD spec release 3.0 */
	typec_cap.type = TYPEC_PORT_SNK;
	typec_cap.data = TYPEC_PORT_DRD;
	typec_cap.ops = &rt1719_port_ops;
	typec_cap.fwnode = fwnode;
	typec_cap.driver_data = data;
	typec_cap.accessory[0] = TYPEC_ACCESSORY_DEBUG;

	data->partner_desc.identity = &data->partner_ident;

	data->port = typec_register_port(&i2c->dev, &typec_cap);
	if (IS_ERR(data->port)) {
		ret = PTR_ERR(data->port);
		dev_err(&i2c->dev, "Failed to register typec port (%d)\n", ret);
		goto err_role_put;
	}

	ret = rt1719_init_attach_state(data);
	if (ret) {
		dev_err(&i2c->dev, "Failed to init attach state (%d)\n", ret);
		goto err_port_unreg;
	}

	ret = rt1719_irq_init(data);
	if (ret) {
		dev_err(&i2c->dev, "Failed to init irq\n");
		goto err_port_unreg;
	}

	fwnode_handle_put(fwnode);

	i2c_set_clientdata(i2c, data);

	return 0;

err_port_unreg:
	/* Was leaked before: unwind the typec port registered above. */
	typec_unregister_port(data->port);
err_role_put:
	usb_role_switch_put(data->role_sw);
err_fwnode_put:
	fwnode_handle_put(fwnode);

	return ret;
}
/*
 * i2c remove: release the two resources probe acquired without devres —
 * the typec port and the usb role switch reference.
 */
static void rt1719_remove(struct i2c_client *i2c)
{
	struct rt1719_data *data = i2c_get_clientdata(i2c);

	typec_unregister_port(data->port);
	usb_role_switch_put(data->role_sw);
}
static const struct of_device_id __maybe_unused rt1719_device_table[] = {
{ .compatible = "richtek,rt1719", },
{ }
};
MODULE_DEVICE_TABLE(of, rt1719_device_table);
static struct i2c_driver rt1719_driver = {
.driver = {
.name = "rt1719",
.of_match_table = rt1719_device_table,
},
.probe = rt1719_probe,
.remove = rt1719_remove,
};
module_i2c_driver(rt1719_driver);
MODULE_AUTHOR("ChiYuan Huang <[email protected]>");
MODULE_DESCRIPTION("Richtek RT1719 Sink Only USBPD Controller Driver");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/usb/typec/rt1719.c |
// SPDX-License-Identifier: GPL-2.0
/*
* USB Power Delivery sysfs entries
*
* Copyright (C) 2022, Intel Corporation
* Author: Heikki Krogerus <[email protected]>
*/
#include <linux/slab.h>
#include <linux/usb/pd.h>
#include "pd.h"
static DEFINE_IDA(pd_ida);
static struct class pd_class = {
.name = "usb_power_delivery",
};
#define to_pdo(o) container_of(o, struct pdo, dev)
/*
 * One Power Data Object, exposed as a child device of a capabilities
 * directory in sysfs.
 */
struct pdo {
	struct device dev;
	int object_position;	/* zero-based index within the message */
	u32 pdo;		/* raw 32-bit PDO value */
};

/* Device release callback: frees the pdo once its last reference drops. */
static void pdo_release(struct device *dev)
{
	kfree(to_pdo(dev));
}
/* -------------------------------------------------------------------------- */
/* Fixed Supply */
static ssize_t
dual_role_power_show(struct device *dev, struct device_attribute *attr, char *buf)
{
return sysfs_emit(buf, "%u\n", !!(to_pdo(dev)->pdo & PDO_FIXED_DUAL_ROLE));
}
static DEVICE_ATTR_RO(dual_role_power);
static ssize_t
usb_suspend_supported_show(struct device *dev, struct device_attribute *attr, char *buf)
{
return sysfs_emit(buf, "%u\n", !!(to_pdo(dev)->pdo & PDO_FIXED_SUSPEND));
}
static DEVICE_ATTR_RO(usb_suspend_supported);
static ssize_t
higher_capability_show(struct device *dev, struct device_attribute *attr, char *buf)
{
return sysfs_emit(buf, "%u\n", !!(to_pdo(dev)->pdo & PDO_FIXED_HIGHER_CAP));
}
static DEVICE_ATTR_RO(higher_capability);
static ssize_t
unconstrained_power_show(struct device *dev, struct device_attribute *attr, char *buf)
{
return sysfs_emit(buf, "%u\n", !!(to_pdo(dev)->pdo & PDO_FIXED_EXTPOWER));
}
static DEVICE_ATTR_RO(unconstrained_power);
static ssize_t
usb_communication_capable_show(struct device *dev, struct device_attribute *attr, char *buf)
{
return sysfs_emit(buf, "%u\n", !!(to_pdo(dev)->pdo & PDO_FIXED_USB_COMM));
}
static DEVICE_ATTR_RO(usb_communication_capable);
static ssize_t
dual_role_data_show(struct device *dev, struct device_attribute *attr, char *buf)
{
return sysfs_emit(buf, "%u\n", !!(to_pdo(dev)->pdo & PDO_FIXED_DATA_SWAP));
}
static DEVICE_ATTR_RO(dual_role_data);
static ssize_t
unchunked_extended_messages_supported_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
return sysfs_emit(buf, "%u\n", !!(to_pdo(dev)->pdo & PDO_FIXED_UNCHUNK_EXT));
}
static DEVICE_ATTR_RO(unchunked_extended_messages_supported);
/*
* REVISIT: Peak Current requires access also to the RDO.
static ssize_t
peak_current_show(struct device *dev, struct device_attribute *attr, char *buf)
{
...
}
*/
static ssize_t
fast_role_swap_current_show(struct device *dev, struct device_attribute *attr, char *buf)
{
return sysfs_emit(buf, "%u\n", (to_pdo(dev)->pdo >> PDO_FIXED_FRS_CURR_SHIFT) & 3);
}
static DEVICE_ATTR_RO(fast_role_swap_current);
static ssize_t voltage_show(struct device *dev, struct device_attribute *attr, char *buf)
{
return sysfs_emit(buf, "%umV\n", pdo_fixed_voltage(to_pdo(dev)->pdo));
}
static DEVICE_ATTR_RO(voltage);
/* Shared with Variable supplies, both source and sink */
static ssize_t current_show(struct device *dev, struct device_attribute *attr, char *buf)
{
return sysfs_emit(buf, "%umA\n", pdo_max_current(to_pdo(dev)->pdo));
}
/* Shared with Variable type supplies */
static struct device_attribute maximum_current_attr = {
.attr = {
.name = "maximum_current",
.mode = 0444,
},
.show = current_show,
};
static struct device_attribute operational_current_attr = {
.attr = {
.name = "operational_current",
.mode = 0444,
},
.show = current_show,
};
static struct attribute *source_fixed_supply_attrs[] = {
&dev_attr_dual_role_power.attr,
&dev_attr_usb_suspend_supported.attr,
&dev_attr_unconstrained_power.attr,
&dev_attr_usb_communication_capable.attr,
&dev_attr_dual_role_data.attr,
&dev_attr_unchunked_extended_messages_supported.attr,
/*&dev_attr_peak_current.attr,*/
&dev_attr_voltage.attr,
&maximum_current_attr.attr,
NULL
};
/*
 * Only the first Fixed Supply object (object position 0) carries the
 * capability flag bits; for every later position expose just the voltage
 * and current attributes.
 */
static umode_t fixed_attr_is_visible(struct kobject *kobj, struct attribute *attr, int n)
{
	if (to_pdo(kobj_to_dev(kobj))->object_position &&
	    /*attr != &dev_attr_peak_current.attr &&*/
	    attr != &dev_attr_voltage.attr &&
	    attr != &maximum_current_attr.attr &&
	    attr != &operational_current_attr.attr)
		return 0;

	return attr->mode;
}
static const struct attribute_group source_fixed_supply_group = {
.is_visible = fixed_attr_is_visible,
.attrs = source_fixed_supply_attrs,
};
__ATTRIBUTE_GROUPS(source_fixed_supply);
static struct device_type source_fixed_supply_type = {
.name = "pdo",
.release = pdo_release,
.groups = source_fixed_supply_groups,
};
static struct attribute *sink_fixed_supply_attrs[] = {
&dev_attr_dual_role_power.attr,
&dev_attr_higher_capability.attr,
&dev_attr_unconstrained_power.attr,
&dev_attr_usb_communication_capable.attr,
&dev_attr_dual_role_data.attr,
&dev_attr_unchunked_extended_messages_supported.attr,
&dev_attr_fast_role_swap_current.attr,
&dev_attr_voltage.attr,
&operational_current_attr.attr,
NULL
};
static const struct attribute_group sink_fixed_supply_group = {
.is_visible = fixed_attr_is_visible,
.attrs = sink_fixed_supply_attrs,
};
__ATTRIBUTE_GROUPS(sink_fixed_supply);
static struct device_type sink_fixed_supply_type = {
.name = "pdo",
.release = pdo_release,
.groups = sink_fixed_supply_groups,
};
/* -------------------------------------------------------------------------- */
/* Variable Supply */
static ssize_t
maximum_voltage_show(struct device *dev, struct device_attribute *attr, char *buf)
{
return sysfs_emit(buf, "%umV\n", pdo_max_voltage(to_pdo(dev)->pdo));
}
static DEVICE_ATTR_RO(maximum_voltage);
static ssize_t
minimum_voltage_show(struct device *dev, struct device_attribute *attr, char *buf)
{
return sysfs_emit(buf, "%umV\n", pdo_min_voltage(to_pdo(dev)->pdo));
}
static DEVICE_ATTR_RO(minimum_voltage);
static struct attribute *source_variable_supply_attrs[] = {
&dev_attr_maximum_voltage.attr,
&dev_attr_minimum_voltage.attr,
&maximum_current_attr.attr,
NULL
};
ATTRIBUTE_GROUPS(source_variable_supply);
static struct device_type source_variable_supply_type = {
.name = "pdo",
.release = pdo_release,
.groups = source_variable_supply_groups,
};
static struct attribute *sink_variable_supply_attrs[] = {
&dev_attr_maximum_voltage.attr,
&dev_attr_minimum_voltage.attr,
&operational_current_attr.attr,
NULL
};
ATTRIBUTE_GROUPS(sink_variable_supply);
static struct device_type sink_variable_supply_type = {
.name = "pdo",
.release = pdo_release,
.groups = sink_variable_supply_groups,
};
/* -------------------------------------------------------------------------- */
/* Battery */
static ssize_t
maximum_power_show(struct device *dev, struct device_attribute *attr, char *buf)
{
return sysfs_emit(buf, "%umW\n", pdo_max_power(to_pdo(dev)->pdo));
}
static DEVICE_ATTR_RO(maximum_power);
static ssize_t
operational_power_show(struct device *dev, struct device_attribute *attr, char *buf)
{
return sysfs_emit(buf, "%umW\n", pdo_max_power(to_pdo(dev)->pdo));
}
static DEVICE_ATTR_RO(operational_power);
static struct attribute *source_battery_attrs[] = {
&dev_attr_maximum_voltage.attr,
&dev_attr_minimum_voltage.attr,
&dev_attr_maximum_power.attr,
NULL
};
ATTRIBUTE_GROUPS(source_battery);
static struct device_type source_battery_type = {
.name = "pdo",
.release = pdo_release,
.groups = source_battery_groups,
};
static struct attribute *sink_battery_attrs[] = {
&dev_attr_maximum_voltage.attr,
&dev_attr_minimum_voltage.attr,
&dev_attr_operational_power.attr,
NULL
};
ATTRIBUTE_GROUPS(sink_battery);
static struct device_type sink_battery_type = {
.name = "pdo",
.release = pdo_release,
.groups = sink_battery_groups,
};
/* -------------------------------------------------------------------------- */
/* Standard Power Range (SPR) Programmable Power Supply (PPS) */
static ssize_t
pps_power_limited_show(struct device *dev, struct device_attribute *attr, char *buf)
{
return sysfs_emit(buf, "%u\n", !!(to_pdo(dev)->pdo & BIT(27)));
}
static DEVICE_ATTR_RO(pps_power_limited);
static ssize_t
pps_max_voltage_show(struct device *dev, struct device_attribute *attr, char *buf)
{
return sysfs_emit(buf, "%umV\n", pdo_pps_apdo_max_voltage(to_pdo(dev)->pdo));
}
static ssize_t
pps_min_voltage_show(struct device *dev, struct device_attribute *attr, char *buf)
{
return sysfs_emit(buf, "%umV\n", pdo_pps_apdo_min_voltage(to_pdo(dev)->pdo));
}
static ssize_t
pps_max_current_show(struct device *dev, struct device_attribute *attr, char *buf)
{
return sysfs_emit(buf, "%umA\n", pdo_pps_apdo_max_current(to_pdo(dev)->pdo));
}
static struct device_attribute pps_max_voltage_attr = {
.attr = {
.name = "maximum_voltage",
.mode = 0444,
},
.show = pps_max_voltage_show,
};
static struct device_attribute pps_min_voltage_attr = {
.attr = {
.name = "minimum_voltage",
.mode = 0444,
},
.show = pps_min_voltage_show,
};
static struct device_attribute pps_max_current_attr = {
.attr = {
.name = "maximum_current",
.mode = 0444,
},
.show = pps_max_current_show,
};
static struct attribute *source_pps_attrs[] = {
&dev_attr_pps_power_limited.attr,
&pps_max_voltage_attr.attr,
&pps_min_voltage_attr.attr,
&pps_max_current_attr.attr,
NULL
};
ATTRIBUTE_GROUPS(source_pps);
static struct device_type source_pps_type = {
.name = "pdo",
.release = pdo_release,
.groups = source_pps_groups,
};
static struct attribute *sink_pps_attrs[] = {
&pps_max_voltage_attr.attr,
&pps_min_voltage_attr.attr,
&pps_max_current_attr.attr,
NULL
};
ATTRIBUTE_GROUPS(sink_pps);
static struct device_type sink_pps_type = {
.name = "pdo",
.release = pdo_release,
.groups = sink_pps_groups,
};
/* -------------------------------------------------------------------------- */
static const char * const supply_name[] = {
[PDO_TYPE_FIXED] = "fixed_supply",
[PDO_TYPE_BATT] = "battery",
[PDO_TYPE_VAR] = "variable_supply",
};
static const char * const apdo_supply_name[] = {
[APDO_TYPE_PPS] = "programmable_supply",
};
static struct device_type *source_type[] = {
[PDO_TYPE_FIXED] = &source_fixed_supply_type,
[PDO_TYPE_BATT] = &source_battery_type,
[PDO_TYPE_VAR] = &source_variable_supply_type,
};
static struct device_type *source_apdo_type[] = {
[APDO_TYPE_PPS] = &source_pps_type,
};
static struct device_type *sink_type[] = {
[PDO_TYPE_FIXED] = &sink_fixed_supply_type,
[PDO_TYPE_BATT] = &sink_battery_type,
[PDO_TYPE_VAR] = &sink_variable_supply_type,
};
static struct device_type *sink_apdo_type[] = {
[APDO_TYPE_PPS] = &sink_pps_type,
};
/* REVISIT: Export when EPR_*_Capabilities need to be supported. */
/*
 * Register one PDO as a child device of @cap. Unknown APDO types are
 * skipped with a warning rather than treated as an error.
 * Returns 0 on success or a negative errno.
 */
static int add_pdo(struct usb_power_delivery_capabilities *cap, u32 pdo, int position)
{
	struct device_type *type;
	const char *name;
	struct pdo *p;
	int ret;

	p = kzalloc(sizeof(*p), GFP_KERNEL);
	if (!p)
		return -ENOMEM;

	p->pdo = pdo;
	p->object_position = position;

	if (pdo_type(pdo) == PDO_TYPE_APDO) {
		/* FIXME: Only PPS supported for now! Skipping others. */
		if (pdo_apdo_type(pdo) > APDO_TYPE_PPS) {
			dev_warn(&cap->dev, "Unknown APDO type. PDO 0x%08x\n", pdo);
			kfree(p);
			return 0;
		}

		/* Source and sink PPS expose different attribute sets. */
		if (is_source(cap->role))
			type = source_apdo_type[pdo_apdo_type(pdo)];
		else
			type = sink_apdo_type[pdo_apdo_type(pdo)];

		name = apdo_supply_name[pdo_apdo_type(pdo)];
	} else {
		if (is_source(cap->role))
			type = source_type[pdo_type(pdo)];
		else
			type = sink_type[pdo_type(pdo)];

		name = supply_name[pdo_type(pdo)];
	}

	p->dev.parent = &cap->dev;
	p->dev.type = type;
	/* Sysfs name is "<1-based object position>:<supply type>". */
	dev_set_name(&p->dev, "%u:%s", position + 1, name);

	ret = device_register(&p->dev);
	if (ret) {
		/* The release callback frees p once the reference drops. */
		put_device(&p->dev);
		return ret;
	}

	return 0;
}
/* device_for_each_child() callback: unregister one child PDO device. */
static int remove_pdo(struct device *dev, void *data)
{
	device_unregister(dev);

	return 0;
}
/* -------------------------------------------------------------------------- */
static const char * const cap_name[] = {
[TYPEC_SINK] = "sink-capabilities",
[TYPEC_SOURCE] = "source-capabilities",
};
static void pd_capabilities_release(struct device *dev)
{
kfree(to_usb_power_delivery_capabilities(dev));
}
static struct device_type pd_capabilities_type = {
.name = "capabilities",
.release = pd_capabilities_release,
};
/**
* usb_power_delivery_register_capabilities - Register a set of capabilities.
* @pd: The USB PD instance that the capabilities belong to.
 * @desc: Description of the Capabilities Message.
*
* This function registers a Capabilities Message described in @desc. The
* capabilities will have their own sub-directory under @pd in sysfs.
*
* The function returns pointer to struct usb_power_delivery_capabilities, or
 * ERR_PTR(errno).
*/
struct usb_power_delivery_capabilities *
usb_power_delivery_register_capabilities(struct usb_power_delivery *pd,
					 struct usb_power_delivery_capabilities_desc *desc)
{
	struct usb_power_delivery_capabilities *cap;
	int ret;
	int i;

	cap = kzalloc(sizeof(*cap), GFP_KERNEL);
	if (!cap)
		return ERR_PTR(-ENOMEM);

	cap->pd = pd;
	cap->role = desc->role;

	cap->dev.parent = &pd->dev;
	cap->dev.type = &pd_capabilities_type;
	/* Directory name is role based: "sink-" or "source-capabilities". */
	dev_set_name(&cap->dev, "%s", cap_name[cap->role]);

	ret = device_register(&cap->dev);
	if (ret) {
		/* The release callback frees cap once the reference drops. */
		put_device(&cap->dev);
		return ERR_PTR(ret);
	}

	/* The PDO list ends at the first zero entry or at PDO_MAX_OBJECTS. */
	for (i = 0; i < PDO_MAX_OBJECTS && desc->pdo[i]; i++) {
		ret = add_pdo(cap, desc->pdo[i], i);
		if (ret) {
			/* Unwinds already-added PDOs and cap itself. */
			usb_power_delivery_unregister_capabilities(cap);
			return ERR_PTR(ret);
		}
	}

	return cap;
}
EXPORT_SYMBOL_GPL(usb_power_delivery_register_capabilities);
/**
* usb_power_delivery_unregister_capabilities - Unregister a set of capabilities
* @cap: The capabilities
*/
void usb_power_delivery_unregister_capabilities(struct usb_power_delivery_capabilities *cap)
{
	if (!cap)
		return;

	/* Remove all child PDO devices before the capabilities device. */
	device_for_each_child(&cap->dev, NULL, remove_pdo);
	device_unregister(&cap->dev);
}
EXPORT_SYMBOL_GPL(usb_power_delivery_unregister_capabilities);
/* -------------------------------------------------------------------------- */
static ssize_t revision_show(struct device *dev, struct device_attribute *attr, char *buf)
{
struct usb_power_delivery *pd = to_usb_power_delivery(dev);
return sysfs_emit(buf, "%u.%u\n", (pd->revision >> 8) & 0xff, (pd->revision >> 4) & 0xf);
}
static DEVICE_ATTR_RO(revision);
static ssize_t version_show(struct device *dev, struct device_attribute *attr, char *buf)
{
struct usb_power_delivery *pd = to_usb_power_delivery(dev);
return sysfs_emit(buf, "%u.%u\n", (pd->version >> 8) & 0xff, (pd->version >> 4) & 0xf);
}
static DEVICE_ATTR_RO(version);
static struct attribute *pd_attrs[] = {
&dev_attr_revision.attr,
&dev_attr_version.attr,
NULL
};
/* Hide the "version" attribute entirely when no version was supplied. */
static umode_t pd_attr_is_visible(struct kobject *kobj, struct attribute *attr, int n)
{
	struct usb_power_delivery *pd = to_usb_power_delivery(kobj_to_dev(kobj));

	if (!pd->version && attr == &dev_attr_version.attr)
		return 0;

	return attr->mode;
}
static const struct attribute_group pd_group = {
.is_visible = pd_attr_is_visible,
.attrs = pd_attrs,
};
__ATTRIBUTE_GROUPS(pd);
/* Device release callback: return the instance ID and free the object. */
static void pd_release(struct device *dev)
{
	struct usb_power_delivery *pd = to_usb_power_delivery(dev);

	/* ida_simple_remove() is deprecated; ida_free() is the replacement. */
	ida_free(&pd_ida, pd->id);
	kfree(pd);
}
static struct device_type pd_type = {
.name = "usb_power_delivery",
.release = pd_release,
.groups = pd_groups,
};
/*
 * Look up a registered USB PD instance by its sysfs name. On success the
 * returned object carries the reference taken by class_find_device_by_name();
 * the caller is responsible for dropping it.
 */
struct usb_power_delivery *usb_power_delivery_find(const char *name)
{
	struct device *dev;

	dev = class_find_device_by_name(&pd_class, name);

	return dev ? to_usb_power_delivery(dev) : NULL;
}
/**
* usb_power_delivery_register - Register USB Power Delivery Support.
* @parent: Parent device.
* @desc: Description of the USB PD contract.
*
* This routine can be used to register USB Power Delivery capabilities that a
* device or devices can support. These capabilities represent all the
* capabilities that can be negotiated with a partner, so not only the Power
* Capabilities that are negotiated using the USB PD Capabilities Message.
*
* The USB Power Delivery Support object that this routine generates can be used
* as the parent object for all the actual USB Power Delivery Messages and
* objects that can be negotiated with the partner.
*
* Returns handle to struct usb_power_delivery or ERR_PTR.
*/
struct usb_power_delivery *
usb_power_delivery_register(struct device *parent, struct usb_power_delivery_desc *desc)
{
	struct usb_power_delivery *pd;
	int ret;

	pd = kzalloc(sizeof(*pd), GFP_KERNEL);
	if (!pd)
		return ERR_PTR(-ENOMEM);

	/* ida_simple_get() is deprecated; ida_alloc() is the replacement. */
	ret = ida_alloc(&pd_ida, GFP_KERNEL);
	if (ret < 0) {
		kfree(pd);
		return ERR_PTR(ret);
	}

	pd->id = ret;
	pd->revision = desc->revision;
	pd->version = desc->version;

	pd->dev.parent = parent;
	pd->dev.type = &pd_type;
	pd->dev.class = &pd_class;
	dev_set_name(&pd->dev, "pd%d", pd->id);

	ret = device_register(&pd->dev);
	if (ret) {
		/* The release callback frees pd and returns its ID. */
		put_device(&pd->dev);
		return ERR_PTR(ret);
	}

	return pd;
}
EXPORT_SYMBOL_GPL(usb_power_delivery_register);
/**
* usb_power_delivery_unregister - Unregister USB Power Delivery Support.
* @pd: The USB PD contract.
*/
void usb_power_delivery_unregister(struct usb_power_delivery *pd)
{
if (IS_ERR_OR_NULL(pd))
return;
device_unregister(&pd->dev);
}
EXPORT_SYMBOL_GPL(usb_power_delivery_unregister);
/**
* usb_power_delivery_link_device - Link device to its USB PD object.
* @pd: The USB PD instance.
* @dev: The device.
*
* This function can be used to create a symlink named "usb_power_delivery" for
* @dev that points to @pd.
*/
int usb_power_delivery_link_device(struct usb_power_delivery *pd, struct device *dev)
{
	int ret;

	/* Missing pd or dev is treated as a successful no-op. */
	if (IS_ERR_OR_NULL(pd) || !dev)
		return 0;

	ret = sysfs_create_link(&dev->kobj, &pd->dev.kobj, "usb_power_delivery");
	if (ret)
		return ret;

	/* Pin both ends of the link until the symlink is removed again. */
	get_device(&pd->dev);
	get_device(dev);

	return 0;
}
EXPORT_SYMBOL_GPL(usb_power_delivery_link_device);
/**
* usb_power_delivery_unlink_device - Unlink device from its USB PD object.
* @pd: The USB PD instance.
* @dev: The device.
*
 * Remove the symlink that was previously created with
 * usb_power_delivery_link_device().
*/
void usb_power_delivery_unlink_device(struct usb_power_delivery *pd, struct device *dev)
{
	if (IS_ERR_OR_NULL(pd) || !dev)
		return;

	sysfs_remove_link(&dev->kobj, "usb_power_delivery");
	/* Drop the references taken when the link was created. */
	put_device(&pd->dev);
	put_device(dev);
}
EXPORT_SYMBOL_GPL(usb_power_delivery_unlink_device);
/* -------------------------------------------------------------------------- */
/* Register the usb_power_delivery class (called from subsystem init). */
int __init usb_power_delivery_init(void)
{
	return class_register(&pd_class);
}

/* Tear down class-level state; all pd devices must be gone by now. */
void __exit usb_power_delivery_exit(void)
{
	ida_destroy(&pd_ida);
	class_unregister(&pd_class);
}
| linux-master | drivers/usb/typec/pd.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Willsemi WUSB3801 Type-C port controller driver
*
* Copyright (C) 2022 Samuel Holland <[email protected]>
*/
#include <linux/i2c.h>
#include <linux/module.h>
#include <linux/regmap.h>
#include <linux/regulator/consumer.h>
#include <linux/usb/typec.h>
#define WUSB3801_REG_DEVICE_ID 0x01
#define WUSB3801_REG_CTRL0 0x02
#define WUSB3801_REG_INT 0x03
#define WUSB3801_REG_STAT 0x04
#define WUSB3801_REG_CTRL1 0x05
#define WUSB3801_REG_TEST00 0x06
#define WUSB3801_REG_TEST01 0x07
#define WUSB3801_REG_TEST02 0x08
#define WUSB3801_REG_TEST03 0x09
#define WUSB3801_REG_TEST04 0x0a
#define WUSB3801_REG_TEST05 0x0b
#define WUSB3801_REG_TEST06 0x0c
#define WUSB3801_REG_TEST07 0x0d
#define WUSB3801_REG_TEST08 0x0e
#define WUSB3801_REG_TEST09 0x0f
#define WUSB3801_REG_TEST0A 0x10
#define WUSB3801_REG_TEST0B 0x11
#define WUSB3801_REG_TEST0C 0x12
#define WUSB3801_REG_TEST0D 0x13
#define WUSB3801_REG_TEST0E 0x14
#define WUSB3801_REG_TEST0F 0x15
#define WUSB3801_REG_TEST10 0x16
#define WUSB3801_REG_TEST11 0x17
#define WUSB3801_REG_TEST12 0x18
#define WUSB3801_DEVICE_ID_VERSION_ID GENMASK(7, 3)
#define WUSB3801_DEVICE_ID_VENDOR_ID GENMASK(2, 0)
#define WUSB3801_CTRL0_DIS_ACC_SUPPORT BIT(7)
#define WUSB3801_CTRL0_TRY GENMASK(6, 5)
#define WUSB3801_CTRL0_TRY_NONE (0x0 << 5)
#define WUSB3801_CTRL0_TRY_SNK (0x1 << 5)
#define WUSB3801_CTRL0_TRY_SRC (0x2 << 5)
#define WUSB3801_CTRL0_CURRENT GENMASK(4, 3) /* SRC */
#define WUSB3801_CTRL0_CURRENT_DEFAULT (0x0 << 3)
#define WUSB3801_CTRL0_CURRENT_1_5A (0x1 << 3)
#define WUSB3801_CTRL0_CURRENT_3_0A (0x2 << 3)
#define WUSB3801_CTRL0_ROLE GENMASK(2, 1)
#define WUSB3801_CTRL0_ROLE_SNK (0x0 << 1)
#define WUSB3801_CTRL0_ROLE_SRC (0x1 << 1)
#define WUSB3801_CTRL0_ROLE_DRP (0x2 << 1)
#define WUSB3801_CTRL0_INT_MASK BIT(0)
#define WUSB3801_INT_ATTACHED BIT(0)
#define WUSB3801_INT_DETACHED BIT(1)
#define WUSB3801_STAT_VBUS_DETECTED BIT(7)
#define WUSB3801_STAT_CURRENT GENMASK(6, 5) /* SNK */
#define WUSB3801_STAT_CURRENT_STANDBY (0x0 << 5)
#define WUSB3801_STAT_CURRENT_DEFAULT (0x1 << 5)
#define WUSB3801_STAT_CURRENT_1_5A (0x2 << 5)
#define WUSB3801_STAT_CURRENT_3_0A (0x3 << 5)
#define WUSB3801_STAT_PARTNER GENMASK(4, 2)
#define WUSB3801_STAT_PARTNER_STANDBY (0x0 << 2)
#define WUSB3801_STAT_PARTNER_SNK (0x1 << 2)
#define WUSB3801_STAT_PARTNER_SRC (0x2 << 2)
#define WUSB3801_STAT_PARTNER_AUDIO (0x3 << 2)
#define WUSB3801_STAT_PARTNER_DEBUG (0x4 << 2)
#define WUSB3801_STAT_ORIENTATION GENMASK(1, 0)
#define WUSB3801_STAT_ORIENTATION_NONE (0x0 << 0)
#define WUSB3801_STAT_ORIENTATION_CC1 (0x1 << 0)
#define WUSB3801_STAT_ORIENTATION_CC2 (0x2 << 0)
#define WUSB3801_STAT_ORIENTATION_BOTH (0x3 << 0)
#define WUSB3801_CTRL1_SM_RESET BIT(0)
#define WUSB3801_TEST01_VENDOR_SUB_ID (BIT(8) | BIT(6))
#define WUSB3801_TEST02_FORCE_ERR_RCY BIT(8)
#define WUSB3801_TEST0A_WAIT_VBUS BIT(5)
struct wusb3801 {
struct typec_capability cap;
struct device *dev;
struct typec_partner *partner;
struct typec_port *port;
struct regmap *regmap;
struct regulator *vbus_supply;
unsigned int partner_type;
enum typec_port_type port_type;
enum typec_pwr_opmode pwr_opmode;
bool vbus_on;
};
/*
 * Power role to report when nothing is attached: fixed by the port type
 * for SRC/SNK ports, otherwise derived from the DRP role preference
 * (defaulting to sink).
 */
static enum typec_role wusb3801_get_default_role(struct wusb3801 *wusb3801)
{
	if (wusb3801->port_type == TYPEC_PORT_SRC)
		return TYPEC_SOURCE;
	if (wusb3801->port_type == TYPEC_PORT_SNK)
		return TYPEC_SINK;

	/* TYPEC_PORT_DRP or anything unexpected. */
	return wusb3801->cap.prefer_role == TYPEC_SOURCE ? TYPEC_SOURCE
							 : TYPEC_SINK;
}
/* Translate the typec core's port type into CTRL0 ROLE field bits. */
static int wusb3801_map_port_type(enum typec_port_type type)
{
	if (type == TYPEC_PORT_SRC)
		return WUSB3801_CTRL0_ROLE_SRC;
	if (type == TYPEC_PORT_SNK)
		return WUSB3801_CTRL0_ROLE_SNK;

	/* TYPEC_PORT_DRP and any unknown value fall back to DRP. */
	return WUSB3801_CTRL0_ROLE_DRP;
}
/* Translate an advertised source current mode into CTRL0 CURRENT bits. */
static int wusb3801_map_pwr_opmode(enum typec_pwr_opmode mode)
{
	if (mode == TYPEC_PWR_MODE_1_5A)
		return WUSB3801_CTRL0_CURRENT_1_5A;
	if (mode == TYPEC_PWR_MODE_3_0A)
		return WUSB3801_CTRL0_CURRENT_3_0A;

	/* TYPEC_PWR_MODE_USB and any unknown mode use the default current. */
	return WUSB3801_CTRL0_CURRENT_DEFAULT;
}
/* Translate a preferred role into CTRL0 Try.SNK/Try.SRC bits. */
static unsigned int wusb3801_map_try_role(int role)
{
	if (role == TYPEC_SINK)
		return WUSB3801_CTRL0_TRY_SNK;
	if (role == TYPEC_SOURCE)
		return WUSB3801_CTRL0_TRY_SRC;

	/* TYPEC_NO_PREFERRED_ROLE and any unknown value: no Try.* state. */
	return WUSB3801_CTRL0_TRY_NONE;
}
/* Decode the STAT orientation field into the typec core's enum. */
static enum typec_orientation wusb3801_unmap_orientation(unsigned int status)
{
	unsigned int cc = status & WUSB3801_STAT_ORIENTATION;

	if (cc == WUSB3801_STAT_ORIENTATION_CC1)
		return TYPEC_ORIENTATION_NORMAL;
	if (cc == WUSB3801_STAT_ORIENTATION_CC2)
		return TYPEC_ORIENTATION_REVERSE;

	/* NONE and BOTH (both CC pins active) carry no orientation. */
	return TYPEC_ORIENTATION_NONE;
}
/* Decode the STAT sink current field into the typec core's enum. */
static enum typec_pwr_opmode wusb3801_unmap_pwr_opmode(unsigned int status)
{
	unsigned int cur = status & WUSB3801_STAT_CURRENT;

	if (cur == WUSB3801_STAT_CURRENT_1_5A)
		return TYPEC_PWR_MODE_1_5A;
	if (cur == WUSB3801_STAT_CURRENT_3_0A)
		return TYPEC_PWR_MODE_3_0A;

	/* STANDBY and DEFAULT both map to plain USB default power. */
	return TYPEC_PWR_MODE_USB;
}
/* typec_operations.try_role: program the Try.SNK/Try.SRC preference. */
static int wusb3801_try_role(struct typec_port *port, int role)
{
	struct wusb3801 *wusb3801 = typec_get_drvdata(port);

	return regmap_update_bits(wusb3801->regmap, WUSB3801_REG_CTRL0,
				  WUSB3801_CTRL0_TRY,
				  wusb3801_map_try_role(role));
}
/* typec_operations.port_type_set: change the hardware port role. */
static int wusb3801_port_type_set(struct typec_port *port,
				  enum typec_port_type type)
{
	struct wusb3801 *chip = typec_get_drvdata(port);
	int err;

	/* Update the CTRL0 role field first... */
	err = regmap_update_bits(chip->regmap, WUSB3801_REG_CTRL0,
				 WUSB3801_CTRL0_ROLE,
				 wusb3801_map_port_type(type));
	if (err)
		return err;

	/* ...and cache the new port type only once the write succeeded. */
	chip->port_type = type;

	return 0;
}
/* Operations exposed to the typec class (sysfs try_role/port_type). */
static const struct typec_operations wusb3801_typec_ops = {
	.try_role = wusb3801_try_role,
	.port_type_set = wusb3801_port_type_set,
};
/*
 * Program CTRL0 from the cached devicetree configuration: preferred
 * (Try.*) role, advertised source current and port role.
 */
static int wusb3801_hw_init(struct wusb3801 *wusb3801)
{
	return regmap_write(wusb3801->regmap, WUSB3801_REG_CTRL0,
			    wusb3801_map_try_role(wusb3801->cap.prefer_role) |
			    wusb3801_map_pwr_opmode(wusb3801->pwr_opmode) |
			    wusb3801_map_port_type(wusb3801->port_type));
}
/*
 * Synchronize the typec class state with the hardware STAT register.
 *
 * Called from probe and from the interrupt handler.  Reads the status,
 * drives the VBUS supply when a sink is attached, (re)registers the
 * partner device when the partner type changed, and updates the power
 * opmode and cable orientation attributes.
 */
static void wusb3801_hw_update(struct wusb3801 *wusb3801)
{
	struct typec_port *port = wusb3801->port;
	struct device *dev = wusb3801->dev;
	unsigned int partner_type, status;
	int ret;

	ret = regmap_read(wusb3801->regmap, WUSB3801_REG_STAT, &status);
	if (ret) {
		dev_warn(dev, "Failed to read port status: %d\n", ret);
		/* Treat a failed read as "nothing attached". */
		status = 0;
	}
	dev_dbg(dev, "status = 0x%02x\n", status);

	partner_type = status & WUSB3801_STAT_PARTNER;

	/* Source VBUS only while a sink is attached. */
	if (partner_type == WUSB3801_STAT_PARTNER_SNK) {
		if (!wusb3801->vbus_on) {
			ret = regulator_enable(wusb3801->vbus_supply);
			if (ret)
				dev_warn(dev, "Failed to enable VBUS: %d\n", ret);
			wusb3801->vbus_on = true;
		}
	} else {
		if (wusb3801->vbus_on) {
			regulator_disable(wusb3801->vbus_supply);
			wusb3801->vbus_on = false;
		}
	}

	if (partner_type != wusb3801->partner_type) {
		struct typec_partner_desc desc = {};
		enum typec_data_role data_role;
		enum typec_role pwr_role = wusb3801_get_default_role(wusb3801);

		switch (partner_type) {
		case WUSB3801_STAT_PARTNER_STANDBY:
			break;
		case WUSB3801_STAT_PARTNER_SNK:
			pwr_role = TYPEC_SOURCE;
			break;
		case WUSB3801_STAT_PARTNER_SRC:
			pwr_role = TYPEC_SINK;
			break;
		case WUSB3801_STAT_PARTNER_AUDIO:
			desc.accessory = TYPEC_ACCESSORY_AUDIO;
			break;
		case WUSB3801_STAT_PARTNER_DEBUG:
			desc.accessory = TYPEC_ACCESSORY_DEBUG;
			break;
		}

		/* Drop any previous partner before registering the new one. */
		if (wusb3801->partner) {
			typec_unregister_partner(wusb3801->partner);
			wusb3801->partner = NULL;
		}

		if (partner_type != WUSB3801_STAT_PARTNER_STANDBY) {
			wusb3801->partner = typec_register_partner(port, &desc);
			if (IS_ERR(wusb3801->partner))
				dev_err(dev, "Failed to register partner: %ld\n",
					PTR_ERR(wusb3801->partner));
		}

		/* Data and VCONN roles follow the power role here. */
		data_role = pwr_role == TYPEC_SOURCE ? TYPEC_HOST : TYPEC_DEVICE;
		typec_set_data_role(port, data_role);
		typec_set_pwr_role(port, pwr_role);
		typec_set_vconn_role(port, pwr_role);
	}

	/* When attached to a source, report the current it advertises. */
	typec_set_pwr_opmode(wusb3801->port,
			     partner_type == WUSB3801_STAT_PARTNER_SRC
				? wusb3801_unmap_pwr_opmode(status)
				: wusb3801->pwr_opmode);
	typec_set_orientation(wusb3801->port,
			      wusb3801_unmap_orientation(status));

	wusb3801->partner_type = partner_type;
}
/* Threaded IRQ handler: ack the interrupt and resync the port state. */
static irqreturn_t wusb3801_irq(int irq, void *data)
{
	struct wusb3801 *wusb3801 = data;
	unsigned int dummy;

	/*
	 * The interrupt register must be read in order to clear the IRQ,
	 * but all of the useful information is in the status register.
	 */
	regmap_read(wusb3801->regmap, WUSB3801_REG_INT, &dummy);

	wusb3801_hw_update(wusb3801);

	return IRQ_HANDLED;
}
/* 8-bit register / 8-bit value I2C register map. */
static const struct regmap_config config = {
	.reg_bits = 8,
	.val_bits = 8,
	.max_register = WUSB3801_REG_TEST12,
};
static int wusb3801_probe(struct i2c_client *client)
{
struct device *dev = &client->dev;
struct fwnode_handle *connector;
struct wusb3801 *wusb3801;
const char *cap_str;
int ret;
wusb3801 = devm_kzalloc(dev, sizeof(*wusb3801), GFP_KERNEL);
if (!wusb3801)
return -ENOMEM;
i2c_set_clientdata(client, wusb3801);
wusb3801->dev = dev;
wusb3801->regmap = devm_regmap_init_i2c(client, &config);
if (IS_ERR(wusb3801->regmap))
return PTR_ERR(wusb3801->regmap);
wusb3801->vbus_supply = devm_regulator_get(dev, "vbus");
if (IS_ERR(wusb3801->vbus_supply))
return PTR_ERR(wusb3801->vbus_supply);
connector = device_get_named_child_node(dev, "connector");
if (!connector)
return -ENODEV;
ret = typec_get_fw_cap(&wusb3801->cap, connector);
if (ret)
goto err_put_connector;
wusb3801->port_type = wusb3801->cap.type;
ret = fwnode_property_read_string(connector, "typec-power-opmode", &cap_str);
if (ret)
goto err_put_connector;
ret = typec_find_pwr_opmode(cap_str);
if (ret < 0 || ret == TYPEC_PWR_MODE_PD)
goto err_put_connector;
wusb3801->pwr_opmode = ret;
/* Initialize the hardware with the devicetree settings. */
ret = wusb3801_hw_init(wusb3801);
if (ret)
goto err_put_connector;
wusb3801->cap.revision = USB_TYPEC_REV_1_2;
wusb3801->cap.accessory[0] = TYPEC_ACCESSORY_AUDIO;
wusb3801->cap.accessory[1] = TYPEC_ACCESSORY_DEBUG;
wusb3801->cap.orientation_aware = true;
wusb3801->cap.driver_data = wusb3801;
wusb3801->cap.ops = &wusb3801_typec_ops;
wusb3801->port = typec_register_port(dev, &wusb3801->cap);
if (IS_ERR(wusb3801->port)) {
ret = PTR_ERR(wusb3801->port);
goto err_put_connector;
}
/* Initialize the port attributes from the hardware state. */
wusb3801_hw_update(wusb3801);
ret = request_threaded_irq(client->irq, NULL, wusb3801_irq,
IRQF_ONESHOT, dev_name(dev), wusb3801);
if (ret)
goto err_unregister_port;
fwnode_handle_put(connector);
return 0;
err_unregister_port:
typec_unregister_port(wusb3801->port);
err_put_connector:
fwnode_handle_put(connector);
return ret;
}
/* Remove: release the IRQ, partner, port, and the VBUS supply if on. */
static void wusb3801_remove(struct i2c_client *client)
{
	struct wusb3801 *wusb3801 = i2c_get_clientdata(client);

	/* Free the IRQ first so wusb3801_hw_update() can no longer run. */
	free_irq(client->irq, wusb3801);

	if (wusb3801->partner)
		typec_unregister_partner(wusb3801->partner);
	typec_unregister_port(wusb3801->port);

	if (wusb3801->vbus_on)
		regulator_disable(wusb3801->vbus_supply);
}
/* Devicetree match table: the only supported compatible string. */
static const struct of_device_id wusb3801_of_match[] = {
	{ .compatible = "willsemi,wusb3801" },
	{}
};
MODULE_DEVICE_TABLE(of, wusb3801_of_match);
static struct i2c_driver wusb3801_driver = {
.probe = wusb3801_probe,
.remove = wusb3801_remove,
.driver = {
.name = "wusb3801",
.of_match_table = wusb3801_of_match,
},
};
module_i2c_driver(wusb3801_driver);
MODULE_AUTHOR("Samuel Holland <[email protected]>");
MODULE_DESCRIPTION("Willsemi WUSB3801 Type-C port controller driver");
MODULE_LICENSE("GPL");
| linux-master | drivers/usb/typec/wusb3801.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright 2022 Google LLC
*
* USB Type-C Retimer support.
* Author: Prashant Malani <[email protected]>
*
*/
#include <linux/device.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/property.h>
#include <linux/slab.h>
#include "class.h"
#include "retimer.h"
/* class_find_device() callback: match retimer devices bound to @fwnode. */
static int retimer_fwnode_match(struct device *dev, const void *fwnode)
{
	if (!is_typec_retimer(dev))
		return 0;

	return device_match_fwnode(dev, fwnode);
}
/*
 * fwnode_connection_find_match() callback: resolve a connection fwnode to
 * a registered retimer.  Returns NULL when the optional @id property is
 * absent, the retimer on success, or ERR_PTR(-EPROBE_DEFER) when the
 * retimer device has not been enumerated yet.
 */
static void *typec_retimer_match(const struct fwnode_handle *fwnode, const char *id, void *data)
{
	struct device *dev;

	if (id && !fwnode_property_present(fwnode, id))
		return NULL;

	dev = class_find_device(&retimer_class, NULL, fwnode,
				retimer_fwnode_match);

	return dev ? to_typec_retimer(dev) : ERR_PTR(-EPROBE_DEFER);
}
/**
* fwnode_typec_retimer_get - Find USB Type-C retimer.
* @fwnode: The caller device node.
*
* Finds a retimer linked to the caller. This function is primarily meant for the
* Type-C drivers. Returns a reference to the retimer on success, NULL if no
* matching connection was found, or ERR_PTR(-EPROBE_DEFER) when a connection
* was found but the retimer has not been enumerated yet.
*/
struct typec_retimer *fwnode_typec_retimer_get(struct fwnode_handle *fwnode)
{
	struct typec_retimer *retimer;

	retimer = fwnode_connection_find_match(fwnode, "retimer-switch", NULL, typec_retimer_match);
	if (!IS_ERR_OR_NULL(retimer))
		/* Pin the providing driver while the handle is held. */
		WARN_ON(!try_module_get(retimer->dev.parent->driver->owner));

	return retimer;
}
EXPORT_SYMBOL_GPL(fwnode_typec_retimer_get);
/**
* typec_retimer_put - Release handle to a retimer.
* @retimer: USB Type-C Connector Retimer.
*
* Decrements reference count for @retimer.
*/
void typec_retimer_put(struct typec_retimer *retimer)
{
	/* Accepts NULL/ERR_PTR so callers can pass a failed _get() result. */
	if (!IS_ERR_OR_NULL(retimer)) {
		module_put(retimer->dev.parent->driver->owner);
		put_device(&retimer->dev);
	}
}
EXPORT_SYMBOL_GPL(typec_retimer_put);
/**
 * typec_retimer_set - Configure the retimer with a new state.
 * @retimer: USB Type-C retimer (NULL or ERR_PTR is treated as a no-op).
 * @state: State to apply.
 *
 * Returns 0 when there is no retimer to configure, otherwise the return
 * value of the retimer driver's ->set() callback.
 */
int typec_retimer_set(struct typec_retimer *retimer, struct typec_retimer_state *state)
{
	if (IS_ERR_OR_NULL(retimer))
		return 0;

	return retimer->set(retimer, state);
}
EXPORT_SYMBOL_GPL(typec_retimer_set);
/* Device release callback: frees the retimer allocated at register time. */
static void typec_retimer_release(struct device *dev)
{
	kfree(to_typec_retimer(dev));
}

const struct device_type typec_retimer_dev_type = {
	.name = "typec_retimer",
	.release = typec_retimer_release,
};
/**
* typec_retimer_register - Register a retimer device.
* @parent: Parent device.
* @desc: Retimer description.
*
* Some USB Type-C connectors have their physical lines routed through retimers before they
* reach muxes or host controllers. In some cases (for example: using alternate modes)
* these retimers need to be reconfigured appropriately. This function registers retimer
* switches which route and potentially modify the signals on the Type C physical lines
* enroute to the host controllers.
*/
struct typec_retimer *
typec_retimer_register(struct device *parent, const struct typec_retimer_desc *desc)
{
	struct typec_retimer *retimer;
	int ret;

	/* A ->set() callback is mandatory. */
	if (!desc || !desc->set)
		return ERR_PTR(-EINVAL);

	retimer = kzalloc(sizeof(*retimer), GFP_KERNEL);
	if (!retimer)
		return ERR_PTR(-ENOMEM);

	retimer->set = desc->set;

	device_initialize(&retimer->dev);
	retimer->dev.parent = parent;
	retimer->dev.fwnode = desc->fwnode;
	retimer->dev.class = &retimer_class;
	retimer->dev.type = &typec_retimer_dev_type;
	retimer->dev.driver_data = desc->drvdata;
	/* Fall back to the parent's name when no name was supplied. */
	dev_set_name(&retimer->dev, "%s-retimer",
		     desc->name ? desc->name : dev_name(parent));

	ret = device_add(&retimer->dev);
	if (ret) {
		dev_err(parent, "failed to register retimer (%d)\n", ret);
		/* put_device() triggers typec_retimer_release(). */
		put_device(&retimer->dev);
		return ERR_PTR(ret);
	}

	return retimer;
}
EXPORT_SYMBOL_GPL(typec_retimer_register);
/**
* typec_retimer_unregister - Unregister retimer device.
* @retimer: USB Type-C Connector retimer.
*
* Unregister retimer that was registered with typec_retimer_register().
*/
void typec_retimer_unregister(struct typec_retimer *retimer)
{
	/* NULL/ERR_PTR tolerated for symmetry with the register path. */
	if (!IS_ERR_OR_NULL(retimer))
		device_unregister(&retimer->dev);
}
EXPORT_SYMBOL_GPL(typec_retimer_unregister);
/* Return the driver data supplied via typec_retimer_desc.drvdata. */
void *typec_retimer_get_drvdata(struct typec_retimer *retimer)
{
	return dev_get_drvdata(&retimer->dev);
}
EXPORT_SYMBOL_GPL(typec_retimer_get_drvdata);

/* Class all retimer devices are registered under ("/sys/class/retimer"). */
struct class retimer_class = {
	.name = "retimer",
};
| linux-master | drivers/usb/typec/retimer.c |
// SPDX-License-Identifier: GPL-2.0
/*
* USB Typec-C DisplayPort Alternate Mode driver
*
* Copyright (C) 2018 Intel Corporation
* Author: Heikki Krogerus <[email protected]>
*
* DisplayPort is trademark of VESA (www.vesa.org)
*/
#include <linux/delay.h>
#include <linux/mutex.h>
#include <linux/module.h>
#include <linux/property.h>
#include <linux/usb/pd_vdo.h>
#include <linux/usb/typec_dp.h>
#include <drm/drm_connector.h>
#include "displayport.h"
#define DP_HEADER(_dp, ver, cmd) (VDO((_dp)->alt->svid, 1, ver, cmd) \
| VDO_OPOS(USB_TYPEC_DP_MODE))
enum {
DP_CONF_USB,
DP_CONF_DFP_D,
DP_CONF_UFP_D,
DP_CONF_DUAL_D,
};
/* Pin assignments that use USB3.1 Gen2 signaling to carry DP protocol */
#define DP_PIN_ASSIGN_GEN2_BR_MASK (BIT(DP_PIN_ASSIGN_A) | \
BIT(DP_PIN_ASSIGN_B))
/* Pin assignments that use DP v1.3 signaling to carry DP protocol */
#define DP_PIN_ASSIGN_DP_BR_MASK (BIT(DP_PIN_ASSIGN_C) | \
BIT(DP_PIN_ASSIGN_D) | \
BIT(DP_PIN_ASSIGN_E) | \
BIT(DP_PIN_ASSIGN_F))
/* DP only pin assignments */
#define DP_PIN_ASSIGN_DP_ONLY_MASK (BIT(DP_PIN_ASSIGN_A) | \
BIT(DP_PIN_ASSIGN_C) | \
BIT(DP_PIN_ASSIGN_E))
/* Pin assignments where one channel is for USB */
#define DP_PIN_ASSIGN_MULTI_FUNC_MASK (BIT(DP_PIN_ASSIGN_B) | \
BIT(DP_PIN_ASSIGN_D) | \
BIT(DP_PIN_ASSIGN_F))
enum dp_state {
DP_STATE_IDLE,
DP_STATE_ENTER,
DP_STATE_UPDATE,
DP_STATE_CONFIGURE,
DP_STATE_EXIT,
};
struct dp_altmode {
struct typec_displayport_data data;
enum dp_state state;
bool hpd;
bool pending_hpd;
struct mutex lock; /* device lock */
struct work_struct work;
struct typec_altmode *alt;
const struct typec_altmode *port;
struct fwnode_handle *connector_fwnode;
};
/*
 * Notify the mux about the current configuration: the modal state derived
 * from the active pin assignment, or plain USB when DP is not configured.
 */
static int dp_altmode_notify(struct dp_altmode *dp)
{
	unsigned long conf;
	u8 state;

	if (dp->data.conf) {
		/* Pin assignment bit index -> typec modal state. */
		state = get_count_order(DP_CONF_GET_PIN_ASSIGN(dp->data.conf));
		conf = TYPEC_MODAL_STATE(state);
	} else {
		conf = TYPEC_STATE_USB;
	}

	return typec_altmode_notify(dp->alt, conf, &dp->data);
}
/*
 * Build the Configure VDO for connection state @con.
 *
 * Picks a pin assignment from the intersection of the partner's and the
 * local port's capabilities, preferring multi-function (USB + DP)
 * assignments when the partner asks for them, and pin assignment C for
 * DP-only operation when available.  Stores the result in dp->data.conf.
 * Returns -EINVAL when no usable pin assignment exists.
 */
static int dp_altmode_configure(struct dp_altmode *dp, u8 con)
{
	u32 conf = DP_CONF_SIGNALING_DP; /* Only DP signaling supported */
	u8 pin_assign = 0;

	switch (con) {
	case DP_STATUS_CON_DISABLED:
		return 0;
	case DP_STATUS_CON_DFP_D:
		conf |= DP_CONF_UFP_U_AS_DFP_D;
		pin_assign = DP_CAP_UFP_D_PIN_ASSIGN(dp->alt->vdo) &
			     DP_CAP_DFP_D_PIN_ASSIGN(dp->port->vdo);
		break;
	case DP_STATUS_CON_UFP_D:
	case DP_STATUS_CON_BOTH: /* NOTE: First acting as DP source */
		conf |= DP_CONF_UFP_U_AS_UFP_D;
		pin_assign = DP_CAP_PIN_ASSIGN_UFP_D(dp->alt->vdo) &
				 DP_CAP_PIN_ASSIGN_DFP_D(dp->port->vdo);
		break;
	default:
		break;
	}

	/* Determining the initial pin assignment. */
	if (!DP_CONF_GET_PIN_ASSIGN(dp->data.conf)) {
		/* Is USB together with DP preferred */
		if (dp->data.status & DP_STATUS_PREFER_MULTI_FUNC &&
		    pin_assign & DP_PIN_ASSIGN_MULTI_FUNC_MASK)
			pin_assign &= DP_PIN_ASSIGN_MULTI_FUNC_MASK;
		else if (pin_assign & DP_PIN_ASSIGN_DP_ONLY_MASK) {
			pin_assign &= DP_PIN_ASSIGN_DP_ONLY_MASK;
			/* Default to pin assign C if available */
			if (pin_assign & BIT(DP_PIN_ASSIGN_C))
				pin_assign = BIT(DP_PIN_ASSIGN_C);
		}

		if (!pin_assign)
			return -EINVAL;

		conf |= DP_CONF_SET_PIN_ASSIGN(pin_assign);
	}

	dp->data.conf = conf;

	return 0;
}
/*
 * React to a new Status VDO in dp->data.status: schedule a reconfigure,
 * an exit, or forward a hot-plug-detect change to the DRM connector.
 * The next state is stored in dp->state; the caller runs the work item.
 */
static int dp_altmode_status_update(struct dp_altmode *dp)
{
	bool configured = !!DP_CONF_GET_PIN_ASSIGN(dp->data.conf);
	bool hpd = !!(dp->data.status & DP_STATUS_HPD_STATE);
	u8 con = DP_STATUS_CONNECTION(dp->data.status);
	int ret = 0;

	if (configured && (dp->data.status & DP_STATUS_SWITCH_TO_USB)) {
		/* Partner requests plain USB: clear the DP configuration. */
		dp->data.conf = 0;
		dp->state = DP_STATE_CONFIGURE;
	} else if (dp->data.status & DP_STATUS_EXIT_DP_MODE) {
		dp->state = DP_STATE_EXIT;
	} else if (!(con & DP_CONF_CURRENTLY(dp->data.conf))) {
		/* Connection state changed: build a new configuration. */
		ret = dp_altmode_configure(dp, con);
		if (!ret) {
			dp->state = DP_STATE_CONFIGURE;
			/* Defer the HPD event until configuration is done. */
			if (dp->hpd != hpd) {
				dp->hpd = hpd;
				dp->pending_hpd = true;
			}
		}
	} else {
		if (dp->hpd != hpd) {
			drm_connector_oob_hotplug_event(dp->connector_fwnode);
			dp->hpd = hpd;
			sysfs_notify(&dp->alt->dev.kobj, "displayport", "hpd");
		}
	}

	return ret;
}
/*
 * Handle a (N)ACKed Configure command: notify userspace via sysfs, emit
 * any HPD event deferred during configuration, and update the mux.
 */
static int dp_altmode_configured(struct dp_altmode *dp)
{
	sysfs_notify(&dp->alt->dev.kobj, "displayport", "configuration");
	sysfs_notify(&dp->alt->dev.kobj, "displayport", "pin_assignment");
	/*
	 * If the DFP_D/UFP_D sends a change in HPD when first notifying the
	 * DisplayPort driver that it is connected, then we wait until
	 * configuration is complete to signal HPD.
	 */
	if (dp->pending_hpd) {
		drm_connector_oob_hotplug_event(dp->connector_fwnode);
		sysfs_notify(&dp->alt->dev.kobj, "displayport", "hpd");
		dp->pending_hpd = false;
	}

	return dp_altmode_notify(dp);
}
/*
 * Send a DP Configure VDM with configuration @conf.  The connector is
 * first placed in the safe state; on send failure the mux is notified
 * again with the previous configuration.
 */
static int dp_altmode_configure_vdm(struct dp_altmode *dp, u32 conf)
{
	int svdm_version = typec_altmode_get_svdm_version(dp->alt);
	u32 header;
	int ret;

	if (svdm_version < 0)
		return svdm_version;

	header = DP_HEADER(dp, svdm_version, DP_CMD_CONFIGURE);

	ret = typec_altmode_notify(dp->alt, TYPEC_STATE_SAFE, &dp->data);
	if (ret) {
		dev_err(&dp->alt->dev,
			"unable to put to connector to safe mode\n");
		return ret;
	}

	ret = typec_altmode_vdm(dp->alt, header, &conf, 2);
	if (ret)
		dp_altmode_notify(dp);

	return ret;
}
/*
 * Deferred state machine runner: executes the command matching dp->state
 * (enter, status update, configure, exit) and returns to IDLE.  Runs with
 * dp->lock held so it cannot race the VDM/attention callbacks.
 */
static void dp_altmode_work(struct work_struct *work)
{
	struct dp_altmode *dp = container_of(work, struct dp_altmode, work);
	int svdm_version;
	u32 header;
	u32 vdo;
	int ret;

	mutex_lock(&dp->lock);

	switch (dp->state) {
	case DP_STATE_ENTER:
		ret = typec_altmode_enter(dp->alt, NULL);
		/* -EBUSY means another enter is already in flight. */
		if (ret && ret != -EBUSY)
			dev_err(&dp->alt->dev, "failed to enter mode\n");
		break;
	case DP_STATE_UPDATE:
		svdm_version = typec_altmode_get_svdm_version(dp->alt);
		if (svdm_version < 0)
			break;
		header = DP_HEADER(dp, svdm_version, DP_CMD_STATUS_UPDATE);
		vdo = 1;
		ret = typec_altmode_vdm(dp->alt, header, &vdo, 2);
		if (ret)
			dev_err(&dp->alt->dev,
				"unable to send Status Update command (%d)\n",
				ret);
		break;
	case DP_STATE_CONFIGURE:
		ret = dp_altmode_configure_vdm(dp, dp->data.conf);
		if (ret)
			dev_err(&dp->alt->dev,
				"unable to send Configure command (%d)\n", ret);
		break;
	case DP_STATE_EXIT:
		if (typec_altmode_exit(dp->alt))
			dev_err(&dp->alt->dev, "Exit Mode Failed!\n");
		break;
	default:
		break;
	}

	dp->state = DP_STATE_IDLE;

	mutex_unlock(&dp->lock);
}
/*
 * Attention message handler: records the new Status VDO, updates the
 * state machine and the mux, and kicks the work item if a new state was
 * queued while we were idle.
 */
static void dp_altmode_attention(struct typec_altmode *alt, const u32 vdo)
{
	struct dp_altmode *dp = typec_altmode_get_drvdata(alt);
	u8 old_state;

	mutex_lock(&dp->lock);

	old_state = dp->state;
	dp->data.status = vdo;

	if (old_state != DP_STATE_IDLE)
		dev_warn(&alt->dev, "ATTENTION while processing state %d\n",
			 old_state);

	if (dp_altmode_status_update(dp))
		dev_warn(&alt->dev, "%s: status update failed\n", __func__);

	if (dp_altmode_notify(dp))
		dev_err(&alt->dev, "%s: notification failed\n", __func__);

	/* Only schedule if the work is not already pending for old_state. */
	if (old_state == DP_STATE_IDLE && dp->state != DP_STATE_IDLE)
		schedule_work(&dp->work);

	mutex_unlock(&dp->lock);
}
/*
 * VDM response handler: processes ACK/NAK replies to Enter/Exit Mode,
 * Status Update and Configure, advancing the state machine accordingly.
 * Returns -EBUSY when a previous command is still being processed.
 */
static int dp_altmode_vdm(struct typec_altmode *alt,
			  const u32 hdr, const u32 *vdo, int count)
{
	struct dp_altmode *dp = typec_altmode_get_drvdata(alt);
	int cmd_type = PD_VDO_CMDT(hdr);
	int cmd = PD_VDO_CMD(hdr);
	int ret = 0;

	mutex_lock(&dp->lock);

	if (dp->state != DP_STATE_IDLE) {
		ret = -EBUSY;
		goto err_unlock;
	}

	switch (cmd_type) {
	case CMDT_RSP_ACK:
		switch (cmd) {
		case CMD_ENTER_MODE:
			typec_altmode_update_active(alt, true);
			/* Mode entered: ask the partner for its status. */
			dp->state = DP_STATE_UPDATE;
			break;
		case CMD_EXIT_MODE:
			typec_altmode_update_active(alt, false);
			dp->data.status = 0;
			dp->data.conf = 0;
			break;
		case DP_CMD_STATUS_UPDATE:
			dp->data.status = *vdo;
			ret = dp_altmode_status_update(dp);
			break;
		case DP_CMD_CONFIGURE:
			ret = dp_altmode_configured(dp);
			break;
		default:
			break;
		}
		break;
	case CMDT_RSP_NAK:
		switch (cmd) {
		case DP_CMD_CONFIGURE:
			/* Partner rejected the configuration: fall back. */
			dp->data.conf = 0;
			ret = dp_altmode_configured(dp);
			break;
		default:
			break;
		}
		break;
	default:
		break;
	}

	if (dp->state != DP_STATE_IDLE)
		schedule_work(&dp->work);

err_unlock:
	mutex_unlock(&dp->lock);
	return ret;
}
/* Userspace activation hook: enter or exit the DP alternate mode. */
static int dp_altmode_activate(struct typec_altmode *alt, int activate)
{
	if (activate)
		return typec_altmode_enter(alt, NULL);

	return typec_altmode_exit(alt);
}
static const struct typec_altmode_ops dp_altmode_ops = {
.attention = dp_altmode_attention,
.vdm = dp_altmode_vdm,
.activate = dp_altmode_activate,
};
static const char * const configurations[] = {
[DP_CONF_USB] = "USB",
[DP_CONF_DFP_D] = "source",
[DP_CONF_UFP_D] = "sink",
};
/*
 * sysfs "configuration" store: switch between USB/source/sink operation.
 * Rejects roles the partner did not advertise and fails with -EBUSY
 * while a command is in flight.
 */
static ssize_t
configuration_store(struct device *dev, struct device_attribute *attr,
		    const char *buf, size_t size)
{
	struct dp_altmode *dp = dev_get_drvdata(dev);
	u32 conf;
	u32 cap;
	int con;
	int ret = 0;

	con = sysfs_match_string(configurations, buf);
	if (con < 0)
		return con;

	mutex_lock(&dp->lock);

	if (dp->state != DP_STATE_IDLE) {
		ret = -EBUSY;
		goto err_unlock;
	}

	cap = DP_CAP_CAPABILITY(dp->alt->vdo);

	/* The requested role must be present in the partner capabilities. */
	if ((con == DP_CONF_DFP_D && !(cap & DP_CAP_DFP_D)) ||
	    (con == DP_CONF_UFP_D && !(cap & DP_CAP_UFP_D))) {
		ret = -EINVAL;
		goto err_unlock;
	}

	conf = dp->data.conf & ~DP_CONF_DUAL_D;
	conf |= con;

	if (dp->alt->active) {
		ret = dp_altmode_configure_vdm(dp, conf);
		if (ret)
			goto err_unlock;
	}

	dp->data.conf = conf;

err_unlock:
	mutex_unlock(&dp->lock);

	return ret ? ret : size;
}
/*
 * sysfs "configuration" show: list the available configurations with the
 * active one in brackets, e.g. "USB [source] sink".
 */
static ssize_t configuration_show(struct device *dev,
				  struct device_attribute *attr, char *buf)
{
	struct dp_altmode *dp = dev_get_drvdata(dev);
	int len;
	u8 cap;
	u8 cur;
	int i;

	mutex_lock(&dp->lock);

	cap = DP_CAP_CAPABILITY(dp->alt->vdo);
	cur = DP_CONF_CURRENTLY(dp->data.conf);

	len = sprintf(buf, "%s ", cur ? "USB" : "[USB]");

	for (i = 1; i < ARRAY_SIZE(configurations); i++) {
		if (i == cur)
			len += sprintf(buf + len, "[%s] ", configurations[i]);
		else if ((i == DP_CONF_DFP_D && cap & DP_CAP_DFP_D) ||
			 (i == DP_CONF_UFP_D && cap & DP_CAP_UFP_D))
			len += sprintf(buf + len, "%s ", configurations[i]);
	}

	mutex_unlock(&dp->lock);

	/* Replace the trailing space with a newline. */
	buf[len - 1] = '\n';
	return len;
}
static DEVICE_ATTR_RW(configuration);
static const char * const pin_assignments[] = {
[DP_PIN_ASSIGN_A] = "A",
[DP_PIN_ASSIGN_B] = "B",
[DP_PIN_ASSIGN_C] = "C",
[DP_PIN_ASSIGN_D] = "D",
[DP_PIN_ASSIGN_E] = "E",
[DP_PIN_ASSIGN_F] = "F",
};
/*
* Helper function to extract a peripheral's currently supported
* Pin Assignments from its DisplayPort alternate mode state.
*/
static u8 get_current_pin_assignments(struct dp_altmode *dp)
{
	/* The relevant capability field depends on which end acts as DFP_D. */
	if (DP_CONF_CURRENTLY(dp->data.conf) == DP_CONF_UFP_U_AS_DFP_D)
		return DP_CAP_PIN_ASSIGN_DFP_D(dp->alt->vdo);
	else
		return DP_CAP_PIN_ASSIGN_UFP_D(dp->alt->vdo);
}
/*
 * sysfs "pin_assignment" store: request a specific DP pin assignment.
 * The requested assignment must be supported in the current role; when
 * the mode is active and configured, a Configure command is issued.
 */
static ssize_t
pin_assignment_store(struct device *dev, struct device_attribute *attr,
		     const char *buf, size_t size)
{
	struct dp_altmode *dp = dev_get_drvdata(dev);
	u8 assignments;
	u32 conf;
	int ret;

	ret = sysfs_match_string(pin_assignments, buf);
	if (ret < 0)
		return ret;

	conf = DP_CONF_SET_PIN_ASSIGN(BIT(ret));
	ret = 0;

	mutex_lock(&dp->lock);

	/* Nothing to do if the assignment is already active. */
	if (conf & dp->data.conf)
		goto out_unlock;

	if (dp->state != DP_STATE_IDLE) {
		ret = -EBUSY;
		goto out_unlock;
	}

	assignments = get_current_pin_assignments(dp);

	if (!(DP_CONF_GET_PIN_ASSIGN(conf) & assignments)) {
		ret = -EINVAL;
		goto out_unlock;
	}

	conf |= dp->data.conf & ~DP_CONF_PIN_ASSIGNEMENT_MASK;

	/* Only send Configure command if a configuration has been set */
	if (dp->alt->active && DP_CONF_CURRENTLY(dp->data.conf)) {
		ret = dp_altmode_configure_vdm(dp, conf);
		if (ret)
			goto out_unlock;
	}

	dp->data.conf = conf;

out_unlock:
	mutex_unlock(&dp->lock);

	return ret ? ret : size;
}
/*
 * sysfs "pin_assignment" show: list the supported pin assignments with
 * the active one in brackets, e.g. "C [D] E".
 */
static ssize_t pin_assignment_show(struct device *dev,
				   struct device_attribute *attr, char *buf)
{
	struct dp_altmode *dp = dev_get_drvdata(dev);
	u8 assignments;
	int len = 0;
	u8 cur;
	int i;

	mutex_lock(&dp->lock);

	/* Bit index of the active assignment (0 if none configured). */
	cur = get_count_order(DP_CONF_GET_PIN_ASSIGN(dp->data.conf));

	assignments = get_current_pin_assignments(dp);

	for (i = 0; assignments; assignments >>= 1, i++) {
		if (assignments & 1) {
			if (i == cur)
				len += sprintf(buf + len, "[%s] ",
					       pin_assignments[i]);
			else
				len += sprintf(buf + len, "%s ",
					       pin_assignments[i]);
		}
	}

	mutex_unlock(&dp->lock);

	/* get_current_pin_assignments can return 0 when no matching pin assignments are found */
	if (len == 0)
		len++;

	buf[len - 1] = '\n';
	return len;
}
static DEVICE_ATTR_RW(pin_assignment);
/* sysfs "hpd" show: report the cached hot-plug-detect state as 0/1. */
static ssize_t hpd_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct dp_altmode *dp_data = dev_get_drvdata(dev);

	return sysfs_emit(buf, "%d\n", dp_data->hpd);
}
static DEVICE_ATTR_RO(hpd);
static struct attribute *dp_altmode_attrs[] = {
&dev_attr_configuration.attr,
&dev_attr_pin_assignment.attr,
&dev_attr_hpd.attr,
NULL
};
static const struct attribute_group dp_altmode_group = {
.name = "displayport",
.attrs = dp_altmode_attrs,
};
/*
 * Probe the DisplayPort alternate mode: verify compatible pin
 * configurations, set up the driver state and sysfs group, resolve the
 * DRM connector fwnode for HPD events, and schedule mode entry.
 *
 * Returns 0 on success or a negative errno.
 */
int dp_altmode_probe(struct typec_altmode *alt)
{
	const struct typec_altmode *port = typec_altmode_get_partner(alt);
	struct fwnode_handle *fwnode;
	struct dp_altmode *dp;
	int ret;

	/* FIXME: Port can only be DFP_U. */

	/* Make sure we have compatible pin configurations */
	if (!(DP_CAP_PIN_ASSIGN_DFP_D(port->vdo) &
	      DP_CAP_PIN_ASSIGN_UFP_D(alt->vdo)) &&
	    !(DP_CAP_PIN_ASSIGN_UFP_D(port->vdo) &
	      DP_CAP_PIN_ASSIGN_DFP_D(alt->vdo)))
		return -ENODEV;

	/*
	 * Allocate the driver state before creating the sysfs group so a
	 * failed allocation cannot leak the "displayport" group, which is
	 * only removed in dp_altmode_remove().
	 */
	dp = devm_kzalloc(&alt->dev, sizeof(*dp), GFP_KERNEL);
	if (!dp)
		return -ENOMEM;

	ret = sysfs_create_group(&alt->dev.kobj, &dp_altmode_group);
	if (ret)
		return ret;

	INIT_WORK(&dp->work, dp_altmode_work);
	mutex_init(&dp->lock);
	dp->port = port;
	dp->alt = alt;

	alt->desc = "DisplayPort";
	alt->ops = &dp_altmode_ops;

	fwnode = dev_fwnode(alt->dev.parent->parent); /* typec_port fwnode */
	if (fwnode_property_present(fwnode, "displayport"))
		dp->connector_fwnode = fwnode_find_reference(fwnode, "displayport", 0);
	else
		dp->connector_fwnode = fwnode_handle_get(fwnode); /* embedded DP */
	if (IS_ERR(dp->connector_fwnode))
		dp->connector_fwnode = NULL;

	typec_altmode_set_drvdata(alt, dp);

	/* Kick the state machine to enter the mode. */
	dp->state = DP_STATE_ENTER;
	schedule_work(&dp->work);

	return 0;
}
EXPORT_SYMBOL_GPL(dp_altmode_probe);
/*
 * Tear down the alternate mode: remove the sysfs group, flush pending
 * work, signal a final HPD-low event if HPD was asserted, and drop the
 * connector fwnode reference.
 */
void dp_altmode_remove(struct typec_altmode *alt)
{
	struct dp_altmode *dp = typec_altmode_get_drvdata(alt);

	sysfs_remove_group(&alt->dev.kobj, &dp_altmode_group);
	cancel_work_sync(&dp->work);

	if (dp->connector_fwnode) {
		if (dp->hpd)
			drm_connector_oob_hotplug_event(dp->connector_fwnode);

		fwnode_handle_put(dp->connector_fwnode);
	}
}
EXPORT_SYMBOL_GPL(dp_altmode_remove);
static const struct typec_device_id dp_typec_id[] = {
{ USB_TYPEC_DP_SID, USB_TYPEC_DP_MODE },
{ },
};
MODULE_DEVICE_TABLE(typec, dp_typec_id);
static struct typec_altmode_driver dp_altmode_driver = {
.id_table = dp_typec_id,
.probe = dp_altmode_probe,
.remove = dp_altmode_remove,
.driver = {
.name = "typec_displayport",
.owner = THIS_MODULE,
},
};
module_typec_altmode_driver(dp_altmode_driver);
MODULE_AUTHOR("Heikki Krogerus <[email protected]>");
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("DisplayPort Alternate Mode");
| linux-master | drivers/usb/typec/altmodes/displayport.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2019 NVIDIA Corporation. All rights reserved.
*
* NVIDIA USB Type-C Alt Mode Driver
*/
#include <linux/module.h>
#include <linux/usb/typec_altmode.h>
#include <linux/usb/typec_dp.h>
#include "displayport.h"
/* Probe: only the NVIDIA VirtualLink SVID is handled, via the DP core. */
static int nvidia_altmode_probe(struct typec_altmode *alt)
{
	if (alt->svid != USB_TYPEC_NVIDIA_VLINK_SID)
		return -ENOTSUPP;

	return dp_altmode_probe(alt);
}
/* Remove: tear down only modes we actually probed (VirtualLink SVID). */
static void nvidia_altmode_remove(struct typec_altmode *alt)
{
	if (alt->svid != USB_TYPEC_NVIDIA_VLINK_SID)
		return;

	dp_altmode_remove(alt);
}
static const struct typec_device_id nvidia_typec_id[] = {
{ USB_TYPEC_NVIDIA_VLINK_SID, TYPEC_ANY_MODE },
{ },
};
MODULE_DEVICE_TABLE(typec, nvidia_typec_id);
static struct typec_altmode_driver nvidia_altmode_driver = {
.id_table = nvidia_typec_id,
.probe = nvidia_altmode_probe,
.remove = nvidia_altmode_remove,
.driver = {
.name = "typec_nvidia",
.owner = THIS_MODULE,
},
};
module_typec_altmode_driver(nvidia_altmode_driver);
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("NVIDIA USB Type-C Alt Mode Driver");
| linux-master | drivers/usb/typec/altmodes/nvidia.c |
// SPDX-License-Identifier: GPL-2.0
/*
* TI TPS6598x USB Power Delivery Controller Trace Support
*
* Copyright (C) 2021, Intel Corporation
* Author: Heikki Krogerus <[email protected]>
*/
#define CREATE_TRACE_POINTS
#include "trace.h"
| linux-master | drivers/usb/typec/tipd/trace.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Driver for TI TPS6598x USB Power Delivery controller family
*
* Copyright (C) 2017, Intel Corporation
* Author: Heikki Krogerus <[email protected]>
*/
#include <linux/i2c.h>
#include <linux/acpi.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/power_supply.h>
#include <linux/regmap.h>
#include <linux/interrupt.h>
#include <linux/usb/typec.h>
#include <linux/usb/typec_altmode.h>
#include <linux/usb/role.h>
#include <linux/workqueue.h>
#include "tps6598x.h"
#include "trace.h"
/* Register offsets */
#define TPS_REG_VID 0x00
#define TPS_REG_MODE 0x03
#define TPS_REG_CMD1 0x08
#define TPS_REG_DATA1 0x09
#define TPS_REG_INT_EVENT1 0x14
#define TPS_REG_INT_EVENT2 0x15
#define TPS_REG_INT_MASK1 0x16
#define TPS_REG_INT_MASK2 0x17
#define TPS_REG_INT_CLEAR1 0x18
#define TPS_REG_INT_CLEAR2 0x19
#define TPS_REG_SYSTEM_POWER_STATE 0x20
#define TPS_REG_STATUS 0x1a
#define TPS_REG_SYSTEM_CONF 0x28
#define TPS_REG_CTRL_CONF 0x29
#define TPS_REG_POWER_STATUS 0x3f
#define TPS_REG_RX_IDENTITY_SOP 0x48
#define TPS_REG_DATA_STATUS 0x5f
/* TPS_REG_SYSTEM_CONF bits */
#define TPS_SYSCONF_PORTINFO(c) ((c) & 7)
enum {
TPS_PORTINFO_SINK,
TPS_PORTINFO_SINK_ACCESSORY,
TPS_PORTINFO_DRP_UFP,
TPS_PORTINFO_DRP_UFP_DRD,
TPS_PORTINFO_DRP_DFP,
TPS_PORTINFO_DRP_DFP_DRD,
TPS_PORTINFO_SOURCE,
};
/* TPS_REG_RX_IDENTITY_SOP */
struct tps6598x_rx_identity_reg {
u8 status;
struct usb_pd_identity identity;
} __packed;
/* Standard Task return codes */
#define TPS_TASK_TIMEOUT 1
#define TPS_TASK_REJECTED 3
enum {
TPS_MODE_APP,
TPS_MODE_BOOT,
TPS_MODE_BIST,
TPS_MODE_DISC,
};
static const char *const modes[] = {
[TPS_MODE_APP] = "APP ",
[TPS_MODE_BOOT] = "BOOT",
[TPS_MODE_BIST] = "BIST",
[TPS_MODE_DISC] = "DISC",
};
/* Unrecognized commands will be replaced with "!CMD" */
#define INVALID_CMD(_cmd_) (_cmd_ == 0x444d4321)
struct tps6598x {
struct device *dev;
struct regmap *regmap;
struct mutex lock; /* device lock */
u8 i2c_protocol:1;
struct typec_port *port;
struct typec_partner *partner;
struct usb_pd_identity partner_identity;
struct usb_role_switch *role_sw;
struct typec_capability typec_cap;
struct power_supply *psy;
struct power_supply_desc psy_desc;
enum power_supply_usb_type usb_type;
int wakeup;
u16 pwr_status;
struct delayed_work wq_poll;
irq_handler_t irq_handler;
};
static enum power_supply_property tps6598x_psy_props[] = {
POWER_SUPPLY_PROP_USB_TYPE,
POWER_SUPPLY_PROP_ONLINE,
};
static enum power_supply_usb_type tps6598x_psy_usb_types[] = {
POWER_SUPPLY_USB_TYPE_C,
POWER_SUPPLY_USB_TYPE_PD,
};
static const char *tps6598x_psy_name_prefix = "tps6598x-source-psy-";
/*
* Max data bytes for Data1, Data2, and other registers. See ch 1.3.2:
* https://www.ti.com/lit/ug/slvuan1a/slvuan1a.pdf
*/
#define TPS_MAX_LEN 64
/*
 * Read @len bytes of register @reg into @val.
 *
 * In I2C mode the controller prepends a length byte to the register
 * payload, so len + 1 bytes are read and the prefix is stripped; -EIO
 * is returned when the device reports fewer bytes than requested.
 */
static int
tps6598x_block_read(struct tps6598x *tps, u8 reg, void *val, size_t len)
{
	u8 data[TPS_MAX_LEN + 1];
	int ret;

	if (len + 1 > sizeof(data))
		return -EINVAL;

	if (!tps->i2c_protocol)
		return regmap_raw_read(tps->regmap, reg, val, len);

	ret = regmap_raw_read(tps->regmap, reg, data, len + 1);
	if (ret)
		return ret;

	if (data[0] < len)
		return -EIO;

	memcpy(val, &data[1], len);
	return 0;
}
/*
 * Write @len bytes from @val to register @reg.
 *
 * In I2C mode the controller expects a length byte before the payload,
 * mirroring tps6598x_block_read().
 */
static int tps6598x_block_write(struct tps6598x *tps, u8 reg,
				const void *val, size_t len)
{
	u8 data[TPS_MAX_LEN + 1];

	if (len + 1 > sizeof(data))
		return -EINVAL;

	if (!tps->i2c_protocol)
		return regmap_raw_write(tps->regmap, reg, val, len);

	data[0] = len;
	memcpy(&data[1], val, len);

	return regmap_raw_write(tps->regmap, reg, data, len + 1);
}
/* Fixed-width register accessors on top of tps6598x_block_read/write(). */
static inline int tps6598x_read8(struct tps6598x *tps, u8 reg, u8 *val)
{
	return tps6598x_block_read(tps, reg, val, sizeof(u8));
}

static inline int tps6598x_read16(struct tps6598x *tps, u8 reg, u16 *val)
{
	return tps6598x_block_read(tps, reg, val, sizeof(u16));
}

static inline int tps6598x_read32(struct tps6598x *tps, u8 reg, u32 *val)
{
	return tps6598x_block_read(tps, reg, val, sizeof(u32));
}

static inline int tps6598x_read64(struct tps6598x *tps, u8 reg, u64 *val)
{
	return tps6598x_block_read(tps, reg, val, sizeof(u64));
}

static inline int tps6598x_write64(struct tps6598x *tps, u8 reg, u64 val)
{
	return tps6598x_block_write(tps, reg, &val, sizeof(u64));
}

/* Write a 4-character command code (4CC) to a command register. */
static inline int
tps6598x_write_4cc(struct tps6598x *tps, u8 reg, const char *val)
{
	return tps6598x_block_write(tps, reg, val, 4);
}
/* Read the partner's Discover Identity response into tps->partner_identity. */
static int tps6598x_read_partner_identity(struct tps6598x *tps)
{
	struct tps6598x_rx_identity_reg id;
	int ret;

	ret = tps6598x_block_read(tps, TPS_REG_RX_IDENTITY_SOP,
				  &id, sizeof(id));
	if (ret)
		return ret;

	tps->partner_identity = id.identity;

	return 0;
}
/*
 * Propagate the data role to the USB role switch and the typec class.
 * A disconnected port always maps to USB_ROLE_NONE regardless of @role.
 */
static void tps6598x_set_data_role(struct tps6598x *tps,
				   enum typec_data_role role, bool connected)
{
	enum usb_role usb_role = USB_ROLE_NONE;

	if (connected)
		usb_role = (role == TYPEC_HOST) ? USB_ROLE_HOST
						: USB_ROLE_DEVICE;

	usb_role_switch_set_role(tps->role_sw, usb_role);
	typec_set_data_role(tps->port, role);
}
/*
 * Handle a plug event: publish power opmode, roles and orientation from
 * the STATUS register, register the partner (with its PD identity when
 * a PD contract exists), and notify the power supply.  No-op when a
 * partner is already registered.
 */
static int tps6598x_connect(struct tps6598x *tps, u32 status)
{
	struct typec_partner_desc desc;
	enum typec_pwr_opmode mode;
	int ret;

	if (tps->partner)
		return 0;

	mode = TPS_POWER_STATUS_PWROPMODE(tps->pwr_status);

	desc.usb_pd = mode == TYPEC_PWR_MODE_PD;
	desc.accessory = TYPEC_ACCESSORY_NONE; /* XXX: handle accessories */
	desc.identity = NULL;

	if (desc.usb_pd) {
		ret = tps6598x_read_partner_identity(tps);
		if (ret)
			return ret;
		desc.identity = &tps->partner_identity;
	}

	typec_set_pwr_opmode(tps->port, mode);
	typec_set_pwr_role(tps->port, TPS_STATUS_TO_TYPEC_PORTROLE(status));
	typec_set_vconn_role(tps->port, TPS_STATUS_TO_TYPEC_VCONN(status));
	if (TPS_STATUS_TO_UPSIDE_DOWN(status))
		typec_set_orientation(tps->port, TYPEC_ORIENTATION_REVERSE);
	else
		typec_set_orientation(tps->port, TYPEC_ORIENTATION_NORMAL);
	typec_set_mode(tps->port, TYPEC_STATE_USB);
	tps6598x_set_data_role(tps, TPS_STATUS_TO_TYPEC_DATAROLE(status), true);

	tps->partner = typec_register_partner(tps->port, &desc);
	if (IS_ERR(tps->partner))
		return PTR_ERR(tps->partner);

	if (desc.identity)
		typec_partner_set_identity(tps->partner);

	power_supply_changed(tps->psy);

	return 0;
}
/*
 * Tear down partner state after unplug: unregister the partner and
 * reset the typec class attributes to disconnected defaults.
 * tps->partner may be an ERR_PTR from a failed registration, hence
 * the IS_ERR() check rather than a NULL check.
 */
static void tps6598x_disconnect(struct tps6598x *tps, u32 status)
{
	if (!IS_ERR(tps->partner))
		typec_unregister_partner(tps->partner);
	tps->partner = NULL;
	typec_set_pwr_opmode(tps->port, TYPEC_PWR_MODE_USB);
	typec_set_pwr_role(tps->port, TPS_STATUS_TO_TYPEC_PORTROLE(status));
	typec_set_vconn_role(tps->port, TPS_STATUS_TO_TYPEC_VCONN(status));
	typec_set_orientation(tps->port, TYPEC_ORIENTATION_NONE);
	typec_set_mode(tps->port, TYPEC_STATE_SAFE);
	tps6598x_set_data_role(tps, TPS_STATUS_TO_TYPEC_DATAROLE(status), false);
	power_supply_changed(tps->psy);
}
/*
 * Execute a 4CC command on the PD controller.
 *
 * Flow: check that CMD1 is free, stage @in_len input bytes in DATA1,
 * write the 4CC to CMD1, then poll CMD1 until the controller clears it
 * (1 s timeout).  Afterwards the first byte of DATA1 holds the task
 * return code, which is translated into an errno.
 *
 * Returns 0 on success; -EBUSY if another command is in flight;
 * -EINVAL if the controller flags the command as invalid; -ETIMEDOUT
 * on poll timeout or TPS_TASK_TIMEOUT; -EPERM for TPS_TASK_REJECTED;
 * or a negative I2C error code.
 */
static int tps6598x_exec_cmd(struct tps6598x *tps, const char *cmd,
			     size_t in_len, u8 *in_data,
			     size_t out_len, u8 *out_data)
{
	unsigned long timeout;
	u32 val;
	int ret;
	ret = tps6598x_read32(tps, TPS_REG_CMD1, &val);
	if (ret)
		return ret;
	/* Non-zero CMD1 (other than the "invalid command" marker) => busy. */
	if (val && !INVALID_CMD(val))
		return -EBUSY;
	if (in_len) {
		ret = tps6598x_block_write(tps, TPS_REG_DATA1,
					   in_data, in_len);
		if (ret)
			return ret;
	}
	ret = tps6598x_write_4cc(tps, TPS_REG_CMD1, cmd);
	if (ret < 0)
		return ret;
	/* XXX: Using 1s for now, but it may not be enough for every command. */
	timeout = jiffies + msecs_to_jiffies(1000);
	do {
		ret = tps6598x_read32(tps, TPS_REG_CMD1, &val);
		if (ret)
			return ret;
		if (INVALID_CMD(val))
			return -EINVAL;
		if (time_is_before_jiffies(timeout))
			return -ETIMEDOUT;
	} while (val);
	if (out_len) {
		ret = tps6598x_block_read(tps, TPS_REG_DATA1,
					  out_data, out_len);
		if (ret)
			return ret;
		val = out_data[0];
	} else {
		/*
		 * Read only the task return code byte.  val is 0 here (the
		 * poll loop exited on val == 0), so the single byte lands in
		 * its low byte.  NOTE(review): assumes little-endian layout
		 * of val — confirm.
		 */
		ret = tps6598x_block_read(tps, TPS_REG_DATA1, &val, sizeof(u8));
		if (ret)
			return ret;
	}
	/* Map the controller's task return code to an errno. */
	switch (val) {
	case TPS_TASK_TIMEOUT:
		return -ETIMEDOUT;
	case TPS_TASK_REJECTED:
		return -EPERM;
	default:
		break;
	}
	return 0;
}
/*
 * typec data-role swap: issue the swap 4CC ("SWUF" to become UFP,
 * "SWDF" to become DFP), then verify the new role from STATUS before
 * publishing it.  Returns -EPROTO if the swap did not take effect.
 */
static int tps6598x_dr_set(struct typec_port *port, enum typec_data_role role)
{
	struct tps6598x *tps = typec_get_drvdata(port);
	const char *cmd;
	u32 status;
	int ret;

	cmd = (role == TYPEC_DEVICE) ? "SWUF" : "SWDF";

	mutex_lock(&tps->lock);

	ret = tps6598x_exec_cmd(tps, cmd, 0, NULL, 0, NULL);
	if (ret)
		goto out_unlock;

	ret = tps6598x_read32(tps, TPS_REG_STATUS, &status);
	if (ret)
		goto out_unlock;

	ret = (role == TPS_STATUS_TO_TYPEC_DATAROLE(status)) ? 0 : -EPROTO;
	if (ret)
		goto out_unlock;

	tps6598x_set_data_role(tps, role, true);

out_unlock:
	mutex_unlock(&tps->lock);
	return ret;
}
/*
 * typec power-role swap: issue the swap 4CC ("SWSk" to become sink,
 * "SWSr" to become source), then verify the new role from STATUS
 * before publishing it.  Returns -EPROTO if the swap did not stick.
 */
static int tps6598x_pr_set(struct typec_port *port, enum typec_role role)
{
	struct tps6598x *tps = typec_get_drvdata(port);
	const char *cmd;
	u32 status;
	int ret;

	cmd = (role == TYPEC_SINK) ? "SWSk" : "SWSr";

	mutex_lock(&tps->lock);

	ret = tps6598x_exec_cmd(tps, cmd, 0, NULL, 0, NULL);
	if (ret)
		goto out_unlock;

	ret = tps6598x_read32(tps, TPS_REG_STATUS, &status);
	if (ret)
		goto out_unlock;

	ret = (role == TPS_STATUS_TO_TYPEC_PORTROLE(status)) ? 0 : -EPROTO;
	if (ret)
		goto out_unlock;

	typec_set_pwr_role(tps->port, role);

out_unlock:
	mutex_unlock(&tps->lock);
	return ret;
}
/* typec_port operations: runtime data-role and power-role swaps. */
static const struct typec_operations tps6598x_ops = {
	.dr_set = tps6598x_dr_set,
	.pr_set = tps6598x_pr_set,
};
/*
 * Read the STATUS register into @status.  Logs and returns false on
 * I2C failure; traces the value and returns true on success.
 */
static bool tps6598x_read_status(struct tps6598x *tps, u32 *status)
{
	if (tps6598x_read32(tps, TPS_REG_STATUS, status)) {
		dev_err(tps->dev, "%s: failed to read status\n", __func__);
		return false;
	}

	trace_tps6598x_status(*status);
	return true;
}
/*
 * Read and trace the DATA_STATUS register.  The value itself is only
 * traced, not stored.  Returns false on I2C failure.
 */
static bool tps6598x_read_data_status(struct tps6598x *tps)
{
	u32 data_status;
	int err;

	err = tps6598x_read32(tps, TPS_REG_DATA_STATUS, &data_status);
	if (err < 0) {
		dev_err(tps->dev, "failed to read data status: %d\n", err);
		return false;
	}

	trace_tps6598x_data_status(data_status);
	return true;
}
/*
 * Read the POWER_STATUS register, cache it in tps->pwr_status and
 * trace it.  Returns false on I2C failure.
 */
static bool tps6598x_read_power_status(struct tps6598x *tps)
{
	u16 pwr_status;
	int err;

	err = tps6598x_read16(tps, TPS_REG_POWER_STATUS, &pwr_status);
	if (err < 0) {
		dev_err(tps->dev, "failed to read power status: %d\n", err);
		return false;
	}

	tps->pwr_status = pwr_status;
	trace_tps6598x_power_status(pwr_status);
	return true;
}
/*
 * Dispatch a plug event: register a partner when a plug is present,
 * otherwise tear the connection down.
 */
static void tps6598x_handle_plug_event(struct tps6598x *tps, u32 status)
{
	if (!(status & TPS_STATUS_PLUG_PRESENT)) {
		tps6598x_disconnect(tps, status);
		return;
	}

	if (tps6598x_connect(tps, status))
		dev_err(tps->dev, "failed to register partner\n");
}
/*
 * Threaded IRQ handler for Apple CD321x variants (single 64-bit event
 * register).  Reads INT_EVENT1, refreshes status/power-status/
 * data-status as indicated by the event bits, handles plug events,
 * then acknowledges everything via INT_CLEAR1.  Returns IRQ_HANDLED
 * only if some event bit was set (the line is requested IRQF_SHARED).
 */
static irqreturn_t cd321x_interrupt(int irq, void *data)
{
	struct tps6598x *tps = data;
	u64 event = 0;
	u32 status;
	int ret;
	mutex_lock(&tps->lock);
	ret = tps6598x_read64(tps, TPS_REG_INT_EVENT1, &event);
	if (ret) {
		dev_err(tps->dev, "%s: failed to read events\n", __func__);
		goto err_unlock;
	}
	trace_cd321x_irq(event);
	/* Nothing pending: do not ack, just report IRQ_NONE below. */
	if (!event)
		goto err_unlock;
	if (!tps6598x_read_status(tps, &status))
		goto err_clear_ints;
	if (event & APPLE_CD_REG_INT_POWER_STATUS_UPDATE)
		if (!tps6598x_read_power_status(tps))
			goto err_clear_ints;
	if (event & APPLE_CD_REG_INT_DATA_STATUS_UPDATE)
		if (!tps6598x_read_data_status(tps))
			goto err_clear_ints;
	/* Handle plug insert or removal */
	if (event & APPLE_CD_REG_INT_PLUG_EVENT)
		tps6598x_handle_plug_event(tps, status);
err_clear_ints:
	/* Acknowledge every event bit we consumed (even on partial failure). */
	tps6598x_write64(tps, TPS_REG_INT_CLEAR1, event);
err_unlock:
	mutex_unlock(&tps->lock);
	if (event)
		return IRQ_HANDLED;
	return IRQ_NONE;
}
/*
 * Threaded IRQ handler for standard TPS6598x parts, which expose two
 * 64-bit event registers.  Mirrors cd321x_interrupt() but reads and
 * clears both INT_EVENT1/2.  Also invoked directly by the polling
 * worker when no interrupt line is available.
 */
static irqreturn_t tps6598x_interrupt(int irq, void *data)
{
	struct tps6598x *tps = data;
	u64 event1 = 0;
	u64 event2 = 0;
	u32 status;
	int ret;
	mutex_lock(&tps->lock);
	ret = tps6598x_read64(tps, TPS_REG_INT_EVENT1, &event1);
	ret |= tps6598x_read64(tps, TPS_REG_INT_EVENT2, &event2);
	if (ret) {
		dev_err(tps->dev, "%s: failed to read events\n", __func__);
		goto err_unlock;
	}
	trace_tps6598x_irq(event1, event2);
	/* No event bits set in either register: nothing to ack. */
	if (!(event1 | event2))
		goto err_unlock;
	if (!tps6598x_read_status(tps, &status))
		goto err_clear_ints;
	if ((event1 | event2) & TPS_REG_INT_POWER_STATUS_UPDATE)
		if (!tps6598x_read_power_status(tps))
			goto err_clear_ints;
	if ((event1 | event2) & TPS_REG_INT_DATA_STATUS_UPDATE)
		if (!tps6598x_read_data_status(tps))
			goto err_clear_ints;
	/* Handle plug insert or removal */
	if ((event1 | event2) & TPS_REG_INT_PLUG_EVENT)
		tps6598x_handle_plug_event(tps, status);
err_clear_ints:
	/* Acknowledge everything that was pending in both registers. */
	tps6598x_write64(tps, TPS_REG_INT_CLEAR1, event1);
	tps6598x_write64(tps, TPS_REG_INT_CLEAR2, event2);
err_unlock:
	mutex_unlock(&tps->lock);
	if (event1 | event2)
		return IRQ_HANDLED;
	return IRQ_NONE;
}
/* Time interval for Polling */
#define POLL_INTERVAL 500 /* msecs */
/*
 * Polling fallback for boards without an interrupt line: run the IRQ
 * handler by hand (irq number 0 is unused by the handlers), then
 * re-arm ourselves after POLL_INTERVAL ms.
 */
static void tps6598x_poll_work(struct work_struct *work)
{
	struct tps6598x *tps = container_of(to_delayed_work(work),
					    struct tps6598x, wq_poll);
	tps->irq_handler(0, tps);
	queue_delayed_work(system_power_efficient_wq,
			   &tps->wq_poll, msecs_to_jiffies(POLL_INTERVAL));
}
/*
 * Verify the controller is running usable firmware.
 *
 * Reads the 4-byte mode string (NUL-terminated into a 5-byte buffer)
 * and accepts the application and boot modes; boot mode additionally
 * warns about a dead-battery condition.  Any other mode (BIST,
 * disconnected, unknown) is rejected with -ENODEV.
 */
static int tps6598x_check_mode(struct tps6598x *tps)
{
	char mode[5] = { };
	int ret;

	ret = tps6598x_read32(tps, TPS_REG_MODE, (void *)mode);
	if (ret)
		return ret;

	ret = match_string(modes, ARRAY_SIZE(modes), mode);
	if (ret == TPS_MODE_APP)
		return 0;
	if (ret == TPS_MODE_BOOT) {
		dev_warn(tps->dev, "dead-battery condition\n");
		return 0;
	}

	dev_err(tps->dev, "controller in unsupported mode \"%s\"\n",
		mode);
	return -ENODEV;
}
/* 8-bit register addresses and values; register space tops out at 0x7F. */
static const struct regmap_config tps6598x_regmap_config = {
	.reg_bits = 8,
	.val_bits = 8,
	.max_register = 0x7F,
};
static int tps6598x_psy_get_online(struct tps6598x *tps,
union power_supply_propval *val)
{
if (TPS_POWER_STATUS_CONNECTION(tps->pwr_status) &&
TPS_POWER_STATUS_SOURCESINK(tps->pwr_status)) {
val->intval = 1;
} else {
val->intval = 0;
}
return 0;
}
/*
 * power_supply get_property callback: reports USB type (PD vs plain
 * Type-C from the cached power-status operating mode) and online state.
 */
static int tps6598x_psy_get_prop(struct power_supply *psy,
				 enum power_supply_property psp,
				 union power_supply_propval *val)
{
	struct tps6598x *tps = power_supply_get_drvdata(psy);

	switch (psp) {
	case POWER_SUPPLY_PROP_USB_TYPE:
		if (TPS_POWER_STATUS_PWROPMODE(tps->pwr_status) == TYPEC_PWR_MODE_PD)
			val->intval = POWER_SUPPLY_USB_TYPE_PD;
		else
			val->intval = POWER_SUPPLY_USB_TYPE_C;
		return 0;
	case POWER_SUPPLY_PROP_ONLINE:
		return tps6598x_psy_get_online(tps, val);
	default:
		return -EINVAL;
	}
}
/*
 * Move a CD321x controller to @target_state via the "SSPS" 4CC.
 * No-op when already there; reads the state back afterwards and
 * returns -EINVAL if the transition did not take effect.
 */
static int cd321x_switch_power_state(struct tps6598x *tps, u8 target_state)
{
	u8 cur_state;
	int ret;

	ret = tps6598x_read8(tps, TPS_REG_SYSTEM_POWER_STATE, &cur_state);
	if (ret)
		return ret;
	if (cur_state == target_state)
		return 0;

	ret = tps6598x_exec_cmd(tps, "SSPS", sizeof(u8), &target_state, 0, NULL);
	if (ret)
		return ret;

	/* Read back and confirm the transition actually happened. */
	ret = tps6598x_read8(tps, TPS_REG_SYSTEM_POWER_STATE, &cur_state);
	if (ret)
		return ret;

	return (cur_state == target_state) ? 0 : -EINVAL;
}
/*
 * Register a device-managed power supply named
 * "<tps6598x_psy_name_prefix><dev_name>" that mirrors the port's
 * USB type and online state.  Returns 0 or a negative errno.
 */
static int devm_tps6598_psy_register(struct tps6598x *tps)
{
	struct power_supply_config psy_cfg = {};
	const char *port_dev_name = dev_name(tps->dev);
	char *psy_name;
	psy_cfg.drv_data = tps;
	psy_cfg.fwnode = dev_fwnode(tps->dev);
	psy_name = devm_kasprintf(tps->dev, GFP_KERNEL, "%s%s", tps6598x_psy_name_prefix,
				  port_dev_name);
	if (!psy_name)
		return -ENOMEM;
	tps->psy_desc.name = psy_name;
	tps->psy_desc.type = POWER_SUPPLY_TYPE_USB;
	tps->psy_desc.usb_types = tps6598x_psy_usb_types;
	tps->psy_desc.num_usb_types = ARRAY_SIZE(tps6598x_psy_usb_types);
	tps->psy_desc.properties = tps6598x_psy_props;
	tps->psy_desc.num_properties = ARRAY_SIZE(tps6598x_psy_props);
	tps->psy_desc.get_property = tps6598x_psy_get_prop;
	/* Default until the first power-status update arrives. */
	tps->usb_type = POWER_SUPPLY_USB_TYPE_C;
	tps->psy = devm_power_supply_register(tps->dev, &tps->psy_desc,
					      &psy_cfg);
	return PTR_ERR_OR_ZERO(tps->psy);
}
/*
 * I2C probe: identify the controller, pick the right IRQ handler and
 * interrupt mask (Apple CD321x variants differ), parse the "connector"
 * fwnode, and register the power supply and typec port.  Falls back to
 * polling when no interrupt line is wired up.
 */
static int tps6598x_probe(struct i2c_client *client)
{
	irq_handler_t irq_handler = tps6598x_interrupt;
	struct device_node *np = client->dev.of_node;
	struct typec_capability typec_cap = { };
	struct tps6598x *tps;
	struct fwnode_handle *fwnode;
	u32 status;
	u32 conf;
	u32 vid;
	int ret;
	u64 mask1;
	tps = devm_kzalloc(&client->dev, sizeof(*tps), GFP_KERNEL);
	if (!tps)
		return -ENOMEM;
	mutex_init(&tps->lock);
	tps->dev = &client->dev;
	tps->regmap = devm_regmap_init_i2c(client, &tps6598x_regmap_config);
	if (IS_ERR(tps->regmap))
		return PTR_ERR(tps->regmap);
	/* A zero vendor ID is treated as "no controller present". */
	ret = tps6598x_read32(tps, TPS_REG_VID, &vid);
	if (ret < 0 || !vid)
		return -ENODEV;
	/*
	 * Checking can the adapter handle SMBus protocol. If it can not, the
	 * driver needs to take care of block reads separately.
	 */
	if (i2c_check_functionality(client->adapter, I2C_FUNC_I2C))
		tps->i2c_protocol = true;
	if (np && of_device_is_compatible(np, "apple,cd321x")) {
		/* Switch CD321X chips to the correct system power state */
		ret = cd321x_switch_power_state(tps, TPS_SYSTEM_POWER_STATE_S0);
		if (ret)
			return ret;
		/* CD321X chips have all interrupts masked initially */
		mask1 = APPLE_CD_REG_INT_POWER_STATUS_UPDATE |
			APPLE_CD_REG_INT_DATA_STATUS_UPDATE |
			APPLE_CD_REG_INT_PLUG_EVENT;
		irq_handler = cd321x_interrupt;
	} else {
		/* Enable power status, data status and plug event interrupts */
		mask1 = TPS_REG_INT_POWER_STATUS_UPDATE |
			TPS_REG_INT_DATA_STATUS_UPDATE |
			TPS_REG_INT_PLUG_EVENT;
	}
	tps->irq_handler = irq_handler;
	/* Make sure the controller has application firmware running */
	ret = tps6598x_check_mode(tps);
	if (ret)
		return ret;
	ret = tps6598x_write64(tps, TPS_REG_INT_MASK1, mask1);
	if (ret)
		return ret;
	ret = tps6598x_read32(tps, TPS_REG_STATUS, &status);
	if (ret < 0)
		goto err_clear_mask;
	trace_tps6598x_status(status);
	ret = tps6598x_read32(tps, TPS_REG_SYSTEM_CONF, &conf);
	if (ret < 0)
		goto err_clear_mask;
	/*
	 * This fwnode has a "compatible" property, but is never populated as a
	 * struct device. Instead we simply parse it to read the properties.
	 * This breaks fw_devlink=on. To maintain backward compatibility
	 * with existing DT files, we work around this by deleting any
	 * fwnode_links to/from this fwnode.
	 */
	fwnode = device_get_named_child_node(&client->dev, "connector");
	if (fwnode)
		fw_devlink_purge_absent_suppliers(fwnode);
	tps->role_sw = fwnode_usb_role_switch_get(fwnode);
	if (IS_ERR(tps->role_sw)) {
		ret = PTR_ERR(tps->role_sw);
		goto err_fwnode_put;
	}
	typec_cap.revision = USB_TYPEC_REV_1_2;
	typec_cap.pd_revision = 0x200;
	typec_cap.prefer_role = TYPEC_NO_PREFERRED_ROLE;
	typec_cap.driver_data = tps;
	typec_cap.ops = &tps6598x_ops;
	typec_cap.fwnode = fwnode;
	/* Map the configured port capabilities to typec port/data roles. */
	switch (TPS_SYSCONF_PORTINFO(conf)) {
	case TPS_PORTINFO_SINK_ACCESSORY:
	case TPS_PORTINFO_SINK:
		typec_cap.type = TYPEC_PORT_SNK;
		typec_cap.data = TYPEC_PORT_UFP;
		break;
	case TPS_PORTINFO_DRP_UFP_DRD:
	case TPS_PORTINFO_DRP_DFP_DRD:
		typec_cap.type = TYPEC_PORT_DRP;
		typec_cap.data = TYPEC_PORT_DRD;
		break;
	case TPS_PORTINFO_DRP_UFP:
		typec_cap.type = TYPEC_PORT_DRP;
		typec_cap.data = TYPEC_PORT_UFP;
		break;
	case TPS_PORTINFO_DRP_DFP:
		typec_cap.type = TYPEC_PORT_DRP;
		typec_cap.data = TYPEC_PORT_DFP;
		break;
	case TPS_PORTINFO_SOURCE:
		typec_cap.type = TYPEC_PORT_SRC;
		typec_cap.data = TYPEC_PORT_DFP;
		break;
	default:
		ret = -ENODEV;
		goto err_role_put;
	}
	ret = devm_tps6598_psy_register(tps);
	if (ret)
		goto err_role_put;
	tps->port = typec_register_port(&client->dev, &typec_cap);
	if (IS_ERR(tps->port)) {
		ret = PTR_ERR(tps->port);
		goto err_role_put;
	}
	/* Handle a cable that was already plugged in at probe time. */
	if (status & TPS_STATUS_PLUG_PRESENT) {
		ret = tps6598x_read16(tps, TPS_REG_POWER_STATUS, &tps->pwr_status);
		if (ret < 0) {
			dev_err(tps->dev, "failed to read power status: %d\n", ret);
			goto err_unregister_port;
		}
		ret = tps6598x_connect(tps, status);
		if (ret)
			dev_err(&client->dev, "failed to register partner\n");
	}
	if (client->irq) {
		ret = devm_request_threaded_irq(&client->dev, client->irq, NULL,
						irq_handler,
						IRQF_SHARED | IRQF_ONESHOT,
						dev_name(&client->dev), tps);
	} else {
		dev_warn(tps->dev, "Unable to find the interrupt, switching to polling\n");
		INIT_DELAYED_WORK(&tps->wq_poll, tps6598x_poll_work);
		queue_delayed_work(system_power_efficient_wq, &tps->wq_poll,
				   msecs_to_jiffies(POLL_INTERVAL));
	}
	/*
	 * NOTE(review): the polling branch above does not reassign ret, so
	 * this check sees the value left over from the connect step --
	 * confirm this is the intended behavior.
	 */
	if (ret)
		goto err_disconnect;
	i2c_set_clientdata(client, tps);
	fwnode_handle_put(fwnode);
	tps->wakeup = device_property_read_bool(tps->dev, "wakeup-source");
	if (tps->wakeup && client->irq) {
		device_init_wakeup(&client->dev, true);
		enable_irq_wake(client->irq);
	}
	return 0;
err_disconnect:
	tps6598x_disconnect(tps, 0);
err_unregister_port:
	typec_unregister_port(tps->port);
err_role_put:
	usb_role_switch_put(tps->role_sw);
err_fwnode_put:
	fwnode_handle_put(fwnode);
err_clear_mask:
	tps6598x_write64(tps, TPS_REG_INT_MASK1, 0);
	return ret;
}
/*
 * I2C remove: stop the polling worker (polling mode only), disconnect
 * any partner and release the typec port and role switch.
 */
static void tps6598x_remove(struct i2c_client *client)
{
	struct tps6598x *tps = i2c_get_clientdata(client);
	if (!client->irq)
		cancel_delayed_work_sync(&tps->wq_poll);
	tps6598x_disconnect(tps, 0);
	typec_unregister_port(tps->port);
	usb_role_switch_put(tps->role_sw);
}
/*
 * System suspend: for wakeup-capable setups disable the IRQ and arm it
 * as a wake source; in polling mode stop the poll worker instead.
 */
static int __maybe_unused tps6598x_suspend(struct device *dev)
{
	struct i2c_client *client = to_i2c_client(dev);
	struct tps6598x *tps = i2c_get_clientdata(client);
	if (tps->wakeup) {
		disable_irq(client->irq);
		enable_irq_wake(client->irq);
	}
	if (!client->irq)
		cancel_delayed_work_sync(&tps->wq_poll);
	return 0;
}
/*
 * System resume: undo tps6598x_suspend() -- disarm the wake source and
 * re-enable the IRQ, or restart the polling worker.
 */
static int __maybe_unused tps6598x_resume(struct device *dev)
{
	struct i2c_client *client = to_i2c_client(dev);
	struct tps6598x *tps = i2c_get_clientdata(client);
	if (tps->wakeup) {
		disable_irq_wake(client->irq);
		enable_irq(client->irq);
	}
	if (!client->irq)
		queue_delayed_work(system_power_efficient_wq, &tps->wq_poll,
				   msecs_to_jiffies(POLL_INTERVAL));
	return 0;
}
/* System sleep PM hooks (suspend/resume only). */
static const struct dev_pm_ops tps6598x_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(tps6598x_suspend, tps6598x_resume)
};
/* Device-tree match table: TI TPS6598x family and Apple CD321x. */
static const struct of_device_id tps6598x_of_match[] = {
	{ .compatible = "ti,tps6598x", },
	{ .compatible = "apple,cd321x", },
	{}
};
MODULE_DEVICE_TABLE(of, tps6598x_of_match);
/* Legacy I2C device ID table. */
static const struct i2c_device_id tps6598x_id[] = {
	{ "tps6598x" },
	{ }
};
MODULE_DEVICE_TABLE(i2c, tps6598x_id);
/* I2C driver registration. */
static struct i2c_driver tps6598x_i2c_driver = {
	.driver = {
		.name = "tps6598x",
		.pm = &tps6598x_pm_ops,
		.of_match_table = tps6598x_of_match,
	},
	.probe = tps6598x_probe,
	.remove = tps6598x_remove,
	.id_table = tps6598x_id,
};
module_i2c_driver(tps6598x_i2c_driver);
MODULE_AUTHOR("Heikki Krogerus <[email protected]>");
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("TI TPS6598x USB Power Delivery Controller Driver");
/* linux-master | drivers/usb/typec/tipd/core.c */
// SPDX-License-Identifier: GPL-2.0+
/*
* Copyright 2016-2017 Google, Inc
*
* Fairchild FUSB302 Type-C Chip Driver
*/
#include <linux/debugfs.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/extcon.h>
#include <linux/gpio/consumer.h>
#include <linux/i2c.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/of.h>
#include <linux/pinctrl/consumer.h>
#include <linux/proc_fs.h>
#include <linux/regulator/consumer.h>
#include <linux/sched/clock.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/usb.h>
#include <linux/usb/typec.h>
#include <linux/usb/tcpm.h>
#include <linux/usb/pd.h>
#include <linux/workqueue.h>
#include "fusb302_reg.h"
/*
* When the device is SNK, BC_LVL interrupt is used to monitor cc pins
* for the current capability offered by the SRC. As FUSB302 chip fires
* the BC_LVL interrupt on PD signalings, cc lvl should be handled after
* a delay to avoid measuring on PD activities. The delay is slightly
* longer than PD_T_PD_DEBPUNCE (10-20ms).
*/
#define T_BC_LVL_DEBOUNCE_DELAY_MS 30
/* Automatic connection-detection (toggling) modes of the FUSB302. */
enum toggling_mode {
	TOGGLING_MODE_OFF,
	TOGGLING_MODE_DRP,
	TOGGLING_MODE_SNK,
	TOGGLING_MODE_SRC,
};
/* Source current advertisement levels (Rp default / 1.5 A / 3.0 A). */
enum src_current_status {
	SRC_CURRENT_DEFAULT,
	SRC_CURRENT_MEDIUM,
	SRC_CURRENT_HIGH,
};
/* MDAC thresholds used to distinguish Ra, per advertised source current. */
static const u8 ra_mda_value[] = {
	[SRC_CURRENT_DEFAULT] = 4,	/* 210mV */
	[SRC_CURRENT_MEDIUM] = 9,	/* 420mV */
	[SRC_CURRENT_HIGH] = 18,	/* 798mV */
};
/* MDAC thresholds used to distinguish Rd, per advertised source current. */
static const u8 rd_mda_value[] = {
	[SRC_CURRENT_DEFAULT] = 38,	/* 1638mV */
	[SRC_CURRENT_MEDIUM] = 38,	/* 1638mV */
	[SRC_CURRENT_HIGH] = 61,	/* 2604mV */
};
#define LOG_BUFFER_ENTRIES 1024
#define LOG_BUFFER_ENTRY_SIZE 128
/*
 * Per-chip driver state.  @lock guards the chip-status and port-status
 * fields; @logbuffer_lock guards only the debug log ring buffer.
 */
struct fusb302_chip {
	struct device *dev;
	struct i2c_client *i2c_client;
	struct tcpm_port *tcpm_port;
	struct tcpc_dev tcpc_dev;
	struct regulator *vbus;
	spinlock_t irq_lock;
	struct work_struct irq_work;
	bool irq_suspended;
	bool irq_while_suspended;
	struct gpio_desc *gpio_int_n;
	int gpio_int_n_irq;
	struct extcon_dev *extcon;
	struct workqueue_struct *wq;
	struct delayed_work bc_lvl_handler;
	/* lock for sharing chip states */
	struct mutex lock;
	/* chip status */
	enum toggling_mode toggling_mode;
	enum src_current_status src_current_status;
	bool intr_togdone;
	bool intr_bc_lvl;
	bool intr_comp_chng;
	/* port status */
	bool vconn_on;
	bool vbus_on;
	bool charge_on;
	bool vbus_present;
	enum typec_cc_polarity cc_polarity;
	enum typec_cc_status cc1;
	enum typec_cc_status cc2;
	u32 snk_pdo[PDO_MAX_OBJECTS];
#ifdef CONFIG_DEBUG_FS
	struct dentry *dentry;
	/* lock for log buffer access */
	struct mutex logbuffer_lock;
	int logbuffer_head;
	int logbuffer_tail;
	u8 *logbuffer[LOG_BUFFER_ENTRIES];
#endif
};
/*
* Logging
*/
#ifdef CONFIG_DEBUG_FS
/* True when advancing the head by one entry would reach the tail. */
static bool fusb302_log_full(struct fusb302_chip *chip)
{
	int next_head = (chip->logbuffer_head + 1) % LOG_BUFFER_ENTRIES;

	return chip->logbuffer_tail == next_head;
}
/*
 * Append one timestamped entry to the chip's log ring buffer.
 * Entries are allocated lazily; on overflow the previous entry is
 * overwritten with "overflow".  Silently drops the message if the
 * entry allocation fails.
 */
__printf(2, 0)
static void _fusb302_log(struct fusb302_chip *chip, const char *fmt,
			 va_list args)
{
	char tmpbuffer[LOG_BUFFER_ENTRY_SIZE];
	u64 ts_nsec = local_clock();
	unsigned long rem_nsec;
	/* Lazily allocate the slot about to be written. */
	if (!chip->logbuffer[chip->logbuffer_head]) {
		chip->logbuffer[chip->logbuffer_head] =
			kzalloc(LOG_BUFFER_ENTRY_SIZE, GFP_KERNEL);
		if (!chip->logbuffer[chip->logbuffer_head])
			return;
	}
	vsnprintf(tmpbuffer, sizeof(tmpbuffer), fmt, args);
	mutex_lock(&chip->logbuffer_lock);
	if (fusb302_log_full(chip)) {
		/* Ring is full: back up one slot and record the overflow. */
		chip->logbuffer_head = max(chip->logbuffer_head - 1, 0);
		strscpy(tmpbuffer, "overflow", sizeof(tmpbuffer));
	}
	if (chip->logbuffer_head < 0 ||
	    chip->logbuffer_head >= LOG_BUFFER_ENTRIES) {
		dev_warn(chip->dev,
			 "Bad log buffer index %d\n", chip->logbuffer_head);
		goto abort;
	}
	if (!chip->logbuffer[chip->logbuffer_head]) {
		dev_warn(chip->dev,
			 "Log buffer index %d is NULL\n", chip->logbuffer_head);
		goto abort;
	}
	/* Split the nanosecond clock into seconds + microseconds. */
	rem_nsec = do_div(ts_nsec, 1000000000);
	scnprintf(chip->logbuffer[chip->logbuffer_head],
		  LOG_BUFFER_ENTRY_SIZE, "[%5lu.%06lu] %s",
		  (unsigned long)ts_nsec, rem_nsec / 1000,
		  tmpbuffer);
	chip->logbuffer_head = (chip->logbuffer_head + 1) % LOG_BUFFER_ENTRIES;
abort:
	mutex_unlock(&chip->logbuffer_lock);
}
/* printf-style front end for _fusb302_log(). */
__printf(2, 3)
static void fusb302_log(struct fusb302_chip *chip, const char *fmt, ...)
{
	va_list args;
	va_start(args, fmt);
	_fusb302_log(chip, fmt, args);
	va_end(args);
}
/*
 * debugfs "log" read: dump buffered entries from tail to head.
 * Entries are only consumed (tail advanced) when the whole dump fit
 * in the seq buffer, so a re-read after overflow sees them again.
 */
static int fusb302_debug_show(struct seq_file *s, void *v)
{
	struct fusb302_chip *chip = s->private;
	int tail;
	mutex_lock(&chip->logbuffer_lock);
	tail = chip->logbuffer_tail;
	while (tail != chip->logbuffer_head) {
		seq_printf(s, "%s\n", chip->logbuffer[tail]);
		tail = (tail + 1) % LOG_BUFFER_ENTRIES;
	}
	if (!seq_has_overflowed(s))
		chip->logbuffer_tail = tail;
	mutex_unlock(&chip->logbuffer_lock);
	return 0;
}
DEFINE_SHOW_ATTRIBUTE(fusb302_debug);
/* Create the per-device "fusb302-<dev>" debugfs dir with a "log" file. */
static void fusb302_debugfs_init(struct fusb302_chip *chip)
{
	char name[NAME_MAX];
	mutex_init(&chip->logbuffer_lock);
	snprintf(name, NAME_MAX, "fusb302-%s", dev_name(chip->dev));
	chip->dentry = debugfs_create_dir(name, usb_debug_root);
	debugfs_create_file("log", S_IFREG | 0444, chip->dentry, chip,
			    &fusb302_debug_fops);
}
/* Remove the debugfs directory (recursively removes the "log" file). */
static void fusb302_debugfs_exit(struct fusb302_chip *chip)
{
	debugfs_remove(chip->dentry);
}
#else
/* Logging and debugfs compile away without CONFIG_DEBUG_FS. */
static void fusb302_log(const struct fusb302_chip *chip,
			const char *fmt, ...) { }
static void fusb302_debugfs_init(const struct fusb302_chip *chip) { }
static void fusb302_debugfs_exit(const struct fusb302_chip *chip) { }
#endif
/*
 * Write one register byte over SMBus; failures are recorded in the
 * chip log.  Returns the i2c_smbus_write_byte_data() result.
 */
static int fusb302_i2c_write(struct fusb302_chip *chip,
			     u8 address, u8 data)
{
	int ret;

	ret = i2c_smbus_write_byte_data(chip->i2c_client, address, data);
	if (ret < 0)
		fusb302_log(chip, "cannot write 0x%02x to 0x%02x, ret=%d",
			    data, address, ret);

	return ret;
}
/*
 * Write @length bytes starting at register @address.  A zero length is
 * a successful no-op.  Failures are recorded in the chip log.
 *
 * Note: @length is unsigned (u8), so the previous "length <= 0" test
 * could only ever mean "length == 0"; use the explicit form.
 */
static int fusb302_i2c_block_write(struct fusb302_chip *chip, u8 address,
				   u8 length, const u8 *data)
{
	int ret;

	if (!length)
		return 0;

	ret = i2c_smbus_write_i2c_block_data(chip->i2c_client, address,
					     length, data);
	if (ret < 0)
		fusb302_log(chip, "cannot block write 0x%02x, len=%d, ret=%d",
			    address, length, ret);

	return ret;
}
/*
 * Read one register byte over SMBus into *@data.
 *
 * Fix: the original stored the (truncated) negative errno into *@data
 * before checking for failure; now *@data is only written on success,
 * so callers never see a garbage byte.  Returns the raw SMBus result
 * (>= 0 on success, negative errno on failure), as before.
 */
static int fusb302_i2c_read(struct fusb302_chip *chip,
			    u8 address, u8 *data)
{
	int ret;

	ret = i2c_smbus_read_byte_data(chip->i2c_client, address);
	if (ret < 0) {
		fusb302_log(chip, "cannot read %02x, ret=%d", address, ret);
		return ret;
	}
	*data = (u8)ret;

	return ret;
}
/*
 * Read @length bytes starting at register @address into @data.  A zero
 * length is a successful no-op.  Short reads are logged and reported
 * as -EIO rather than returning partial data silently.
 *
 * Note: @length is unsigned (u8), so the previous "length <= 0" test
 * could only ever mean "length == 0"; the goto-based exit is flattened
 * into early returns with identical behavior.
 */
static int fusb302_i2c_block_read(struct fusb302_chip *chip, u8 address,
				  u8 length, u8 *data)
{
	int ret;

	if (!length)
		return 0;

	ret = i2c_smbus_read_i2c_block_data(chip->i2c_client, address,
					    length, data);
	if (ret < 0) {
		fusb302_log(chip, "cannot block read 0x%02x, len=%d, ret=%d",
			    address, length, ret);
		return ret;
	}
	if (ret != length) {
		fusb302_log(chip, "only read %d/%d bytes from 0x%02x",
			    ret, length, address);
		return -EIO;
	}

	return ret;
}
/*
 * Read-modify-write one register: clear @mask bits, set @value bits.
 * Returns 0 on success or the negative error from the read/write.
 *
 * Cleanup: the original ended with "if (ret < 0) return ret; return
 * ret;", which is a single return in disguise -- just return the
 * write's result directly.
 */
static int fusb302_i2c_mask_write(struct fusb302_chip *chip, u8 address,
				  u8 mask, u8 value)
{
	u8 data;
	int ret;

	ret = fusb302_i2c_read(chip, address, &data);
	if (ret < 0)
		return ret;

	data &= ~mask;
	data |= value;

	return fusb302_i2c_write(chip, address, data);
}
/* Set @set_bits in a register without touching the other bits. */
static int fusb302_i2c_set_bits(struct fusb302_chip *chip, u8 address,
				u8 set_bits)
{
	return fusb302_i2c_mask_write(chip, address, 0x00, set_bits);
}
/* Clear @clear_bits in a register without touching the other bits. */
static int fusb302_i2c_clear_bits(struct fusb302_chip *chip, u8 address,
				  u8 clear_bits)
{
	return fusb302_i2c_mask_write(chip, address, clear_bits, 0x00);
}
/* Issue a full software reset of the chip; the outcome is logged. */
static int fusb302_sw_reset(struct fusb302_chip *chip)
{
	int ret = 0;
	ret = fusb302_i2c_write(chip, FUSB_REG_RESET,
				FUSB_REG_RESET_SW_RESET);
	if (ret < 0)
		fusb302_log(chip, "cannot sw reset the chip, ret=%d", ret);
	else
		fusb302_log(chip, "sw reset");
	return ret;
}
/* Enable hardware PD transmit auto-retry with the given retry count. */
static int fusb302_enable_tx_auto_retries(struct fusb302_chip *chip, u8 retry_count)
{
	return fusb302_i2c_set_bits(chip, FUSB_REG_CONTROL3,
				    retry_count | FUSB_REG_CONTROL3_AUTO_RETRY);
}
/*
 * initialize interrupt on the chip
 * - unmasked interrupt: VBUS_OK
 *
 * Masks every bit in MASKA/MASKB, leaves only VBUSOK unmasked in the
 * main MASK register, then enables the global interrupt output pin.
 */
static int fusb302_init_interrupt(struct fusb302_chip *chip)
{
	int ret = 0;
	ret = fusb302_i2c_write(chip, FUSB_REG_MASK,
				0xFF & ~FUSB_REG_MASK_VBUSOK);
	if (ret < 0)
		return ret;
	ret = fusb302_i2c_write(chip, FUSB_REG_MASKA, 0xFF);
	if (ret < 0)
		return ret;
	ret = fusb302_i2c_write(chip, FUSB_REG_MASKB, 0xFF);
	if (ret < 0)
		return ret;
	/* Clearing INT_MASK enables the interrupt pin. */
	ret = fusb302_i2c_clear_bits(chip, FUSB_REG_CONTROL0,
				     FUSB_REG_CONTROL0_INT_MASK);
	if (ret < 0)
		return ret;
	return ret;
}
/* Program the POWER register with the requested power-enable bits. */
static int fusb302_set_power_mode(struct fusb302_chip *chip, u8 power_mode)
{
	return fusb302_i2c_write(chip, FUSB_REG_POWER, power_mode);
}
/*
 * tcpm callback: bring the chip to a known state -- software reset,
 * enable automatic TX retries, set up interrupts (VBUS_OK unmasked),
 * power up all blocks, sample initial VBUS presence, and log the
 * device ID.
 */
static int tcpm_init(struct tcpc_dev *dev)
{
	struct fusb302_chip *chip = container_of(dev, struct fusb302_chip,
						 tcpc_dev);
	int ret = 0;
	u8 data;
	ret = fusb302_sw_reset(chip);
	if (ret < 0)
		return ret;
	ret = fusb302_enable_tx_auto_retries(chip, FUSB_REG_CONTROL3_N_RETRIES_3);
	if (ret < 0)
		return ret;
	ret = fusb302_init_interrupt(chip);
	if (ret < 0)
		return ret;
	ret = fusb302_set_power_mode(chip, FUSB_REG_POWER_PWR_ALL);
	if (ret < 0)
		return ret;
	ret = fusb302_i2c_read(chip, FUSB_REG_STATUS0, &data);
	if (ret < 0)
		return ret;
	chip->vbus_present = !!(data & FUSB_REG_STATUS0_VBUSOK);
	ret = fusb302_i2c_read(chip, FUSB_REG_DEVICE_ID, &data);
	if (ret < 0)
		return ret;
	fusb302_log(chip, "fusb302 device ID: 0x%02x", data);
	return ret;
}
/* tcpm callback: report cached VBUS presence (1/0) under the chip lock. */
static int tcpm_get_vbus(struct tcpc_dev *dev)
{
	struct fusb302_chip *chip = container_of(dev, struct fusb302_chip,
						 tcpc_dev);
	int ret = 0;
	mutex_lock(&chip->lock);
	ret = chip->vbus_present ? 1 : 0;
	mutex_unlock(&chip->lock);
	return ret;
}
/*
 * tcpm callback: derive the current limit (mA) from the BC1.2 charger
 * type published on the extcon device (SDP=500, CDP/ACA=1500,
 * DCP=2000).  Returns 0 when no extcon is present or detection never
 * concludes within the wait window.
 */
static int tcpm_get_current_limit(struct tcpc_dev *dev)
{
	struct fusb302_chip *chip = container_of(dev, struct fusb302_chip,
						 tcpc_dev);
	int current_limit = 0;
	unsigned long timeout;
	if (!chip->extcon)
		return 0;
	/*
	 * USB2 Charger detection may still be in progress when we get here,
	 * this can take upto 600ms, wait 800ms max.
	 */
	timeout = jiffies + msecs_to_jiffies(800);
	do {
		if (extcon_get_state(chip->extcon, EXTCON_CHG_USB_SDP) == 1)
			current_limit = 500;
		if (extcon_get_state(chip->extcon, EXTCON_CHG_USB_CDP) == 1 ||
		    extcon_get_state(chip->extcon, EXTCON_CHG_USB_ACA) == 1)
			current_limit = 1500;
		if (extcon_get_state(chip->extcon, EXTCON_CHG_USB_DCP) == 1)
			current_limit = 2000;
		msleep(50);
	} while (current_limit == 0 && time_before(jiffies, timeout));
	return current_limit;
}
static int fusb302_set_src_current(struct fusb302_chip *chip,
enum src_current_status status)
{
int ret = 0;
chip->src_current_status = status;
switch (status) {
case SRC_CURRENT_DEFAULT:
ret = fusb302_i2c_mask_write(chip, FUSB_REG_CONTROL0,
FUSB_REG_CONTROL0_HOST_CUR_MASK,
FUSB_REG_CONTROL0_HOST_CUR_DEF);
break;
case SRC_CURRENT_MEDIUM:
ret = fusb302_i2c_mask_write(chip, FUSB_REG_CONTROL0,
FUSB_REG_CONTROL0_HOST_CUR_MASK,
FUSB_REG_CONTROL0_HOST_CUR_MED);
break;
case SRC_CURRENT_HIGH:
ret = fusb302_i2c_mask_write(chip, FUSB_REG_CONTROL0,
FUSB_REG_CONTROL0_HOST_CUR_MASK,
FUSB_REG_CONTROL0_HOST_CUR_HIGH);
break;
default:
break;
}
return ret;
}
/*
 * Configure the chip's automatic DRP/SNK/SRC toggling engine.
 *
 * Toggling is always stopped and the BC_LVL/COMP_CHNG interrupts are
 * masked first, then the requested mode is programmed.  For any mode
 * other than OFF the TOGDONE interrupt is unmasked, toggling is
 * (re)started, and both CC lines are reported as Open until a toggle
 * result arrives.
 */
static int fusb302_set_toggling(struct fusb302_chip *chip,
				enum toggling_mode mode)
{
	int ret = 0;
	/* first disable toggling */
	ret = fusb302_i2c_clear_bits(chip, FUSB_REG_CONTROL2,
				     FUSB_REG_CONTROL2_TOGGLE);
	if (ret < 0)
		return ret;
	/* mask interrupts for SRC or SNK */
	ret = fusb302_i2c_set_bits(chip, FUSB_REG_MASK,
				   FUSB_REG_MASK_BC_LVL |
				   FUSB_REG_MASK_COMP_CHNG);
	if (ret < 0)
		return ret;
	chip->intr_bc_lvl = false;
	chip->intr_comp_chng = false;
	/* configure toggling mode: none/snk/src/drp */
	switch (mode) {
	case TOGGLING_MODE_OFF:
		ret = fusb302_i2c_mask_write(chip, FUSB_REG_CONTROL2,
					     FUSB_REG_CONTROL2_MODE_MASK,
					     FUSB_REG_CONTROL2_MODE_NONE);
		if (ret < 0)
			return ret;
		break;
	case TOGGLING_MODE_SNK:
		ret = fusb302_i2c_mask_write(chip, FUSB_REG_CONTROL2,
					     FUSB_REG_CONTROL2_MODE_MASK,
					     FUSB_REG_CONTROL2_MODE_UFP);
		if (ret < 0)
			return ret;
		break;
	case TOGGLING_MODE_SRC:
		ret = fusb302_i2c_mask_write(chip, FUSB_REG_CONTROL2,
					     FUSB_REG_CONTROL2_MODE_MASK,
					     FUSB_REG_CONTROL2_MODE_DFP);
		if (ret < 0)
			return ret;
		break;
	case TOGGLING_MODE_DRP:
		ret = fusb302_i2c_mask_write(chip, FUSB_REG_CONTROL2,
					     FUSB_REG_CONTROL2_MODE_MASK,
					     FUSB_REG_CONTROL2_MODE_DRP);
		if (ret < 0)
			return ret;
		break;
	default:
		break;
	}
	if (mode == TOGGLING_MODE_OFF) {
		/* mask TOGDONE interrupt */
		ret = fusb302_i2c_set_bits(chip, FUSB_REG_MASKA,
					   FUSB_REG_MASKA_TOGDONE);
		if (ret < 0)
			return ret;
		chip->intr_togdone = false;
	} else {
		/* Datasheet says vconn MUST be off when toggling */
		WARN(chip->vconn_on, "Vconn is on during toggle start");
		/* unmask TOGDONE interrupt */
		ret = fusb302_i2c_clear_bits(chip, FUSB_REG_MASKA,
					     FUSB_REG_MASKA_TOGDONE);
		if (ret < 0)
			return ret;
		chip->intr_togdone = true;
		/* start toggling */
		ret = fusb302_i2c_set_bits(chip, FUSB_REG_CONTROL2,
					   FUSB_REG_CONTROL2_TOGGLE);
		if (ret < 0)
			return ret;
		/* during toggling, consider cc as Open */
		chip->cc1 = TYPEC_CC_OPEN;
		chip->cc2 = TYPEC_CC_OPEN;
	}
	chip->toggling_mode = mode;
	return ret;
}
/* Human-readable CC status names for log messages. */
static const char * const typec_cc_status_name[] = {
	[TYPEC_CC_OPEN] = "Open",
	[TYPEC_CC_RA] = "Ra",
	[TYPEC_CC_RD] = "Rd",
	[TYPEC_CC_RP_DEF] = "Rp-def",
	[TYPEC_CC_RP_1_5] = "Rp-1.5",
	[TYPEC_CC_RP_3_0] = "Rp-3.0",
];
/* Map a requested CC pull to the source-current advertisement level. */
static const enum src_current_status cc_src_current[] = {
	[TYPEC_CC_OPEN] = SRC_CURRENT_DEFAULT,
	[TYPEC_CC_RA] = SRC_CURRENT_DEFAULT,
	[TYPEC_CC_RD] = SRC_CURRENT_DEFAULT,
	[TYPEC_CC_RP_DEF] = SRC_CURRENT_DEFAULT,
	[TYPEC_CC_RP_1_5] = SRC_CURRENT_MEDIUM,
	[TYPEC_CC_RP_3_0] = SRC_CURRENT_HIGH,
};
/*
 * tcpm callback: apply the requested CC pull (Open / Rd / Rp-*).
 *
 * Stops toggling, programs the SWITCHES0 pull-up/pull-down bits (the
 * pull-up goes only on the active polarity's CC pin), resets the
 * cached cc1/cc2 state, adjusts the advertised source current, and
 * unmasks COMP_CHNG (source) or BC_LVL (sink) accordingly.
 */
static int tcpm_set_cc(struct tcpc_dev *dev, enum typec_cc_status cc)
{
	struct fusb302_chip *chip = container_of(dev, struct fusb302_chip,
						 tcpc_dev);
	u8 switches0_mask = FUSB_REG_SWITCHES0_CC1_PU_EN |
			    FUSB_REG_SWITCHES0_CC2_PU_EN |
			    FUSB_REG_SWITCHES0_CC1_PD_EN |
			    FUSB_REG_SWITCHES0_CC2_PD_EN;
	u8 rd_mda, switches0_data = 0x00;
	int ret = 0;
	mutex_lock(&chip->lock);
	switch (cc) {
	case TYPEC_CC_OPEN:
		break;
	case TYPEC_CC_RD:
		/* Sink: pull both CC pins down. */
		switches0_data |= FUSB_REG_SWITCHES0_CC1_PD_EN |
				  FUSB_REG_SWITCHES0_CC2_PD_EN;
		break;
	case TYPEC_CC_RP_DEF:
	case TYPEC_CC_RP_1_5:
	case TYPEC_CC_RP_3_0:
		/* Source: pull up only the CC pin of the active polarity. */
		switches0_data |= (chip->cc_polarity == TYPEC_POLARITY_CC1) ?
				  FUSB_REG_SWITCHES0_CC1_PU_EN :
				  FUSB_REG_SWITCHES0_CC2_PU_EN;
		break;
	default:
		fusb302_log(chip, "unsupported cc value %s",
			    typec_cc_status_name[cc]);
		ret = -EINVAL;
		goto done;
	}
	fusb302_log(chip, "cc := %s", typec_cc_status_name[cc]);
	ret = fusb302_set_toggling(chip, TOGGLING_MODE_OFF);
	if (ret < 0) {
		fusb302_log(chip, "cannot set toggling mode, ret=%d", ret);
		goto done;
	}
	ret = fusb302_i2c_mask_write(chip, FUSB_REG_SWITCHES0,
				     switches0_mask, switches0_data);
	if (ret < 0) {
		fusb302_log(chip, "cannot set pull-up/-down, ret = %d", ret);
		goto done;
	}
	/* reset the cc status */
	chip->cc1 = TYPEC_CC_OPEN;
	chip->cc2 = TYPEC_CC_OPEN;
	/* adjust current for SRC */
	ret = fusb302_set_src_current(chip, cc_src_current[cc]);
	if (ret < 0) {
		fusb302_log(chip, "cannot set src current %s, ret=%d",
			    typec_cc_status_name[cc], ret);
		goto done;
	}
	/* enable/disable interrupts, BC_LVL for SNK and COMP_CHNG for SRC */
	switch (cc) {
	case TYPEC_CC_RP_DEF:
	case TYPEC_CC_RP_1_5:
	case TYPEC_CC_RP_3_0:
		/* Source: program the Rd detection threshold. */
		rd_mda = rd_mda_value[cc_src_current[cc]];
		ret = fusb302_i2c_write(chip, FUSB_REG_MEASURE, rd_mda);
		if (ret < 0) {
			fusb302_log(chip,
				    "cannot set SRC measure value, ret=%d",
				    ret);
			goto done;
		}
		/* Mask BC_LVL, leaving COMP_CHNG unmasked. */
		ret = fusb302_i2c_mask_write(chip, FUSB_REG_MASK,
					     FUSB_REG_MASK_BC_LVL |
					     FUSB_REG_MASK_COMP_CHNG,
					     FUSB_REG_MASK_BC_LVL);
		if (ret < 0) {
			fusb302_log(chip, "cannot set SRC interrupt, ret=%d",
				    ret);
			goto done;
		}
		chip->intr_comp_chng = true;
		chip->intr_bc_lvl = false;
		break;
	case TYPEC_CC_RD:
		/* Sink: mask COMP_CHNG, leaving BC_LVL unmasked. */
		ret = fusb302_i2c_mask_write(chip, FUSB_REG_MASK,
					     FUSB_REG_MASK_BC_LVL |
					     FUSB_REG_MASK_COMP_CHNG,
					     FUSB_REG_MASK_COMP_CHNG);
		if (ret < 0) {
			/*
			 * NOTE(review): this is the Rd (sink) path but the
			 * message says "SRC interrupt" -- likely meant SNK.
			 */
			fusb302_log(chip, "cannot set SRC interrupt, ret=%d",
				    ret);
			goto done;
		}
		chip->intr_bc_lvl = true;
		chip->intr_comp_chng = false;
		break;
	default:
		break;
	}
done:
	mutex_unlock(&chip->lock);
	return ret;
}
/* tcpm callback: return the cached CC1/CC2 states under the chip lock. */
static int tcpm_get_cc(struct tcpc_dev *dev, enum typec_cc_status *cc1,
		       enum typec_cc_status *cc2)
{
	struct fusb302_chip *chip = container_of(dev, struct fusb302_chip,
						 tcpc_dev);
	mutex_lock(&chip->lock);
	*cc1 = chip->cc1;
	*cc2 = chip->cc2;
	fusb302_log(chip, "cc1=%s, cc2=%s", typec_cc_status_name[*cc1],
		    typec_cc_status_name[*cc2]);
	mutex_unlock(&chip->lock);
	return 0;
}
/*
 * tcpm callback: intentionally a no-op.  This driver tracks polarity
 * itself in chip->cc_polarity, which tcpm_set_cc() and tcpm_set_vconn()
 * consult when programming the switches.
 */
static int tcpm_set_polarity(struct tcpc_dev *dev,
			     enum typec_cc_polarity polarity)
{
	return 0;
}
/*
 * tcpm callback: enable/disable VCONN sourcing.
 *
 * When enabling, VCONN is deliberately switched onto the CC pin
 * *opposite* the active polarity (CC1 polarity => VCONN on CC2),
 * i.e. the pin not used for CC signalling.  Disabling clears both
 * VCONN switch bits.  No-op if the state already matches.
 */
static int tcpm_set_vconn(struct tcpc_dev *dev, bool on)
{
	struct fusb302_chip *chip = container_of(dev, struct fusb302_chip,
						 tcpc_dev);
	int ret = 0;
	u8 switches0_data = 0x00;
	u8 switches0_mask = FUSB_REG_SWITCHES0_VCONN_CC1 |
			    FUSB_REG_SWITCHES0_VCONN_CC2;
	mutex_lock(&chip->lock);
	if (chip->vconn_on == on) {
		fusb302_log(chip, "vconn is already %s", on ? "On" : "Off");
		goto done;
	}
	if (on) {
		switches0_data = (chip->cc_polarity == TYPEC_POLARITY_CC1) ?
				 FUSB_REG_SWITCHES0_VCONN_CC2 :
				 FUSB_REG_SWITCHES0_VCONN_CC1;
	}
	ret = fusb302_i2c_mask_write(chip, FUSB_REG_SWITCHES0,
				     switches0_mask, switches0_data);
	if (ret < 0)
		goto done;
	chip->vconn_on = on;
	fusb302_log(chip, "vconn := %s", on ? "On" : "Off");
done:
	mutex_unlock(&chip->lock);
	return ret;
}
/*
 * tcpc_dev callback: control VBUS sourcing (via the external vbus regulator)
 * and record the requested charge state.  Both @on and @charge transitions
 * are skipped when already in the requested state.
 */
static int tcpm_set_vbus(struct tcpc_dev *dev, bool on, bool charge)
{
	struct fusb302_chip *chip = container_of(dev, struct fusb302_chip,
						 tcpc_dev);
	int ret = 0;
	mutex_lock(&chip->lock);
	if (chip->vbus_on == on) {
		fusb302_log(chip, "vbus is already %s", on ? "On" : "Off");
	} else {
		if (on)
			ret = regulator_enable(chip->vbus);
		else
			ret = regulator_disable(chip->vbus);
		if (ret < 0) {
			fusb302_log(chip, "cannot %s vbus regulator, ret=%d",
				    on ? "enable" : "disable", ret);
			goto done;
		}
		chip->vbus_on = on;
		fusb302_log(chip, "vbus := %s", on ? "On" : "Off");
	}
	/* charge_on is only bookkeeping here; no hardware action is taken. */
	if (chip->charge_on == charge)
		fusb302_log(chip, "charge is already %s",
			    charge ? "On" : "Off");
	else
		chip->charge_on = charge;
done:
	mutex_unlock(&chip->lock);
	return ret;
}
/* Flush the PD transmit FIFO. */
static int fusb302_pd_tx_flush(struct fusb302_chip *chip)
{
	return fusb302_i2c_set_bits(chip, FUSB_REG_CONTROL0,
				    FUSB_REG_CONTROL0_TX_FLUSH);
}
/* Flush the PD receive FIFO. */
static int fusb302_pd_rx_flush(struct fusb302_chip *chip)
{
	return fusb302_i2c_set_bits(chip, FUSB_REG_CONTROL1,
				    FUSB_REG_CONTROL1_RX_FLUSH);
}
/*
 * Enable/disable the chip's automatic GoodCRC reply to received PD packets
 * (AUTO_GCRC bit in SWITCHES1).
 */
static int fusb302_pd_set_auto_goodcrc(struct fusb302_chip *chip, bool on)
{
	if (!on)
		return fusb302_i2c_clear_bits(chip, FUSB_REG_SWITCHES1,
					      FUSB_REG_SWITCHES1_AUTO_GCRC);
	return fusb302_i2c_set_bits(chip, FUSB_REG_SWITCHES1,
				    FUSB_REG_SWITCHES1_AUTO_GCRC);
}
/*
 * Mask (off) or unmask (on) all PD-related interrupts: COLLISION in MASK,
 * RETRYFAIL/HARDSENT/TX_SUCCESS/HARDRESET in MASKA and GCRCSENT in MASKB.
 * Stops at the first I2C error and returns it; 0 on success.
 */
static int fusb302_pd_set_interrupts(struct fusb302_chip *chip, bool on)
{
	const struct {
		u8 reg;
		u8 bits;
	} irq_masks[] = {
		{ FUSB_REG_MASK,  FUSB_REG_MASK_COLLISION },
		{ FUSB_REG_MASKA, FUSB_REG_MASKA_RETRYFAIL |
				  FUSB_REG_MASKA_HARDSENT |
				  FUSB_REG_MASKA_TX_SUCCESS |
				  FUSB_REG_MASKA_HARDRESET },
		{ FUSB_REG_MASKB, FUSB_REG_MASKB_GCRCSENT },
	};
	int i, ret = 0;

	for (i = 0; i < ARRAY_SIZE(irq_masks); i++) {
		/* a set mask bit disables the interrupt, so clear to enable */
		if (on)
			ret = fusb302_i2c_clear_bits(chip, irq_masks[i].reg,
						     irq_masks[i].bits);
		else
			ret = fusb302_i2c_set_bits(chip, irq_masks[i].reg,
						   irq_masks[i].bits);
		if (ret < 0)
			return ret;
	}
	return ret;
}
/*
 * tcpc_dev callback: enable/disable PD message reception.  Flushes both
 * FIFOs first, then configures auto-GoodCRC and the PD interrupt masks to
 * match the requested state.
 */
static int tcpm_set_pd_rx(struct tcpc_dev *dev, bool on)
{
	struct fusb302_chip *chip = container_of(dev, struct fusb302_chip,
						 tcpc_dev);
	int ret = 0;
	mutex_lock(&chip->lock);
	ret = fusb302_pd_rx_flush(chip);
	if (ret < 0) {
		fusb302_log(chip, "cannot flush pd rx buffer, ret=%d", ret);
		goto done;
	}
	ret = fusb302_pd_tx_flush(chip);
	if (ret < 0) {
		fusb302_log(chip, "cannot flush pd tx buffer, ret=%d", ret);
		goto done;
	}
	ret = fusb302_pd_set_auto_goodcrc(chip, on);
	if (ret < 0) {
		fusb302_log(chip, "cannot turn %s auto GCRC, ret=%d",
			    on ? "on" : "off", ret);
		goto done;
	}
	ret = fusb302_pd_set_interrupts(chip, on);
	if (ret < 0) {
		fusb302_log(chip, "cannot turn %s pd interrupts, ret=%d",
			    on ? "on" : "off", ret);
		goto done;
	}
	fusb302_log(chip, "pd := %s", on ? "on" : "off");
done:
	mutex_unlock(&chip->lock);
	return ret;
}
/* Human-readable role names for log messages, indexed by the typec enums. */
static const char * const typec_role_name[] = {
	[TYPEC_SINK]		= "Sink",
	[TYPEC_SOURCE]		= "Source",
};
static const char * const typec_data_role_name[] = {
	[TYPEC_DEVICE]		= "Device",
	[TYPEC_HOST]		= "Host",
};
/*
 * tcpc_dev callback: program the power/data role bits in SWITCHES1, which
 * determine the role fields the chip puts in outgoing PD message headers.
 */
static int tcpm_set_roles(struct tcpc_dev *dev, bool attached,
			  enum typec_role pwr, enum typec_data_role data)
{
	struct fusb302_chip *chip = container_of(dev, struct fusb302_chip,
						 tcpc_dev);
	int ret = 0;
	u8 switches1_mask = FUSB_REG_SWITCHES1_POWERROLE |
			    FUSB_REG_SWITCHES1_DATAROLE;
	u8 switches1_data = 0x00;
	mutex_lock(&chip->lock);
	if (pwr == TYPEC_SOURCE)
		switches1_data |= FUSB_REG_SWITCHES1_POWERROLE;
	if (data == TYPEC_HOST)
		switches1_data |= FUSB_REG_SWITCHES1_DATAROLE;
	ret = fusb302_i2c_mask_write(chip, FUSB_REG_SWITCHES1,
				     switches1_mask, switches1_data);
	if (ret < 0) {
		fusb302_log(chip, "unable to set pd header %s, %s, ret=%d",
			    typec_role_name[pwr], typec_data_role_name[data],
			    ret);
		goto done;
	}
	fusb302_log(chip, "pd header := %s, %s", typec_role_name[pwr],
		    typec_data_role_name[data]);
done:
	mutex_unlock(&chip->lock);
	return ret;
}
/*
 * tcpc_dev callback: start the chip's automatic attach-detection toggling
 * engine in SRC, SNK or DRP mode, advertising the Rp level given by @cc.
 */
static int tcpm_start_toggling(struct tcpc_dev *dev,
			       enum typec_port_type port_type,
			       enum typec_cc_status cc)
{
	struct fusb302_chip *chip = container_of(dev, struct fusb302_chip,
						 tcpc_dev);
	enum toggling_mode mode = TOGGLING_MODE_OFF;
	int ret = 0;
	switch (port_type) {
	case TYPEC_PORT_SRC:
		mode = TOGGLING_MODE_SRC;
		break;
	case TYPEC_PORT_SNK:
		mode = TOGGLING_MODE_SNK;
		break;
	case TYPEC_PORT_DRP:
		mode = TOGGLING_MODE_DRP;
		break;
	}
	mutex_lock(&chip->lock);
	/* program the source current advertisement before toggling starts */
	ret = fusb302_set_src_current(chip, cc_src_current[cc]);
	if (ret < 0) {
		fusb302_log(chip, "unable to set src current %s, ret=%d",
			    typec_cc_status_name[cc], ret);
		goto done;
	}
	ret = fusb302_set_toggling(chip, mode);
	if (ret < 0) {
		fusb302_log(chip,
			    "unable to start drp toggling, ret=%d", ret);
		goto done;
	}
	fusb302_log(chip, "start drp toggling");
done:
	mutex_unlock(&chip->lock);
	return ret;
}
/*
 * Queue a PD message into the chip's TX FIFO and trigger transmission.
 * The FIFO stream is built from control tokens: SOP preamble, PACKSYM with
 * the payload byte count, the header+payload bytes, then JAMCRC/EOP/TXOFF
 * and finally TXON to start sending.
 */
static int fusb302_pd_send_message(struct fusb302_chip *chip,
				   const struct pd_message *msg)
{
	int ret = 0;
	/* 4 SOP + 1 packsym + 2 header + up to 28 payload + 4 trailer = 39 */
	u8 buf[40];
	u8 pos = 0;
	int len;
	/* SOP tokens */
	buf[pos++] = FUSB302_TKN_SYNC1;
	buf[pos++] = FUSB302_TKN_SYNC1;
	buf[pos++] = FUSB302_TKN_SYNC1;
	buf[pos++] = FUSB302_TKN_SYNC2;
	len = pd_header_cnt_le(msg->header) * 4;
	/* plug 2 for header */
	len += 2;
	/* PACKSYM carries the length in its low 5 bits, so 31 is the max */
	if (len > 0x1F) {
		fusb302_log(chip,
			    "PD message too long %d (incl. header)", len);
		return -EINVAL;
	}
	/* packsym tells the FUSB302 chip that the next X bytes are payload */
	buf[pos++] = FUSB302_TKN_PACKSYM | (len & 0x1F);
	memcpy(&buf[pos], &msg->header, sizeof(msg->header));
	pos += sizeof(msg->header);
	len -= 2;
	memcpy(&buf[pos], msg->payload, len);
	pos += len;
	/* CRC */
	buf[pos++] = FUSB302_TKN_JAMCRC;
	/* EOP */
	buf[pos++] = FUSB302_TKN_EOP;
	/* turn tx off after sending message */
	buf[pos++] = FUSB302_TKN_TXOFF;
	/* start transmission */
	buf[pos++] = FUSB302_TKN_TXON;
	ret = fusb302_i2c_block_write(chip, FUSB_REG_FIFOS, pos, buf);
	if (ret < 0)
		return ret;
	fusb302_log(chip, "sending PD message header: %x", msg->header);
	fusb302_log(chip, "sending PD message len: %d", len);
	return ret;
}
/* Ask the chip to transmit a PD Hard Reset signal on CC. */
static int fusb302_pd_send_hardreset(struct fusb302_chip *chip)
{
	return fusb302_i2c_set_bits(chip, FUSB_REG_CONTROL3,
				    FUSB_REG_CONTROL3_SEND_HARDRESET);
}
/* Log-friendly names for enum tcpm_transmit_type values. */
static const char * const transmit_type_name[] = {
	[TCPC_TX_SOP]			= "SOP",
	[TCPC_TX_SOP_PRIME]		= "SOP'",
	[TCPC_TX_SOP_PRIME_PRIME]	= "SOP''",
	[TCPC_TX_SOP_DEBUG_PRIME]	= "DEBUG'",
	[TCPC_TX_SOP_DEBUG_PRIME_PRIME]	= "DEBUG''",
	[TCPC_TX_HARD_RESET]		= "HARD_RESET",
	[TCPC_TX_CABLE_RESET]		= "CABLE_RESET",
	[TCPC_TX_BIST_MODE_2]		= "BIST_MODE_2",
};
/*
 * tcpc_dev callback: transmit a PD message or a hard reset.  Only SOP and
 * HARD_RESET are supported by this chip; anything else returns -EINVAL.
 */
static int tcpm_pd_transmit(struct tcpc_dev *dev, enum tcpm_transmit_type type,
			    const struct pd_message *msg, unsigned int negotiated_rev)
{
	struct fusb302_chip *chip = container_of(dev, struct fusb302_chip,
						 tcpc_dev);
	int ret = 0;
	mutex_lock(&chip->lock);
	switch (type) {
	case TCPC_TX_SOP:
		/* nRetryCount 3 in P2.0 spec, whereas 2 in PD3.0 spec */
		ret = fusb302_enable_tx_auto_retries(chip, negotiated_rev > PD_REV20 ?
						     FUSB_REG_CONTROL3_N_RETRIES_2 :
						     FUSB_REG_CONTROL3_N_RETRIES_3);
		if (ret < 0)
			fusb302_log(chip, "Cannot update retry count ret=%d", ret);
		/* still try to send even if the retry-count update failed */
		ret = fusb302_pd_send_message(chip, msg);
		if (ret < 0)
			fusb302_log(chip,
				    "cannot send PD message, ret=%d", ret);
		break;
	case TCPC_TX_HARD_RESET:
		ret = fusb302_pd_send_hardreset(chip);
		if (ret < 0)
			fusb302_log(chip,
				    "cannot send hardreset, ret=%d", ret);
		break;
	default:
		fusb302_log(chip, "type %s not supported",
			    transmit_type_name[type]);
		ret = -EINVAL;
	}
	mutex_unlock(&chip->lock);
	return ret;
}
/*
 * Translate the measured BC_LVL field from STATUS0 into the corresponding
 * advertised Rp level; any other value reads as an open line.
 */
static enum typec_cc_status fusb302_bc_lvl_to_cc(u8 bc_lvl)
{
	switch (bc_lvl) {
	case FUSB_REG_STATUS0_BC_LVL_1230_MAX:
		return TYPEC_CC_RP_3_0;
	case FUSB_REG_STATUS0_BC_LVL_600_1230:
		return TYPEC_CC_RP_1_5;
	case FUSB_REG_STATUS0_BC_LVL_200_600:
		return TYPEC_CC_RP_DEF;
	default:
		return TYPEC_CC_OPEN;
	}
}
/*
 * Delayed-work handler for BC_LVL interrupts: re-reads STATUS0, converts the
 * BC_LVL field to a CC status on the active polarity line and notifies TCPM
 * on change.  If CC activity is still ongoing, handling is re-deferred to
 * debounce against PD signaling.
 */
static void fusb302_bc_lvl_handler_work(struct work_struct *work)
{
	struct fusb302_chip *chip = container_of(work, struct fusb302_chip,
						 bc_lvl_handler.work);
	int ret = 0;
	u8 status0;
	u8 bc_lvl;
	enum typec_cc_status cc_status;
	mutex_lock(&chip->lock);
	/* interrupt may have been masked since this work was scheduled */
	if (!chip->intr_bc_lvl) {
		fusb302_log(chip, "BC_LVL interrupt is turned off, abort");
		goto done;
	}
	ret = fusb302_i2c_read(chip, FUSB_REG_STATUS0, &status0);
	if (ret < 0)
		goto done;
	fusb302_log(chip, "BC_LVL handler, status0=0x%02x", status0);
	if (status0 & FUSB_REG_STATUS0_ACTIVITY) {
		fusb302_log(chip, "CC activities detected, delay handling");
		mod_delayed_work(chip->wq, &chip->bc_lvl_handler,
				 msecs_to_jiffies(T_BC_LVL_DEBOUNCE_DELAY_MS));
		goto done;
	}
	bc_lvl = status0 & FUSB_REG_STATUS0_BC_LVL_MASK;
	cc_status = fusb302_bc_lvl_to_cc(bc_lvl);
	/* only the pin selected by cc_polarity is measured by the chip */
	if (chip->cc_polarity == TYPEC_POLARITY_CC1) {
		if (chip->cc1 != cc_status) {
			fusb302_log(chip, "cc1: %s -> %s",
				    typec_cc_status_name[chip->cc1],
				    typec_cc_status_name[cc_status]);
			chip->cc1 = cc_status;
			tcpm_cc_change(chip->tcpm_port);
		}
	} else {
		if (chip->cc2 != cc_status) {
			fusb302_log(chip, "cc2: %s -> %s",
				    typec_cc_status_name[chip->cc2],
				    typec_cc_status_name[cc_status]);
			chip->cc2 = cc_status;
			tcpm_cc_change(chip->tcpm_port);
		}
	}
done:
	mutex_unlock(&chip->lock);
}
/* Wire up all tcpc_dev operation callbacks before registering with TCPM. */
static void init_tcpc_dev(struct tcpc_dev *fusb302_tcpc_dev)
{
	fusb302_tcpc_dev->init = tcpm_init;
	fusb302_tcpc_dev->get_vbus = tcpm_get_vbus;
	fusb302_tcpc_dev->get_current_limit = tcpm_get_current_limit;
	fusb302_tcpc_dev->set_cc = tcpm_set_cc;
	fusb302_tcpc_dev->get_cc = tcpm_get_cc;
	fusb302_tcpc_dev->set_polarity = tcpm_set_polarity;
	fusb302_tcpc_dev->set_vconn = tcpm_set_vconn;
	fusb302_tcpc_dev->set_vbus = tcpm_set_vbus;
	fusb302_tcpc_dev->set_pd_rx = tcpm_set_pd_rx;
	fusb302_tcpc_dev->set_roles = tcpm_set_roles;
	fusb302_tcpc_dev->start_toggling = tcpm_start_toggling;
	fusb302_tcpc_dev->pd_transmit = tcpm_pd_transmit;
}
/* Log-friendly names for enum typec_cc_polarity values. */
static const char * const cc_polarity_name[] = {
	[TYPEC_POLARITY_CC1]	= "Polarity_CC1",
	[TYPEC_POLARITY_CC2]	= "Polarity_CC2",
};
/*
 * Program SWITCHES0/SWITCHES1 for the given polarity: select the measure
 * block and TX driver on the active CC pin, optionally enable pull-up on it
 * and pull-down on both pins, and keep VCONN on the opposite pin if it is
 * currently sourced.  Updates chip->cc_polarity on success.
 */
static int fusb302_set_cc_polarity_and_pull(struct fusb302_chip *chip,
					    enum typec_cc_polarity cc_polarity,
					    bool pull_up, bool pull_down)
{
	int ret = 0;
	u8 switches0_data = 0x00;
	u8 switches1_mask = FUSB_REG_SWITCHES1_TXCC1_EN |
			    FUSB_REG_SWITCHES1_TXCC2_EN;
	u8 switches1_data = 0x00;
	if (pull_down)
		switches0_data |= FUSB_REG_SWITCHES0_CC1_PD_EN |
				  FUSB_REG_SWITCHES0_CC2_PD_EN;
	if (cc_polarity == TYPEC_POLARITY_CC1) {
		switches0_data |= FUSB_REG_SWITCHES0_MEAS_CC1;
		/* keep sourcing VCONN on the non-active pin */
		if (chip->vconn_on)
			switches0_data |= FUSB_REG_SWITCHES0_VCONN_CC2;
		if (pull_up)
			switches0_data |= FUSB_REG_SWITCHES0_CC1_PU_EN;
		switches1_data = FUSB_REG_SWITCHES1_TXCC1_EN;
	} else {
		switches0_data |= FUSB_REG_SWITCHES0_MEAS_CC2;
		if (chip->vconn_on)
			switches0_data |= FUSB_REG_SWITCHES0_VCONN_CC1;
		if (pull_up)
			switches0_data |= FUSB_REG_SWITCHES0_CC2_PU_EN;
		switches1_data = FUSB_REG_SWITCHES1_TXCC2_EN;
	}
	/* full write: any switch bit not set above is intentionally cleared */
	ret = fusb302_i2c_write(chip, FUSB_REG_SWITCHES0, switches0_data);
	if (ret < 0)
		return ret;
	ret = fusb302_i2c_mask_write(chip, FUSB_REG_SWITCHES1,
				     switches1_mask, switches1_data);
	if (ret < 0)
		return ret;
	chip->cc_polarity = cc_polarity;
	return ret;
}
/*
 * Handle a TOGDONE event that stopped in a sink state: set polarity and
 * pull-down on the detected pin, read the partner's Rp level via BC_LVL,
 * report the new CC state to TCPM, stop toggling and unmask BC_LVL for
 * subsequent level changes.  Restarts toggling if the line reads open.
 */
static int fusb302_handle_togdone_snk(struct fusb302_chip *chip,
				      u8 togdone_result)
{
	int ret = 0;
	u8 status0;
	u8 bc_lvl;
	enum typec_cc_polarity cc_polarity;
	enum typec_cc_status cc_status_active, cc1, cc2;
	/* set polarity and pull_up, pull_down */
	cc_polarity = (togdone_result == FUSB_REG_STATUS1A_TOGSS_SNK1) ?
		      TYPEC_POLARITY_CC1 : TYPEC_POLARITY_CC2;
	ret = fusb302_set_cc_polarity_and_pull(chip, cc_polarity, false, true);
	if (ret < 0) {
		fusb302_log(chip, "cannot set cc polarity %s, ret=%d",
			    cc_polarity_name[cc_polarity], ret);
		return ret;
	}
	/* fusb302_set_cc_polarity() has set the correct measure block */
	ret = fusb302_i2c_read(chip, FUSB_REG_STATUS0, &status0);
	if (ret < 0)
		return ret;
	bc_lvl = status0 & FUSB_REG_STATUS0_BC_LVL_MASK;
	cc_status_active = fusb302_bc_lvl_to_cc(bc_lvl);
	/* restart toggling if the cc status on the active line is OPEN */
	if (cc_status_active == TYPEC_CC_OPEN) {
		fusb302_log(chip, "restart toggling as CC_OPEN detected");
		ret = fusb302_set_toggling(chip, chip->toggling_mode);
		return ret;
	}
	/* update tcpm with the new cc value */
	cc1 = (cc_polarity == TYPEC_POLARITY_CC1) ?
	      cc_status_active : TYPEC_CC_OPEN;
	cc2 = (cc_polarity == TYPEC_POLARITY_CC2) ?
	      cc_status_active : TYPEC_CC_OPEN;
	if ((chip->cc1 != cc1) || (chip->cc2 != cc2)) {
		chip->cc1 = cc1;
		chip->cc2 = cc2;
		tcpm_cc_change(chip->tcpm_port);
	}
	/* turn off toggling */
	ret = fusb302_set_toggling(chip, TOGGLING_MODE_OFF);
	if (ret < 0) {
		fusb302_log(chip,
			    "cannot set toggling mode off, ret=%d", ret);
		return ret;
	}
	/* unmask bc_lvl interrupt */
	ret = fusb302_i2c_clear_bits(chip, FUSB_REG_MASK, FUSB_REG_MASK_BC_LVL);
	if (ret < 0) {
		fusb302_log(chip,
			    "cannot unmask bc_lcl interrupt, ret=%d", ret);
		return ret;
	}
	chip->intr_bc_lvl = true;
	fusb302_log(chip, "detected cc1=%s, cc2=%s",
		    typec_cc_status_name[cc1],
		    typec_cc_status_name[cc2]);
	return ret;
}
/*
 * Measure one CC pin while acting as source and classify its termination.
 *
 * On success returns 0 and stores TYPEC_CC_OPEN, TYPEC_CC_RD or TYPEC_CC_RA
 * in *cc; on I2C error returns < 0.  The classification compares the pin
 * against the Rd threshold first (above => open), then against the Ra
 * threshold (above => Rd, below => Ra).
 */
static int fusb302_get_src_cc_status(struct fusb302_chip *chip,
				     enum typec_cc_polarity cc_polarity,
				     enum typec_cc_status *cc)
{
	u8 ra_mda = ra_mda_value[chip->src_current_status];
	u8 rd_mda = rd_mda_value[chip->src_current_status];
	u8 switches0_data, status0;
	int ret;
	/* Step 1: Set switches so that we measure the right CC pin */
	switches0_data = (cc_polarity == TYPEC_POLARITY_CC1) ?
		FUSB_REG_SWITCHES0_CC1_PU_EN | FUSB_REG_SWITCHES0_MEAS_CC1 :
		FUSB_REG_SWITCHES0_CC2_PU_EN | FUSB_REG_SWITCHES0_MEAS_CC2;
	ret = fusb302_i2c_write(chip, FUSB_REG_SWITCHES0, switches0_data);
	if (ret < 0)
		return ret;
	/*
	 * Read back for the debug log; check the result so we never log an
	 * uninitialized byte (the original ignored this return value).
	 */
	ret = fusb302_i2c_read(chip, FUSB_REG_SWITCHES0, &status0);
	if (ret < 0)
		return ret;
	fusb302_log(chip, "get_src_cc_status switches: 0x%0x", status0);
	/* Step 2: Set compararator volt to differentiate between Open and Rd */
	ret = fusb302_i2c_write(chip, FUSB_REG_MEASURE, rd_mda);
	if (ret < 0)
		return ret;
	/* allow the comparator to settle before sampling */
	usleep_range(50, 100);
	ret = fusb302_i2c_read(chip, FUSB_REG_STATUS0, &status0);
	if (ret < 0)
		return ret;
	fusb302_log(chip, "get_src_cc_status rd_mda status0: 0x%0x", status0);
	if (status0 & FUSB_REG_STATUS0_COMP) {
		*cc = TYPEC_CC_OPEN;
		return 0;
	}
	/* Step 3: Set compararator input to differentiate between Rd and Ra. */
	ret = fusb302_i2c_write(chip, FUSB_REG_MEASURE, ra_mda);
	if (ret < 0)
		return ret;
	usleep_range(50, 100);
	ret = fusb302_i2c_read(chip, FUSB_REG_STATUS0, &status0);
	if (ret < 0)
		return ret;
	fusb302_log(chip, "get_src_cc_status ra_mda status0: 0x%0x", status0);
	if (status0 & FUSB_REG_STATUS0_COMP)
		*cc = TYPEC_CC_RD;
	else
		*cc = TYPEC_CC_RA;
	return 0;
}
/*
 * Handle a TOGDONE event that stopped in a source state: measure both CC
 * pins (starting with the one the toggle engine stopped on), derive the
 * polarity from which pin shows Rd, program polarity/pull-up, report the CC
 * states to TCPM and arm the COMP_CHNG interrupt for unplug detection.
 * Restarts toggling when the pin combination is not a valid sink attach.
 */
static int fusb302_handle_togdone_src(struct fusb302_chip *chip,
				      u8 togdone_result)
{
	/*
	 * - set polarity (measure cc, vconn, tx)
	 * - set pull_up, pull_down
	 * - set cc1, cc2, and update to tcpm_port
	 * - set I_COMP interrupt on
	 */
	int ret = 0;
	u8 rd_mda = rd_mda_value[chip->src_current_status];
	enum toggling_mode toggling_mode = chip->toggling_mode;
	enum typec_cc_polarity cc_polarity;
	enum typec_cc_status cc1, cc2;
	/*
	 * The toggle-engine will stop in a src state if it sees either Ra or
	 * Rd. Determine the status for both CC pins, starting with the one
	 * where toggling stopped, as that is where the switches point now.
	 */
	if (togdone_result == FUSB_REG_STATUS1A_TOGSS_SRC1)
		ret = fusb302_get_src_cc_status(chip, TYPEC_POLARITY_CC1, &cc1);
	else
		ret = fusb302_get_src_cc_status(chip, TYPEC_POLARITY_CC2, &cc2);
	if (ret < 0)
		return ret;
	/* we must turn off toggling before we can measure the other pin */
	ret = fusb302_set_toggling(chip, TOGGLING_MODE_OFF);
	if (ret < 0) {
		fusb302_log(chip, "cannot set toggling mode off, ret=%d", ret);
		return ret;
	}
	/* get the status of the other pin */
	if (togdone_result == FUSB_REG_STATUS1A_TOGSS_SRC1)
		ret = fusb302_get_src_cc_status(chip, TYPEC_POLARITY_CC2, &cc2);
	else
		ret = fusb302_get_src_cc_status(chip, TYPEC_POLARITY_CC1, &cc1);
	if (ret < 0)
		return ret;
	/* determine polarity based on the status of both pins */
	if (cc1 == TYPEC_CC_RD &&
	    (cc2 == TYPEC_CC_OPEN || cc2 == TYPEC_CC_RA)) {
		cc_polarity = TYPEC_POLARITY_CC1;
	} else if (cc2 == TYPEC_CC_RD &&
		   (cc1 == TYPEC_CC_OPEN || cc1 == TYPEC_CC_RA)) {
		cc_polarity = TYPEC_POLARITY_CC2;
	} else {
		fusb302_log(chip, "unexpected CC status cc1=%s, cc2=%s, restarting toggling",
			    typec_cc_status_name[cc1],
			    typec_cc_status_name[cc2]);
		return fusb302_set_toggling(chip, toggling_mode);
	}
	/* set polarity and pull_up, pull_down */
	ret = fusb302_set_cc_polarity_and_pull(chip, cc_polarity, true, false);
	if (ret < 0) {
		fusb302_log(chip, "cannot set cc polarity %s, ret=%d",
			    cc_polarity_name[cc_polarity], ret);
		return ret;
	}
	/* update tcpm with the new cc value */
	if ((chip->cc1 != cc1) || (chip->cc2 != cc2)) {
		chip->cc1 = cc1;
		chip->cc2 = cc2;
		tcpm_cc_change(chip->tcpm_port);
	}
	/* set MDAC to Rd threshold, and unmask I_COMP for unplug detection */
	ret = fusb302_i2c_write(chip, FUSB_REG_MEASURE, rd_mda);
	if (ret < 0)
		return ret;
	/* unmask comp_chng interrupt */
	ret = fusb302_i2c_clear_bits(chip, FUSB_REG_MASK,
				     FUSB_REG_MASK_COMP_CHNG);
	if (ret < 0) {
		fusb302_log(chip,
			    "cannot unmask comp_chng interrupt, ret=%d", ret);
		return ret;
	}
	chip->intr_comp_chng = true;
	fusb302_log(chip, "detected cc1=%s, cc2=%s",
		    typec_cc_status_name[cc1],
		    typec_cc_status_name[cc2]);
	return ret;
}
/*
 * Dispatch a TOGDONE interrupt: read STATUS1A, extract the toggle result and
 * hand off to the sink or source handler.  Audio-accessory and unknown
 * results restart the toggling engine.
 */
static int fusb302_handle_togdone(struct fusb302_chip *chip)
{
	int ret = 0;
	u8 status1a;
	u8 togdone_result;
	ret = fusb302_i2c_read(chip, FUSB_REG_STATUS1A, &status1a);
	if (ret < 0)
		return ret;
	togdone_result = (status1a >> FUSB_REG_STATUS1A_TOGSS_POS) &
			 FUSB_REG_STATUS1A_TOGSS_MASK;
	switch (togdone_result) {
	case FUSB_REG_STATUS1A_TOGSS_SNK1:
	case FUSB_REG_STATUS1A_TOGSS_SNK2:
		return fusb302_handle_togdone_snk(chip, togdone_result);
	case FUSB_REG_STATUS1A_TOGSS_SRC1:
	case FUSB_REG_STATUS1A_TOGSS_SRC2:
		return fusb302_handle_togdone_src(chip, togdone_result);
	case FUSB_REG_STATUS1A_TOGSS_AA:
		/* doesn't support */
		fusb302_log(chip, "AudioAccessory not supported");
		fusb302_set_toggling(chip, chip->toggling_mode);
		break;
	default:
		fusb302_log(chip, "TOGDONE with an invalid state: %d",
			    togdone_result);
		fusb302_set_toggling(chip, chip->toggling_mode);
		break;
	}
	return ret;
}
/* Reset only the chip's PD logic (not the whole chip). */
static int fusb302_pd_reset(struct fusb302_chip *chip)
{
	return fusb302_i2c_set_bits(chip, FUSB_REG_RESET,
				    FUSB_REG_RESET_PD_RESET);
}
/*
 * Drain one PD message from the RX FIFO: SOP token, 2-byte header, payload
 * (length from the header's object count), then 4 CRC bytes.  GoodCRC
 * messages complete the previous transmission; everything else is passed to
 * TCPM for processing.
 */
static int fusb302_pd_read_message(struct fusb302_chip *chip,
				   struct pd_message *msg)
{
	int ret = 0;
	u8 token;
	u8 crc[4];
	int len;
	/* first SOP token */
	ret = fusb302_i2c_read(chip, FUSB_REG_FIFOS, &token);
	if (ret < 0)
		return ret;
	ret = fusb302_i2c_block_read(chip, FUSB_REG_FIFOS, 2,
				     (u8 *)&msg->header);
	if (ret < 0)
		return ret;
	len = pd_header_cnt_le(msg->header) * 4;
	/* reject payloads larger than the pd_message payload array */
	if (len > PD_MAX_PAYLOAD * 4) {
		fusb302_log(chip, "PD message too long %d", len);
		return -EINVAL;
	}
	if (len > 0) {
		ret = fusb302_i2c_block_read(chip, FUSB_REG_FIFOS, len,
					     (u8 *)msg->payload);
		if (ret < 0)
			return ret;
	}
	/* another 4 bytes to read CRC out */
	ret = fusb302_i2c_block_read(chip, FUSB_REG_FIFOS, 4, crc);
	if (ret < 0)
		return ret;
	fusb302_log(chip, "PD message header: %x", msg->header);
	fusb302_log(chip, "PD message len: %d", len);
	/*
	 * Check if we've read off a GoodCRC message. If so then indicate to
	 * TCPM that the previous transmission has completed. Otherwise we pass
	 * the received message over to TCPM for processing.
	 *
	 * We make this check here instead of basing the reporting decision on
	 * the IRQ event type, as it's possible for the chip to report the
	 * TX_SUCCESS and GCRCSENT events out of order on occasion, so we need
	 * to check the message type to ensure correct reporting to TCPM.
	 */
	if ((!len) && (pd_header_type_le(msg->header) == PD_CTRL_GOOD_CRC))
		tcpm_pd_transmit_complete(chip->tcpm_port, TCPC_TX_SUCCESS);
	else
		tcpm_pd_receive(chip->tcpm_port, msg);
	return ret;
}
/*
 * Hard IRQ handler: defers all work to fusb302_irq_work.  While suspended,
 * only a flag is recorded; resume will schedule the work.  The level IRQ is
 * disabled here and re-enabled at the end of the work handler.
 */
static irqreturn_t fusb302_irq_intn(int irq, void *dev_id)
{
	struct fusb302_chip *chip = dev_id;
	unsigned long flags;
	/* Disable our level triggered IRQ until our irq_work has cleared it */
	disable_irq_nosync(chip->gpio_int_n_irq);
	spin_lock_irqsave(&chip->irq_lock, flags);
	if (chip->irq_suspended)
		chip->irq_while_suspended = true;
	else
		schedule_work(&chip->irq_work);
	spin_unlock_irqrestore(&chip->irq_lock, flags);
	return IRQ_HANDLED;
}
/*
 * Threaded interrupt work: reads all three interrupt registers plus STATUS0
 * once, then dispatches every pending event (VBUS change, TOGDONE, BC_LVL,
 * COMP_CHNG/detach, PD collision/retry-fail, hard reset sent/received,
 * TX success, GoodCRC sent) under the chip lock.  Re-enables the level IRQ
 * on exit.
 */
static void fusb302_irq_work(struct work_struct *work)
{
	struct fusb302_chip *chip = container_of(work, struct fusb302_chip,
						 irq_work);
	int ret = 0;
	u8 interrupt;
	u8 interrupta;
	u8 interruptb;
	u8 status0;
	bool vbus_present;
	bool comp_result;
	bool intr_togdone;
	bool intr_bc_lvl;
	bool intr_comp_chng;
	struct pd_message pd_msg;
	mutex_lock(&chip->lock);
	/* grab a snapshot of intr flags */
	intr_togdone = chip->intr_togdone;
	intr_bc_lvl = chip->intr_bc_lvl;
	intr_comp_chng = chip->intr_comp_chng;
	ret = fusb302_i2c_read(chip, FUSB_REG_INTERRUPT, &interrupt);
	if (ret < 0)
		goto done;
	ret = fusb302_i2c_read(chip, FUSB_REG_INTERRUPTA, &interrupta);
	if (ret < 0)
		goto done;
	ret = fusb302_i2c_read(chip, FUSB_REG_INTERRUPTB, &interruptb);
	if (ret < 0)
		goto done;
	ret = fusb302_i2c_read(chip, FUSB_REG_STATUS0, &status0);
	if (ret < 0)
		goto done;
	fusb302_log(chip,
		    "IRQ: 0x%02x, a: 0x%02x, b: 0x%02x, status0: 0x%02x",
		    interrupt, interrupta, interruptb, status0);
	if (interrupt & FUSB_REG_INTERRUPT_VBUSOK) {
		vbus_present = !!(status0 & FUSB_REG_STATUS0_VBUSOK);
		fusb302_log(chip, "IRQ: VBUS_OK, vbus=%s",
			    vbus_present ? "On" : "Off");
		if (vbus_present != chip->vbus_present) {
			chip->vbus_present = vbus_present;
			tcpm_vbus_change(chip->tcpm_port);
		}
	}
	if ((interrupta & FUSB_REG_INTERRUPTA_TOGDONE) && intr_togdone) {
		fusb302_log(chip, "IRQ: TOGDONE");
		ret = fusb302_handle_togdone(chip);
		if (ret < 0) {
			fusb302_log(chip,
				    "handle togdone error, ret=%d", ret);
			goto done;
		}
	}
	if ((interrupt & FUSB_REG_INTERRUPT_BC_LVL) && intr_bc_lvl) {
		fusb302_log(chip, "IRQ: BC_LVL, handler pending");
		/*
		 * as BC_LVL interrupt can be affected by PD activity,
		 * apply delay to for the handler to wait for the PD
		 * signaling to finish.
		 */
		mod_delayed_work(chip->wq, &chip->bc_lvl_handler,
				 msecs_to_jiffies(T_BC_LVL_DEBOUNCE_DELAY_MS));
	}
	if ((interrupt & FUSB_REG_INTERRUPT_COMP_CHNG) && intr_comp_chng) {
		comp_result = !!(status0 & FUSB_REG_STATUS0_COMP);
		fusb302_log(chip, "IRQ: COMP_CHNG, comp=%s",
			    comp_result ? "true" : "false");
		if (comp_result) {
			/* cc level > Rd_threshold, detach */
			chip->cc1 = TYPEC_CC_OPEN;
			chip->cc2 = TYPEC_CC_OPEN;
			tcpm_cc_change(chip->tcpm_port);
		}
	}
	if (interrupt & FUSB_REG_INTERRUPT_COLLISION) {
		fusb302_log(chip, "IRQ: PD collision");
		tcpm_pd_transmit_complete(chip->tcpm_port, TCPC_TX_FAILED);
	}
	if (interrupta & FUSB_REG_INTERRUPTA_RETRYFAIL) {
		fusb302_log(chip, "IRQ: PD retry failed");
		tcpm_pd_transmit_complete(chip->tcpm_port, TCPC_TX_FAILED);
	}
	if (interrupta & FUSB_REG_INTERRUPTA_HARDSENT) {
		fusb302_log(chip, "IRQ: PD hardreset sent");
		ret = fusb302_pd_reset(chip);
		if (ret < 0) {
			fusb302_log(chip, "cannot PD reset, ret=%d", ret);
			goto done;
		}
		tcpm_pd_transmit_complete(chip->tcpm_port, TCPC_TX_SUCCESS);
	}
	if (interrupta & FUSB_REG_INTERRUPTA_TX_SUCCESS) {
		fusb302_log(chip, "IRQ: PD tx success");
		/* drain the RX FIFO; see comment in fusb302_pd_read_message */
		ret = fusb302_pd_read_message(chip, &pd_msg);
		if (ret < 0) {
			fusb302_log(chip,
				    "cannot read in PD message, ret=%d", ret);
			goto done;
		}
	}
	if (interrupta & FUSB_REG_INTERRUPTA_HARDRESET) {
		fusb302_log(chip, "IRQ: PD received hardreset");
		ret = fusb302_pd_reset(chip);
		if (ret < 0) {
			fusb302_log(chip, "cannot PD reset, ret=%d", ret);
			goto done;
		}
		tcpm_pd_hard_reset(chip->tcpm_port);
	}
	if (interruptb & FUSB_REG_INTERRUPTB_GCRCSENT) {
		fusb302_log(chip, "IRQ: PD sent good CRC");
		ret = fusb302_pd_read_message(chip, &pd_msg);
		if (ret < 0) {
			fusb302_log(chip,
				    "cannot read in PD message, ret=%d", ret);
			goto done;
		}
	}
done:
	mutex_unlock(&chip->lock);
	/* level IRQ was disabled in fusb302_irq_intn */
	enable_irq(chip->gpio_int_n_irq);
}
/*
 * Fallback IRQ setup when the i2c client has no IRQ: acquire the "fcs,int_n"
 * GPIO and map it to an IRQ number stored in chip->gpio_int_n_irq.
 */
static int init_gpio(struct fusb302_chip *chip)
{
	struct device *dev = chip->dev;
	int ret = 0;
	chip->gpio_int_n = devm_gpiod_get(dev, "fcs,int_n", GPIOD_IN);
	if (IS_ERR(chip->gpio_int_n)) {
		dev_err(dev, "failed to request gpio_int_n\n");
		return PTR_ERR(chip->gpio_int_n);
	}
	ret = gpiod_to_irq(chip->gpio_int_n);
	if (ret < 0) {
		dev_err(dev,
			"cannot request IRQ for GPIO Int_N, ret=%d", ret);
		return ret;
	}
	chip->gpio_int_n_irq = ret;
	return 0;
}
/*
 * Default connector capabilities used when the platform describes no
 * "connector" fwnode: dual-role 5V/400mA fixed PDOs for both directions.
 */
#define PDO_FIXED_FLAGS \
	(PDO_FIXED_DUAL_ROLE | PDO_FIXED_DATA_SWAP | PDO_FIXED_USB_COMM)
static const u32 src_pdo[] = {
	PDO_FIXED(5000, 400, PDO_FIXED_FLAGS)
};
static const u32 snk_pdo[] = {
	PDO_FIXED(5000, 400, PDO_FIXED_FLAGS)
};
static const struct property_entry port_props[] = {
	PROPERTY_ENTRY_STRING("data-role", "dual"),
	PROPERTY_ENTRY_STRING("power-role", "dual"),
	PROPERTY_ENTRY_STRING("try-power-role", "sink"),
	PROPERTY_ENTRY_U32_ARRAY("source-pdos", src_pdo),
	PROPERTY_ENTRY_U32_ARRAY("sink-pdos", snk_pdo),
	PROPERTY_ENTRY_U32("op-sink-microwatt", 2500000),
	{ }
};
/*
 * Return the "connector" child fwnode if the platform provides one,
 * otherwise synthesize a software node from the default port_props.
 */
static struct fwnode_handle *fusb302_fwnode_get(struct device *dev)
{
	struct fwnode_handle *fwnode =
		device_get_named_child_node(dev, "connector");

	return fwnode ? fwnode : fwnode_create_software_node(port_props, NULL);
}
/*
 * i2c probe: allocate and initialize the chip state, resolve the optional
 * extcon and vbus regulator, set up the workqueue/works, obtain the fwnode
 * and IRQ, register the TCPM port and finally request the interrupt.
 * Unwinds in reverse order on failure.
 */
static int fusb302_probe(struct i2c_client *client)
{
	struct fusb302_chip *chip;
	struct i2c_adapter *adapter = client->adapter;
	struct device *dev = &client->dev;
	const char *name;
	int ret = 0;
	/* the driver depends on I2C block transfers for FIFO access */
	if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_I2C_BLOCK)) {
		dev_err(&client->dev,
			"I2C/SMBus block functionality not supported!\n");
		return -ENODEV;
	}
	chip = devm_kzalloc(&client->dev, sizeof(*chip), GFP_KERNEL);
	if (!chip)
		return -ENOMEM;
	chip->i2c_client = client;
	chip->dev = &client->dev;
	mutex_init(&chip->lock);
	/*
	 * Devicetree platforms should get extcon via phandle (not yet
	 * supported). On ACPI platforms, we get the name from a device prop.
	 * This device prop is for kernel internal use only and is expected
	 * to be set by the platform code which also registers the i2c client
	 * for the fusb302.
	 */
	if (device_property_read_string(dev, "linux,extcon-name", &name) == 0) {
		chip->extcon = extcon_get_extcon_dev(name);
		if (IS_ERR(chip->extcon))
			return PTR_ERR(chip->extcon);
	}
	chip->vbus = devm_regulator_get(chip->dev, "vbus");
	if (IS_ERR(chip->vbus))
		return PTR_ERR(chip->vbus);
	chip->wq = create_singlethread_workqueue(dev_name(chip->dev));
	if (!chip->wq)
		return -ENOMEM;
	spin_lock_init(&chip->irq_lock);
	INIT_WORK(&chip->irq_work, fusb302_irq_work);
	INIT_DELAYED_WORK(&chip->bc_lvl_handler, fusb302_bc_lvl_handler_work);
	init_tcpc_dev(&chip->tcpc_dev);
	fusb302_debugfs_init(chip);
	/* prefer the i2c client's IRQ; fall back to the Int_N GPIO */
	if (client->irq) {
		chip->gpio_int_n_irq = client->irq;
	} else {
		ret = init_gpio(chip);
		if (ret < 0)
			goto destroy_workqueue;
	}
	chip->tcpc_dev.fwnode = fusb302_fwnode_get(dev);
	if (IS_ERR(chip->tcpc_dev.fwnode)) {
		ret = PTR_ERR(chip->tcpc_dev.fwnode);
		goto destroy_workqueue;
	}
	chip->tcpm_port = tcpm_register_port(&client->dev, &chip->tcpc_dev);
	if (IS_ERR(chip->tcpm_port)) {
		fwnode_handle_put(chip->tcpc_dev.fwnode);
		ret = dev_err_probe(dev, PTR_ERR(chip->tcpm_port),
				    "cannot register tcpm port\n");
		goto destroy_workqueue;
	}
	ret = request_irq(chip->gpio_int_n_irq, fusb302_irq_intn,
			  IRQF_ONESHOT | IRQF_TRIGGER_LOW,
			  "fsc_interrupt_int_n", chip);
	if (ret < 0) {
		dev_err(dev, "cannot request IRQ for GPIO Int_N, ret=%d", ret);
		goto tcpm_unregister_port;
	}
	enable_irq_wake(chip->gpio_int_n_irq);
	i2c_set_clientdata(client, chip);
	return ret;
tcpm_unregister_port:
	tcpm_unregister_port(chip->tcpm_port);
	fwnode_handle_put(chip->tcpc_dev.fwnode);
destroy_workqueue:
	fusb302_debugfs_exit(chip);
	destroy_workqueue(chip->wq);
	return ret;
}
/* i2c remove: tear down in reverse order of probe. */
static void fusb302_remove(struct i2c_client *client)
{
	struct fusb302_chip *chip = i2c_get_clientdata(client);
	disable_irq_wake(chip->gpio_int_n_irq);
	/* free the IRQ first so no new work can be scheduled */
	free_irq(chip->gpio_int_n_irq, chip);
	cancel_work_sync(&chip->irq_work);
	cancel_delayed_work_sync(&chip->bc_lvl_handler);
	tcpm_unregister_port(chip->tcpm_port);
	fwnode_handle_put(chip->tcpc_dev.fwnode);
	destroy_workqueue(chip->wq);
	fusb302_debugfs_exit(chip);
}
/*
 * PM suspend: mark IRQs as suspended so the hard IRQ handler only latches a
 * flag, then drain any in-flight irq_work before the I2C bus goes down.
 */
static int fusb302_pm_suspend(struct device *dev)
{
	struct fusb302_chip *chip = dev->driver_data;
	unsigned long flags;
	spin_lock_irqsave(&chip->irq_lock, flags);
	chip->irq_suspended = true;
	spin_unlock_irqrestore(&chip->irq_lock, flags);
	/* Make sure any pending irq_work is finished before the bus suspends */
	flush_work(&chip->irq_work);
	return 0;
}
/*
 * PM resume: replay an interrupt that fired while suspended (the handler
 * only latched irq_while_suspended) and re-enable normal IRQ scheduling.
 */
static int fusb302_pm_resume(struct device *dev)
{
	struct fusb302_chip *chip = dev->driver_data;
	unsigned long flags;
	spin_lock_irqsave(&chip->irq_lock, flags);
	if (chip->irq_while_suspended) {
		schedule_work(&chip->irq_work);
		chip->irq_while_suspended = false;
	}
	chip->irq_suspended = false;
	spin_unlock_irqrestore(&chip->irq_lock, flags);
	return 0;
}
/* Device match tables, PM hooks and i2c driver registration. */
static const struct of_device_id fusb302_dt_match[] __maybe_unused = {
	{.compatible = "fcs,fusb302"},
	{},
};
MODULE_DEVICE_TABLE(of, fusb302_dt_match);
static const struct i2c_device_id fusb302_i2c_device_id[] = {
	{"typec_fusb302", 0},
	{},
};
MODULE_DEVICE_TABLE(i2c, fusb302_i2c_device_id);
static const struct dev_pm_ops fusb302_pm_ops = {
	.suspend = fusb302_pm_suspend,
	.resume = fusb302_pm_resume,
};
static struct i2c_driver fusb302_driver = {
	.driver = {
		   .name = "typec_fusb302",
		   .pm = &fusb302_pm_ops,
		   .of_match_table = of_match_ptr(fusb302_dt_match),
		   },
	.probe = fusb302_probe,
	.remove = fusb302_remove,
	.id_table = fusb302_i2c_device_id,
};
module_i2c_driver(fusb302_driver);
MODULE_AUTHOR("Yueyao Zhu <[email protected]>");
MODULE_DESCRIPTION("Fairchild FUSB302 Type-C Chip Driver");
MODULE_LICENSE("GPL");
| linux-master | drivers/usb/typec/tcpm/fusb302.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* Copyright 2015-2017 Google, Inc
*
* USB Type-C Port Controller Interface.
*/
#include <linux/delay.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/i2c.h>
#include <linux/interrupt.h>
#include <linux/property.h>
#include <linux/regmap.h>
#include <linux/usb/pd.h>
#include <linux/usb/tcpci.h>
#include <linux/usb/tcpm.h>
#include <linux/usb/typec.h>
#define PD_RETRY_COUNT_DEFAULT 3
#define PD_RETRY_COUNT_3_0_OR_HIGHER 2
#define AUTO_DISCHARGE_DEFAULT_THRESHOLD_MV 3500
#define VSINKPD_MIN_IR_DROP_MV 750
#define VSRC_NEW_MIN_PERCENT 95
#define VSRC_VALID_MIN_MV 500
#define VPPS_NEW_MIN_PERCENT 95
#define VPPS_VALID_MIN_MV 100
#define VSINKDISCONNECT_PD_MIN_PERCENT 90
/* Per-port TCPCI state, embedding the tcpc_dev handed to TCPM. */
struct tcpci {
	struct device *dev;		/* parent device */
	struct tcpm_port *port;		/* registered TCPM port */
	struct regmap *regmap;		/* register access for the TCPC */
	unsigned int alert_mask;	/* cached ALERT mask value */
	bool controls_vbus;		/* TCPC sources/sinks VBUS itself */
	struct tcpc_dev tcpc;		/* ops table passed to TCPM */
	struct tcpci_data *data;	/* vendor-specific hooks/data */
};
/* Glue for the generic i2c driver: tcpci instance plus its tcpci_data. */
struct tcpci_chip {
	struct tcpci *tcpci;
	struct tcpci_data data;
};
/* Accessor for vendor glue drivers: the TCPM port behind a tcpci instance. */
struct tcpm_port *tcpci_get_tcpm_port(struct tcpci *tcpci)
{
	return tcpci->port;
}
EXPORT_SYMBOL_GPL(tcpci_get_tcpm_port);
/* Recover the tcpci container from the tcpc_dev TCPM passes to callbacks. */
static inline struct tcpci *tcpc_to_tcpci(struct tcpc_dev *tcpc)
{
	return container_of(tcpc, struct tcpci, tcpc);
}
/* Raw (byte-order preserving) 16-bit register read. */
static int tcpci_read16(struct tcpci *tcpci, unsigned int reg, u16 *val)
{
	return regmap_raw_read(tcpci->regmap, reg, val, sizeof(u16));
}
/* Raw (byte-order preserving) 16-bit register write. */
static int tcpci_write16(struct tcpci *tcpci, unsigned int reg, u16 val)
{
	return regmap_raw_write(tcpci->regmap, reg, &val, sizeof(u16));
}
/*
 * tcpc_dev callback: program ROLE_CONTROL for the requested CC termination
 * (Ra/Rd/Rp at the chosen level, or open).  While VCONN is being sourced,
 * the VCONN-carrying pin (opposite the orientation bit) is forced open so
 * the new termination never disturbs VCONN delivery.
 */
static int tcpci_set_cc(struct tcpc_dev *tcpc, enum typec_cc_status cc)
{
	struct tcpci *tcpci = tcpc_to_tcpci(tcpc);
	bool vconn_pres;
	enum typec_cc_polarity polarity = TYPEC_POLARITY_CC1;
	unsigned int reg;
	int ret;
	ret = regmap_read(tcpci->regmap, TCPC_POWER_STATUS, &reg);
	if (ret < 0)
		return ret;
	vconn_pres = !!(reg & TCPC_POWER_STATUS_VCONN_PRES);
	if (vconn_pres) {
		/* orientation bit tells us which pin carries communication */
		ret = regmap_read(tcpci->regmap, TCPC_TCPC_CTRL, &reg);
		if (ret < 0)
			return ret;
		if (reg & TCPC_TCPC_CTRL_ORIENTATION)
			polarity = TYPEC_POLARITY_CC2;
	}
	switch (cc) {
	case TYPEC_CC_RA:
		reg = (TCPC_ROLE_CTRL_CC_RA << TCPC_ROLE_CTRL_CC1_SHIFT) |
			(TCPC_ROLE_CTRL_CC_RA << TCPC_ROLE_CTRL_CC2_SHIFT);
		break;
	case TYPEC_CC_RD:
		reg = (TCPC_ROLE_CTRL_CC_RD << TCPC_ROLE_CTRL_CC1_SHIFT) |
			(TCPC_ROLE_CTRL_CC_RD << TCPC_ROLE_CTRL_CC2_SHIFT);
		break;
	case TYPEC_CC_RP_DEF:
		reg = (TCPC_ROLE_CTRL_CC_RP << TCPC_ROLE_CTRL_CC1_SHIFT) |
			(TCPC_ROLE_CTRL_CC_RP << TCPC_ROLE_CTRL_CC2_SHIFT) |
			(TCPC_ROLE_CTRL_RP_VAL_DEF <<
			 TCPC_ROLE_CTRL_RP_VAL_SHIFT);
		break;
	case TYPEC_CC_RP_1_5:
		reg = (TCPC_ROLE_CTRL_CC_RP << TCPC_ROLE_CTRL_CC1_SHIFT) |
			(TCPC_ROLE_CTRL_CC_RP << TCPC_ROLE_CTRL_CC2_SHIFT) |
			(TCPC_ROLE_CTRL_RP_VAL_1_5 <<
			 TCPC_ROLE_CTRL_RP_VAL_SHIFT);
		break;
	case TYPEC_CC_RP_3_0:
		reg = (TCPC_ROLE_CTRL_CC_RP << TCPC_ROLE_CTRL_CC1_SHIFT) |
			(TCPC_ROLE_CTRL_CC_RP << TCPC_ROLE_CTRL_CC2_SHIFT) |
			(TCPC_ROLE_CTRL_RP_VAL_3_0 <<
			 TCPC_ROLE_CTRL_RP_VAL_SHIFT);
		break;
	case TYPEC_CC_OPEN:
	default:
		reg = (TCPC_ROLE_CTRL_CC_OPEN << TCPC_ROLE_CTRL_CC1_SHIFT) |
			(TCPC_ROLE_CTRL_CC_OPEN << TCPC_ROLE_CTRL_CC2_SHIFT);
		break;
	}
	if (vconn_pres) {
		/* leave the VCONN pin open so VCONN sourcing is undisturbed */
		if (polarity == TYPEC_POLARITY_CC2) {
			reg &= ~(TCPC_ROLE_CTRL_CC1_MASK << TCPC_ROLE_CTRL_CC1_SHIFT);
			reg |= (TCPC_ROLE_CTRL_CC_OPEN << TCPC_ROLE_CTRL_CC1_SHIFT);
		} else {
			reg &= ~(TCPC_ROLE_CTRL_CC2_MASK << TCPC_ROLE_CTRL_CC2_SHIFT);
			reg |= (TCPC_ROLE_CTRL_CC_OPEN << TCPC_ROLE_CTRL_CC2_SHIFT);
		}
	}
	ret = regmap_write(tcpci->regmap, TCPC_ROLE_CTRL, reg);
	if (ret < 0)
		return ret;
	return 0;
}
/*
 * tcpci_apply_rc() - enter the APPLY_RC state by opening the CC line
 * opposite to @polarity while keeping the active line's termination.
 *
 * APPLY_RC is when ROLE_CONTROL.CC1 != ROLE_CONTROL.CC2 with vbus
 * auto-discharge on disconnect disabled; bail out early if the two
 * fields already differ. Returns 0 on success or a negative regmap error.
 */
static int tcpci_apply_rc(struct tcpc_dev *tcpc, enum typec_cc_status cc,
			  enum typec_cc_polarity polarity)
{
	struct tcpci *tcpci = tcpc_to_tcpci(tcpc);
	unsigned int mask, open_val;
	unsigned int reg;
	int ret;

	ret = regmap_read(tcpci->regmap, TCPC_ROLE_CTRL, &reg);
	if (ret < 0)
		return ret;

	/*
	 * APPLY_RC state is when ROLE_CONTROL.CC1 != ROLE_CONTROL.CC2 and vbus autodischarge on
	 * disconnect is disabled. Bail out when ROLE_CONTROL.CC1 != ROLE_CONTROL.CC2.
	 */
	if (((reg & (TCPC_ROLE_CTRL_CC2_MASK << TCPC_ROLE_CTRL_CC2_SHIFT)) >>
	     TCPC_ROLE_CTRL_CC2_SHIFT) !=
	    ((reg & (TCPC_ROLE_CTRL_CC1_MASK << TCPC_ROLE_CTRL_CC1_SHIFT)) >>
	     TCPC_ROLE_CTRL_CC1_SHIFT))
		return 0;

	/*
	 * Fix: the Open value must be shifted into the field selected by the
	 * mask. The unshifted TCPC_ROLE_CTRL_CC_OPEN was masked away for the
	 * CC2 field, so Ra (0b00) was written instead of Open (0b11 per the
	 * TCPCI ROLE_CONTROL encoding).
	 */
	if (polarity == TYPEC_POLARITY_CC1) {
		mask = TCPC_ROLE_CTRL_CC2_MASK << TCPC_ROLE_CTRL_CC2_SHIFT;
		open_val = TCPC_ROLE_CTRL_CC_OPEN << TCPC_ROLE_CTRL_CC2_SHIFT;
	} else {
		mask = TCPC_ROLE_CTRL_CC1_MASK << TCPC_ROLE_CTRL_CC1_SHIFT;
		open_val = TCPC_ROLE_CTRL_CC_OPEN << TCPC_ROLE_CTRL_CC1_SHIFT;
	}

	return regmap_update_bits(tcpci->regmap, TCPC_ROLE_CTRL, mask, open_val);
}
/*
 * tcpci_start_toggling() - start autonomous DRP connection detection.
 *
 * Only DRP ports are supported. @cc chooses the initial termination and,
 * for Rp, the advertised current level; the LOOK4CONNECTION command then
 * lets the TCPC toggle between Rp and Rd on its own.
 */
static int tcpci_start_toggling(struct tcpc_dev *tcpc,
				enum typec_port_type port_type,
				enum typec_cc_status cc)
{
	int ret;
	struct tcpci *tcpci = tcpc_to_tcpci(tcpc);
	unsigned int reg = TCPC_ROLE_CTRL_DRP;

	if (port_type != TYPEC_PORT_DRP)
		return -EOPNOTSUPP;

	/* Handle vendor drp toggling */
	if (tcpci->data->start_drp_toggling) {
		ret = tcpci->data->start_drp_toggling(tcpci, tcpci->data, cc);
		if (ret < 0)
			return ret;
	}

	/* Rp current advertisement; anything unrecognized falls back to default */
	switch (cc) {
	default:
	case TYPEC_CC_RP_DEF:
		reg |= (TCPC_ROLE_CTRL_RP_VAL_DEF <<
			TCPC_ROLE_CTRL_RP_VAL_SHIFT);
		break;
	case TYPEC_CC_RP_1_5:
		reg |= (TCPC_ROLE_CTRL_RP_VAL_1_5 <<
			TCPC_ROLE_CTRL_RP_VAL_SHIFT);
		break;
	case TYPEC_CC_RP_3_0:
		reg |= (TCPC_ROLE_CTRL_RP_VAL_3_0 <<
			TCPC_ROLE_CTRL_RP_VAL_SHIFT);
		break;
	}

	/* Termination toggling starts from: Rd when requested, Rp otherwise */
	if (cc == TYPEC_CC_RD)
		reg |= (TCPC_ROLE_CTRL_CC_RD << TCPC_ROLE_CTRL_CC1_SHIFT) |
			   (TCPC_ROLE_CTRL_CC_RD << TCPC_ROLE_CTRL_CC2_SHIFT);
	else
		reg |= (TCPC_ROLE_CTRL_CC_RP << TCPC_ROLE_CTRL_CC1_SHIFT) |
			   (TCPC_ROLE_CTRL_CC_RP << TCPC_ROLE_CTRL_CC2_SHIFT);
	ret = regmap_write(tcpci->regmap, TCPC_ROLE_CTRL, reg);
	if (ret < 0)
		return ret;
	return regmap_write(tcpci->regmap, TCPC_COMMAND,
			    TCPC_CMD_LOOK4CONNECTION);
}
/*
 * tcpci_get_cc() - read the current CC1/CC2 line states.
 *
 * CC_STATUS is decoded relative to the presented termination: a pin is
 * treated as sink-side when CC_STATUS.TERM is set or ROLE_CONTROL shows
 * Rd presented on that pin. Returns 0 or a negative regmap error.
 */
static int tcpci_get_cc(struct tcpc_dev *tcpc,
			enum typec_cc_status *cc1, enum typec_cc_status *cc2)
{
	struct tcpci *tcpci = tcpc_to_tcpci(tcpc);
	unsigned int reg, role_control;
	int ret;

	ret = regmap_read(tcpci->regmap, TCPC_ROLE_CTRL, &role_control);
	if (ret < 0)
		return ret;

	ret = regmap_read(tcpci->regmap, TCPC_CC_STATUS, &reg);
	if (ret < 0)
		return ret;

	*cc1 = tcpci_to_typec_cc((reg >> TCPC_CC_STATUS_CC1_SHIFT) &
				 TCPC_CC_STATUS_CC1_MASK,
				 reg & TCPC_CC_STATUS_TERM ||
				 tcpc_presenting_rd(role_control, CC1));
	*cc2 = tcpci_to_typec_cc((reg >> TCPC_CC_STATUS_CC2_SHIFT) &
				 TCPC_CC_STATUS_CC2_MASK,
				 reg & TCPC_CC_STATUS_TERM ||
				 tcpc_presenting_rd(role_control, CC2));

	return 0;
}
/*
 * tcpci_set_polarity() - commit the resolved CC orientation.
 *
 * Opens the unused CC pin, derives the termination for the active pin
 * (turning off DRP toggling if it was running) and programs the
 * ORIENTATION bit so the PHY listens on the right line.
 * Returns 0 on success or a negative regmap error.
 */
static int tcpci_set_polarity(struct tcpc_dev *tcpc,
			      enum typec_cc_polarity polarity)
{
	struct tcpci *tcpci = tcpc_to_tcpci(tcpc);
	unsigned int reg;
	int ret;
	enum typec_cc_status cc1, cc2;

	/* Obtain Rp setting from role control */
	ret = regmap_read(tcpci->regmap, TCPC_ROLE_CTRL, &reg);
	if (ret < 0)
		return ret;

	ret = tcpci_get_cc(tcpc, &cc1, &cc2);
	if (ret < 0)
		return ret;

	/*
	 * When port has drp toggling enabled, ROLE_CONTROL would only have the initial
	 * terminations for the toggling and does not indicate the final cc
	 * terminations when ConnectionResult is 0 i.e. drp toggling stops and
	 * the connection is resolved. Infer port role from TCPC_CC_STATUS based on the
	 * terminations seen. The port role is then used to set the cc terminations.
	 */
	if (reg & TCPC_ROLE_CTRL_DRP) {
		/* Disable DRP for the OPEN setting to take effect */
		reg = reg & ~TCPC_ROLE_CTRL_DRP;

		if (polarity == TYPEC_POLARITY_CC2) {
			reg &= ~(TCPC_ROLE_CTRL_CC2_MASK << TCPC_ROLE_CTRL_CC2_SHIFT);
			/* Local port is source */
			if (cc2 == TYPEC_CC_RD)
				/* Role control would have the Rp setting when DRP was enabled */
				reg |= TCPC_ROLE_CTRL_CC_RP << TCPC_ROLE_CTRL_CC2_SHIFT;
			else
				reg |= TCPC_ROLE_CTRL_CC_RD << TCPC_ROLE_CTRL_CC2_SHIFT;
		} else {
			reg &= ~(TCPC_ROLE_CTRL_CC1_MASK << TCPC_ROLE_CTRL_CC1_SHIFT);
			/* Local port is source */
			if (cc1 == TYPEC_CC_RD)
				/* Role control would have the Rp setting when DRP was enabled */
				reg |= TCPC_ROLE_CTRL_CC_RP << TCPC_ROLE_CTRL_CC1_SHIFT;
			else
				reg |= TCPC_ROLE_CTRL_CC_RD << TCPC_ROLE_CTRL_CC1_SHIFT;
		}
	}

	/* Always open the pin that is not in use */
	if (polarity == TYPEC_POLARITY_CC2)
		reg |= TCPC_ROLE_CTRL_CC_OPEN << TCPC_ROLE_CTRL_CC1_SHIFT;
	else
		reg |= TCPC_ROLE_CTRL_CC_OPEN << TCPC_ROLE_CTRL_CC2_SHIFT;
	ret = regmap_write(tcpci->regmap, TCPC_ROLE_CTRL, reg);
	if (ret < 0)
		return ret;

	return regmap_write(tcpci->regmap, TCPC_TCPC_CTRL,
			    (polarity == TYPEC_POLARITY_CC2) ?
			    TCPC_TCPC_CTRL_ORIENTATION : 0);
}
/*
 * tcpci_set_partner_usb_comm_capable() - forward the partner's USB comm
 * capability to the vendor glue; no generic TCPCI register exists for it.
 */
static void tcpci_set_partner_usb_comm_capable(struct tcpc_dev *tcpc, bool capable)
{
	struct tcpci *tcpci = tcpc_to_tcpci(tcpc);

	if (!tcpci->data->set_partner_usb_comm_capable)
		return;

	tcpci->data->set_partner_usb_comm_capable(tcpci, tcpci->data, capable);
}
/*
 * tcpci_set_vconn() - switch the VCONN supply on or off.
 *
 * The vendor glue gets a chance to switch its external VCONN source
 * first; then the TCPC's own VCONN enable bit is updated.
 */
static int tcpci_set_vconn(struct tcpc_dev *tcpc, bool enable)
{
	struct tcpci *tcpci = tcpc_to_tcpci(tcpc);
	unsigned int val = enable ? TCPC_POWER_CTRL_VCONN_ENABLE : 0;
	int err;

	if (tcpci->data->set_vconn) {
		err = tcpci->data->set_vconn(tcpci, tcpci->data, enable);
		if (err < 0)
			return err;
	}

	return regmap_update_bits(tcpci->regmap, TCPC_POWER_CTRL,
				  TCPC_POWER_CTRL_VCONN_ENABLE, val);
}
/*
 * tcpci_enable_auto_vbus_discharge() - toggle hardware vbus
 * auto-discharge-on-disconnect in POWER_CONTROL.
 */
static int tcpci_enable_auto_vbus_discharge(struct tcpc_dev *dev, bool enable)
{
	struct tcpci *tcpci = tcpc_to_tcpci(dev);
	unsigned int val = enable ? TCPC_POWER_CTRL_AUTO_DISCHARGE : 0;

	return regmap_update_bits(tcpci->regmap, TCPC_POWER_CTRL,
				  TCPC_POWER_CTRL_AUTO_DISCHARGE, val);
}
/*
 * tcpci_set_auto_vbus_discharge_threshold() - program the vbus disconnect
 * threshold used by the auto-discharge logic.
 *
 * Threshold selection: 3.5 V for non-PD sinks and for FRS-capable sources,
 * otherwise a percentage of the requested voltage minus IR-drop and
 * validity margins (PD fixed vs PPS). A request of 0 V means vbus is about
 * to go away deliberately (PR_SWAP, hard reset), so 0 is written to avoid
 * triggering a discharge. Returns 0 or a negative error.
 */
static int tcpci_set_auto_vbus_discharge_threshold(struct tcpc_dev *dev, enum typec_pwr_opmode mode,
						   bool pps_active, u32 requested_vbus_voltage_mv)
{
	struct tcpci *tcpci = tcpc_to_tcpci(dev);
	unsigned int pwr_ctrl, threshold = 0;
	int ret;

	/*
	 * Indicates that vbus is going to go away due PR_SWAP, hard reset etc.
	 * Do not discharge vbus here.
	 */
	if (requested_vbus_voltage_mv == 0)
		goto write_thresh;

	ret = regmap_read(tcpci->regmap, TCPC_POWER_CTRL, &pwr_ctrl);
	if (ret < 0)
		return ret;

	if (pwr_ctrl & TCPC_FAST_ROLE_SWAP_EN) {
		/* To prevent disconnect when the source is fast role swap is capable. */
		threshold = AUTO_DISCHARGE_DEFAULT_THRESHOLD_MV;
	} else if (mode == TYPEC_PWR_MODE_PD) {
		if (pps_active)
			threshold = ((VPPS_NEW_MIN_PERCENT * requested_vbus_voltage_mv / 100) -
				     VSINKPD_MIN_IR_DROP_MV - VPPS_VALID_MIN_MV) *
				     VSINKDISCONNECT_PD_MIN_PERCENT / 100;
		else
			threshold = ((VSRC_NEW_MIN_PERCENT * requested_vbus_voltage_mv / 100) -
				     VSINKPD_MIN_IR_DROP_MV - VSRC_VALID_MIN_MV) *
				     VSINKDISCONNECT_PD_MIN_PERCENT / 100;
	} else {
		/* 3.5V for non-pd sink */
		threshold = AUTO_DISCHARGE_DEFAULT_THRESHOLD_MV;
	}

	/* Convert mV to register LSB units; reject out-of-range results */
	threshold = threshold / TCPC_VBUS_SINK_DISCONNECT_THRESH_LSB_MV;

	if (threshold > TCPC_VBUS_SINK_DISCONNECT_THRESH_MAX)
		return -EINVAL;

write_thresh:
	return tcpci_write16(tcpci, TCPC_VBUS_SINK_DISCONNECT_THRESH, threshold);
}
/*
 * tcpci_enable_frs() - enable/disable fast role swap in the TCPC.
 *
 * Returns 0 on success or a negative regmap error.
 */
static int tcpci_enable_frs(struct tcpc_dev *dev, bool enable)
{
	struct tcpci *tcpci = tcpc_to_tcpci(dev);
	int ret;

	/* To prevent disconnect during FRS, set disconnect threshold to 3.5V */
	/*
	 * NOTE(review): 0x8c (140) is presumably 3.5 V expressed in the
	 * register's 25 mV LSB — confirm against the TCPCI spec.
	 */
	ret = tcpci_write16(tcpci, TCPC_VBUS_SINK_DISCONNECT_THRESH, enable ? 0 : 0x8c);
	if (ret < 0)
		return ret;

	ret = regmap_update_bits(tcpci->regmap, TCPC_POWER_CTRL, TCPC_FAST_ROLE_SWAP_EN, enable ?
				 TCPC_FAST_ROLE_SWAP_EN : 0);

	return ret;
}
/*
 * tcpci_frs_sourcing_vbus() - notify the vendor glue that vbus sourcing
 * started as part of a fast role swap (optional hook).
 */
static void tcpci_frs_sourcing_vbus(struct tcpc_dev *dev)
{
	struct tcpci *tcpci = tcpc_to_tcpci(dev);

	if (!tcpci->data->frs_sourcing_vbus)
		return;

	tcpci->data->frs_sourcing_vbus(tcpci, tcpci->data);
}
/*
 * tcpci_check_contaminant() - delegate contaminant detection to the
 * vendor glue (optional hook; there is no generic TCPCI mechanism).
 */
static void tcpci_check_contaminant(struct tcpc_dev *dev)
{
	struct tcpci *tcpci = tcpc_to_tcpci(dev);

	if (!tcpci->data->check_contaminant)
		return;

	tcpci->data->check_contaminant(tcpci, tcpci->data);
}
/*
 * tcpci_set_bist_data() - enter/leave BIST test data mode so received
 * BIST frames are not forwarded to the protocol layer.
 */
static int tcpci_set_bist_data(struct tcpc_dev *tcpc, bool enable)
{
	struct tcpci *tcpci = tcpc_to_tcpci(tcpc);
	unsigned int val = enable ? TCPC_TCPC_CTRL_BIST_TM : 0;

	return regmap_update_bits(tcpci->regmap, TCPC_TCPC_CTRL,
				  TCPC_TCPC_CTRL_BIST_TM, val);
}
/*
 * tcpci_set_roles() - program the message-header info the TCPC uses when
 * generating GoodCRC (PD revision plus power/data role bits).
 */
static int tcpci_set_roles(struct tcpc_dev *tcpc, bool attached,
			   enum typec_role role, enum typec_data_role data)
{
	struct tcpci *tcpci = tcpc_to_tcpci(tcpc);
	unsigned int hdr_info = PD_REV20 << TCPC_MSG_HDR_INFO_REV_SHIFT;
	int err;

	if (role == TYPEC_SOURCE)
		hdr_info |= TCPC_MSG_HDR_INFO_PWR_ROLE;
	if (data == TYPEC_HOST)
		hdr_info |= TCPC_MSG_HDR_INFO_DATA_ROLE;

	err = regmap_write(tcpci->regmap, TCPC_MSG_HDR_INFO, hdr_info);
	return err < 0 ? err : 0;
}
/*
 * tcpci_set_pd_rx() - enable or disable PD message reception.
 *
 * When enabled, the receiver accepts SOP messages and hard resets;
 * when disabled, RECEIVE_DETECT is cleared so nothing is received.
 */
static int tcpci_set_pd_rx(struct tcpc_dev *tcpc, bool enable)
{
	struct tcpci *tcpci = tcpc_to_tcpci(tcpc);
	unsigned int detect = 0;
	int err;

	if (enable)
		detect = TCPC_RX_DETECT_SOP | TCPC_RX_DETECT_HARD_RESET;

	err = regmap_write(tcpci->regmap, TCPC_RX_DETECT, detect);
	return err < 0 ? err : 0;
}
/*
 * tcpci_get_vbus() - query vbus presence from POWER_STATUS.
 *
 * Returns 1 when vbus is present, 0 when absent, negative on read error.
 */
static int tcpci_get_vbus(struct tcpc_dev *tcpc)
{
	struct tcpci *tcpci = tcpc_to_tcpci(tcpc);
	unsigned int status;
	int err;

	err = regmap_read(tcpci->regmap, TCPC_POWER_STATUS, &status);
	if (err < 0)
		return err;

	return (status & TCPC_POWER_STATUS_VBUS_PRES) ? 1 : 0;
}
/*
 * tcpci_is_vbus_vsafe0v() - check whether vbus is at vSafe0V.
 *
 * A failed register read is reported as "not at vSafe0V".
 */
static bool tcpci_is_vbus_vsafe0v(struct tcpc_dev *tcpc)
{
	struct tcpci *tcpci = tcpc_to_tcpci(tcpc);
	unsigned int status;

	if (regmap_read(tcpci->regmap, TCPC_EXTENDED_STATUS, &status) < 0)
		return false;

	return (status & TCPC_EXTENDED_STATUS_VSAFE0V) != 0;
}
/*
 * tcpci_set_vbus() - switch vbus sourcing/sinking via TCPC commands.
 *
 * An optional vendor hook may handle the request itself; a positive
 * return from it bypasses the generic command sequence. Disables are
 * issued before enables so source and sink are never active together.
 * Returns 0 or a negative error.
 */
static int tcpci_set_vbus(struct tcpc_dev *tcpc, bool source, bool sink)
{
	struct tcpci *tcpci = tcpc_to_tcpci(tcpc);
	int ret;

	if (tcpci->data->set_vbus) {
		ret = tcpci->data->set_vbus(tcpci, tcpci->data, source, sink);
		/* Bypass when ret > 0 */
		if (ret != 0)
			return ret < 0 ? ret : 0;
	}

	/* Disable both source and sink first before enabling anything */

	if (!source) {
		ret = regmap_write(tcpci->regmap, TCPC_COMMAND,
				   TCPC_CMD_DISABLE_SRC_VBUS);
		if (ret < 0)
			return ret;
	}

	if (!sink) {
		ret = regmap_write(tcpci->regmap, TCPC_COMMAND,
				   TCPC_CMD_DISABLE_SINK_VBUS);
		if (ret < 0)
			return ret;
	}

	if (source) {
		ret = regmap_write(tcpci->regmap, TCPC_COMMAND,
				   TCPC_CMD_SRC_VBUS_DEFAULT);
		if (ret < 0)
			return ret;
	}

	if (sink) {
		ret = regmap_write(tcpci->regmap, TCPC_COMMAND,
				   TCPC_CMD_SINK_VBUS);
		if (ret < 0)
			return ret;
	}

	return 0;
}
/*
 * tcpci_pd_transmit() - queue a PD message (or reset signalling) for TX.
 *
 * Writes byte count, header and payload into the transmit buffer, then
 * kicks TCPC_TRANSMIT with the retry count mandated by the negotiated PD
 * revision. Completion is reported later via the TX alert bits.
 */
static int tcpci_pd_transmit(struct tcpc_dev *tcpc, enum tcpm_transmit_type type,
			     const struct pd_message *msg, unsigned int negotiated_rev)
{
	struct tcpci *tcpci = tcpc_to_tcpci(tcpc);
	u16 header = msg ? le16_to_cpu(msg->header) : 0;
	unsigned int reg, cnt;
	int ret;

	/* Payload length: 4 bytes per data object; 0 for control messages */
	cnt = msg ? pd_header_cnt(header) * 4 : 0;
	/*
	 * TCPCI spec forbids direct access of TCPC_TX_DATA.
	 * But, since some of the chipsets offer this capability,
	 * it's fair to support both.
	 */
	if (tcpci->data->TX_BUF_BYTE_x_hidden) {
		/* One raw burst covering byte count, header and payload */
		u8 buf[TCPC_TRANSMIT_BUFFER_MAX_LEN] = {0,};
		u8 pos = 0;

		/* Payload + header + TCPC_TX_BYTE_CNT */
		buf[pos++] = cnt + 2;

		if (msg)
			memcpy(&buf[pos], &msg->header, sizeof(msg->header));

		pos += sizeof(header);

		if (cnt > 0)
			memcpy(&buf[pos], msg->payload, cnt);

		pos += cnt;
		ret = regmap_raw_write(tcpci->regmap, TCPC_TX_BYTE_CNT, buf, pos);
		if (ret < 0)
			return ret;
	} else {
		ret = regmap_write(tcpci->regmap, TCPC_TX_BYTE_CNT, cnt + 2);
		if (ret < 0)
			return ret;

		ret = tcpci_write16(tcpci, TCPC_TX_HDR, header);
		if (ret < 0)
			return ret;

		if (cnt > 0) {
			ret = regmap_raw_write(tcpci->regmap, TCPC_TX_DATA, &msg->payload, cnt);
			if (ret < 0)
				return ret;
		}
	}

	/* nRetryCount is 3 in PD2.0 spec where 2 in PD3.0 spec */
	reg = ((negotiated_rev > PD_REV20 ? PD_RETRY_COUNT_3_0_OR_HIGHER : PD_RETRY_COUNT_DEFAULT)
	       << TCPC_TRANSMIT_RETRY_SHIFT) | (type << TCPC_TRANSMIT_TYPE_SHIFT);
	ret = regmap_write(tcpci->regmap, TCPC_TRANSMIT, reg);
	if (ret < 0)
		return ret;

	return 0;
}
/*
 * tcpci_init() - bring the TCPC out of reset and arm its interrupts.
 *
 * Waits up to 2 s for POWER_STATUS.UNINIT to clear, resets fault status,
 * runs the vendor init hook, clears stale alerts, enables vbus detection
 * and programs ALERT_MASK. Returns 0 or a negative error.
 */
static int tcpci_init(struct tcpc_dev *tcpc)
{
	struct tcpci *tcpci = tcpc_to_tcpci(tcpc);
	unsigned long timeout = jiffies + msecs_to_jiffies(2000); /* XXX */
	unsigned int reg;
	int ret;

	/* Poll until the chip reports its initialization is complete */
	while (time_before_eq(jiffies, timeout)) {
		ret = regmap_read(tcpci->regmap, TCPC_POWER_STATUS, &reg);
		if (ret < 0)
			return ret;
		if (!(reg & TCPC_POWER_STATUS_UNINIT))
			break;
		usleep_range(10000, 20000);
	}
	if (time_after(jiffies, timeout))
		return -ETIMEDOUT;

	ret = tcpci_write16(tcpci, TCPC_FAULT_STATUS, TCPC_FAULT_STATUS_ALL_REG_RST_TO_DEFAULT);
	if (ret < 0)
		return ret;

	/* Handle vendor init */
	if (tcpci->data->init) {
		ret = tcpci->data->init(tcpci, tcpci->data);
		if (ret < 0)
			return ret;
	}

	/* Clear all events */
	ret = tcpci_write16(tcpci, TCPC_ALERT, 0xffff);
	if (ret < 0)
		return ret;

	/* Unmask vbus-present changes only when this TCPC controls vbus */
	if (tcpci->controls_vbus)
		reg = TCPC_POWER_STATUS_VBUS_PRES;
	else
		reg = 0;
	ret = regmap_write(tcpci->regmap, TCPC_POWER_STATUS_MASK, reg);
	if (ret < 0)
		return ret;

	/* Enable Vbus detection */
	ret = regmap_write(tcpci->regmap, TCPC_COMMAND,
			   TCPC_CMD_ENABLE_VBUS_DETECT);
	if (ret < 0)
		return ret;

	reg = TCPC_ALERT_TX_SUCCESS | TCPC_ALERT_TX_FAILED |
		TCPC_ALERT_TX_DISCARDED | TCPC_ALERT_RX_STATUS |
		TCPC_ALERT_RX_HARD_RST | TCPC_ALERT_CC_STATUS;
	if (tcpci->controls_vbus)
		reg |= TCPC_ALERT_POWER_STATUS;
	/* Enable VSAFE0V status interrupt when detecting VSAFE0V is supported */
	if (tcpci->data->vbus_vsafe0v) {
		reg |= TCPC_ALERT_EXTENDED_STATUS;
		ret = regmap_write(tcpci->regmap, TCPC_EXTENDED_STATUS_MASK,
				   TCPC_EXTENDED_STATUS_VSAFE0V);
		if (ret < 0)
			return ret;
	}
	/* Remember the mask so the IRQ handler can flag spurious alerts */
	tcpci->alert_mask = reg;
	return tcpci_write16(tcpci, TCPC_ALERT_MASK, reg);
}
/*
 * tcpci_irq() - shared TCPC alert handler; dispatches events to TCPM.
 *
 * All alert bits except RX_STATUS are acknowledged up front; RX_STATUS
 * is cleared only after the received message has been read out.
 * Returns IRQ_HANDLED when any bit of the programmed alert mask fired.
 */
irqreturn_t tcpci_irq(struct tcpci *tcpci)
{
	u16 status;
	int ret;
	unsigned int raw;

	/* NOTE(review): register read/write errors below are ignored — confirm intended */
	tcpci_read16(tcpci, TCPC_ALERT, &status);
	/*
	 * Clear alert status for everything except RX_STATUS, which shouldn't
	 * be cleared until we have successfully retrieved message.
	 */
	if (status & ~TCPC_ALERT_RX_STATUS)
		tcpci_write16(tcpci, TCPC_ALERT,
			      status & ~TCPC_ALERT_RX_STATUS);

	if (status & TCPC_ALERT_CC_STATUS)
		tcpm_cc_change(tcpci->port);

	if (status & TCPC_ALERT_POWER_STATUS) {
		regmap_read(tcpci->regmap, TCPC_POWER_STATUS_MASK, &raw);
		/*
		 * If power status mask has been reset, then the TCPC
		 * has reset.
		 */
		if (raw == 0xff)
			tcpm_tcpc_reset(tcpci->port);
		else
			tcpm_vbus_change(tcpci->port);
	}

	if (status & TCPC_ALERT_RX_STATUS) {
		struct pd_message msg;
		unsigned int cnt, payload_cnt;
		u16 header;

		regmap_read(tcpci->regmap, TCPC_RX_BYTE_CNT, &cnt);
		/*
		 * 'cnt' corresponds to READABLE_BYTE_COUNT in section 4.4.14
		 * of the TCPCI spec [Rev 2.0 Ver 1.0 October 2017] and is
		 * defined in table 4-36 as one greater than the number of
		 * bytes received. And that number includes the header. So:
		 */
		if (cnt > 3)
			payload_cnt = cnt - (1 + sizeof(msg.header));
		else
			payload_cnt = 0;

		tcpci_read16(tcpci, TCPC_RX_HDR, &header);
		msg.header = cpu_to_le16(header);

		/* Guard against a corrupt byte count overrunning the payload */
		if (WARN_ON(payload_cnt > sizeof(msg.payload)))
			payload_cnt = sizeof(msg.payload);

		if (payload_cnt > 0)
			regmap_raw_read(tcpci->regmap, TCPC_RX_DATA,
					&msg.payload, payload_cnt);

		/* Read complete, clear RX status alert bit */
		tcpci_write16(tcpci, TCPC_ALERT, TCPC_ALERT_RX_STATUS);

		tcpm_pd_receive(tcpci->port, &msg);
	}

	if (tcpci->data->vbus_vsafe0v && (status & TCPC_ALERT_EXTENDED_STATUS)) {
		ret = regmap_read(tcpci->regmap, TCPC_EXTENDED_STATUS, &raw);
		if (!ret && (raw & TCPC_EXTENDED_STATUS_VSAFE0V))
			tcpm_vbus_change(tcpci->port);
	}

	if (status & TCPC_ALERT_RX_HARD_RST)
		tcpm_pd_hard_reset(tcpci->port);

	/* TX outcomes are exclusive; report success over discard over failure */
	if (status & TCPC_ALERT_TX_SUCCESS)
		tcpm_pd_transmit_complete(tcpci->port, TCPC_TX_SUCCESS);
	else if (status & TCPC_ALERT_TX_DISCARDED)
		tcpm_pd_transmit_complete(tcpci->port, TCPC_TX_DISCARDED);
	else if (status & TCPC_ALERT_TX_FAILED)
		tcpm_pd_transmit_complete(tcpci->port, TCPC_TX_FAILED);

	return IRQ_RETVAL(status & tcpci->alert_mask);
}
EXPORT_SYMBOL_GPL(tcpci_irq);
/* IRQ thread shim: unwrap the chip cookie and run the shared handler. */
static irqreturn_t _tcpci_irq(int irq, void *dev_id)
{
	struct tcpci_chip *chip = dev_id;
	struct tcpci *tcpci = chip->tcpci;

	return tcpci_irq(tcpci);
}
/* 8-bit address / 8-bit value I2C register map for standard TCPCI parts. */
static const struct regmap_config tcpci_regmap_config = {
	.reg_bits = 8,
	.val_bits = 8,
	.max_register = 0x7F, /* 0x80 .. 0xFF are vendor defined */
};
/*
 * tcpci_parse_config() - pull port configuration from firmware.
 *
 * Looks up the "connector" child fwnode (reference is dropped in
 * tcpci_unregister_port()). Returns 0 or -EINVAL when it is missing.
 */
static int tcpci_parse_config(struct tcpci *tcpci)
{
	struct fwnode_handle *fwnode;

	tcpci->controls_vbus = true; /* XXX */

	fwnode = device_get_named_child_node(tcpci->dev, "connector");
	if (!fwnode) {
		dev_err(tcpci->dev, "Can't find connector node.\n");
		return -EINVAL;
	}

	tcpci->tcpc.fwnode = fwnode;
	return 0;
}
/*
 * tcpci_register_port() - create and register a TCPM port for a TCPC.
 *
 * Fills the tcpc_dev ops table, installs optional ops based on @data
 * capabilities, parses the "connector" fwnode and registers with TCPM.
 * Returns the tcpci handle or an ERR_PTR; memory is devm-managed but
 * the port itself must be torn down with tcpci_unregister_port().
 */
struct tcpci *tcpci_register_port(struct device *dev, struct tcpci_data *data)
{
	struct tcpci *tcpci;
	int err;

	tcpci = devm_kzalloc(dev, sizeof(*tcpci), GFP_KERNEL);
	if (!tcpci)
		return ERR_PTR(-ENOMEM);

	tcpci->dev = dev;
	tcpci->data = data;
	tcpci->regmap = data->regmap;
	tcpci->tcpc.init = tcpci_init;
	tcpci->tcpc.get_vbus = tcpci_get_vbus;
	tcpci->tcpc.set_vbus = tcpci_set_vbus;
	tcpci->tcpc.set_cc = tcpci_set_cc;
	tcpci->tcpc.apply_rc = tcpci_apply_rc;
	tcpci->tcpc.get_cc = tcpci_get_cc;
	tcpci->tcpc.set_polarity = tcpci_set_polarity;
	tcpci->tcpc.set_vconn = tcpci_set_vconn;
	tcpci->tcpc.start_toggling = tcpci_start_toggling;
	tcpci->tcpc.set_pd_rx = tcpci_set_pd_rx;
	tcpci->tcpc.set_roles = tcpci_set_roles;
	tcpci->tcpc.pd_transmit = tcpci_pd_transmit;
	tcpci->tcpc.set_bist_data = tcpci_set_bist_data;
	tcpci->tcpc.enable_frs = tcpci_enable_frs;
	tcpci->tcpc.frs_sourcing_vbus = tcpci_frs_sourcing_vbus;
	tcpci->tcpc.set_partner_usb_comm_capable = tcpci_set_partner_usb_comm_capable;

	/* Optional ops are installed only when the glue advertises them */
	if (tcpci->data->check_contaminant)
		tcpci->tcpc.check_contaminant = tcpci_check_contaminant;

	if (tcpci->data->auto_discharge_disconnect) {
		tcpci->tcpc.enable_auto_vbus_discharge = tcpci_enable_auto_vbus_discharge;
		tcpci->tcpc.set_auto_vbus_discharge_threshold =
			tcpci_set_auto_vbus_discharge_threshold;
		regmap_update_bits(tcpci->regmap, TCPC_POWER_CTRL, TCPC_POWER_CTRL_BLEED_DISCHARGE,
				   TCPC_POWER_CTRL_BLEED_DISCHARGE);
	}

	if (tcpci->data->vbus_vsafe0v)
		tcpci->tcpc.is_vbus_vsafe0v = tcpci_is_vbus_vsafe0v;

	err = tcpci_parse_config(tcpci);
	if (err < 0)
		return ERR_PTR(err);

	tcpci->port = tcpm_register_port(tcpci->dev, &tcpci->tcpc);
	if (IS_ERR(tcpci->port)) {
		/* Drop the connector fwnode reference taken in parse_config */
		fwnode_handle_put(tcpci->tcpc.fwnode);
		return ERR_CAST(tcpci->port);
	}

	return tcpci;
}
EXPORT_SYMBOL_GPL(tcpci_register_port);
/*
 * tcpci_unregister_port() - tear down a port from tcpci_register_port().
 *
 * Unregisters from TCPM first, then drops the connector fwnode reference.
 */
void tcpci_unregister_port(struct tcpci *tcpci)
{
	tcpm_unregister_port(tcpci->port);
	fwnode_handle_put(tcpci->tcpc.fwnode);
}
EXPORT_SYMBOL_GPL(tcpci_unregister_port);
/*
 * tcpci_probe() - I2C probe for generic TCPCI parts.
 *
 * Masks all alerts before the (shared, level-low) threaded IRQ is
 * requested so a stale pending alert cannot fire before the port exists.
 */
static int tcpci_probe(struct i2c_client *client)
{
	struct tcpci_chip *chip;
	int err;
	u16 val = 0;

	chip = devm_kzalloc(&client->dev, sizeof(*chip), GFP_KERNEL);
	if (!chip)
		return -ENOMEM;

	chip->data.regmap = devm_regmap_init_i2c(client, &tcpci_regmap_config);
	if (IS_ERR(chip->data.regmap))
		return PTR_ERR(chip->data.regmap);

	i2c_set_clientdata(client, chip);

	/* Disable chip interrupts before requesting irq */
	err = regmap_raw_write(chip->data.regmap, TCPC_ALERT_MASK, &val,
			       sizeof(u16));
	if (err < 0)
		return err;

	chip->tcpci = tcpci_register_port(&client->dev, &chip->data);
	if (IS_ERR(chip->tcpci))
		return PTR_ERR(chip->tcpci);

	err = devm_request_threaded_irq(&client->dev, client->irq, NULL,
					_tcpci_irq,
					IRQF_SHARED | IRQF_ONESHOT | IRQF_TRIGGER_LOW,
					dev_name(&client->dev), chip);
	if (err < 0) {
		/* The port is not devm-managed; undo registration by hand */
		tcpci_unregister_port(chip->tcpci);
		return err;
	}

	return 0;
}
/*
 * tcpci_remove() - I2C remove; silence the chip, then drop the port.
 */
static void tcpci_remove(struct i2c_client *client)
{
	struct tcpci_chip *chip = i2c_get_clientdata(client);
	int err;

	/* Disable chip interrupts before unregistering port */
	err = tcpci_write16(chip->tcpci, TCPC_ALERT_MASK, 0);
	if (err < 0)
		dev_warn(&client->dev, "Failed to disable irqs (%pe)\n", ERR_PTR(err));

	tcpci_unregister_port(chip->tcpci);
}
/* I2C device-ID table for the generic "tcpci" binding. */
static const struct i2c_device_id tcpci_id[] = {
	{ "tcpci", 0 },
	{ }
};
MODULE_DEVICE_TABLE(i2c, tcpci_id);

#ifdef CONFIG_OF
/* Devicetree match: NXP PTN5110 is a spec-compliant TCPCI part. */
static const struct of_device_id tcpci_of_match[] = {
	{ .compatible = "nxp,ptn5110", },
	{},
};
MODULE_DEVICE_TABLE(of, tcpci_of_match);
#endif

static struct i2c_driver tcpci_i2c_driver = {
	.driver = {
		.name = "tcpci",
		.of_match_table = of_match_ptr(tcpci_of_match),
	},
	.probe = tcpci_probe,
	.remove = tcpci_remove,
	.id_table = tcpci_id,
};
module_i2c_driver(tcpci_i2c_driver);

MODULE_DESCRIPTION("USB Type-C Port Controller Interface driver");
MODULE_LICENSE("GPL");
| linux-master | drivers/usb/typec/tcpm/tcpci.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* Copyright 2022 Google, Inc
*
* USB-C module to reduce wakeups due to contaminants.
*/
#include <linux/device.h>
#include <linux/irqreturn.h>
#include <linux/module.h>
#include <linux/regmap.h>
#include <linux/usb/tcpci.h>
#include <linux/usb/tcpm.h>
#include <linux/usb/typec.h>
#include "tcpci_maxim.h"
/* FLADC input channel select; CC pins have two current scales, SBU only one. */
enum fladc_select {
	CC1_SCALE1 = 1,
	CC1_SCALE2,
	CC2_SCALE1,
	CC2_SCALE2,
	SBU1,
	SBU2,
};
#define FLADC_1uA_LSB_MV 25
/* High range CC */
#define FLADC_CC_HIGH_RANGE_LSB_MV 208
/* Low range CC */
#define FLADC_CC_LOW_RANGE_LSB_MV 126
/* 1uA current source */
#define FLADC_CC_SCALE1 1
/* 5 uA current source */
#define FLADC_CC_SCALE2 5
#define FLADC_1uA_CC_OFFSET_MV 300
#define FLADC_CC_HIGH_RANGE_OFFSET_MV 624
#define FLADC_CC_LOW_RANGE_OFFSET_MV 378
#define CONTAMINANT_THRESHOLD_SBU_K 1000
#define CONTAMINANT_THRESHOLD_CC_K 1000
#define READ1_SLEEP_MS 10
#define READ2_SLEEP_MS 5
#define STATUS_CHECK(reg, mask, val) (((reg) & (mask)) == (val))
#define IS_CC_OPEN(cc_status) \
(STATUS_CHECK((cc_status), TCPC_CC_STATUS_CC1_MASK << TCPC_CC_STATUS_CC1_SHIFT, \
TCPC_CC_STATE_SRC_OPEN) && STATUS_CHECK((cc_status), \
TCPC_CC_STATUS_CC2_MASK << \
TCPC_CC_STATUS_CC2_SHIFT, \
TCPC_CC_STATE_SRC_OPEN))
/*
 * max_contaminant_adc_to_mv() - convert a raw FLADC code to millivolts.
 *
 * Offset/LSB depend on the channel and whether the 1uA current source was
 * driving the pin; SBU channels only support the 1uA scale.
 * Returns millivolts, or -EINVAL for an unsupported channel/scale combo.
 */
static int max_contaminant_adc_to_mv(struct max_tcpci_chip *chip, enum fladc_select channel,
				     bool ua_src, u8 fladc)
{
	/* SBU channels only have 1 scale with 1uA. */
	if ((ua_src && (channel == CC1_SCALE2 || channel == CC2_SCALE2 || channel == SBU1 ||
			channel == SBU2)))
		/* Mean of range */
		return FLADC_1uA_CC_OFFSET_MV + (fladc * FLADC_1uA_LSB_MV);
	else if (!ua_src && (channel == CC1_SCALE1 || channel == CC2_SCALE1))
		return FLADC_CC_HIGH_RANGE_OFFSET_MV + (fladc * FLADC_CC_HIGH_RANGE_LSB_MV);
	else if (!ua_src && (channel == CC1_SCALE2 || channel == CC2_SCALE2))
		return FLADC_CC_LOW_RANGE_OFFSET_MV + (fladc * FLADC_CC_LOW_RANGE_LSB_MV);

	dev_err_once(chip->dev, "ADC ERROR: SCALE UNKNOWN");

	return -EINVAL;
}
/*
 * max_contaminant_read_adc_mv() - run one FLADC conversion on @channel.
 *
 * Selects the channel, enables the ADC, waits @sleep_msec for the
 * conversion, reads the result, then disables the ADC and deselects the
 * channel. Returns the raw code when @raw, otherwise millivolts;
 * negative errno on failure.
 */
static int max_contaminant_read_adc_mv(struct max_tcpci_chip *chip, enum fladc_select channel,
				       int sleep_msec, bool raw, bool ua_src)
{
	struct regmap *regmap = chip->data.regmap;
	u8 fladc;
	int ret;

	/* Channel & scale select */
	ret = regmap_update_bits(regmap, TCPC_VENDOR_ADC_CTRL1, ADCINSEL_MASK,
				 channel << ADC_CHANNEL_OFFSET);
	if (ret < 0)
		return ret;

	/* Enable ADC */
	ret = regmap_update_bits(regmap, TCPC_VENDOR_ADC_CTRL1, ADCEN, ADCEN);
	if (ret < 0)
		return ret;

	usleep_range(sleep_msec * 1000, (sleep_msec + 1) * 1000);
	ret = max_tcpci_read8(chip, TCPC_VENDOR_FLADC_STATUS, &fladc);
	if (ret < 0)
		return ret;

	/* Disable ADC */
	ret = regmap_update_bits(regmap, TCPC_VENDOR_ADC_CTRL1, ADCEN, 0);
	if (ret < 0)
		return ret;

	ret = regmap_update_bits(regmap, TCPC_VENDOR_ADC_CTRL1, ADCINSEL_MASK, 0);
	if (ret < 0)
		return ret;

	if (!raw)
		return max_contaminant_adc_to_mv(chip, channel, ua_src, fladc);
	else
		return fladc;
}
/*
 * max_contaminant_read_resistance_kohm() - measure the resistance on a
 * CC or SBU pin.
 *
 * A 1uA current source is applied, so the ADC millivolt reading is
 * numerically the resistance in kOhm. Returns that value (or the raw ADC
 * code when @raw), or a negative errno.
 *
 * Fix: ADC failures previously did "return ret", where ret still held 0
 * from the preceding successful register write, so errors were silently
 * reported as a 0 kOhm reading; propagate the error code (mv) instead.
 */
static int max_contaminant_read_resistance_kohm(struct max_tcpci_chip *chip,
						enum fladc_select channel, int sleep_msec, bool raw)
{
	struct regmap *regmap = chip->data.regmap;
	int mv;
	int ret;

	if (channel == CC1_SCALE1 || channel == CC2_SCALE1 || channel == CC1_SCALE2 ||
	    channel == CC2_SCALE2) {
		/* Put the CC block into ultra-low-power mode for the measurement */
		ret = regmap_update_bits(regmap, TCPC_VENDOR_CC_CTRL2, CCLPMODESEL_MASK,
					 ULTRA_LOW_POWER_MODE);
		if (ret < 0)
			return ret;
		/* Enable 1uA current source */
		ret = regmap_update_bits(regmap, TCPC_VENDOR_CC_CTRL2, CCRPCTRL_MASK, UA_1_SRC);
		if (ret < 0)
			return ret;
		/* OVP disable */
		ret = regmap_update_bits(regmap, TCPC_VENDOR_CC_CTRL2, CCOVPDIS, CCOVPDIS);
		if (ret < 0)
			return ret;

		mv = max_contaminant_read_adc_mv(chip, channel, sleep_msec, raw, true);
		if (mv < 0)
			return mv;

		/* OVP enable */
		ret = regmap_update_bits(regmap, TCPC_VENDOR_CC_CTRL2, CCOVPDIS, 0);
		if (ret < 0)
			return ret;
		/* returns KOhm as 1uA source is used. */
		return mv;
	}

	ret = regmap_update_bits(regmap, TCPC_VENDOR_CC_CTRL2, SBUOVPDIS, SBUOVPDIS);
	if (ret < 0)
		return ret;

	/* SBU switches auto configure when channel is selected. */
	/* Enable 1ua current source */
	ret = regmap_update_bits(regmap, TCPC_VENDOR_CC_CTRL2, SBURPCTRL, SBURPCTRL);
	if (ret < 0)
		return ret;

	mv = max_contaminant_read_adc_mv(chip, channel, sleep_msec, raw, true);
	if (mv < 0)
		return mv;

	/* Disable current source */
	ret = regmap_update_bits(regmap, TCPC_VENDOR_CC_CTRL2, SBURPCTRL, 0);
	if (ret < 0)
		return ret;

	/* Re-enable SBU overvoltage protection */
	ret = regmap_update_bits(regmap, TCPC_VENDOR_CC_CTRL2, SBUOVPDIS, 0);
	if (ret < 0)
		return ret;

	return mv;
}
/*
 * max_contaminant_read_comparators() - sample the vendor CC comparators
 * for both plug orientations with an 80uA source applied.
 *
 * Results land in @vendor_cc_status2_cc1/@vendor_cc_status2_cc2; the
 * current source and comparators are switched off again before return.
 * Returns 0 or a negative errno.
 */
static int max_contaminant_read_comparators(struct max_tcpci_chip *chip, u8 *vendor_cc_status2_cc1,
					    u8 *vendor_cc_status2_cc2)
{
	struct regmap *regmap = chip->data.regmap;
	int ret;

	/* Enable 80uA source */
	ret = regmap_update_bits(regmap, TCPC_VENDOR_CC_CTRL2, CCRPCTRL_MASK, UA_80_SRC);
	if (ret < 0)
		return ret;

	/* Enable comparators */
	ret = regmap_update_bits(regmap, TCPC_VENDOR_CC_CTRL1, CCCOMPEN, CCCOMPEN);
	if (ret < 0)
		return ret;

	/* Sleep to allow comparators settle */
	usleep_range(5000, 6000);
	ret = regmap_update_bits(regmap, TCPC_TCPC_CTRL, TCPC_TCPC_CTRL_ORIENTATION, PLUG_ORNT_CC1);
	if (ret < 0)
		return ret;

	usleep_range(5000, 6000);
	ret = max_tcpci_read8(chip, VENDOR_CC_STATUS2, vendor_cc_status2_cc1);
	if (ret < 0)
		return ret;

	/* Repeat with the opposite orientation selected */
	ret = regmap_update_bits(regmap, TCPC_TCPC_CTRL, TCPC_TCPC_CTRL_ORIENTATION, PLUG_ORNT_CC2);
	if (ret < 0)
		return ret;

	usleep_range(5000, 6000);
	ret = max_tcpci_read8(chip, VENDOR_CC_STATUS2, vendor_cc_status2_cc2);
	if (ret < 0)
		return ret;

	/* Switch comparators and current source back off */
	ret = regmap_update_bits(regmap, TCPC_VENDOR_CC_CTRL1, CCCOMPEN, 0);
	if (ret < 0)
		return ret;

	ret = regmap_update_bits(regmap, TCPC_VENDOR_CC_CTRL2, CCRPCTRL_MASK, 0);
	if (ret < 0)
		return ret;

	return 0;
}
/*
 * max_contaminant_detect_contaminant() - classify what is on the CC/SBU pins.
 *
 * Measures CC/SBU resistances and reads the CC comparators, then infers:
 * SINK (a real UFP is attached), DETECTED (low impedance on both a CC and
 * an SBU pin, i.e. liquid/contaminant), or NOT_DETECTED. ROLE_CONTROL is
 * restored on NOT_DETECTED, or set to DRP toggling otherwise.
 * Errors are reported as NOT_DETECTED after restoring ROLE_CONTROL.
 */
static int max_contaminant_detect_contaminant(struct max_tcpci_chip *chip)
{
	int cc1_k, cc2_k, sbu1_k, sbu2_k, ret;
	u8 vendor_cc_status2_cc1 = 0xff, vendor_cc_status2_cc2 = 0xff;
	u8 role_ctrl = 0, role_ctrl_backup = 0;
	int inferred_state = NOT_DETECTED;

	ret = max_tcpci_read8(chip, TCPC_ROLE_CTRL, &role_ctrl);
	if (ret < 0)
		return NOT_DETECTED;

	role_ctrl_backup = role_ctrl;
	/* 0x0F: presumably opens both CC terminations for the measurement — confirm vs TCPCI ROLE_CONTROL encoding */
	role_ctrl = 0x0F;
	ret = max_tcpci_write8(chip, TCPC_ROLE_CTRL, role_ctrl);
	if (ret < 0)
		return NOT_DETECTED;

	cc1_k = max_contaminant_read_resistance_kohm(chip, CC1_SCALE2, READ1_SLEEP_MS, false);
	if (cc1_k < 0)
		goto exit;

	cc2_k = max_contaminant_read_resistance_kohm(chip, CC2_SCALE2, READ2_SLEEP_MS, false);
	if (cc2_k < 0)
		goto exit;

	sbu1_k = max_contaminant_read_resistance_kohm(chip, SBU1, READ1_SLEEP_MS, false);
	if (sbu1_k < 0)
		goto exit;

	sbu2_k = max_contaminant_read_resistance_kohm(chip, SBU2, READ2_SLEEP_MS, false);
	if (sbu2_k < 0)
		goto exit;

	ret = max_contaminant_read_comparators(chip, &vendor_cc_status2_cc1,
					       &vendor_cc_status2_cc2);
	if (ret < 0)
		goto exit;

	/* Exactly one CC below the Rd0.5 comparator level => a real sink */
	if ((!(CC1_VUFP_RD0P5 & vendor_cc_status2_cc1) ||
	     !(CC2_VUFP_RD0P5 & vendor_cc_status2_cc2)) &&
	    !(CC1_VUFP_RD0P5 & vendor_cc_status2_cc1 && CC2_VUFP_RD0P5 & vendor_cc_status2_cc2))
		inferred_state = SINK;
	else if ((cc1_k < CONTAMINANT_THRESHOLD_CC_K || cc2_k < CONTAMINANT_THRESHOLD_CC_K) &&
		 (sbu1_k < CONTAMINANT_THRESHOLD_SBU_K || sbu2_k < CONTAMINANT_THRESHOLD_SBU_K))
		inferred_state = DETECTED;

	if (inferred_state == NOT_DETECTED)
		max_tcpci_write8(chip, TCPC_ROLE_CTRL, role_ctrl_backup);
	else
		/* 0xA: presumably Rd on both CC pins while DRP toggles — confirm vs TCPCI encoding */
		max_tcpci_write8(chip, TCPC_ROLE_CTRL, (TCPC_ROLE_CTRL_DRP | 0xA));

	return inferred_state;

exit:
	max_tcpci_write8(chip, TCPC_ROLE_CTRL, role_ctrl_backup);
	return NOT_DETECTED;
}
/*
 * max_contaminant_enable_dry_detection() - arm the chip's autonomous dry
 * detection so it only wakes the host once the port looks dry again.
 *
 * Configures the water-detection debounce/threshold/cycle, enables
 * connect-upon-dry with DRP, drops into ultra-low-power mode and kicks
 * LOOK4CONNECTION. Returns 0 or a negative errno.
 */
static int max_contaminant_enable_dry_detection(struct max_tcpci_chip *chip)
{
	struct regmap *regmap = chip->data.regmap;
	u8 temp;
	int ret;

	/* Water-detection debounce (1 ms), threshold (1 V), cycle (4.8 s) */
	ret = regmap_update_bits(regmap, TCPC_VENDOR_CC_CTRL3, CCWTRDEB_MASK | CCWTRSEL_MASK
				 | WTRCYCLE_MASK, CCWTRDEB_1MS << CCWTRDEB_SHIFT |
				 CCWTRSEL_1V << CCWTRSEL_SHIFT | WTRCYCLE_4_8_S <<
				 WTRCYCLE_SHIFT);
	if (ret < 0)
		return ret;

	ret = regmap_update_bits(regmap, TCPC_ROLE_CTRL, TCPC_ROLE_CTRL_DRP, TCPC_ROLE_CTRL_DRP);
	if (ret < 0)
		return ret;

	ret = regmap_update_bits(regmap, TCPC_VENDOR_CC_CTRL1, CCCONNDRY, CCCONNDRY);
	if (ret < 0)
		return ret;
	/* NOTE(review): readback value is unused — presumably a required dummy read; confirm */
	ret = max_tcpci_read8(chip, TCPC_VENDOR_CC_CTRL1, &temp);
	if (ret < 0)
		return ret;

	ret = regmap_update_bits(regmap, TCPC_VENDOR_CC_CTRL2, CCLPMODESEL_MASK,
				 ULTRA_LOW_POWER_MODE);
	if (ret < 0)
		return ret;
	/* NOTE(review): same unused readback pattern as above */
	ret = max_tcpci_read8(chip, TCPC_VENDOR_CC_CTRL2, &temp);
	if (ret < 0)
		return ret;

	/* Enable Look4Connection before sending the command */
	ret = regmap_update_bits(regmap, TCPC_TCPC_CTRL, TCPC_TCPC_CTRL_EN_LK4CONN_ALRT,
				 TCPC_TCPC_CTRL_EN_LK4CONN_ALRT);
	if (ret < 0)
		return ret;

	ret = max_tcpci_write8(chip, TCPC_COMMAND, TCPC_CMD_LOOK4CONNECTION);
	if (ret < 0)
		return ret;
	return 0;
}
/*
 * max_contaminant_is_contaminant() - decide whether the current CC event
 * was caused by a contaminant rather than a genuine attach.
 *
 * Returns true when contaminant handling took over (dry detection armed);
 * false lets the normal TCPM connection state machine proceed.
 * All register-read failures are treated as "not a contaminant".
 */
bool max_contaminant_is_contaminant(struct max_tcpci_chip *chip, bool disconnect_while_debounce)
{
	u8 cc_status, pwr_cntl;
	int ret;

	ret = max_tcpci_read8(chip, TCPC_CC_STATUS, &cc_status);
	if (ret < 0)
		return false;

	ret = max_tcpci_read8(chip, TCPC_POWER_CTRL, &pwr_cntl);
	if (ret < 0)
		return false;

	if (chip->contaminant_state == NOT_DETECTED || chip->contaminant_state == SINK) {
		/* Let a potential disconnect debounce before re-reading CC */
		if (!disconnect_while_debounce)
			msleep(100);

		ret = max_tcpci_read8(chip, TCPC_CC_STATUS, &cc_status);
		if (ret < 0)
			return false;

		if (IS_CC_OPEN(cc_status)) {
			u8 role_ctrl, role_ctrl_backup;

			/* Freeze toggling with both terminations open while probing */
			ret = max_tcpci_read8(chip, TCPC_ROLE_CTRL, &role_ctrl);
			if (ret < 0)
				return false;

			role_ctrl_backup = role_ctrl;
			role_ctrl |= 0x0F;
			role_ctrl &= ~(TCPC_ROLE_CTRL_DRP);
			ret = max_tcpci_write8(chip, TCPC_ROLE_CTRL, role_ctrl);
			if (ret < 0)
				return false;

			chip->contaminant_state = max_contaminant_detect_contaminant(chip);

			ret = max_tcpci_write8(chip, TCPC_ROLE_CTRL, role_ctrl_backup);
			if (ret < 0)
				return false;

			if (chip->contaminant_state == DETECTED) {
				max_contaminant_enable_dry_detection(chip);
				return true;
			}
		}
		return false;
	} else if (chip->contaminant_state == DETECTED) {
		/* Re-evaluate once the chip has stopped DRP toggling */
		if (STATUS_CHECK(cc_status, TCPC_CC_STATUS_TOGGLING, 0)) {
			chip->contaminant_state = max_contaminant_detect_contaminant(chip);
			if (chip->contaminant_state == DETECTED) {
				max_contaminant_enable_dry_detection(chip);
				return true;
			}
		}
	}

	return false;
}
MODULE_DESCRIPTION("MAXIM TCPC CONTAMINANT Module");
MODULE_AUTHOR("Badhri Jagan Sridharan <[email protected]>");
MODULE_LICENSE("GPL");
| linux-master | drivers/usb/typec/tcpm/maxim_contaminant.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* Copyright (C) 2018, Richtek Technology Corporation
*
* Richtek RT1711H Type-C Chip Driver
*/
#include <linux/bits.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/i2c.h>
#include <linux/interrupt.h>
#include <linux/gpio/consumer.h>
#include <linux/usb/tcpci.h>
#include <linux/usb/tcpm.h>
#include <linux/regmap.h>
#include <linux/regulator/consumer.h>
#define RT1711H_VID 0x29CF
#define RT1711H_PID 0x1711
#define RT1711H_DID 0x2171
#define RT1715_DID 0x2173
#define RT1711H_PHYCTRL1 0x80
#define RT1711H_PHYCTRL2 0x81
#define RT1711H_RTCTRL4 0x93
/* rx threshold of rd/rp: 1b0 for level 0.4V/0.7V, 1b1 for 0.35V/0.75V */
#define RT1711H_BMCIO_RXDZSEL BIT(0)
#define RT1711H_RTCTRL8 0x9B
/* Autoidle timeout = (tout * 2 + 1) * 6.4ms */
#define RT1711H_RTCTRL8_SET(ck300, ship_off, auto_idle, tout) \
(((ck300) << 7) | ((ship_off) << 5) | \
((auto_idle) << 3) | ((tout) & 0x07))
#define RT1711H_AUTOIDLEEN BIT(3)
#define RT1711H_ENEXTMSG BIT(4)
#define RT1711H_RTCTRL11 0x9E
/* I2C timeout = (tout + 1) * 12.5ms */
#define RT1711H_RTCTRL11_SET(en, tout) \
(((en) << 7) | ((tout) & 0x0F))
#define RT1711H_RTCTRL13 0xA0
#define RT1711H_RTCTRL14 0xA1
#define RT1711H_RTCTRL15 0xA2
#define RT1711H_RTCTRL16 0xA3
#define RT1711H_RTCTRL18 0xAF
/* 1b0 as fixed rx threshold of rd/rp 0.55V, 1b1 depends on RTCRTL4[0] */
#define BMCIO_RXDZEN BIT(0)
/* Per-device driver state; 'data' is the tcpci core's callback container. */
struct rt1711h_chip {
	struct tcpci_data data;		/* regmap + vendor hooks handed to tcpci */
	struct tcpci *tcpci;		/* handle from tcpci_register_port() */
	struct device *dev;		/* underlying I2C client device */
	struct regulator *vbus;		/* VBUS source supply */
	bool src_en;			/* cached: is the vbus regulator enabled */
	u16 did;			/* expected device ID (RT1711H vs RT1715) */
};
/* Read a 16-bit register through the raw (byte-stream) regmap interface. */
static int rt1711h_read16(struct rt1711h_chip *chip, unsigned int reg, u16 *val)
{
	struct regmap *rm = chip->data.regmap;

	return regmap_raw_read(rm, reg, val, sizeof(*val));
}
/* Write a 16-bit register through the raw (byte-stream) regmap interface. */
static int rt1711h_write16(struct rt1711h_chip *chip, unsigned int reg, u16 val)
{
	struct regmap *rm = chip->data.regmap;

	return regmap_raw_write(rm, reg, &val, sizeof(val));
}
/* Read a single 8-bit register through the raw regmap interface. */
static int rt1711h_read8(struct rt1711h_chip *chip, unsigned int reg, u8 *val)
{
	struct regmap *rm = chip->data.regmap;

	return regmap_raw_read(rm, reg, val, sizeof(*val));
}
/* Write a single 8-bit register through the raw regmap interface. */
static int rt1711h_write8(struct rt1711h_chip *chip, unsigned int reg, u8 val)
{
	struct regmap *rm = chip->data.regmap;

	return regmap_raw_write(rm, reg, &val, sizeof(val));
}
/* Flat 8-bit register map; 0x00..0x7F follow the standard TCPCI layout. */
static const struct regmap_config rt1711h_regmap_config = {
	.reg_bits = 8,
	.val_bits = 8,
	.max_register = 0xFF, /* 0x80 .. 0xFF are vendor defined */
};
/* Recover the driver-private chip struct from the embedded tcpci_data. */
static struct rt1711h_chip *tdata_to_rt1711h(struct tcpci_data *tdata)
{
	return container_of(tdata, struct rt1711h_chip, data);
}
/*
 * Vendor init hook invoked by the tcpci core: programs clock/idle control,
 * protocol timers and BMC PHY tuning.  Returns 0 on success or the negative
 * error of the first failing register access.  The write order follows the
 * vendor's bring-up sequence — do not reorder without hardware validation.
 */
static int rt1711h_init(struct tcpci *tcpci, struct tcpci_data *tdata)
{
	struct rt1711h_chip *chip = tdata_to_rt1711h(tdata);
	struct regmap *regmap = chip->data.regmap;
	int ret;
	/* CK 300K from 320K, shipping off, auto_idle enable, tout = 32ms */
	ret = rt1711h_write8(chip, RT1711H_RTCTRL8,
			     RT1711H_RTCTRL8_SET(0, 1, 1, 2));
	if (ret < 0)
		return ret;
	/* Enable PD30 extended message for RT1715 */
	if (chip->did == RT1715_DID) {
		ret = regmap_update_bits(regmap, RT1711H_RTCTRL8,
					 RT1711H_ENEXTMSG, RT1711H_ENEXTMSG);
		if (ret < 0)
			return ret;
	}
	/* I2C reset : (val + 1) * 12.5ms */
	ret = rt1711h_write8(chip, RT1711H_RTCTRL11,
			     RT1711H_RTCTRL11_SET(1, 0x0F));
	if (ret < 0)
		return ret;
	/* tTCPCfilter : (26.7 * val) us */
	ret = rt1711h_write8(chip, RT1711H_RTCTRL14, 0x0F);
	if (ret < 0)
		return ret;
	/* tDRP : (51.2 + 6.4 * val) ms */
	ret = rt1711h_write8(chip, RT1711H_RTCTRL15, 0x04);
	if (ret < 0)
		return ret;
	/* dcSRC.DRP : 33% */
	ret = rt1711h_write16(chip, RT1711H_RTCTRL16, 330);
	if (ret < 0)
		return ret;
	/* Enable phy discard retry, retry count 7, rx filter deglitch 100 us */
	ret = rt1711h_write8(chip, RT1711H_PHYCTRL1, 0xF1);
	if (ret < 0)
		return ret;
	/* Decrease wait time of BMC-encoded 1 bit from 2.67us to 2.55us */
	/* wait time : (val * .4167) us */
	return rt1711h_write8(chip, RT1711H_PHYCTRL2, 62);
}
/*
 * tcpci set_vbus hook: gate the VBUS source regulator.  Only the source
 * path is driven here; 'snk' is accepted for the callback signature but
 * not acted upon.  The enabled state is cached to avoid unbalanced
 * regulator_enable()/regulator_disable() calls.
 */
static int rt1711h_set_vbus(struct tcpci *tcpci, struct tcpci_data *tdata,
			    bool src, bool snk)
{
	struct rt1711h_chip *chip = tdata_to_rt1711h(tdata);
	int err;

	/* Already in the requested state: nothing to do. */
	if (src == chip->src_en)
		return 0;

	err = src ? regulator_enable(chip->vbus) :
		    regulator_disable(chip->vbus);
	if (err)
		return err;

	chip->src_en = src;
	return 0;
}
/*
 * VCONN hook.  The tcpci core performs the actual VCONN switch; this only
 * toggles the vendor auto-idle feature with inverted polarity: enabling
 * VCONN clears AUTOIDLEEN, disabling VCONN sets it again.
 * NOTE(review): the inversion looks intentional — presumably auto-idle
 * must stay off while VCONN is sourced; confirm against the datasheet.
 */
static int rt1711h_set_vconn(struct tcpci *tcpci, struct tcpci_data *tdata,
			     bool enable)
{
	struct rt1711h_chip *chip = tdata_to_rt1711h(tdata);
	return regmap_update_bits(chip->data.regmap, RT1711H_RTCTRL8,
				  RT1711H_AUTOIDLEEN, enable ? 0 : RT1711H_AUTOIDLEEN);
}
/*
 * Selects the CC PHY noise filter voltage level according to the remote current
 * CC voltage level.
 *
 * @status: The port's current cc status read from IC
 * Return 0 if writes succeed; failure code otherwise
 */
static inline int rt1711h_init_cc_params(struct rt1711h_chip *chip, u8 status)
{
	int ret, cc1, cc2;
	u8 role = 0;
	u32 rxdz_en, rxdz_sel;
	ret = rt1711h_read8(chip, TCPC_ROLE_CTRL, &role);
	if (ret < 0)
		return ret;
	/* Decode both CC pins; the TERM bit / presented Rd selects sink-side
	 * interpretation of the raw status field. */
	cc1 = tcpci_to_typec_cc((status >> TCPC_CC_STATUS_CC1_SHIFT) &
				TCPC_CC_STATUS_CC1_MASK,
				status & TCPC_CC_STATUS_TERM ||
				tcpc_presenting_rd(role, CC1));
	cc2 = tcpci_to_typec_cc((status >> TCPC_CC_STATUS_CC2_SHIFT) &
				TCPC_CC_STATUS_CC2_MASK,
				status & TCPC_CC_STATUS_TERM ||
				tcpc_presenting_rd(role, CC2));
	/*
	 * One pin sees Rp >= 1.5A while the other is below default: pick the
	 * RTCTRL4-selectable threshold (RT1715 additionally sets RXDZSEL);
	 * otherwise use the fixed 0.55V threshold path.  See the RTCTRL4 /
	 * RTCTRL18 bit comments near the top of the file.
	 */
	if ((cc1 >= TYPEC_CC_RP_1_5 && cc2 < TYPEC_CC_RP_DEF) ||
	    (cc2 >= TYPEC_CC_RP_1_5 && cc1 < TYPEC_CC_RP_DEF)) {
		rxdz_en = BMCIO_RXDZEN;
		if (chip->did == RT1715_DID)
			rxdz_sel = RT1711H_BMCIO_RXDZSEL;
		else
			rxdz_sel = 0;
	} else {
		rxdz_en = 0;
		rxdz_sel = RT1711H_BMCIO_RXDZSEL;
	}
	ret = regmap_update_bits(chip->data.regmap, RT1711H_RTCTRL18,
				 BMCIO_RXDZEN, rxdz_en);
	if (ret < 0)
		return ret;
	return regmap_update_bits(chip->data.regmap, RT1711H_RTCTRL4,
				  RT1711H_BMCIO_RXDZSEL, rxdz_sel);
}
/*
 * Program ROLE_CTRL for DRP toggling: the requested Rp level plus the same
 * pull (Rd or Rp) on both CC pins, then give the chip a short settle delay.
 */
static int rt1711h_start_drp_toggling(struct tcpci *tcpci,
				      struct tcpci_data *tdata,
				      enum typec_cc_status cc)
{
	struct rt1711h_chip *chip = tdata_to_rt1711h(tdata);
	unsigned int rp_val, pull, role_ctrl;
	int ret;

	/* Unknown levels fall back to the default Rp value. */
	if (cc == TYPEC_CC_RP_1_5)
		rp_val = TCPC_ROLE_CTRL_RP_VAL_1_5;
	else if (cc == TYPEC_CC_RP_3_0)
		rp_val = TCPC_ROLE_CTRL_RP_VAL_3_0;
	else
		rp_val = TCPC_ROLE_CTRL_RP_VAL_DEF;

	pull = (cc == TYPEC_CC_RD) ? TCPC_ROLE_CTRL_CC_RD :
				     TCPC_ROLE_CTRL_CC_RP;

	role_ctrl = (rp_val << TCPC_ROLE_CTRL_RP_VAL_SHIFT) |
		    (pull << TCPC_ROLE_CTRL_CC1_SHIFT) |
		    (pull << TCPC_ROLE_CTRL_CC2_SHIFT);

	ret = rt1711h_write8(chip, TCPC_ROLE_CTRL, role_ctrl);
	if (ret < 0)
		return ret;

	usleep_range(500, 1000);
	return 0;
}
/*
 * Threaded ALERT interrupt handler.  Pre-processes CC status changes (either
 * acking the spurious change raised by starting toggling, or retuning the RX
 * thresholds for the new CC levels) before delegating all remaining alert
 * handling to the tcpci core.
 */
static irqreturn_t rt1711h_irq(int irq, void *dev_id)
{
	int ret;
	u16 alert;
	u8 status;
	struct rt1711h_chip *chip = dev_id;
	/* Port may not be registered yet when an early interrupt fires. */
	if (!chip->tcpci)
		return IRQ_HANDLED;
	ret = rt1711h_read16(chip, TCPC_ALERT, &alert);
	if (ret < 0)
		goto out;
	if (alert & TCPC_ALERT_CC_STATUS) {
		ret = rt1711h_read8(chip, TCPC_CC_STATUS, &status);
		if (ret < 0)
			goto out;
		/* Clear cc change event triggered by starting toggling */
		if (status & TCPC_CC_STATUS_TOGGLING)
			rt1711h_write8(chip, TCPC_ALERT, TCPC_ALERT_CC_STATUS);
		else
			rt1711h_init_cc_params(chip, status);
	}
out:
	/* tcpci_irq() handles and clears the remaining alert bits. */
	return tcpci_irq(chip->tcpci);
}
/* Trigger the vendor software reset, then give the chip 1-2 ms to settle. */
static int rt1711h_sw_reset(struct rt1711h_chip *chip)
{
	int err = rt1711h_write8(chip, RT1711H_RTCTRL13, 0x01);

	if (err < 0)
		return err;

	usleep_range(1000, 2000);
	return 0;
}
/*
 * Verify VID/PID/DID via raw SMBus (the regmap is not set up yet at probe
 * time).  Returns the device ID (>= 0) on success, -ENODEV on any identity
 * mismatch, or a negative I2C error.
 */
static int rt1711h_check_revision(struct i2c_client *i2c, struct rt1711h_chip *chip)
{
	int ret;
	ret = i2c_smbus_read_word_data(i2c, TCPC_VENDOR_ID);
	if (ret < 0)
		return ret;
	if (ret != RT1711H_VID) {
		dev_err(&i2c->dev, "vid is not correct, 0x%04x\n", ret);
		return -ENODEV;
	}
	ret = i2c_smbus_read_word_data(i2c, TCPC_PRODUCT_ID);
	if (ret < 0)
		return ret;
	if (ret != RT1711H_PID) {
		dev_err(&i2c->dev, "pid is not correct, 0x%04x\n", ret);
		return -ENODEV;
	}
	ret = i2c_smbus_read_word_data(i2c, TCPC_BCD_DEV);
	if (ret < 0)
		return ret;
	/* chip->did was taken from the OF match data (RT1711H vs RT1715). */
	if (ret != chip->did) {
		dev_err(&i2c->dev, "did is not correct, 0x%04x\n", ret);
		return -ENODEV;
	}
	dev_dbg(&i2c->dev, "did is 0x%04x\n", ret);
	return ret;
}
/*
 * Probe: verify the chip identity, set up the regmap, software-reset the
 * chip, register the TCPCI port and install the threaded ALERT interrupt
 * handler.  Returns 0 on success or a negative error code.
 */
static int rt1711h_probe(struct i2c_client *client)
{
	int ret;
	struct rt1711h_chip *chip;

	chip = devm_kzalloc(&client->dev, sizeof(*chip), GFP_KERNEL);
	if (!chip)
		return -ENOMEM;

	/* Expected device ID comes from the OF match data. */
	chip->did = (size_t)device_get_match_data(&client->dev);
	ret = rt1711h_check_revision(client, chip);
	if (ret < 0) {
		dev_err(&client->dev, "check vid/pid fail\n");
		return ret;
	}

	chip->data.regmap = devm_regmap_init_i2c(client,
						 &rt1711h_regmap_config);
	if (IS_ERR(chip->data.regmap))
		return PTR_ERR(chip->data.regmap);

	chip->dev = &client->dev;
	i2c_set_clientdata(client, chip);

	ret = rt1711h_sw_reset(chip);
	if (ret < 0)
		return ret;

	/* Disable chip interrupts before requesting irq */
	ret = rt1711h_write16(chip, TCPC_ALERT_MASK, 0);
	if (ret < 0)
		return ret;

	chip->vbus = devm_regulator_get(&client->dev, "vbus");
	if (IS_ERR(chip->vbus))
		return PTR_ERR(chip->vbus);

	chip->data.init = rt1711h_init;
	chip->data.set_vbus = rt1711h_set_vbus;
	chip->data.set_vconn = rt1711h_set_vconn;
	chip->data.start_drp_toggling = rt1711h_start_drp_toggling;
	chip->tcpci = tcpci_register_port(chip->dev, &chip->data);
	if (IS_ERR_OR_NULL(chip->tcpci))
		/*
		 * PTR_ERR(NULL) is 0, which would falsely report success
		 * with no registered port — map NULL to a real error.
		 */
		return chip->tcpci ? PTR_ERR(chip->tcpci) : -ENODEV;

	ret = devm_request_threaded_irq(chip->dev, client->irq, NULL,
					rt1711h_irq,
					IRQF_ONESHOT | IRQF_TRIGGER_LOW,
					dev_name(chip->dev), chip);
	if (ret < 0) {
		/*
		 * The port is not devm-managed and remove() never runs for a
		 * failed probe, so unregister it here (matches the cleanup
		 * done by e.g. mt6360_tcpc_probe()).
		 */
		tcpci_unregister_port(chip->tcpci);
		return ret;
	}
	enable_irq_wake(client->irq);
	return 0;
}
/* Teardown: unregister the TCPCI port; devm frees everything else. */
static void rt1711h_remove(struct i2c_client *client)
{
	struct rt1711h_chip *chip = i2c_get_clientdata(client);
	tcpci_unregister_port(chip->tcpci);
}
/* I2C ID table; the device ID itself is taken from the OF match data. */
static const struct i2c_device_id rt1711h_id[] = {
	{ "rt1711h", 0 },
	{ "rt1715", 0 },
	{ }
};
MODULE_DEVICE_TABLE(i2c, rt1711h_id);
#ifdef CONFIG_OF
static const struct of_device_id rt1711h_of_match[] = {
	/* .data carries the expected TCPC_BCD_DEV value for each variant */
	{ .compatible = "richtek,rt1711h", .data = (void *)RT1711H_DID },
	{ .compatible = "richtek,rt1715", .data = (void *)RT1715_DID },
	{},
};
MODULE_DEVICE_TABLE(of, rt1711h_of_match);
#endif
/* I2C driver registration glue. */
static struct i2c_driver rt1711h_i2c_driver = {
	.driver = {
		.name = "rt1711h",
		.of_match_table = of_match_ptr(rt1711h_of_match),
	},
	.probe = rt1711h_probe,
	.remove = rt1711h_remove,
	.id_table = rt1711h_id,
};
module_i2c_driver(rt1711h_i2c_driver);
MODULE_AUTHOR("ShuFan Lee <[email protected]>");
MODULE_DESCRIPTION("RT1711H USB Type-C Port Controller Interface Driver");
MODULE_LICENSE("GPL");
| linux-master | drivers/usb/typec/tcpm/tcpci_rt1711h.c |
// SPDX-License-Identifier: GPL-2.0
/*
* typec_wcove.c - WhiskeyCove PMIC USB Type-C PHY driver
*
* Copyright (C) 2017 Intel Corporation
* Author: Heikki Krogerus <[email protected]>
*/
#include <linux/acpi.h>
#include <linux/module.h>
#include <linux/usb/tcpm.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <linux/mfd/intel_soc_pmic.h>
/* Register offsets */
#define WCOVE_CHGRIRQ0 0x4e09
#define USBC_CONTROL1 0x7001
#define USBC_CONTROL2 0x7002
#define USBC_CONTROL3 0x7003
#define USBC_CC1_CTRL 0x7004
#define USBC_CC2_CTRL 0x7005
#define USBC_STATUS1 0x7007
#define USBC_STATUS2 0x7008
#define USBC_STATUS3 0x7009
#define USBC_CC1 0x700a
#define USBC_CC2 0x700b
#define USBC_CC1_STATUS 0x700c
#define USBC_CC2_STATUS 0x700d
#define USBC_IRQ1 0x7015
#define USBC_IRQ2 0x7016
#define USBC_IRQMASK1 0x7017
#define USBC_IRQMASK2 0x7018
#define USBC_PDCFG2 0x701a
#define USBC_PDCFG3 0x701b
#define USBC_PDSTATUS 0x701c
#define USBC_RXSTATUS 0x701d
#define USBC_RXINFO 0x701e
#define USBC_TXCMD 0x701f
#define USBC_TXINFO 0x7020
#define USBC_RX_DATA 0x7028
#define USBC_TX_DATA 0x7047
/* Register bits */
#define USBC_CONTROL1_MODE_MASK 0x3
#define USBC_CONTROL1_MODE_SNK 0
#define USBC_CONTROL1_MODE_SNKACC 1
#define USBC_CONTROL1_MODE_SRC 2
#define USBC_CONTROL1_MODE_SRCACC 3
#define USBC_CONTROL1_MODE_DRP 4
#define USBC_CONTROL1_MODE_DRPACC 5
#define USBC_CONTROL1_MODE_TEST 7
#define USBC_CONTROL1_CURSRC_MASK 0xc
#define USBC_CONTROL1_CURSRC_UA_0 (0 << 3)
#define USBC_CONTROL1_CURSRC_UA_80 (1 << 3)
#define USBC_CONTROL1_CURSRC_UA_180 (2 << 3)
#define USBC_CONTROL1_CURSRC_UA_330 (3 << 3)
#define USBC_CONTROL1_DRPTOGGLE_RANDOM 0xe0
#define USBC_CONTROL2_UNATT_SNK BIT(0)
#define USBC_CONTROL2_UNATT_SRC BIT(1)
#define USBC_CONTROL2_DIS_ST BIT(2)
#define USBC_CONTROL3_DET_DIS BIT(0)
#define USBC_CONTROL3_PD_DIS BIT(1)
#define USBC_CONTROL3_RESETPHY BIT(2)
#define USBC_CC_CTRL_PU_EN BIT(0)
#define USBC_CC_CTRL_VCONN_EN BIT(1)
#define USBC_CC_CTRL_TX_EN BIT(2)
#define USBC_CC_CTRL_PD_EN BIT(3)
#define USBC_CC_CTRL_CDET_EN BIT(4)
#define USBC_CC_CTRL_RDET_EN BIT(5)
#define USBC_CC_CTRL_ADC_EN BIT(6)
#define USBC_CC_CTRL_VBUSOK BIT(7)
#define USBC_STATUS1_DET_ONGOING BIT(6)
#define USBC_STATUS1_RSLT(r) ((r) & 0xf)
#define USBC_RSLT_NOTHING 0
#define USBC_RSLT_SRC_DEFAULT 1
#define USBC_RSLT_SRC_1_5A 2
#define USBC_RSLT_SRC_3_0A 3
#define USBC_RSLT_SNK 4
#define USBC_RSLT_DEBUG_ACC 5
#define USBC_RSLT_AUDIO_ACC 6
#define USBC_RSLT_UNDEF 15
#define USBC_STATUS1_ORIENT(r) (((r) >> 4) & 0x3)
#define USBC_ORIENT_NORMAL 1
#define USBC_ORIENT_REVERSE 2
#define USBC_STATUS2_VBUS_REQ BIT(5)
#define UCSC_CC_STATUS_SNK_RP BIT(0)
#define UCSC_CC_STATUS_PWRDEFSNK BIT(1)
#define UCSC_CC_STATUS_PWR_1P5A_SNK BIT(2)
#define UCSC_CC_STATUS_PWR_3A_SNK BIT(3)
#define UCSC_CC_STATUS_SRC_RP BIT(4)
#define UCSC_CC_STATUS_RX(r) (((r) >> 5) & 0x3)
#define USBC_CC_STATUS_RD 1
#define USBC_CC_STATUS_RA 2
#define USBC_IRQ1_ADCDONE1 BIT(2)
#define USBC_IRQ1_OVERTEMP BIT(1)
#define USBC_IRQ1_SHORT BIT(0)
#define USBC_IRQ2_CC_CHANGE BIT(7)
#define USBC_IRQ2_RX_PD BIT(6)
#define USBC_IRQ2_RX_HR BIT(5)
#define USBC_IRQ2_RX_CR BIT(4)
#define USBC_IRQ2_TX_SUCCESS BIT(3)
#define USBC_IRQ2_TX_FAIL BIT(2)
#define USBC_IRQMASK1_ALL (USBC_IRQ1_ADCDONE1 | USBC_IRQ1_OVERTEMP | \
USBC_IRQ1_SHORT)
#define USBC_IRQMASK2_ALL (USBC_IRQ2_CC_CHANGE | USBC_IRQ2_RX_PD | \
USBC_IRQ2_RX_HR | USBC_IRQ2_RX_CR | \
USBC_IRQ2_TX_SUCCESS | USBC_IRQ2_TX_FAIL)
#define USBC_PDCFG2_SOP BIT(0)
#define USBC_PDCFG2_SOP_P BIT(1)
#define USBC_PDCFG2_SOP_PP BIT(2)
#define USBC_PDCFG2_SOP_P_DEBUG BIT(3)
#define USBC_PDCFG2_SOP_PP_DEBUG BIT(4)
#define USBC_PDCFG3_DATAROLE_SHIFT 1
#define USBC_PDCFG3_SOP_SHIFT 2
#define USBC_RXSTATUS_RXCLEAR BIT(0)
#define USBC_RXSTATUS_RXDATA BIT(7)
#define USBC_RXINFO_RXBYTES(i) (((i) >> 3) & 0x1f)
#define USBC_TXCMD_BUF_RDY BIT(0)
#define USBC_TXCMD_START BIT(1)
#define USBC_TXCMD_NOP (0 << 5)
#define USBC_TXCMD_MSG (1 << 5)
#define USBC_TXCMD_CR (2 << 5)
#define USBC_TXCMD_HR (3 << 5)
#define USBC_TXCMD_BIST (4 << 5)
#define USBC_TXINFO_RETRIES(d) (d << 3)
/* Driver state for one WhiskeyCove PMIC Type-C PHY instance. */
struct wcove_typec {
	struct mutex lock; /* device lock */
	struct device *dev;		/* platform device */
	struct regmap *regmap;		/* PMIC register map (shared with MFD) */
	guid_t guid;			/* _DSM UUID for firmware mux calls */
	bool vbus;			/* last observed VBUSOK state */
	struct tcpc_dev tcpc;		/* callbacks handed to the tcpm core */
	struct tcpm_port *tcpm;		/* handle from tcpm_register_port() */
};
#define tcpc_to_wcove(_tcpc_) container_of(_tcpc_, struct wcove_typec, tcpc)
/* _DSM function indexes understood by the platform firmware. */
enum wcove_typec_func {
	WCOVE_FUNC_DRIVE_VBUS = 1,
	WCOVE_FUNC_ORIENTATION,
	WCOVE_FUNC_ROLE,
	WCOVE_FUNC_DRIVE_VCONN,
};
/* Argument values for WCOVE_FUNC_ORIENTATION. */
enum wcove_typec_orientation {
	WCOVE_ORIENTATION_NORMAL,
	WCOVE_ORIENTATION_REVERSE,
};
/* Argument values for WCOVE_FUNC_ROLE. */
enum wcove_typec_role {
	WCOVE_ROLE_HOST,
	WCOVE_ROLE_DEVICE,
};
#define WCOVE_DSM_UUID "482383f0-2876-4e49-8685-db66211af037"
/*
 * Invoke one of the PMIC's ACPI _DSM helpers with a single integer argument.
 * Used for operations routed through firmware (VBUS/VCONN drive, orientation
 * and role muxing).  Returns 0 on success or -EIO if evaluation fails.
 */
static int wcove_typec_func(struct wcove_typec *wcove,
			    enum wcove_typec_func func, int param)
{
	union acpi_object *obj;
	union acpi_object tmp;
	union acpi_object argv4 = ACPI_INIT_DSM_ARGV4(1, &tmp);
	tmp.type = ACPI_TYPE_INTEGER;
	tmp.integer.value = param;
	obj = acpi_evaluate_dsm(ACPI_HANDLE(wcove->dev), &wcove->guid, 1, func,
				&argv4);
	if (!obj) {
		dev_err(wcove->dev, "%s: failed to evaluate _DSM\n", __func__);
		return -EIO;
	}
	/* The return object is not inspected; only success/failure matters. */
	ACPI_FREE(obj);
	return 0;
}
/*
 * tcpm init hook: put CONTROL1 in its neutral state and unmask all
 * first- and second-level USB-C interrupts.
 */
static int wcove_init(struct tcpc_dev *tcpc)
{
	struct wcove_typec *wcove = tcpc_to_wcove(tcpc);
	int err;

	err = regmap_write(wcove->regmap, USBC_CONTROL1, 0);
	if (!err)
		/* Unmask everything */
		err = regmap_write(wcove->regmap, USBC_IRQMASK1, 0);
	if (!err)
		err = regmap_write(wcove->regmap, USBC_IRQMASK2, 0);

	return err;
}
static int wcove_get_vbus(struct tcpc_dev *tcpc)
{
struct wcove_typec *wcove = tcpc_to_wcove(tcpc);
unsigned int cc1ctrl;
int ret;
ret = regmap_read(wcove->regmap, USBC_CC1_CTRL, &cc1ctrl);
if (ret)
return ret;
wcove->vbus = !!(cc1ctrl & USBC_CC_CTRL_VBUSOK);
return wcove->vbus;
}
/* VBUS sourcing goes through firmware (_DSM); 'sink' is not used here. */
static int wcove_set_vbus(struct tcpc_dev *tcpc, bool on, bool sink)
{
	struct wcove_typec *wcove = tcpc_to_wcove(tcpc);
	return wcove_typec_func(wcove, WCOVE_FUNC_DRIVE_VBUS, on);
}
/* VCONN switching is also delegated to firmware through _DSM. */
static int wcove_set_vconn(struct tcpc_dev *tcpc, bool on)
{
	struct wcove_typec *wcove = tcpc_to_wcove(tcpc);
	return wcove_typec_func(wcove, WCOVE_FUNC_DRIVE_VCONN, on);
}
/*
 * Translate a raw USBC_CCx_STATUS value into the generic typec_cc_status.
 * When the sink-Rp flag is set the advertised current level is decoded;
 * otherwise the detected pull-down (Rd/Ra) field is used.  Anything not
 * recognized maps to TYPEC_CC_OPEN.
 */
static enum typec_cc_status wcove_to_typec_cc(unsigned int cc)
{
	if (cc & UCSC_CC_STATUS_SNK_RP) {
		if (cc & UCSC_CC_STATUS_PWRDEFSNK)
			return TYPEC_CC_RP_DEF;
		if (cc & UCSC_CC_STATUS_PWR_1P5A_SNK)
			return TYPEC_CC_RP_1_5;
		if (cc & UCSC_CC_STATUS_PWR_3A_SNK)
			return TYPEC_CC_RP_3_0;
		return TYPEC_CC_OPEN;
	}

	if (UCSC_CC_STATUS_RX(cc) == USBC_CC_STATUS_RD)
		return TYPEC_CC_RD;
	if (UCSC_CC_STATUS_RX(cc) == USBC_CC_STATUS_RA)
		return TYPEC_CC_RA;

	return TYPEC_CC_OPEN;
}
/* Read both CC pin status registers and decode them for the tcpm core. */
static int wcove_get_cc(struct tcpc_dev *tcpc, enum typec_cc_status *cc1,
			enum typec_cc_status *cc2)
{
	struct wcove_typec *wcove = tcpc_to_wcove(tcpc);
	unsigned int raw1, raw2;
	int err;

	err = regmap_read(wcove->regmap, USBC_CC1_STATUS, &raw1);
	if (!err)
		err = regmap_read(wcove->regmap, USBC_CC2_STATUS, &raw2);
	if (err)
		return err;

	*cc1 = wcove_to_typec_cc(raw1);
	*cc2 = wcove_to_typec_cc(raw2);
	return 0;
}
/*
 * Program CONTROL1 for the requested CC presentation: sink mode for Rd,
 * source mode with the matching current-source setting for the Rp levels,
 * or everything off for OPEN.
 */
static int wcove_set_cc(struct tcpc_dev *tcpc, enum typec_cc_status cc)
{
	struct wcove_typec *wcove = tcpc_to_wcove(tcpc);
	unsigned int ctrl;
	switch (cc) {
	case TYPEC_CC_RD:
		ctrl = USBC_CONTROL1_MODE_SNK;
		break;
	case TYPEC_CC_RP_DEF:
		ctrl = USBC_CONTROL1_CURSRC_UA_80 | USBC_CONTROL1_MODE_SRC;
		break;
	case TYPEC_CC_RP_1_5:
		ctrl = USBC_CONTROL1_CURSRC_UA_180 | USBC_CONTROL1_MODE_SRC;
		break;
	case TYPEC_CC_RP_3_0:
		ctrl = USBC_CONTROL1_CURSRC_UA_330 | USBC_CONTROL1_MODE_SRC;
		break;
	case TYPEC_CC_OPEN:
		ctrl = 0;
		break;
	default:
		return -EINVAL;
	}
	return regmap_write(wcove->regmap, USBC_CONTROL1, ctrl);
}
/* Cable orientation is muxed by firmware; forward the polarity via _DSM. */
static int wcove_set_polarity(struct tcpc_dev *tcpc, enum typec_cc_polarity pol)
{
	struct wcove_typec *wcove = tcpc_to_wcove(tcpc);
	return wcove_typec_func(wcove, WCOVE_FUNC_ORIENTATION, pol);
}
/* Intentional no-op: returning 0 satisfies the mandatory tcpm callback. */
static int wcove_set_current_limit(struct tcpc_dev *tcpc, u32 max_ma, u32 mv)
{
	return 0;
}
/*
 * Apply a new power/data role: first notify the firmware mux of the data
 * role (host/device), then program PDCFG3 with power role, data role and
 * the PD revision used for SOP messaging.
 */
static int wcove_set_roles(struct tcpc_dev *tcpc, bool attached,
			   enum typec_role role, enum typec_data_role data)
{
	struct wcove_typec *wcove = tcpc_to_wcove(tcpc);
	unsigned int val;
	int ret;
	ret = wcove_typec_func(wcove, WCOVE_FUNC_ROLE, data == TYPEC_HOST ?
			       WCOVE_ROLE_HOST : WCOVE_ROLE_DEVICE);
	if (ret)
		return ret;
	/* power role in bit 0, data role and spec revision in upper bits */
	val = role;
	val |= data << USBC_PDCFG3_DATAROLE_SHIFT;
	val |= PD_REV20 << USBC_PDCFG3_SOP_SHIFT;
	return regmap_write(wcove->regmap, USBC_PDCFG3, val);
}
/* Enable/disable PD message reception (SOP packets only when enabled). */
static int wcove_set_pd_rx(struct tcpc_dev *tcpc, bool on)
{
	struct wcove_typec *wcove = tcpc_to_wcove(tcpc);
	return regmap_write(wcove->regmap, USBC_PDCFG2,
			    on ? USBC_PDCFG2_SOP : 0);
}
/*
 * Queue a PD transmission: check the TX buffer is free, copy the message
 * payload (if any) byte-by-byte into the TX data window, then kick off the
 * transfer with the command matching the requested packet type.  Returns
 * -EBUSY while a previous transmission is still in flight.
 */
static int wcove_pd_transmit(struct tcpc_dev *tcpc,
			     enum tcpm_transmit_type type,
			     const struct pd_message *msg,
			     unsigned int negotiated_rev)
{
	struct wcove_typec *wcove = tcpc_to_wcove(tcpc);
	unsigned int info = 0;
	unsigned int cmd;
	int ret;
	ret = regmap_read(wcove->regmap, USBC_TXCMD, &cmd);
	if (ret)
		return ret;
	if (!(cmd & USBC_TXCMD_BUF_RDY)) {
		dev_warn(wcove->dev, "%s: Last transmission still ongoing!",
			 __func__);
		return -EBUSY;
	}
	if (msg) {
		const u8 *data = (void *)msg;
		int i;
		/* 2-byte header + 4 bytes per data object */
		for (i = 0; i < pd_header_cnt_le(msg->header) * 4 + 2; i++) {
			ret = regmap_write(wcove->regmap, USBC_TX_DATA + i,
					   data[i]);
			if (ret)
				return ret;
		}
	}
	switch (type) {
	case TCPC_TX_SOP:
	case TCPC_TX_SOP_PRIME:
	case TCPC_TX_SOP_PRIME_PRIME:
	case TCPC_TX_SOP_DEBUG_PRIME:
	case TCPC_TX_SOP_DEBUG_PRIME_PRIME:
		/* NOTE(review): SOP* variant appears to be encoded as
		 * enum value + 1 in TXINFO — confirm against PMIC docs. */
		info = type + 1;
		cmd = USBC_TXCMD_MSG;
		break;
	case TCPC_TX_HARD_RESET:
		cmd = USBC_TXCMD_HR;
		break;
	case TCPC_TX_CABLE_RESET:
		cmd = USBC_TXCMD_CR;
		break;
	case TCPC_TX_BIST_MODE_2:
		cmd = USBC_TXCMD_BIST;
		break;
	default:
		return -EINVAL;
	}
	/* NOTE Setting maximum number of retries (7) */
	ret = regmap_write(wcove->regmap, USBC_TXINFO,
			   info | USBC_TXINFO_RETRIES(7));
	if (ret)
		return ret;
	return regmap_write(wcove->regmap, USBC_TXCMD, cmd | USBC_TXCMD_START);
}
/*
 * Start hardware DRP toggling (only DRP ports are supported) with the
 * current-source setting that matches the requested Rp; anything else
 * falls back to the default 80uA level.
 */
static int wcove_start_toggling(struct tcpc_dev *tcpc,
				enum typec_port_type port_type,
				enum typec_cc_status cc)
{
	struct wcove_typec *wcove = tcpc_to_wcove(tcpc);
	unsigned int ctrl;

	if (port_type != TYPEC_PORT_DRP)
		return -EOPNOTSUPP;

	ctrl = USBC_CONTROL1_MODE_DRP | USBC_CONTROL1_DRPTOGGLE_RANDOM;

	if (cc == TYPEC_CC_RP_1_5)
		ctrl |= USBC_CONTROL1_CURSRC_UA_180;
	else if (cc == TYPEC_CC_RP_3_0)
		ctrl |= USBC_CONTROL1_CURSRC_UA_330;
	else
		ctrl |= USBC_CONTROL1_CURSRC_UA_80;

	return regmap_write(wcove->regmap, USBC_CONTROL1, ctrl);
}
/*
 * Copy one received PD message out of the RX data window into 'msg'
 * (length taken from RXINFO) and then clear the RX status so the next
 * message can land.
 */
static int wcove_read_rx_buffer(struct wcove_typec *wcove, void *msg)
{
	unsigned int info;
	int ret;
	int i;
	ret = regmap_read(wcove->regmap, USBC_RXINFO, &info);
	if (ret)
		return ret;
	/* FIXME: Check that USBC_RXINFO_RXBYTES(info) matches the header */
	for (i = 0; i < USBC_RXINFO_RXBYTES(info); i++) {
		ret = regmap_read(wcove->regmap, USBC_RX_DATA + i, msg + i);
		if (ret)
			return ret;
	}
	return regmap_write(wcove->regmap, USBC_RXSTATUS,
			    USBC_RXSTATUS_RXCLEAR);
}
/*
 * Threaded PMIC interrupt handler.  Sequence: read both USB-C IRQ status
 * registers and the VBUS bit, dispatch events to the tcpm core, then
 * (even on error) write back the latched bits to acknowledge them and
 * clear the WhiskeyCove charger-level Type-C interrupt.
 */
static irqreturn_t wcove_typec_irq(int irq, void *data)
{
	struct wcove_typec *wcove = data;
	unsigned int usbc_irq1 = 0;
	unsigned int usbc_irq2 = 0;
	unsigned int cc1ctrl;
	int ret;
	mutex_lock(&wcove->lock);
	/* Read.. */
	ret = regmap_read(wcove->regmap, USBC_IRQ1, &usbc_irq1);
	if (ret)
		goto err;
	ret = regmap_read(wcove->regmap, USBC_IRQ2, &usbc_irq2);
	if (ret)
		goto err;
	ret = regmap_read(wcove->regmap, USBC_CC1_CTRL, &cc1ctrl);
	if (ret)
		goto err;
	/* Port not registered yet: skip dispatch but still ack the bits. */
	if (!wcove->tcpm)
		goto err;
	/* ..check.. */
	if (usbc_irq1 & USBC_IRQ1_OVERTEMP) {
		dev_err(wcove->dev, "VCONN Switch Over Temperature!\n");
		wcove_typec_func(wcove, WCOVE_FUNC_DRIVE_VCONN, false);
		/* REVISIT: Report an error? */
	}
	if (usbc_irq1 & USBC_IRQ1_SHORT) {
		dev_err(wcove->dev, "VCONN Switch Short Circuit!\n");
		wcove_typec_func(wcove, WCOVE_FUNC_DRIVE_VCONN, false);
		/* REVISIT: Report an error? */
	}
	/* Compare against the cached VBUSOK state from wcove_get_vbus(). */
	if (wcove->vbus != !!(cc1ctrl & USBC_CC_CTRL_VBUSOK))
		tcpm_vbus_change(wcove->tcpm);
	/* REVISIT: See if tcpm code can be made to consider Type-C HW FSMs */
	if (usbc_irq2 & USBC_IRQ2_CC_CHANGE)
		tcpm_cc_change(wcove->tcpm);
	if (usbc_irq2 & USBC_IRQ2_RX_PD) {
		unsigned int status;
		/*
		 * FIXME: Need to check if TX is ongoing and report
		 * TX_DIREGARDED if needed?
		 */
		ret = regmap_read(wcove->regmap, USBC_RXSTATUS, &status);
		if (ret)
			goto err;
		/* Flush all buffers */
		while (status & USBC_RXSTATUS_RXDATA) {
			struct pd_message msg;
			ret = wcove_read_rx_buffer(wcove, &msg);
			if (ret) {
				dev_err(wcove->dev, "%s: RX read failed\n",
					__func__);
				goto err;
			}
			tcpm_pd_receive(wcove->tcpm, &msg);
			ret = regmap_read(wcove->regmap, USBC_RXSTATUS,
					  &status);
			if (ret)
				goto err;
		}
	}
	if (usbc_irq2 & USBC_IRQ2_RX_HR)
		tcpm_pd_hard_reset(wcove->tcpm);
	/* REVISIT: if (usbc_irq2 & USBC_IRQ2_RX_CR) */
	if (usbc_irq2 & USBC_IRQ2_TX_SUCCESS)
		tcpm_pd_transmit_complete(wcove->tcpm, TCPC_TX_SUCCESS);
	if (usbc_irq2 & USBC_IRQ2_TX_FAIL)
		tcpm_pd_transmit_complete(wcove->tcpm, TCPC_TX_FAILED);
err:
	/* ..and clear. */
	if (usbc_irq1) {
		ret = regmap_write(wcove->regmap, USBC_IRQ1, usbc_irq1);
		if (ret)
			dev_WARN(wcove->dev, "%s failed to clear IRQ1\n",
				 __func__);
	}
	if (usbc_irq2) {
		ret = regmap_write(wcove->regmap, USBC_IRQ2, usbc_irq2);
		if (ret)
			dev_WARN(wcove->dev, "%s failed to clear IRQ2\n",
				 __func__);
	}
	/* REVISIT: Clear WhiskeyCove CHGR Type-C interrupt */
	regmap_write(wcove->regmap, WCOVE_CHGRIRQ0, BIT(5));
	mutex_unlock(&wcove->lock);
	return IRQ_HANDLED;
}
/*
* The following power levels should be safe to use with Joule board.
*/
/* Source capability: fixed 5V/1.5A, dual-role power and data. */
static const u32 src_pdo[] = {
	PDO_FIXED(5000, 1500, PDO_FIXED_DUAL_ROLE | PDO_FIXED_DATA_SWAP |
		  PDO_FIXED_USB_COMM),
};
/* Sink capabilities: fixed 5V/500mA plus a 5-12V/3A variable PDO. */
static const u32 snk_pdo[] = {
	PDO_FIXED(5000, 500, PDO_FIXED_DUAL_ROLE | PDO_FIXED_DATA_SWAP |
		  PDO_FIXED_USB_COMM),
	PDO_VAR(5000, 12000, 3000),
};
/* Port properties consumed by tcpm via the software fwnode in probe(). */
static const struct property_entry wcove_props[] = {
	PROPERTY_ENTRY_STRING("data-role", "dual"),
	PROPERTY_ENTRY_STRING("power-role", "dual"),
	PROPERTY_ENTRY_STRING("try-power-role", "sink"),
	PROPERTY_ENTRY_U32_ARRAY("source-pdos", src_pdo),
	PROPERTY_ENTRY_U32_ARRAY("sink-pdos", snk_pdo),
	PROPERTY_ENTRY_U32("op-sink-microwatt", 15000000),
	{ }
};
/*
 * Probe: borrow the parent PMIC's regmap and charger IRQ domain, verify the
 * firmware _DSM interface, build the port description from a software
 * fwnode and register the tcpm port before installing the IRQ handler.
 */
static int wcove_typec_probe(struct platform_device *pdev)
{
	struct intel_soc_pmic *pmic = dev_get_drvdata(pdev->dev.parent);
	struct wcove_typec *wcove;
	int irq;
	int ret;
	wcove = devm_kzalloc(&pdev->dev, sizeof(*wcove), GFP_KERNEL);
	if (!wcove)
		return -ENOMEM;
	mutex_init(&wcove->lock);
	wcove->dev = &pdev->dev;
	wcove->regmap = pmic->regmap;
	/* Map the platform IRQ through the charger regmap-IRQ domain. */
	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;
	irq = regmap_irq_get_virq(pmic->irq_chip_data_chgr, irq);
	if (irq < 0)
		return irq;
	ret = guid_parse(WCOVE_DSM_UUID, &wcove->guid);
	if (ret)
		return ret;
	/* All _DSM functions 1..4 must be implemented by the firmware. */
	if (!acpi_check_dsm(ACPI_HANDLE(&pdev->dev), &wcove->guid, 0, 0x1f)) {
		dev_err(&pdev->dev, "Missing _DSM functions\n");
		return -ENODEV;
	}
	wcove->tcpc.init = wcove_init;
	wcove->tcpc.get_vbus = wcove_get_vbus;
	wcove->tcpc.set_vbus = wcove_set_vbus;
	wcove->tcpc.set_cc = wcove_set_cc;
	wcove->tcpc.get_cc = wcove_get_cc;
	wcove->tcpc.set_polarity = wcove_set_polarity;
	wcove->tcpc.set_vconn = wcove_set_vconn;
	wcove->tcpc.set_current_limit = wcove_set_current_limit;
	wcove->tcpc.start_toggling = wcove_start_toggling;
	wcove->tcpc.set_pd_rx = wcove_set_pd_rx;
	wcove->tcpc.set_roles = wcove_set_roles;
	wcove->tcpc.pd_transmit = wcove_pd_transmit;
	wcove->tcpc.fwnode = fwnode_create_software_node(wcove_props, NULL);
	if (IS_ERR(wcove->tcpc.fwnode))
		return PTR_ERR(wcove->tcpc.fwnode);
	wcove->tcpm = tcpm_register_port(wcove->dev, &wcove->tcpc);
	if (IS_ERR(wcove->tcpm)) {
		fwnode_remove_software_node(wcove->tcpc.fwnode);
		return PTR_ERR(wcove->tcpm);
	}
	ret = devm_request_threaded_irq(&pdev->dev, irq, NULL,
					wcove_typec_irq, IRQF_ONESHOT,
					"wcove_typec", wcove);
	if (ret) {
		/* Non-devm resources must be unwound manually. */
		tcpm_unregister_port(wcove->tcpm);
		fwnode_remove_software_node(wcove->tcpc.fwnode);
		return ret;
	}
	platform_set_drvdata(pdev, wcove);
	return 0;
}
/* Teardown: mask all USB-C interrupts, then unregister port and fwnode. */
static void wcove_typec_remove(struct platform_device *pdev)
{
	struct wcove_typec *wcove = platform_get_drvdata(pdev);
	unsigned int val;
	/* Mask everything */
	regmap_read(wcove->regmap, USBC_IRQMASK1, &val);
	regmap_write(wcove->regmap, USBC_IRQMASK1, val | USBC_IRQMASK1_ALL);
	regmap_read(wcove->regmap, USBC_IRQMASK2, &val);
	regmap_write(wcove->regmap, USBC_IRQMASK2, val | USBC_IRQMASK2_ALL);
	tcpm_unregister_port(wcove->tcpm);
	fwnode_remove_software_node(wcove->tcpc.fwnode);
}
/* Platform driver registration glue. */
static struct platform_driver wcove_typec_driver = {
	.driver = {
		.name = "bxt_wcove_usbc",
	},
	.probe = wcove_typec_probe,
	.remove_new = wcove_typec_remove,
};
module_platform_driver(wcove_typec_driver);
MODULE_AUTHOR("Intel Corporation");
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("WhiskeyCove PMIC USB Type-C PHY driver");
MODULE_ALIAS("platform:bxt_wcove_usbc");
| linux-master | drivers/usb/typec/tcpm/wcove.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2020 MediaTek Inc.
*
* Author: ChiYuan Huang <[email protected]>
*/
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/usb/tcpci.h>
#include <linux/usb/tcpm.h>
#define MT6360_REG_PHYCTRL1 0x80
#define MT6360_REG_PHYCTRL3 0x82
#define MT6360_REG_PHYCTRL7 0x86
#define MT6360_REG_VCONNCTRL1 0x8C
#define MT6360_REG_MODECTRL2 0x8F
#define MT6360_REG_SWRESET 0xA0
#define MT6360_REG_DEBCTRL1 0xA1
#define MT6360_REG_DRPCTRL1 0xA2
#define MT6360_REG_DRPCTRL2 0xA3
#define MT6360_REG_I2CTORST 0xBF
#define MT6360_REG_PHYCTRL11 0xCA
#define MT6360_REG_RXCTRL1 0xCE
#define MT6360_REG_RXCTRL2 0xCF
#define MT6360_REG_CTDCTRL2 0xEC
/* MT6360_REG_VCONNCTRL1 */
#define MT6360_VCONNCL_ENABLE BIT(0)
/* MT6360_REG_RXCTRL2 */
#define MT6360_OPEN40M_ENABLE BIT(7)
/* MT6360_REG_CTDCTRL2 */
#define MT6360_RPONESHOT_ENABLE BIT(6)
/* Per-device driver state for the MT6360 TCPC function of the PMIC. */
struct mt6360_tcpc_info {
	struct tcpci_data tdata;	/* regmap + init hook for tcpci core */
	struct tcpci *tcpci;		/* handle from tcpci_register_port() */
	struct device *dev;		/* platform device */
	int irq;			/* PD_IRQB interrupt, also wake source */
};
/* Write a 16-bit register through the raw (byte-stream) regmap interface. */
static inline int mt6360_tcpc_write16(struct regmap *regmap,
				      unsigned int reg, u16 val)
{
	u16 tmp = val;

	return regmap_raw_write(regmap, reg, &tmp, sizeof(tmp));
}
/*
 * Vendor init hook: software-reset the TCPC, mask all alerts, then apply
 * the vendor-recommended timing, protection and BMC PHY settings.  Returns
 * 0 on success or the first failing register access's error.  The write
 * order follows the vendor bring-up sequence.
 */
static int mt6360_tcpc_init(struct tcpci *tcpci, struct tcpci_data *tdata)
{
	struct regmap *regmap = tdata->regmap;
	int ret;
	ret = regmap_write(regmap, MT6360_REG_SWRESET, 0x01);
	if (ret)
		return ret;
	/* after reset command, wait 1~2ms to wait IC action */
	usleep_range(1000, 2000);
	/* write all alert to masked */
	ret = mt6360_tcpc_write16(regmap, TCPC_ALERT_MASK, 0);
	if (ret)
		return ret;
	/* config I2C timeout reset enable , and timeout to 200ms */
	ret = regmap_write(regmap, MT6360_REG_I2CTORST, 0x8F);
	if (ret)
		return ret;
	/* config CC Detect Debounce : 26.7*val us */
	ret = regmap_write(regmap, MT6360_REG_DEBCTRL1, 0x10);
	if (ret)
		return ret;
	/* DRP Toggle Cycle : 51.2 + 6.4*val ms */
	ret = regmap_write(regmap, MT6360_REG_DRPCTRL1, 4);
	if (ret)
		return ret;
	/* DRP Duyt Ctrl : dcSRC: /1024 */
	ret = mt6360_tcpc_write16(regmap, MT6360_REG_DRPCTRL2, 330);
	if (ret)
		return ret;
	/* Enable VCONN Current Limit function */
	ret = regmap_update_bits(regmap, MT6360_REG_VCONNCTRL1, MT6360_VCONNCL_ENABLE,
				 MT6360_VCONNCL_ENABLE);
	if (ret)
		return ret;
	/* Enable cc open 40ms when pmic send vsysuv signal */
	ret = regmap_update_bits(regmap, MT6360_REG_RXCTRL2, MT6360_OPEN40M_ENABLE,
				 MT6360_OPEN40M_ENABLE);
	if (ret)
		return ret;
	/* Enable Rpdet oneshot detection */
	ret = regmap_update_bits(regmap, MT6360_REG_CTDCTRL2, MT6360_RPONESHOT_ENABLE,
				 MT6360_RPONESHOT_ENABLE);
	if (ret)
		return ret;
	/* BMC PHY */
	ret = mt6360_tcpc_write16(regmap, MT6360_REG_PHYCTRL1, 0x3A70);
	if (ret)
		return ret;
	ret = regmap_write(regmap, MT6360_REG_PHYCTRL3, 0x82);
	if (ret)
		return ret;
	ret = regmap_write(regmap, MT6360_REG_PHYCTRL7, 0x36);
	if (ret)
		return ret;
	ret = mt6360_tcpc_write16(regmap, MT6360_REG_PHYCTRL11, 0x3C60);
	if (ret)
		return ret;
	ret = regmap_write(regmap, MT6360_REG_RXCTRL1, 0xE8);
	if (ret)
		return ret;
	/* Set shipping mode off, AUTOIDLE on */
	return regmap_write(regmap, MT6360_REG_MODECTRL2, 0x7A);
}
/* PD_IRQB thread handler: all alert processing is done by the tcpci core. */
static irqreturn_t mt6360_irq(int irq, void *dev_id)
{
	struct mt6360_tcpc_info *mti = dev_id;
	return tcpci_irq(mti->tcpci);
}
/*
 * Probe: reuse the parent MFD's regmap, register the TCPCI port with the
 * vendor init hook, install the PD_IRQB handler and enable wakeup support.
 */
static int mt6360_tcpc_probe(struct platform_device *pdev)
{
	struct mt6360_tcpc_info *mti;
	int ret;
	mti = devm_kzalloc(&pdev->dev, sizeof(*mti), GFP_KERNEL);
	if (!mti)
		return -ENOMEM;
	mti->dev = &pdev->dev;
	/* Register access goes through the parent PMIC's regmap. */
	mti->tdata.regmap = dev_get_regmap(pdev->dev.parent, NULL);
	if (!mti->tdata.regmap) {
		dev_err(&pdev->dev, "Failed to get parent regmap\n");
		return -ENODEV;
	}
	mti->irq = platform_get_irq_byname(pdev, "PD_IRQB");
	if (mti->irq < 0)
		return mti->irq;
	mti->tdata.init = mt6360_tcpc_init;
	mti->tcpci = tcpci_register_port(&pdev->dev, &mti->tdata);
	if (IS_ERR(mti->tcpci)) {
		dev_err(&pdev->dev, "Failed to register tcpci port\n");
		return PTR_ERR(mti->tcpci);
	}
	ret = devm_request_threaded_irq(mti->dev, mti->irq, NULL, mt6360_irq, IRQF_ONESHOT,
					dev_name(&pdev->dev), mti);
	if (ret) {
		dev_err(mti->dev, "Failed to register irq\n");
		/* The port is not devm-managed; unwind it manually. */
		tcpci_unregister_port(mti->tcpci);
		return ret;
	}
	device_init_wakeup(&pdev->dev, true);
	platform_set_drvdata(pdev, mti);
	return 0;
}
/* Teardown: quiesce the interrupt first, then unregister the port. */
static void mt6360_tcpc_remove(struct platform_device *pdev)
{
	struct mt6360_tcpc_info *mti = platform_get_drvdata(pdev);
	disable_irq(mti->irq);
	tcpci_unregister_port(mti->tcpci);
}
/* System suspend: keep the PD IRQ armed as a wake source when allowed. */
static int __maybe_unused mt6360_tcpc_suspend(struct device *dev)
{
	struct mt6360_tcpc_info *info = dev_get_drvdata(dev);

	if (device_may_wakeup(dev))
		enable_irq_wake(info->irq);

	return 0;
}
/* System resume: balance the enable_irq_wake() done at suspend time. */
static int __maybe_unused mt6360_tcpc_resume(struct device *dev)
{
	struct mt6360_tcpc_info *info = dev_get_drvdata(dev);

	if (device_may_wakeup(dev))
		disable_irq_wake(info->irq);

	return 0;
}
static SIMPLE_DEV_PM_OPS(mt6360_tcpc_pm_ops, mt6360_tcpc_suspend, mt6360_tcpc_resume);
/* Device-tree match table. */
static const struct of_device_id __maybe_unused mt6360_tcpc_of_id[] = {
	{ .compatible = "mediatek,mt6360-tcpc", },
	{},
};
MODULE_DEVICE_TABLE(of, mt6360_tcpc_of_id);
/* Platform driver registration glue. */
static struct platform_driver mt6360_tcpc_driver = {
	.driver = {
		.name = "mt6360-tcpc",
		.pm = &mt6360_tcpc_pm_ops,
		.of_match_table = mt6360_tcpc_of_id,
	},
	.probe = mt6360_tcpc_probe,
	.remove_new = mt6360_tcpc_remove,
};
module_platform_driver(mt6360_tcpc_driver);
MODULE_AUTHOR("ChiYuan Huang <[email protected]>");
MODULE_DESCRIPTION("MT6360 USB Type-C Port Controller Interface Driver");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/usb/typec/tcpm/tcpci_mt6360.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2022 Richtek Technology Corp.
*
* Author: ChiYuan Huang <[email protected]>
*/
#include <linux/bits.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/pm_wakeup.h>
#include <linux/pm_wakeirq.h>
#include <linux/regmap.h>
#include <linux/regulator/consumer.h>
#include <linux/usb/tcpci.h>
#include <linux/usb/tcpm.h>
/* Vendor register holding the I2C auto-idle enable bit. */
#define MT6370_REG_SYSCTRL8	0x9B
#define MT6370_AUTOIDLE_MASK	BIT(3)

/* Expected TCPC_VENDOR_ID value for the MT6370. */
#define MT6370_VENDOR_ID	0x29CF
/* BCD device ID of the A revision; needs an extra TCPC_FAULT_CTRL write
 * at init time (see mt6370_tcpc_init()).
 */
#define MT6370_TCPC_DID_A	0x2170

/* Per-device driver state. */
struct mt6370_priv {
	struct device *dev;		/* this platform device */
	struct regulator *vbus;		/* optional "vbus" supply, may be ERR */
	struct tcpci *tcpci;		/* handle from tcpci_register_port() */
	struct tcpci_data tcpci_data;	/* regmap + chip-specific hooks */
};
/*
 * Vendor initialization sequence, applied once at port init through
 * regmap_register_patch().  Entries are REG_SEQ(reg, value, delay_us).
 * Register semantics are undocumented here; values are presumed to be
 * vendor-recommended defaults -- do not reorder without confirming
 * against the datasheet.
 */
static const struct reg_sequence mt6370_reg_init[] = {
	REG_SEQ(0xA0, 0x1, 1000),
	REG_SEQ(0x81, 0x38, 0),
	REG_SEQ(0x82, 0x82, 0),
	REG_SEQ(0xBA, 0xFC, 0),
	REG_SEQ(0xBB, 0x50, 0),
	REG_SEQ(0x9E, 0x8F, 0),
	REG_SEQ(0xA1, 0x5, 0),
	REG_SEQ(0xA2, 0x4, 0),
	REG_SEQ(0xA3, 0x4A, 0),
	REG_SEQ(0xA4, 0x01, 0),
	REG_SEQ(0x95, 0x01, 0),
	REG_SEQ(0x80, 0x71, 0),
	REG_SEQ(0x9B, 0x3A, 1000),
};
/*
 * tcpci init hook: apply the vendor register patch and, on the
 * A-revision die (BCD device ID == MT6370_TCPC_DID_A), additionally
 * write 0x80 to TCPC_FAULT_CTRL.  Returns 0 or a negative error code.
 */
static int mt6370_tcpc_init(struct tcpci *tcpci, struct tcpci_data *data)
{
	u16 did;
	int ret;

	ret = regmap_register_patch(data->regmap, mt6370_reg_init,
				    ARRAY_SIZE(mt6370_reg_init));
	if (ret)
		return ret;

	/* Raw 16-bit read into a u16; assumes little-endian register
	 * byte order matches the CPU -- TODO confirm on BE platforms.
	 */
	ret = regmap_raw_read(data->regmap, TCPC_BCD_DEV, &did, sizeof(u16));
	if (ret)
		return ret;

	if (did == MT6370_TCPC_DID_A)
		return regmap_write(data->regmap, TCPC_FAULT_CTRL, 0x80);

	return 0;
}
/*
 * tcpci set_vconn hook: hold off the chip's auto-idle mode while VCONN
 * is being sourced, restore it when VCONN is turned off.
 */
static int mt6370_tcpc_set_vconn(struct tcpci *tcpci, struct tcpci_data *data,
				 bool enable)
{
	unsigned int autoidle;

	if (enable)
		autoidle = 0;
	else
		autoidle = MT6370_AUTOIDLE_MASK;

	return regmap_update_bits(data->regmap, MT6370_REG_SYSCTRL8,
				  MT6370_AUTOIDLE_MASK, autoidle);
}
/*
 * tcpci set_vbus hook: toggle the VBUS regulator to match the requested
 * source state, skipping the call when it is already in that state.
 */
static int mt6370_tcpc_set_vbus(struct tcpci *tcpci, struct tcpci_data *data,
				bool source, bool sink)
{
	struct mt6370_priv *priv = container_of(data, struct mt6370_priv,
						tcpci_data);
	int enabled;

	enabled = regulator_is_enabled(priv->vbus);
	if (enabled < 0)
		return enabled;

	if (source) {
		if (!enabled)
			return regulator_enable(priv->vbus);
	} else {
		if (enabled)
			return regulator_disable(priv->vbus);
	}

	return 0;
}
/* Threaded IRQ handler: all work is delegated to the tcpci core. */
static irqreturn_t mt6370_irq_handler(int irq, void *dev_id)
{
	struct mt6370_priv *data = dev_id;

	return tcpci_irq(data->tcpci);
}
/*
 * Verify the chip behind the parent regmap really is an MT6370.
 *
 * Returns 0 on a matching vendor ID, -ENODEV (via dev_err_probe()) on a
 * mismatch, or a negative regmap error code if the read fails.
 */
static int mt6370_check_vendor_info(struct mt6370_priv *priv)
{
	struct regmap *regmap = priv->tcpci_data.regmap;
	u16 vid;
	int ret;

	ret = regmap_raw_read(regmap, TCPC_VENDOR_ID, &vid, sizeof(u16));
	if (ret)
		return ret;

	if (vid != MT6370_VENDOR_ID)
		/* The vendor ID is 16 bits wide: print all four nibbles. */
		return dev_err_probe(priv->dev, -ENODEV,
				     "Vendor ID not correct 0x%04x\n", vid);

	return 0;
}
/* devm_add_action_or_reset() callback: tear down the tcpci port. */
static void mt6370_unregister_tcpci_port(void *tcpci)
{
	tcpci_unregister_port(tcpci);
}
/*
 * Probe: reuse the parent MFD's regmap, verify the chip identity, then
 * register a tcpci port and wire the IRQ up as a wake source.  Port
 * teardown is devm-managed; only the wakeup bookkeeping needs explicit
 * cleanup (see mt6370_tcpc_remove()).
 */
static int mt6370_tcpc_probe(struct platform_device *pdev)
{
	struct mt6370_priv *priv;
	struct device *dev = &pdev->dev;
	int irq, ret;

	priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	priv->dev = dev;

	/* The register map is owned by the parent MFD device. */
	priv->tcpci_data.regmap = dev_get_regmap(dev->parent, NULL);
	if (!priv->tcpci_data.regmap)
		return dev_err_probe(dev, -ENODEV, "Failed to init regmap\n");

	ret = mt6370_check_vendor_info(priv);
	if (ret)
		return ret;

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;

	/* Assign TCPCI feature and ops */
	priv->tcpci_data.auto_discharge_disconnect = 1;
	priv->tcpci_data.init = mt6370_tcpc_init;
	priv->tcpci_data.set_vconn = mt6370_tcpc_set_vconn;

	/* VBUS control is only offered when a "vbus" supply is present. */
	priv->vbus = devm_regulator_get_optional(dev, "vbus");
	if (!IS_ERR(priv->vbus))
		priv->tcpci_data.set_vbus = mt6370_tcpc_set_vbus;

	priv->tcpci = tcpci_register_port(dev, &priv->tcpci_data);
	if (IS_ERR(priv->tcpci))
		return dev_err_probe(dev, PTR_ERR(priv->tcpci),
				     "Failed to register tcpci port\n");

	/* Unregister the port automatically on probe failure or unbind. */
	ret = devm_add_action_or_reset(dev, mt6370_unregister_tcpci_port, priv->tcpci);
	if (ret)
		return ret;

	ret = devm_request_threaded_irq(dev, irq, NULL, mt6370_irq_handler,
					IRQF_ONESHOT, dev_name(dev), priv);
	if (ret)
		return dev_err_probe(dev, ret, "Failed to allocate irq\n");

	device_init_wakeup(dev, true);
	dev_pm_set_wake_irq(dev, irq);

	return 0;
}
/*
 * Unbind: the tcpci port and IRQ are released by devm; only the wakeup
 * configuration set up at the end of probe is undone here.
 */
static void mt6370_tcpc_remove(struct platform_device *pdev)
{
	dev_pm_clear_wake_irq(&pdev->dev);
	device_init_wakeup(&pdev->dev, false);
}
/* Device-tree match table. */
static const struct of_device_id mt6370_tcpc_devid_table[] = {
	{ .compatible = "mediatek,mt6370-tcpc" },
	{}
};
MODULE_DEVICE_TABLE(of, mt6370_tcpc_devid_table);

static struct platform_driver mt6370_tcpc_driver = {
	.driver = {
		.name = "mt6370-tcpc",
		.of_match_table = mt6370_tcpc_devid_table,
	},
	.probe = mt6370_tcpc_probe,
	/* .remove_new: void-returning remove callback variant */
	.remove_new = mt6370_tcpc_remove,
};
module_platform_driver(mt6370_tcpc_driver);

MODULE_AUTHOR("ChiYuan Huang <[email protected]>")
MODULE_DESCRIPTION("MT6370 USB Type-C Port Controller Interface Driver");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/usb/typec/tcpm/tcpci_mt6370.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* Copyright (C) 2020 - 2022, Google LLC
*
* MAXIM TCPCI based TCPC driver
*/
#include <linux/interrupt.h>
#include <linux/i2c.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/regmap.h>
#include <linux/usb/pd.h>
#include <linux/usb/tcpci.h>
#include <linux/usb/tcpm.h>
#include <linux/usb/typec.h>
#include "tcpci_maxim.h"
/* Keep the system awake this long (ms) after PD activity is seen. */
#define PD_ACTIVITY_TIMEOUT_MS				10000

/* Maxim vendor registers (outside the standard TCPCI register map). */
#define TCPC_VENDOR_ALERT				0x80
#define TCPC_VENDOR_USBSW_CTRL				0x93
#define TCPC_VENDOR_USBSW_CTRL_ENABLE_USB_DATA		0x9
#define TCPC_VENDOR_USBSW_CTRL_DISABLE_USB_DATA		0

/* Byte layout of RECEIVE_BUFFER: count, frame type, then payload. */
#define TCPC_RECEIVE_BUFFER_COUNT_OFFSET		0
#define TCPC_RECEIVE_BUFFER_FRAME_TYPE_OFFSET		1
#define TCPC_RECEIVE_BUFFER_RX_BYTE_BUF_OFFSET		2

/*
 * LongMessage not supported, hence 32 bytes for buf to be read from RECEIVE_BUFFER.
 * DEVICE_CAPABILITIES_2.LongMessage = 0, the value in READABLE_BYTE_COUNT reg shall be
 * less than or equal to 31. Since, RECEIVE_BUFFER len = 31 + 1(READABLE_BYTE_COUNT).
 */
#define TCPC_RECEIVE_BUFFER_LEN				32

/* Companion buck/boost converter: I2C address (used as the i2c_msg
 * address in max_tcpci_set_vbus()), command opcode and mode values.
 */
#define MAX_BUCK_BOOST_SID				0x69
#define MAX_BUCK_BOOST_OP				0xb9
#define MAX_BUCK_BOOST_OFF				0
#define MAX_BUCK_BOOST_SOURCE				0xa
#define MAX_BUCK_BOOST_SINK				0x5

/* Whole register space 0x00-0x95 is writable. */
static const struct regmap_range max_tcpci_tcpci_range[] = {
	regmap_reg_range(0x00, 0x95)
};

static const struct regmap_access_table max_tcpci_tcpci_write_table = {
	.yes_ranges = max_tcpci_tcpci_range,
	.n_yes_ranges = ARRAY_SIZE(max_tcpci_tcpci_range),
};

static const struct regmap_config max_tcpci_regmap_config = {
	.reg_bits = 8,
	.val_bits = 8,
	.max_register = 0x95,
	.wr_table = &max_tcpci_tcpci_write_table,
};
/* Map the tcpci_data embedded in max_tcpci_chip back to the chip. */
static struct max_tcpci_chip *tdata_to_max_tcpci(struct tcpci_data *tdata)
{
	return container_of(tdata, struct max_tcpci_chip, data);
}
/*
 * (Re)program the TCPC after power-up, reset, or toggling restart:
 * clear all pending alerts, enable VSAFE0V detection, unmask the alert
 * sources this driver handles, and unmask the FRS extended alert.
 * Each failure is logged and aborts the remaining writes.
 */
static void max_tcpci_init_regs(struct max_tcpci_chip *chip)
{
	u16 alert_mask = 0;
	int ret;

	/* Clear any stale alerts before unmasking. */
	ret = max_tcpci_write16(chip, TCPC_ALERT, 0xffff);
	if (ret < 0) {
		dev_err(chip->dev, "Error writing to TCPC_ALERT ret:%d\n", ret);
		return;
	}

	ret = max_tcpci_write16(chip, TCPC_VENDOR_ALERT, 0xffff);
	if (ret < 0) {
		dev_err(chip->dev, "Error writing to TCPC_VENDOR_ALERT ret:%d\n", ret);
		return;
	}

	ret = max_tcpci_write8(chip, TCPC_ALERT_EXTENDED, 0xff);
	if (ret < 0) {
		dev_err(chip->dev, "Unable to clear TCPC_ALERT_EXTENDED ret:%d\n", ret);
		return;
	}

	/* Enable VSAFE0V detection */
	ret = max_tcpci_write8(chip, TCPC_EXTENDED_STATUS_MASK, TCPC_EXTENDED_STATUS_VSAFE0V);
	if (ret < 0) {
		dev_err(chip->dev, "Unable to unmask TCPC_EXTENDED_STATUS_VSAFE0V ret:%d\n", ret);
		return;
	}

	alert_mask = TCPC_ALERT_TX_SUCCESS | TCPC_ALERT_TX_DISCARDED | TCPC_ALERT_TX_FAILED |
		TCPC_ALERT_RX_HARD_RST | TCPC_ALERT_RX_STATUS | TCPC_ALERT_CC_STATUS |
		TCPC_ALERT_VBUS_DISCNCT | TCPC_ALERT_RX_BUF_OVF | TCPC_ALERT_POWER_STATUS |
		/* Enable Extended alert for detecting Fast Role Swap Signal */
		TCPC_ALERT_EXTND | TCPC_ALERT_EXTENDED_STATUS;

	ret = max_tcpci_write16(chip, TCPC_ALERT_MASK, alert_mask);
	if (ret < 0) {
		dev_err(chip->dev,
			"Error enabling TCPC_ALERT: TCPC_ALERT_MASK write failed ret:%d\n", ret);
		return;
	}

	/* Enable vbus voltage monitoring and voltage alerts */
	ret = max_tcpci_write8(chip, TCPC_POWER_CTRL, 0);
	if (ret < 0) {
		dev_err(chip->dev, "Error writing to TCPC_POWER_CTRL ret:%d\n", ret);
		return;
	}

	ret = max_tcpci_write8(chip, TCPC_ALERT_EXTENDED_MASK, TCPC_SINK_FAST_ROLE_SWAP);
	if (ret < 0)
		/* Log instead of failing silently, matching the paths above. */
		dev_err(chip->dev,
			"Unable to unmask TCPC_ALERT_EXTENDED_MASK ret:%d\n", ret);
}
/*
 * Fetch one received PD message from the chip's RECEIVE_BUFFER and hand
 * it to tcpm.  Only SOP frames that fit in struct pd_message are
 * accepted; anything else clears the RX alert and is dropped.
 */
static void process_rx(struct max_tcpci_chip *chip, u16 status)
{
	struct pd_message msg;
	u8 count, frame_type, rx_buf[TCPC_RECEIVE_BUFFER_LEN];
	int ret, payload_index;
	u8 *rx_buf_ptr;

	/*
	 * READABLE_BYTE_COUNT: Indicates the number of bytes in the RX_BUF_BYTE_x registers
	 * plus one (for the RX_BUF_FRAME_TYPE) Table 4-36.
	 * Read the count and frame type.
	 */
	ret = regmap_raw_read(chip->data.regmap, TCPC_RX_BYTE_CNT, rx_buf, 2);
	if (ret < 0) {
		dev_err(chip->dev, "TCPC_RX_BYTE_CNT read failed ret:%d\n", ret);
		return;
	}

	count = rx_buf[TCPC_RECEIVE_BUFFER_COUNT_OFFSET];
	frame_type = rx_buf[TCPC_RECEIVE_BUFFER_FRAME_TYPE_OFFSET];

	/* Empty or non-SOP frames: clear the alert and drop the frame. */
	if (count == 0 || frame_type != TCPC_RX_BUF_FRAME_TYPE_SOP) {
		max_tcpci_write16(chip, TCPC_ALERT, TCPC_ALERT_RX_STATUS);
		dev_err(chip->dev, "%s\n", count ==  0 ? "error: count is 0" :
			"error frame_type is not SOP");
		return;
	}

	/* Bound-check before the second raw read into rx_buf/msg. */
	if (count > sizeof(struct pd_message) || count + 1 > TCPC_RECEIVE_BUFFER_LEN) {
		dev_err(chip->dev, "Invalid TCPC_RX_BYTE_CNT %d\n", count);
		return;
	}

	/*
	 * Read count + 1 as RX_BUF_BYTE_x is hidden and can only be read through
	 * TCPC_RX_BYTE_CNT
	 */
	count += 1;
	ret = regmap_raw_read(chip->data.regmap, TCPC_RX_BYTE_CNT, rx_buf, count);
	if (ret < 0) {
		dev_err(chip->dev, "Error: TCPC_RX_BYTE_CNT read failed: %d\n", ret);
		return;
	}

	/* Unpack header then 32-bit payload objects from the raw buffer.
	 * NOTE(review): the casts assume little-endian wire bytes map
	 * directly onto the __le16/__le32 message fields -- confirm.
	 */
	rx_buf_ptr = rx_buf + TCPC_RECEIVE_BUFFER_RX_BYTE_BUF_OFFSET;
	msg.header = cpu_to_le16(*(u16 *)rx_buf_ptr);
	rx_buf_ptr = rx_buf_ptr + sizeof(msg.header);
	for (payload_index = 0; payload_index < pd_header_cnt_le(msg.header); payload_index++,
	     rx_buf_ptr += sizeof(msg.payload[0]))
		msg.payload[payload_index] = cpu_to_le32(*(u32 *)rx_buf_ptr);

	/*
	 * Read complete, clear RX status alert bit.
	 * Clear overflow as well if set.
	 */
	ret = max_tcpci_write16(chip, TCPC_ALERT, status & TCPC_ALERT_RX_BUF_OVF ?
				TCPC_ALERT_RX_STATUS | TCPC_ALERT_RX_BUF_OVF :
				TCPC_ALERT_RX_STATUS);
	if (ret < 0)
		return;

	tcpm_pd_receive(chip->port, &msg);
}
/*
 * tcpci set_vbus hook: command the companion buck/boost converter over
 * raw I2C to source, sink, or turn VBUS off.  Returns a negative error
 * code on transfer failure, otherwise 1.
 */
static int max_tcpci_set_vbus(struct tcpci *tcpci, struct tcpci_data *tdata, bool source, bool sink)
{
	struct max_tcpci_chip *chip = tdata_to_max_tcpci(tdata);
	struct i2c_client *i2c = chip->client;
	u8 payload[2] = {MAX_BUCK_BOOST_OP, MAX_BUCK_BOOST_OFF};
	struct i2c_msg msg;
	int ret;

	/* Sourcing and sinking simultaneously is contradictory. */
	if (source && sink) {
		dev_err(chip->dev, "Both source and sink set\n");
		return -EINVAL;
	}

	if (source)
		payload[1] = MAX_BUCK_BOOST_SOURCE;
	else if (sink)
		payload[1] = MAX_BUCK_BOOST_SINK;

	msg.addr = MAX_BUCK_BOOST_SID;
	msg.flags = i2c->flags & I2C_M_TEN;
	msg.len = sizeof(payload);
	msg.buf = payload;

	ret = i2c_transfer(i2c->adapter, &msg, 1);

	return ret < 0 ? ret : 1;
}
/*
 * Handle a POWER_STATUS alert: 0xff is treated as an invalid/reset
 * status (registers are reprogrammed), otherwise sourcing-VBUS and
 * generic VBUS changes are forwarded to tcpm.
 */
static void process_power_status(struct max_tcpci_chip *chip)
{
	u8 pwr_status;

	if (max_tcpci_read8(chip, TCPC_POWER_STATUS, &pwr_status) < 0)
		return;

	if (pwr_status == 0xff) {
		max_tcpci_init_regs(chip);
	} else if (pwr_status & TCPC_POWER_STATUS_SOURCING_VBUS) {
		tcpm_sourcing_vbus(chip->port);
	} else {
		tcpm_vbus_change(chip->port);
	}
}
/* frs_sourcing_vbus hook: explicitly enter source mode after an FRS. */
static void max_tcpci_frs_sourcing_vbus(struct tcpci *tcpci, struct tcpci_data *tdata)
{
	/*
	 * For Fast Role Swap case, Boost turns on autonomously without
	 * AP intervention, but, needs AP to enable source mode explicitly
	 * for AP to regain control.
	 */
	max_tcpci_set_vbus(tcpci, tdata, true, false);
}
/*
 * Forward a TX completion alert (success / discarded / failed) to tcpm.
 */
static void process_tx(struct max_tcpci_chip *chip, u16 status)
{
	if (status & TCPC_ALERT_TX_SUCCESS)
		tcpm_pd_transmit_complete(chip->port, TCPC_TX_SUCCESS);
	else if (status & TCPC_ALERT_TX_DISCARDED)
		tcpm_pd_transmit_complete(chip->port, TCPC_TX_DISCARDED);
	else if (status & TCPC_ALERT_TX_FAILED)
		tcpm_pd_transmit_complete(chip->port, TCPC_TX_FAILED);

	/* Reinit regs as Hard reset sets them to default value */
	/*
	 * NOTE(review): this fires only when SUCCESS and FAILED are set
	 * at the same time -- confirm that this combination is indeed the
	 * chip's hard-reset indication.
	 */
	if ((status & TCPC_ALERT_TX_SUCCESS) && (status & TCPC_ALERT_TX_FAILED))
		max_tcpci_init_regs(chip);
}
/* Enable USB switches when partner is USB communications capable */
static void max_tcpci_set_partner_usb_comm_capable(struct tcpci *tcpci, struct tcpci_data *data,
						   bool capable)
{
	struct max_tcpci_chip *chip = tdata_to_max_tcpci(data);
	int ret;

	ret = max_tcpci_write8(chip, TCPC_VENDOR_USBSW_CTRL, capable ?
			       TCPC_VENDOR_USBSW_CTRL_ENABLE_USB_DATA :
			       TCPC_VENDOR_USBSW_CTRL_DISABLE_USB_DATA);
	if (ret < 0)
		/* Terminate the message with '\n' per printk convention. */
		dev_err(chip->dev, "Failed to enable USB switches\n");
}
/*
 * Service one snapshot of TCPC_ALERT: clear the handled bits (RX_STATUS
 * is only cleared after the message is fetched in process_rx()) and
 * dispatch each event to tcpm.
 *
 * Returns IRQ_HANDLED, or IRQ_NONE if a register access failed.  The
 * original code returned the negative errno, which is not a valid
 * irqreturn_t value.
 */
static irqreturn_t _max_tcpci_irq(struct max_tcpci_chip *chip, u16 status)
{
	u16 mask;
	int ret;
	u8 reg_status;

	/*
	 * Clear alert status for everything except RX_STATUS, which shouldn't
	 * be cleared until we have successfully retrieved message.
	 */
	if (status & ~TCPC_ALERT_RX_STATUS) {
		mask = status & TCPC_ALERT_RX_BUF_OVF ?
			status & ~(TCPC_ALERT_RX_STATUS | TCPC_ALERT_RX_BUF_OVF) :
			status & ~TCPC_ALERT_RX_STATUS;
		ret = max_tcpci_write16(chip, TCPC_ALERT, mask);
		if (ret < 0) {
			dev_err(chip->dev, "ALERT clear failed\n");
			return IRQ_NONE;
		}
	}

	/* Overflow without RX data: clear both bits together. */
	if (status & TCPC_ALERT_RX_BUF_OVF && !(status & TCPC_ALERT_RX_STATUS)) {
		ret = max_tcpci_write16(chip, TCPC_ALERT, (TCPC_ALERT_RX_STATUS |
							  TCPC_ALERT_RX_BUF_OVF));
		if (ret < 0) {
			dev_err(chip->dev, "ALERT clear failed\n");
			return IRQ_NONE;
		}
	}

	if (status & TCPC_ALERT_EXTND) {
		ret = max_tcpci_read8(chip, TCPC_ALERT_EXTENDED, &reg_status);
		if (ret < 0)
			return IRQ_NONE;

		ret = max_tcpci_write8(chip, TCPC_ALERT_EXTENDED, reg_status);
		if (ret < 0)
			return IRQ_NONE;

		if (reg_status & TCPC_SINK_FAST_ROLE_SWAP) {
			dev_info(chip->dev, "FRS Signal\n");
			tcpm_sink_frs(chip->port);
		}
	}

	if (status & TCPC_ALERT_EXTENDED_STATUS) {
		ret = max_tcpci_read8(chip, TCPC_EXTENDED_STATUS, (u8 *)&reg_status);
		if (ret >= 0 && (reg_status & TCPC_EXTENDED_STATUS_VSAFE0V))
			tcpm_vbus_change(chip->port);
	}

	if (status & TCPC_ALERT_RX_STATUS)
		process_rx(chip, status);

	if (status & TCPC_ALERT_VBUS_DISCNCT)
		tcpm_vbus_change(chip->port);

	if (status & TCPC_ALERT_CC_STATUS) {
		/* Re-check for contaminant while one is suspected/detected. */
		if (chip->contaminant_state == DETECTED || tcpm_port_is_toggling(chip->port)) {
			if (!max_contaminant_is_contaminant(chip, false))
				tcpm_port_clean(chip->port);
		} else {
			tcpm_cc_change(chip->port);
		}
	}

	if (status & TCPC_ALERT_POWER_STATUS)
		process_power_status(chip);

	if (status & TCPC_ALERT_RX_HARD_RST) {
		tcpm_pd_hard_reset(chip->port);
		max_tcpci_init_regs(chip);
	}

	if (status & TCPC_ALERT_TX_SUCCESS || status & TCPC_ALERT_TX_DISCARDED || status &
	    TCPC_ALERT_TX_FAILED)
		process_tx(chip, status);

	return IRQ_HANDLED;
}
static irqreturn_t max_tcpci_irq(int irq, void *dev_id)
{
struct max_tcpci_chip *chip = dev_id;
u16 status;
irqreturn_t irq_return = IRQ_HANDLED;
int ret;
if (!chip->port)
return IRQ_HANDLED;
ret = max_tcpci_read16(chip, TCPC_ALERT, &status);
if (ret < 0) {
dev_err(chip->dev, "ALERT read failed\n");
return ret;
}
while (status) {
irq_return = _max_tcpci_irq(chip, status);
/* Do not return if the ALERT is already set. */
ret = max_tcpci_read16(chip, TCPC_ALERT, &status);
if (ret < 0)
break;
}
return irq_return;
}
/*
 * Hard IRQ handler: hold a wakeup event so the PD exchange can finish,
 * then defer to the threaded handler once the port exists.
 */
static irqreturn_t max_tcpci_isr(int irq, void *dev_id)
{
	struct max_tcpci_chip *chip = dev_id;

	pm_wakeup_event(chip->dev, PD_ACTIVITY_TIMEOUT_MS);

	return chip->port ? IRQ_WAKE_THREAD : IRQ_HANDLED;
}
/*
 * Request the active-low alert interrupt (hard handler + thread) and
 * mark it wakeup-capable.  Returns 0 or a negative error code.
 */
static int max_tcpci_init_alert(struct max_tcpci_chip *chip, struct i2c_client *client)
{
	int ret;

	ret = devm_request_threaded_irq(chip->dev, client->irq, max_tcpci_isr,
					max_tcpci_irq,
					(IRQF_TRIGGER_LOW | IRQF_ONESHOT),
					dev_name(chip->dev), chip);
	if (ret < 0)
		return ret;

	enable_irq_wake(client->irq);

	return 0;
}
/*
 * start_drp_toggling hook.  Only reinitializes the registers; the
 * requested @cc pull is not programmed here.  Presumably the chip
 * resumes toggling on its own afterwards -- TODO confirm against the
 * datasheet.
 */
static int max_tcpci_start_toggling(struct tcpci *tcpci, struct tcpci_data *tdata,
				    enum typec_cc_status cc)
{
	struct max_tcpci_chip *chip = tdata_to_max_tcpci(tdata);

	max_tcpci_init_regs(chip);

	return 0;
}
/*
 * tcpci_data->init hook.  Registers are already programmed by
 * max_tcpci_init_regs() in probe; deliberately fail here so the
 * generic tcpci init path does not touch them.
 */
static int tcpci_init(struct tcpci *tcpci, struct tcpci_data *data)
{
	/*
	 * Generic TCPCI overwrites the regs once this driver initializes
	 * them. Prevent this by returning -1.
	 */
	return -1;
}
static void max_tcpci_check_contaminant(struct tcpci *tcpci, struct tcpci_data *tdata)
{
struct max_tcpci_chip *chip = tdata_to_max_tcpci(tdata);
if (!max_contaminant_is_contaminant(chip, true))
tcpm_port_clean(chip->port);
}
/*
 * Probe: set up the regmap, sanity-read POWER_STATUS, install the
 * chip-specific tcpci callbacks, program the registers, register the
 * port, and finally hook up the alert interrupt.
 */
static int max_tcpci_probe(struct i2c_client *client)
{
	int ret;
	struct max_tcpci_chip *chip;
	u8 power_status;

	chip = devm_kzalloc(&client->dev, sizeof(*chip), GFP_KERNEL);
	if (!chip)
		return -ENOMEM;

	chip->client = client;
	chip->data.regmap = devm_regmap_init_i2c(client, &max_tcpci_regmap_config);
	if (IS_ERR(chip->data.regmap)) {
		dev_err(&client->dev, "Regmap init failed\n");
		return PTR_ERR(chip->data.regmap);
	}

	chip->dev = &client->dev;
	i2c_set_clientdata(client, chip);

	/* Probe-time read doubles as a bus/chip presence check. */
	ret = max_tcpci_read8(chip, TCPC_POWER_STATUS, &power_status);
	if (ret < 0)
		return ret;

	/* Chip level tcpci callbacks */
	chip->data.set_vbus = max_tcpci_set_vbus;
	chip->data.start_drp_toggling = max_tcpci_start_toggling;
	chip->data.TX_BUF_BYTE_x_hidden = true;
	chip->data.init = tcpci_init;
	chip->data.frs_sourcing_vbus = max_tcpci_frs_sourcing_vbus;
	chip->data.auto_discharge_disconnect = true;
	chip->data.vbus_vsafe0v = true;
	chip->data.set_partner_usb_comm_capable = max_tcpci_set_partner_usb_comm_capable;
	chip->data.check_contaminant = max_tcpci_check_contaminant;

	/* Program the registers before the port (and IRQ) go live. */
	max_tcpci_init_regs(chip);
	chip->tcpci = tcpci_register_port(chip->dev, &chip->data);
	if (IS_ERR(chip->tcpci)) {
		dev_err(&client->dev, "TCPCI port registration failed\n");
		return PTR_ERR(chip->tcpci);
	}
	chip->port = tcpci_get_tcpm_port(chip->tcpci);
	ret = max_tcpci_init_alert(chip, client);
	if (ret < 0)
		goto unreg_port;

	device_init_wakeup(chip->dev, true);
	return 0;

unreg_port:
	tcpci_unregister_port(chip->tcpci);

	return ret;
}
/*
 * Unbind: the IRQ is devm-managed, so only the port needs explicit
 * teardown.  The IS_ERR_OR_NULL guard covers a failed/partial probe.
 */
static void max_tcpci_remove(struct i2c_client *client)
{
	struct max_tcpci_chip *chip = i2c_get_clientdata(client);

	if (!IS_ERR_OR_NULL(chip->tcpci))
		tcpci_unregister_port(chip->tcpci);
}
/* Legacy I2C match table. */
static const struct i2c_device_id max_tcpci_id[] = {
	{ "maxtcpc", 0 },
	{ }
};
MODULE_DEVICE_TABLE(i2c, max_tcpci_id);

#ifdef CONFIG_OF
/* Device-tree match table (MAX33359). */
static const struct of_device_id max_tcpci_of_match[] = {
	{ .compatible = "maxim,max33359", },
	{},
};
MODULE_DEVICE_TABLE(of, max_tcpci_of_match);
#endif

static struct i2c_driver max_tcpci_i2c_driver = {
	.driver = {
		.name = "maxtcpc",
		.of_match_table = of_match_ptr(max_tcpci_of_match),
	},
	.probe = max_tcpci_probe,
	.remove = max_tcpci_remove,
	.id_table = max_tcpci_id,
};
module_i2c_driver(max_tcpci_i2c_driver);

MODULE_AUTHOR("Badhri Jagan Sridharan <[email protected]>");
MODULE_DESCRIPTION("Maxim TCPCI based USB Type-C Port Controller Interface Driver");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/usb/typec/tcpm/tcpci_maxim_core.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* Copyright 2015-2017 Google, Inc
*
* USB Power Delivery protocol stack.
*/
#include <linux/completion.h>
#include <linux/debugfs.h>
#include <linux/device.h>
#include <linux/hrtimer.h>
#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/kthread.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/power_supply.h>
#include <linux/proc_fs.h>
#include <linux/property.h>
#include <linux/sched/clock.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/usb.h>
#include <linux/usb/pd.h>
#include <linux/usb/pd_ado.h>
#include <linux/usb/pd_bdo.h>
#include <linux/usb/pd_ext_sdb.h>
#include <linux/usb/pd_vdo.h>
#include <linux/usb/role.h>
#include <linux/usb/tcpm.h>
#include <linux/usb/typec_altmode.h>
#include <uapi/linux/sched/types.h>
#define FOREACH_STATE(S) \
S(INVALID_STATE), \
S(TOGGLING), \
S(CHECK_CONTAMINANT), \
S(SRC_UNATTACHED), \
S(SRC_ATTACH_WAIT), \
S(SRC_ATTACHED), \
S(SRC_STARTUP), \
S(SRC_SEND_CAPABILITIES), \
S(SRC_SEND_CAPABILITIES_TIMEOUT), \
S(SRC_NEGOTIATE_CAPABILITIES), \
S(SRC_TRANSITION_SUPPLY), \
S(SRC_READY), \
S(SRC_WAIT_NEW_CAPABILITIES), \
\
S(SNK_UNATTACHED), \
S(SNK_ATTACH_WAIT), \
S(SNK_DEBOUNCED), \
S(SNK_ATTACHED), \
S(SNK_STARTUP), \
S(SNK_DISCOVERY), \
S(SNK_DISCOVERY_DEBOUNCE), \
S(SNK_DISCOVERY_DEBOUNCE_DONE), \
S(SNK_WAIT_CAPABILITIES), \
S(SNK_NEGOTIATE_CAPABILITIES), \
S(SNK_NEGOTIATE_PPS_CAPABILITIES), \
S(SNK_TRANSITION_SINK), \
S(SNK_TRANSITION_SINK_VBUS), \
S(SNK_READY), \
\
S(ACC_UNATTACHED), \
S(DEBUG_ACC_ATTACHED), \
S(AUDIO_ACC_ATTACHED), \
S(AUDIO_ACC_DEBOUNCE), \
\
S(HARD_RESET_SEND), \
S(HARD_RESET_START), \
S(SRC_HARD_RESET_VBUS_OFF), \
S(SRC_HARD_RESET_VBUS_ON), \
S(SNK_HARD_RESET_SINK_OFF), \
S(SNK_HARD_RESET_WAIT_VBUS), \
S(SNK_HARD_RESET_SINK_ON), \
\
S(SOFT_RESET), \
S(SRC_SOFT_RESET_WAIT_SNK_TX), \
S(SNK_SOFT_RESET), \
S(SOFT_RESET_SEND), \
\
S(DR_SWAP_ACCEPT), \
S(DR_SWAP_SEND), \
S(DR_SWAP_SEND_TIMEOUT), \
S(DR_SWAP_CANCEL), \
S(DR_SWAP_CHANGE_DR), \
\
S(PR_SWAP_ACCEPT), \
S(PR_SWAP_SEND), \
S(PR_SWAP_SEND_TIMEOUT), \
S(PR_SWAP_CANCEL), \
S(PR_SWAP_START), \
S(PR_SWAP_SRC_SNK_TRANSITION_OFF), \
S(PR_SWAP_SRC_SNK_SOURCE_OFF), \
S(PR_SWAP_SRC_SNK_SOURCE_OFF_CC_DEBOUNCED), \
S(PR_SWAP_SRC_SNK_SINK_ON), \
S(PR_SWAP_SNK_SRC_SINK_OFF), \
S(PR_SWAP_SNK_SRC_SOURCE_ON), \
S(PR_SWAP_SNK_SRC_SOURCE_ON_VBUS_RAMPED_UP), \
\
S(VCONN_SWAP_ACCEPT), \
S(VCONN_SWAP_SEND), \
S(VCONN_SWAP_SEND_TIMEOUT), \
S(VCONN_SWAP_CANCEL), \
S(VCONN_SWAP_START), \
S(VCONN_SWAP_WAIT_FOR_VCONN), \
S(VCONN_SWAP_TURN_ON_VCONN), \
S(VCONN_SWAP_TURN_OFF_VCONN), \
\
S(FR_SWAP_SEND), \
S(FR_SWAP_SEND_TIMEOUT), \
S(FR_SWAP_SNK_SRC_TRANSITION_TO_OFF), \
S(FR_SWAP_SNK_SRC_NEW_SINK_READY), \
S(FR_SWAP_SNK_SRC_SOURCE_VBUS_APPLIED), \
S(FR_SWAP_CANCEL), \
\
S(SNK_TRY), \
S(SNK_TRY_WAIT), \
S(SNK_TRY_WAIT_DEBOUNCE), \
S(SNK_TRY_WAIT_DEBOUNCE_CHECK_VBUS), \
S(SRC_TRYWAIT), \
S(SRC_TRYWAIT_DEBOUNCE), \
S(SRC_TRYWAIT_UNATTACHED), \
\
S(SRC_TRY), \
S(SRC_TRY_WAIT), \
S(SRC_TRY_DEBOUNCE), \
S(SNK_TRYWAIT), \
S(SNK_TRYWAIT_DEBOUNCE), \
S(SNK_TRYWAIT_VBUS), \
S(BIST_RX), \
\
S(GET_STATUS_SEND), \
S(GET_STATUS_SEND_TIMEOUT), \
S(GET_PPS_STATUS_SEND), \
S(GET_PPS_STATUS_SEND_TIMEOUT), \
\
S(GET_SINK_CAP), \
S(GET_SINK_CAP_TIMEOUT), \
\
S(ERROR_RECOVERY), \
S(PORT_RESET), \
S(PORT_RESET_WAIT_OFF), \
\
S(AMS_START), \
S(CHUNK_NOT_SUPP)
#define FOREACH_AMS(S) \
S(NONE_AMS), \
S(POWER_NEGOTIATION), \
S(GOTOMIN), \
S(SOFT_RESET_AMS), \
S(HARD_RESET), \
S(CABLE_RESET), \
S(GET_SOURCE_CAPABILITIES), \
S(GET_SINK_CAPABILITIES), \
S(POWER_ROLE_SWAP), \
S(FAST_ROLE_SWAP), \
S(DATA_ROLE_SWAP), \
S(VCONN_SWAP), \
S(SOURCE_ALERT), \
S(GETTING_SOURCE_EXTENDED_CAPABILITIES),\
S(GETTING_SOURCE_SINK_STATUS), \
S(GETTING_BATTERY_CAPABILITIES), \
S(GETTING_BATTERY_STATUS), \
S(GETTING_MANUFACTURER_INFORMATION), \
S(SECURITY), \
S(FIRMWARE_UPDATE), \
S(DISCOVER_IDENTITY), \
S(SOURCE_STARTUP_CABLE_PLUG_DISCOVER_IDENTITY), \
S(DISCOVER_SVIDS), \
S(DISCOVER_MODES), \
S(DFP_TO_UFP_ENTER_MODE), \
S(DFP_TO_UFP_EXIT_MODE), \
S(DFP_TO_CABLE_PLUG_ENTER_MODE), \
S(DFP_TO_CABLE_PLUG_EXIT_MODE), \
S(ATTENTION), \
S(BIST), \
S(UNSTRUCTURED_VDMS), \
S(STRUCTURED_VDMS), \
S(COUNTRY_INFO), \
S(COUNTRY_CODES)
#define GENERATE_ENUM(e) e
#define GENERATE_STRING(s) #s
enum tcpm_state {
FOREACH_STATE(GENERATE_ENUM)
};
static const char * const tcpm_states[] = {
FOREACH_STATE(GENERATE_STRING)
};
enum tcpm_ams {
FOREACH_AMS(GENERATE_ENUM)
};
static const char * const tcpm_ams_str[] = {
FOREACH_AMS(GENERATE_STRING)
};
enum vdm_states {
VDM_STATE_ERR_BUSY = -3,
VDM_STATE_ERR_SEND = -2,
VDM_STATE_ERR_TMOUT = -1,
VDM_STATE_DONE = 0,
/* Anything >0 represents an active state */
VDM_STATE_READY = 1,
VDM_STATE_BUSY = 2,
VDM_STATE_WAIT_RSP_BUSY = 3,
VDM_STATE_SEND_MESSAGE = 4,
};
enum pd_msg_request {
PD_MSG_NONE = 0,
PD_MSG_CTRL_REJECT,
PD_MSG_CTRL_WAIT,
PD_MSG_CTRL_NOT_SUPP,
PD_MSG_DATA_SINK_CAP,
PD_MSG_DATA_SOURCE_CAP,
};
enum adev_actions {
ADEV_NONE = 0,
ADEV_NOTIFY_USB_AND_QUEUE_VDM,
ADEV_QUEUE_VDM,
ADEV_QUEUE_VDM_SEND_EXIT_MODE_ON_FAIL,
ADEV_ATTENTION,
};
/*
* Initial current capability of the new source when vSafe5V is applied during PD3.0 Fast Role Swap.
* Based on "Table 6-14 Fixed Supply PDO - Sink" of "USB Power Delivery Specification Revision 3.0,
* Version 1.2"
*/
enum frs_typec_current {
FRS_NOT_SUPPORTED,
FRS_DEFAULT_POWER,
FRS_5V_1P5A,
FRS_5V_3A,
};
/* Events from low level driver */
#define TCPM_CC_EVENT BIT(0)
#define TCPM_VBUS_EVENT BIT(1)
#define TCPM_RESET_EVENT BIT(2)
#define TCPM_FRS_EVENT BIT(3)
#define TCPM_SOURCING_VBUS BIT(4)
#define TCPM_PORT_CLEAN BIT(5)
#define LOG_BUFFER_ENTRIES 1024
#define LOG_BUFFER_ENTRY_SIZE 128
/* Alternate mode support */
#define SVID_DISCOVERY_MAX 16
#define ALTMODE_DISCOVERY_MAX (SVID_DISCOVERY_MAX * MODE_DISCOVERY_MAX)
#define GET_SINK_CAP_RETRY_MS 100
#define SEND_DISCOVER_RETRY_MS 100
struct pd_mode_data {
int svid_index; /* current SVID index */
int nsvids;
u16 svids[SVID_DISCOVERY_MAX];
int altmodes; /* number of alternate modes */
struct typec_altmode_desc altmode_desc[ALTMODE_DISCOVERY_MAX];
};
/*
* @min_volt: Actual min voltage at the local port
* @req_min_volt: Requested min voltage to the port partner
* @max_volt: Actual max voltage at the local port
* @req_max_volt: Requested max voltage to the port partner
* @max_curr: Actual max current at the local port
* @req_max_curr: Requested max current of the port partner
* @req_out_volt: Requested output voltage to the port partner
* @req_op_curr: Requested operating current to the port partner
* @supported: Parter has at least one APDO hence supports PPS
* @active: PPS mode is active
*/
struct pd_pps_data {
u32 min_volt;
u32 req_min_volt;
u32 max_volt;
u32 req_max_volt;
u32 max_curr;
u32 req_max_curr;
u32 req_out_volt;
u32 req_op_curr;
bool supported;
bool active;
};
struct tcpm_port {
struct device *dev;
struct mutex lock; /* tcpm state machine lock */
struct kthread_worker *wq;
struct typec_capability typec_caps;
struct typec_port *typec_port;
struct tcpc_dev *tcpc;
struct usb_role_switch *role_sw;
enum typec_role vconn_role;
enum typec_role pwr_role;
enum typec_data_role data_role;
enum typec_pwr_opmode pwr_opmode;
struct usb_pd_identity partner_ident;
struct typec_partner_desc partner_desc;
struct typec_partner *partner;
enum typec_cc_status cc_req;
enum typec_cc_status src_rp; /* work only if pd_supported == false */
enum typec_cc_status cc1;
enum typec_cc_status cc2;
enum typec_cc_polarity polarity;
bool attached;
bool connected;
bool registered;
bool pd_supported;
enum typec_port_type port_type;
/*
* Set to true when vbus is greater than VSAFE5V min.
* Set to false when vbus falls below vSinkDisconnect max threshold.
*/
bool vbus_present;
/*
* Set to true when vbus is less than VSAFE0V max.
* Set to false when vbus is greater than VSAFE0V max.
*/
bool vbus_vsafe0v;
bool vbus_never_low;
bool vbus_source;
bool vbus_charge;
/* Set to true when Discover_Identity Command is expected to be sent in Ready states. */
bool send_discover;
bool op_vsafe5v;
int try_role;
int try_snk_count;
int try_src_count;
enum pd_msg_request queued_message;
enum tcpm_state enter_state;
enum tcpm_state prev_state;
enum tcpm_state state;
enum tcpm_state delayed_state;
ktime_t delayed_runtime;
unsigned long delay_ms;
spinlock_t pd_event_lock;
u32 pd_events;
struct kthread_work event_work;
struct hrtimer state_machine_timer;
struct kthread_work state_machine;
struct hrtimer vdm_state_machine_timer;
struct kthread_work vdm_state_machine;
struct hrtimer enable_frs_timer;
struct kthread_work enable_frs;
struct hrtimer send_discover_timer;
struct kthread_work send_discover_work;
bool state_machine_running;
/* Set to true when VDM State Machine has following actions. */
bool vdm_sm_running;
struct completion tx_complete;
enum tcpm_transmit_status tx_status;
struct mutex swap_lock; /* swap command lock */
bool swap_pending;
bool non_pd_role_swap;
struct completion swap_complete;
int swap_status;
unsigned int negotiated_rev;
unsigned int message_id;
unsigned int caps_count;
unsigned int hard_reset_count;
bool pd_capable;
bool explicit_contract;
unsigned int rx_msgid;
/* USB PD objects */
struct usb_power_delivery *pd;
struct usb_power_delivery_capabilities *port_source_caps;
struct usb_power_delivery_capabilities *port_sink_caps;
struct usb_power_delivery *partner_pd;
struct usb_power_delivery_capabilities *partner_source_caps;
struct usb_power_delivery_capabilities *partner_sink_caps;
/* Partner capabilities/requests */
u32 sink_request;
u32 source_caps[PDO_MAX_OBJECTS];
unsigned int nr_source_caps;
u32 sink_caps[PDO_MAX_OBJECTS];
unsigned int nr_sink_caps;
/* Local capabilities */
u32 src_pdo[PDO_MAX_OBJECTS];
unsigned int nr_src_pdo;
u32 snk_pdo[PDO_MAX_OBJECTS];
unsigned int nr_snk_pdo;
u32 snk_vdo_v1[VDO_MAX_OBJECTS];
unsigned int nr_snk_vdo_v1;
u32 snk_vdo[VDO_MAX_OBJECTS];
unsigned int nr_snk_vdo;
unsigned int operating_snk_mw;
bool update_sink_caps;
/* Requested current / voltage to the port partner */
u32 req_current_limit;
u32 req_supply_voltage;
/* Actual current / voltage limit of the local port */
u32 current_limit;
u32 supply_voltage;
/* Used to export TA voltage and current */
struct power_supply *psy;
struct power_supply_desc psy_desc;
enum power_supply_usb_type usb_type;
u32 bist_request;
/* PD state for Vendor Defined Messages */
enum vdm_states vdm_state;
u32 vdm_retries;
/* next Vendor Defined Message to send */
u32 vdo_data[VDO_MAX_SIZE];
u8 vdo_count;
/* VDO to retry if UFP responder replied busy */
u32 vdo_retry;
/* PPS */
struct pd_pps_data pps_data;
struct completion pps_complete;
bool pps_pending;
int pps_status;
/* Alternate mode data */
struct pd_mode_data mode_data;
struct typec_altmode *partner_altmode[ALTMODE_DISCOVERY_MAX];
struct typec_altmode *port_altmode[ALTMODE_DISCOVERY_MAX];
/* Deadline in jiffies to exit src_try_wait state */
unsigned long max_wait;
/* port belongs to a self powered device */
bool self_powered;
/* Sink FRS */
enum frs_typec_current new_source_frs_current;
/* Sink caps have been queried */
bool sink_cap_done;
/* Collision Avoidance and Atomic Message Sequence */
enum tcpm_state upcoming_state;
enum tcpm_ams ams;
enum tcpm_ams next_ams;
bool in_ams;
/* Auto vbus discharge status */
bool auto_vbus_discharge_enabled;
/*
* When set, port requests PD_P_SNK_STDBY_MW upon entering SNK_DISCOVERY and
* the actual current limit after RX of PD_CTRL_PSRDY for PD link,
* SNK_READY for non-pd link.
*/
bool slow_charger_loop;
/*
* When true indicates that the lower level drivers indicate potential presence
* of contaminant in the connector pins based on the tcpm state machine
* transitions.
*/
bool potential_contaminant;
#ifdef CONFIG_DEBUG_FS
struct dentry *dentry;
struct mutex logbuffer_lock; /* log buffer access lock */
int logbuffer_head;
int logbuffer_tail;
u8 *logbuffer[LOG_BUFFER_ENTRIES];
#endif
};
/* Deferred-work wrapper carrying one received PD message to the worker. */
struct pd_rx_event {
	struct kthread_work work;
	struct tcpm_port *port;
	struct pd_message msg;
};

/* Human-readable names for the negotiated PD revision (used in logs). */
static const char * const pd_rev[] = {
	[PD_REV10]		= "rev1",
	[PD_REV20]		= "rev2",
	[PD_REV30]		= "rev3",
};
/* CC line shows a source-side pull-up (Rp): the partner could source. */
#define tcpm_cc_is_sink(cc) \
	((cc) == TYPEC_CC_RP_DEF || (cc) == TYPEC_CC_RP_1_5 || \
	 (cc) == TYPEC_CC_RP_3_0)

/* Exactly one CC line carries Rp: the port should attach as a sink. */
#define tcpm_port_is_sink(port) \
	((tcpm_cc_is_sink((port)->cc1) && !tcpm_cc_is_sink((port)->cc2)) || \
	 (tcpm_cc_is_sink((port)->cc2) && !tcpm_cc_is_sink((port)->cc1)))

/* Rd pull-down seen: the partner presents as a sink. */
#define tcpm_cc_is_source(cc) ((cc) == TYPEC_CC_RD)
/* Ra pull-down seen (powered cable / audio accessory). */
#define tcpm_cc_is_audio(cc) ((cc) == TYPEC_CC_RA)
/* No termination detected on the CC line. */
#define tcpm_cc_is_open(cc) ((cc) == TYPEC_CC_OPEN)

/* Exactly one CC line carries Rd: the port should attach as a source. */
#define tcpm_port_is_source(port) \
	((tcpm_cc_is_source((port)->cc1) && \
	 !tcpm_cc_is_source((port)->cc2)) || \
	 (tcpm_cc_is_source((port)->cc2) && \
	  !tcpm_cc_is_source((port)->cc1)))

/* Rd on both CC lines: debug accessory mode. */
#define tcpm_port_is_debug(port) \
	(tcpm_cc_is_source((port)->cc1) && tcpm_cc_is_source((port)->cc2))

/* Ra on both CC lines: audio accessory attached. */
#define tcpm_port_is_audio(port) \
	(tcpm_cc_is_audio((port)->cc1) && tcpm_cc_is_audio((port)->cc2))

/* Ra on one line, open on the other: audio accessory going away. */
#define tcpm_port_is_audio_detached(port) \
	((tcpm_cc_is_audio((port)->cc1) && tcpm_cc_is_open((port)->cc2)) || \
	 (tcpm_cc_is_audio((port)->cc2) && tcpm_cc_is_open((port)->cc1)))

/* DRP port preferring the sink role that has not yet attempted Try.SNK. */
#define tcpm_try_snk(port) \
	((port)->try_snk_count == 0 && (port)->try_role == TYPEC_SINK && \
	(port)->port_type == TYPEC_PORT_DRP)

/* DRP port preferring the source role that has not yet attempted Try.SRC. */
#define tcpm_try_src(port) \
	((port)->try_src_count == 0 && (port)->try_role == TYPEC_SOURCE && \
	(port)->port_type == TYPEC_PORT_DRP)

/* Initial data role when sourcing, derived from the port's data capability. */
#define tcpm_data_role_for_source(port) \
	((port)->typec_caps.data == TYPEC_PORT_UFP ? \
	TYPEC_DEVICE : TYPEC_HOST)

/* Initial data role when sinking, derived from the port's data capability. */
#define tcpm_data_role_for_sink(port) \
	((port)->typec_caps.data == TYPEC_PORT_DFP ? \
	TYPEC_HOST : TYPEC_DEVICE)

/* PD3.0 collision avoidance: the source advertises SinkTxOk via Rp 3.0A. */
#define tcpm_sink_tx_ok(port) \
	(tcpm_port_is_sink(port) && \
	((port)->cc1 == TYPEC_CC_RP_3_0 || (port)->cc2 == TYPEC_CC_RP_3_0))

/* Delay to wait for vSafe0V when auto VBUS discharge is active, else 0. */
#define tcpm_wait_for_discharge(port) \
	(((port)->auto_vbus_discharge_enabled && !(port)->vbus_vsafe0v) ? PD_T_SAFE_0V : 0)
/*
 * Pick the unattached state to start the state machine from, based on
 * the port type and the preferred (try) role. DRP ports honour
 * try_role; sink-only ports always start as sink; anything else
 * defaults to source.
 */
static enum tcpm_state tcpm_default_state(struct tcpm_port *port)
{
	switch (port->port_type) {
	case TYPEC_PORT_DRP:
		switch (port->try_role) {
		case TYPEC_SINK:
			return SNK_UNATTACHED;
		case TYPEC_SOURCE:
			return SRC_UNATTACHED;
		default:
			/* No preference: fall back to source below. */
			break;
		}
		break;
	case TYPEC_PORT_SNK:
		return SNK_UNATTACHED;
	default:
		break;
	}

	return SRC_UNATTACHED;
}
/*
 * A port counts as disconnected when, unattached, both CC lines are
 * open, or when, attached, the CC line selected by the polarity has
 * gone open.
 */
static bool tcpm_port_is_disconnected(struct tcpm_port *port)
{
	if (!port->attached)
		return port->cc1 == TYPEC_CC_OPEN &&
		       port->cc2 == TYPEC_CC_OPEN;

	if (port->polarity == TYPEC_POLARITY_CC1)
		return port->cc1 == TYPEC_CC_OPEN;

	return port->polarity == TYPEC_POLARITY_CC2 &&
	       port->cc2 == TYPEC_CC_OPEN;
}
/*
* Logging
*/
#ifdef CONFIG_DEBUG_FS
/* The log ring is full when advancing the head would run into the tail. */
static bool tcpm_log_full(struct tcpm_port *port)
{
	int next_head = (port->logbuffer_head + 1) % LOG_BUFFER_ENTRIES;

	return port->logbuffer_tail == next_head;
}
/*
 * Format one entry into the per-port debugfs log ring buffer,
 * timestamped like printk ([seconds.microseconds]). Entries are
 * allocated lazily on first use and reused afterwards; on allocation
 * failure the message is silently dropped.
 */
__printf(2, 0)
static void _tcpm_log(struct tcpm_port *port, const char *fmt, va_list args)
{
	char tmpbuffer[LOG_BUFFER_ENTRY_SIZE];
	u64 ts_nsec = local_clock();
	unsigned long rem_nsec;

	mutex_lock(&port->logbuffer_lock);
	if (!port->logbuffer[port->logbuffer_head]) {
		port->logbuffer[port->logbuffer_head] =
			kzalloc(LOG_BUFFER_ENTRY_SIZE, GFP_KERNEL);
		if (!port->logbuffer[port->logbuffer_head]) {
			mutex_unlock(&port->logbuffer_lock);
			return;
		}
	}

	vsnprintf(tmpbuffer, sizeof(tmpbuffer), fmt, args);

	/* Ring full: step back and overwrite the newest slot with a marker. */
	if (tcpm_log_full(port)) {
		port->logbuffer_head = max(port->logbuffer_head - 1, 0);
		strcpy(tmpbuffer, "overflow");
	}

	/* Defensive checks; should not trigger under the lock. */
	if (port->logbuffer_head < 0 ||
	    port->logbuffer_head >= LOG_BUFFER_ENTRIES) {
		dev_warn(port->dev,
			 "Bad log buffer index %d\n", port->logbuffer_head);
		goto abort;
	}

	if (!port->logbuffer[port->logbuffer_head]) {
		dev_warn(port->dev,
			 "Log buffer index %d is NULL\n", port->logbuffer_head);
		goto abort;
	}

	rem_nsec = do_div(ts_nsec, 1000000000);
	scnprintf(port->logbuffer[port->logbuffer_head],
		  LOG_BUFFER_ENTRY_SIZE, "[%5lu.%06lu] %s",
		  (unsigned long)ts_nsec, rem_nsec / 1000,
		  tmpbuffer);
	port->logbuffer_head = (port->logbuffer_head + 1) % LOG_BUFFER_ENTRIES;

abort:
	mutex_unlock(&port->logbuffer_lock);
}
/*
 * Log a message to the port's debugfs buffer, except while the port is
 * disconnected and sitting in an unattached/toggling state (avoids
 * filling the buffer while nothing is plugged in).
 */
__printf(2, 3)
static void tcpm_log(struct tcpm_port *port, const char *fmt, ...)
{
	va_list args;

	/* Do not log while disconnected and unattached */
	if (tcpm_port_is_disconnected(port) &&
	    (port->state == SRC_UNATTACHED || port->state == SNK_UNATTACHED ||
	     port->state == TOGGLING || port->state == CHECK_CONTAMINANT))
		return;

	va_start(args, fmt);
	_tcpm_log(port, fmt, args);
	va_end(args);
}
/* Like tcpm_log(), but logs unconditionally, even while disconnected. */
__printf(2, 3)
static void tcpm_log_force(struct tcpm_port *port, const char *fmt, ...)
{
	va_list args;

	va_start(args, fmt);
	_tcpm_log(port, fmt, args);
	va_end(args);
}
/*
 * Pretty-print the stored partner source capabilities (PDOs) into the
 * log, one line per PDO. Fixed-PDO flag letters: R=dual-role power,
 * S=USB suspend supported, H=higher capability, U=USB comms capable,
 * D=data role swap, E=externally powered.
 */
static void tcpm_log_source_caps(struct tcpm_port *port)
{
	int i;

	for (i = 0; i < port->nr_source_caps; i++) {
		u32 pdo = port->source_caps[i];
		enum pd_pdo_type type = pdo_type(pdo);
		char msg[64];

		switch (type) {
		case PDO_TYPE_FIXED:
			scnprintf(msg, sizeof(msg),
				  "%u mV, %u mA [%s%s%s%s%s%s]",
				  pdo_fixed_voltage(pdo),
				  pdo_max_current(pdo),
				  (pdo & PDO_FIXED_DUAL_ROLE) ?
							"R" : "",
				  (pdo & PDO_FIXED_SUSPEND) ?
							"S" : "",
				  (pdo & PDO_FIXED_HIGHER_CAP) ?
							"H" : "",
				  (pdo & PDO_FIXED_USB_COMM) ?
							"U" : "",
				  (pdo & PDO_FIXED_DATA_SWAP) ?
							"D" : "",
				  (pdo & PDO_FIXED_EXTPOWER) ?
							"E" : "");
			break;
		case PDO_TYPE_VAR:
			scnprintf(msg, sizeof(msg),
				  "%u-%u mV, %u mA",
				  pdo_min_voltage(pdo),
				  pdo_max_voltage(pdo),
				  pdo_max_current(pdo));
			break;
		case PDO_TYPE_BATT:
			scnprintf(msg, sizeof(msg),
				  "%u-%u mV, %u mW",
				  pdo_min_voltage(pdo),
				  pdo_max_voltage(pdo),
				  pdo_max_power(pdo));
			break;
		case PDO_TYPE_APDO:
			if (pdo_apdo_type(pdo) == APDO_TYPE_PPS)
				scnprintf(msg, sizeof(msg),
					  "%u-%u mV, %u mA",
					  pdo_pps_apdo_min_voltage(pdo),
					  pdo_pps_apdo_max_voltage(pdo),
					  pdo_pps_apdo_max_current(pdo));
			else
				strcpy(msg, "undefined APDO");
			break;
		default:
			strcpy(msg, "undefined");
			break;
		}
		tcpm_log(port, " PDO %d: type %d, %s",
			 i, type, msg);
	}
}
/*
 * debugfs "log" read handler: dump all buffered entries from tail to
 * head. The tail is only advanced (entries consumed) once the whole
 * dump fit in the seq_file buffer, so a partial read can be retried.
 */
static int tcpm_debug_show(struct seq_file *s, void *v)
{
	struct tcpm_port *port = s->private;
	int tail;

	mutex_lock(&port->logbuffer_lock);
	tail = port->logbuffer_tail;
	while (tail != port->logbuffer_head) {
		seq_printf(s, "%s\n", port->logbuffer[tail]);
		tail = (tail + 1) % LOG_BUFFER_ENTRIES;
	}
	if (!seq_has_overflowed(s))
		port->logbuffer_tail = tail;
	mutex_unlock(&port->logbuffer_lock);

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(tcpm_debug);
/* Create the per-port "tcpm-<dev>" debugfs directory and its "log" file. */
static void tcpm_debugfs_init(struct tcpm_port *port)
{
	char name[NAME_MAX];

	mutex_init(&port->logbuffer_lock);
	snprintf(name, NAME_MAX, "tcpm-%s", dev_name(port->dev));
	port->dentry = debugfs_create_dir(name, usb_debug_root);
	debugfs_create_file("log", S_IFREG | 0444, port->dentry, port,
			    &tcpm_debug_fops);
}
/* Free every log entry and remove the port's debugfs directory. */
static void tcpm_debugfs_exit(struct tcpm_port *port)
{
	int i;

	mutex_lock(&port->logbuffer_lock);
	for (i = 0; i < LOG_BUFFER_ENTRIES; i++) {
		kfree(port->logbuffer[i]);
		port->logbuffer[i] = NULL;
	}
	mutex_unlock(&port->logbuffer_lock);

	debugfs_remove(port->dentry);
}
#else
/* CONFIG_DEBUG_FS disabled: logging and debugfs hooks compile to no-ops. */
__printf(2, 3)
static void tcpm_log(const struct tcpm_port *port, const char *fmt, ...) { }
__printf(2, 3)
static void tcpm_log_force(struct tcpm_port *port, const char *fmt, ...) { }
static void tcpm_log_source_caps(struct tcpm_port *port) { }
static void tcpm_debugfs_init(const struct tcpm_port *port) { }
static void tcpm_debugfs_exit(const struct tcpm_port *port) { }
#endif
static void tcpm_set_cc(struct tcpm_port *port, enum typec_cc_status cc)
{
tcpm_log(port, "cc:=%d", cc);
port->cc_req = cc;
port->tcpc->set_cc(port->tcpc, cc);
}
/*
 * Toggle the TCPC's automatic VBUS-discharge-on-disconnect feature, if
 * the driver implements it, and track whether it is currently active.
 * Returns 0 when the hook is absent (treated as success).
 */
static int tcpm_enable_auto_vbus_discharge(struct tcpm_port *port, bool enable)
{
	int ret = 0;

	if (port->tcpc->enable_auto_vbus_discharge) {
		ret = port->tcpc->enable_auto_vbus_discharge(port->tcpc, enable);
		tcpm_log_force(port, "%s vbus discharge ret:%d", enable ? "enable" : "disable",
			       ret);
		if (!ret)
			port->auto_vbus_discharge_enabled = enable;
	}

	return ret;
}
static void tcpm_apply_rc(struct tcpm_port *port)
{
	/*
	 * TCPCI: Move to APPLY_RC state to prevent disconnect during PR_SWAP
	 * when Vbus auto discharge on disconnect is enabled.
	 *
	 * No-op unless the TCPC implements both enable_auto_vbus_discharge
	 * and apply_rc.
	 */
	if (port->tcpc->enable_auto_vbus_discharge && port->tcpc->apply_rc) {
		tcpm_log(port, "Apply_RC");
		port->tcpc->apply_rc(port->tcpc, port->cc_req, port->polarity);
		tcpm_enable_auto_vbus_discharge(port, false);
	}
}
/*
 * Determine the Rp advertisement (CC pull-up level) to present when
 * configured as a source. The first fixed 5V entry in the source PDO
 * list reports the maximum supported current; map that to an Rp level.
 * Falls back to the default level when PD is unsupported or no fixed
 * 5V PDO exists.
 */
static enum typec_cc_status tcpm_rp_cc(struct tcpm_port *port)
{
	int idx;

	if (!port->pd_supported)
		return port->src_rp;

	for (idx = 0; idx < port->nr_src_pdo; idx++) {
		const u32 entry = port->src_pdo[idx];

		if (pdo_type(entry) != PDO_TYPE_FIXED ||
		    pdo_fixed_voltage(entry) != 5000)
			continue;

		if (pdo_max_current(entry) >= 3000)
			return TYPEC_CC_RP_3_0;
		if (pdo_max_current(entry) >= 1500)
			return TYPEC_CC_RP_1_5;
		return TYPEC_CC_RP_DEF;
	}

	return TYPEC_CC_RP_DEF;
}
/*
 * Mark the current Atomic Message Sequence as finished and restore the
 * CC advertisement: a PD3.0-capable source re-allows sink transmission
 * (SinkTxOk), otherwise a source falls back to its normal Rp level.
 */
static void tcpm_ams_finish(struct tcpm_port *port)
{
	tcpm_log(port, "AMS %s finished", tcpm_ams_str[port->ams]);

	if (port->pd_capable && port->pwr_role == TYPEC_SOURCE) {
		if (port->negotiated_rev >= PD_REV30)
			tcpm_set_cc(port, SINK_TX_OK);
		else
			tcpm_set_cc(port, SINK_TX_NG);
	} else if (port->pwr_role == TYPEC_SOURCE) {
		tcpm_set_cc(port, tcpm_rp_cc(port));
	}

	port->in_ams = false;
	port->ams = NONE_AMS;
}
/*
 * Transmit a PD message (or, when @msg is NULL, a bare transmission of
 * the given @type such as a hard reset) via the TCPC, then wait for
 * the tcpm_pd_transmit_complete() callback. The port lock is dropped
 * while waiting so the completion can be delivered.
 *
 * Returns 0 on success, -ETIMEDOUT if the TCPC never signals
 * completion, -EAGAIN if the message was discarded, -EIO on failure.
 */
static int tcpm_pd_transmit(struct tcpm_port *port,
			    enum tcpm_transmit_type type,
			    const struct pd_message *msg)
{
	unsigned long timeout;
	int ret;

	if (msg)
		tcpm_log(port, "PD TX, header: %#x", le16_to_cpu(msg->header));
	else
		tcpm_log(port, "PD TX, type: %#x", type);

	reinit_completion(&port->tx_complete);
	ret = port->tcpc->pd_transmit(port->tcpc, type, msg, port->negotiated_rev);
	if (ret < 0)
		return ret;

	mutex_unlock(&port->lock);
	timeout = wait_for_completion_timeout(&port->tx_complete,
					      msecs_to_jiffies(PD_T_TCPC_TX_TIMEOUT));
	mutex_lock(&port->lock);
	if (!timeout)
		return -ETIMEDOUT;

	switch (port->tx_status) {
	case TCPC_TX_SUCCESS:
		/* GoodCRC received: advance the rolling message ID. */
		port->message_id = (port->message_id + 1) & PD_HEADER_ID_MASK;
		/*
		 * USB PD rev 2.0, 8.3.2.2.1:
		 * USB PD rev 3.0, 8.3.2.1.3:
		 * "... Note that every AMS is Interruptible until the first
		 * Message in the sequence has been successfully sent (GoodCRC
		 * Message received)."
		 */
		if (port->ams != NONE_AMS)
			port->in_ams = true;
		break;
	case TCPC_TX_DISCARDED:
		ret = -EAGAIN;
		break;
	case TCPC_TX_FAILED:
	default:
		ret = -EIO;
		break;
	}

	/* Some AMS don't expect responses. Finish them here. */
	if (port->ams == ATTENTION || port->ams == SOURCE_ALERT)
		tcpm_ams_finish(port);

	return ret;
}
/*
 * Called by the TCPC driver when a queued transmission has completed;
 * records the status and wakes the waiter in tcpm_pd_transmit().
 */
void tcpm_pd_transmit_complete(struct tcpm_port *port,
			       enum tcpm_transmit_status status)
{
	tcpm_log(port, "PD TX complete, status: %u", status);
	port->tx_status = status;
	complete(&port->tx_complete);
}
EXPORT_SYMBOL_GPL(tcpm_pd_transmit_complete);
/*
 * Update connector orientation, USB role switch and mux mode in one
 * call. Stops and returns the error on the first failing step without
 * undoing the steps already applied.
 */
static int tcpm_mux_set(struct tcpm_port *port, int state,
			enum usb_role usb_role,
			enum typec_orientation orientation)
{
	int ret;

	tcpm_log(port, "Requesting mux state %d, usb-role %d, orientation %d",
		 state, usb_role, orientation);

	ret = typec_set_orientation(port->typec_port, orientation);
	if (ret)
		return ret;

	/* The role switch is optional. */
	if (port->role_sw) {
		ret = usb_role_switch_set_role(port->role_sw, usb_role);
		if (ret)
			return ret;
	}

	return typec_set_mode(port->typec_port, state);
}
/* Program the CC polarity into the TCPC; cache it only on success. */
static int tcpm_set_polarity(struct tcpm_port *port,
			     enum typec_cc_polarity polarity)
{
	int err;

	tcpm_log(port, "polarity %d", polarity);

	err = port->tcpc->set_polarity(port->tcpc, polarity);
	if (err < 0)
		return err;

	port->polarity = polarity;

	return 0;
}
/* Switch VCONN sourcing on or off; on success, publish the new role. */
static int tcpm_set_vconn(struct tcpm_port *port, bool enable)
{
	int err;

	tcpm_log(port, "vconn:=%d", enable);

	err = port->tcpc->set_vconn(port->tcpc, enable);
	if (err)
		return err;

	port->vconn_role = enable ? TYPEC_SOURCE : TYPEC_SINK;
	typec_set_vconn_role(port->typec_port, port->vconn_role);

	return 0;
}
static u32 tcpm_get_current_limit(struct tcpm_port *port)
{
enum typec_cc_status cc;
u32 limit;
cc = port->polarity ? port->cc2 : port->cc1;
switch (cc) {
case TYPEC_CC_RP_1_5:
limit = 1500;
break;
case TYPEC_CC_RP_3_0:
limit = 3000;
break;
case TYPEC_CC_RP_DEF:
default:
if (port->tcpc->get_current_limit)
limit = port->tcpc->get_current_limit(port->tcpc);
else
limit = 0;
break;
}
return limit;
}
/*
 * Record the new supply operating point, notify the power-supply
 * class, and forward the limit to the TCPC if it implements the hook.
 * Returns -EOPNOTSUPP when the TCPC has no set_current_limit.
 */
static int tcpm_set_current_limit(struct tcpm_port *port, u32 max_ma, u32 mv)
{
	int err = -EOPNOTSUPP;

	tcpm_log(port, "Setting voltage/current limit %u mV %u mA", mv, max_ma);

	port->supply_voltage = mv;
	port->current_limit = max_ma;
	power_supply_changed(port->psy);

	if (port->tcpc->set_current_limit)
		err = port->tcpc->set_current_limit(port->tcpc, max_ma, mv);

	return err;
}
/* Tell the TCPC about (de)attachment using the current power/data roles. */
static int tcpm_set_attached_state(struct tcpm_port *port, bool attached)
{
	struct tcpc_dev *tcpc = port->tcpc;

	return tcpc->set_roles(tcpc, attached, port->pwr_role,
			       port->data_role);
}
/*
 * Apply a power/data role combination: derive the USB role from the
 * port's data capability (DRD/DFP/UFP), program the mux and the TCPC,
 * then publish the roles to the typec class. Returns -ENOTSUPP when
 * the requested data role is impossible for this port's capability.
 */
static int tcpm_set_roles(struct tcpm_port *port, bool attached,
			  enum typec_role role, enum typec_data_role data)
{
	enum typec_orientation orientation;
	enum usb_role usb_role;
	int ret;

	if (port->polarity == TYPEC_POLARITY_CC1)
		orientation = TYPEC_ORIENTATION_NORMAL;
	else
		orientation = TYPEC_ORIENTATION_REVERSE;

	if (port->typec_caps.data == TYPEC_PORT_DRD) {
		/* Dual-role data: USB role follows the PD data role. */
		if (data == TYPEC_HOST)
			usb_role = USB_ROLE_HOST;
		else
			usb_role = USB_ROLE_DEVICE;
	} else if (port->typec_caps.data == TYPEC_PORT_DFP) {
		/* Host-only port: host role only while sourcing, else idle. */
		if (data == TYPEC_HOST) {
			if (role == TYPEC_SOURCE)
				usb_role = USB_ROLE_HOST;
			else
				usb_role = USB_ROLE_NONE;
		} else {
			return -ENOTSUPP;
		}
	} else {
		/* Device-only port: device role only while sinking, else idle. */
		if (data == TYPEC_DEVICE) {
			if (role == TYPEC_SINK)
				usb_role = USB_ROLE_DEVICE;
			else
				usb_role = USB_ROLE_NONE;
		} else {
			return -ENOTSUPP;
		}
	}

	ret = tcpm_mux_set(port, TYPEC_STATE_USB, usb_role, orientation);
	if (ret < 0)
		return ret;

	ret = port->tcpc->set_roles(port->tcpc, attached, role, data);
	if (ret < 0)
		return ret;

	port->pwr_role = role;
	port->data_role = data;
	typec_set_data_role(port->typec_port, data);
	typec_set_pwr_role(port->typec_port, role);

	return 0;
}
/* Change only the power role (data role untouched) and publish it. */
static int tcpm_set_pwr_role(struct tcpm_port *port, enum typec_role role)
{
	int err = port->tcpc->set_roles(port->tcpc, true, role,
					port->data_role);

	if (err < 0)
		return err;

	port->pwr_role = role;
	typec_set_pwr_role(port->typec_port, role);

	return 0;
}
/*
 * Transform the PDO to be compliant to PD rev2.0.
 * Return 0 if the PDO type is not defined in PD rev2.0.
 * Otherwise, return the converted PDO.
 */
static u32 tcpm_forge_legacy_pdo(struct tcpm_port *port, u32 pdo, enum typec_role role)
{
	enum pd_pdo_type type = pdo_type(pdo);

	/* Variable and battery PDOs are valid as-is in rev2.0. */
	if (type == PDO_TYPE_VAR || type == PDO_TYPE_BATT)
		return pdo;

	/* APDOs (and unknown types) do not exist in rev2.0. */
	if (type != PDO_TYPE_FIXED)
		return 0;

	/* Fixed PDO: strip the rev3.0-only bits for the given role. */
	if (role == TYPEC_SINK)
		return pdo & ~PDO_FIXED_FRS_CURR_MASK;

	return pdo & ~PDO_FIXED_UNCHUNK_EXT;
}
/*
 * Send our Source_Capabilities. For pre-rev3.0 partners each PDO is
 * first converted by tcpm_forge_legacy_pdo() (dropping rev3.0-only
 * types). When no PDOs remain (sink-only port), a Reject control
 * message is sent instead.
 */
static int tcpm_pd_send_source_caps(struct tcpm_port *port)
{
	struct pd_message msg;
	u32 pdo;
	unsigned int i, nr_pdo = 0;

	memset(&msg, 0, sizeof(msg));

	for (i = 0; i < port->nr_src_pdo; i++) {
		if (port->negotiated_rev >= PD_REV30) {
			msg.payload[nr_pdo++] = cpu_to_le32(port->src_pdo[i]);
		} else {
			pdo = tcpm_forge_legacy_pdo(port, port->src_pdo[i], TYPEC_SOURCE);
			if (pdo)
				msg.payload[nr_pdo++] = cpu_to_le32(pdo);
		}
	}

	if (!nr_pdo) {
		/* No source capabilities defined, sink only */
		msg.header = PD_HEADER_LE(PD_CTRL_REJECT,
					  port->pwr_role,
					  port->data_role,
					  port->negotiated_rev,
					  port->message_id, 0);
	} else {
		msg.header = PD_HEADER_LE(PD_DATA_SOURCE_CAP,
					  port->pwr_role,
					  port->data_role,
					  port->negotiated_rev,
					  port->message_id,
					  nr_pdo);
	}

	return tcpm_pd_transmit(port, TCPC_TX_SOP, &msg);
}
/*
 * Send our Sink_Capabilities, mirroring tcpm_pd_send_source_caps():
 * legacy-convert PDOs for pre-rev3.0 partners, and send Reject when no
 * sink PDOs exist (source-only port).
 */
static int tcpm_pd_send_sink_caps(struct tcpm_port *port)
{
	struct pd_message msg;
	u32 pdo;
	unsigned int i, nr_pdo = 0;

	memset(&msg, 0, sizeof(msg));

	for (i = 0; i < port->nr_snk_pdo; i++) {
		if (port->negotiated_rev >= PD_REV30) {
			msg.payload[nr_pdo++] = cpu_to_le32(port->snk_pdo[i]);
		} else {
			pdo = tcpm_forge_legacy_pdo(port, port->snk_pdo[i], TYPEC_SINK);
			if (pdo)
				msg.payload[nr_pdo++] = cpu_to_le32(pdo);
		}
	}

	if (!nr_pdo) {
		/* No sink capabilities defined, source only */
		msg.header = PD_HEADER_LE(PD_CTRL_REJECT,
					  port->pwr_role,
					  port->data_role,
					  port->negotiated_rev,
					  port->message_id, 0);
	} else {
		msg.header = PD_HEADER_LE(PD_DATA_SINK_CAP,
					  port->pwr_role,
					  port->data_role,
					  port->negotiated_rev,
					  port->message_id,
					  nr_pdo);
	}

	return tcpm_pd_transmit(port, TCPC_TX_SOP, &msg);
}
/* Schedule the main state machine after @delay_ms, or run it right away. */
static void mod_tcpm_delayed_work(struct tcpm_port *port, unsigned int delay_ms)
{
	if (!delay_ms) {
		hrtimer_cancel(&port->state_machine_timer);
		kthread_queue_work(port->wq, &port->state_machine);
		return;
	}

	hrtimer_start(&port->state_machine_timer, ms_to_ktime(delay_ms),
		      HRTIMER_MODE_REL);
}
/* Schedule the VDM state machine after @delay_ms, or run it right away. */
static void mod_vdm_delayed_work(struct tcpm_port *port, unsigned int delay_ms)
{
	if (!delay_ms) {
		hrtimer_cancel(&port->vdm_state_machine_timer);
		kthread_queue_work(port->wq, &port->vdm_state_machine);
		return;
	}

	hrtimer_start(&port->vdm_state_machine_timer, ms_to_ktime(delay_ms),
		      HRTIMER_MODE_REL);
}
/* Schedule the FRS-enable work after @delay_ms, or run it right away. */
static void mod_enable_frs_delayed_work(struct tcpm_port *port, unsigned int delay_ms)
{
	if (!delay_ms) {
		hrtimer_cancel(&port->enable_frs_timer);
		kthread_queue_work(port->wq, &port->enable_frs);
		return;
	}

	hrtimer_start(&port->enable_frs_timer, ms_to_ktime(delay_ms),
		      HRTIMER_MODE_REL);
}
/* Schedule the Discover Identity work after @delay_ms, or run it now. */
static void mod_send_discover_delayed_work(struct tcpm_port *port, unsigned int delay_ms)
{
	if (!delay_ms) {
		hrtimer_cancel(&port->send_discover_timer);
		kthread_queue_work(port->wq, &port->send_discover_work);
		return;
	}

	hrtimer_start(&port->send_discover_timer, ms_to_ktime(delay_ms),
		      HRTIMER_MODE_REL);
}
/*
 * Transition the port state machine to @state. A non-zero @delay_ms
 * only records the pending state and arms the hrtimer; an immediate
 * change commits the state and (re)queues the state machine work
 * unless it is already running.
 */
static void tcpm_set_state(struct tcpm_port *port, enum tcpm_state state,
			   unsigned int delay_ms)
{
	if (delay_ms) {
		tcpm_log(port, "pending state change %s -> %s @ %u ms [%s %s]",
			 tcpm_states[port->state], tcpm_states[state], delay_ms,
			 pd_rev[port->negotiated_rev], tcpm_ams_str[port->ams]);
		port->delayed_state = state;
		mod_tcpm_delayed_work(port, delay_ms);
		port->delayed_runtime = ktime_add(ktime_get(), ms_to_ktime(delay_ms));
		port->delay_ms = delay_ms;
	} else {
		tcpm_log(port, "state change %s -> %s [%s %s]",
			 tcpm_states[port->state], tcpm_states[state],
			 pd_rev[port->negotiated_rev], tcpm_ams_str[port->ams]);
		/* An immediate change cancels any pending delayed transition. */
		port->delayed_state = INVALID_STATE;
		port->prev_state = port->state;
		port->state = state;
		/*
		 * Don't re-queue the state machine work item if we're currently
		 * in the state machine and we're immediately changing states.
		 * tcpm_state_machine_work() will continue running the state
		 * machine.
		 */
		if (!port->state_machine_running)
			mod_tcpm_delayed_work(port, 0);
	}
}
/*
 * Like tcpm_set_state(), but only if the port is still in the state it
 * was in when the current state handler was entered (no concurrent
 * transition happened); otherwise the request is logged and skipped.
 */
static void tcpm_set_state_cond(struct tcpm_port *port, enum tcpm_state state,
				unsigned int delay_ms)
{
	if (port->enter_state == port->state)
		tcpm_set_state(port, state, delay_ms);
	else
		tcpm_log(port,
			 "skipped %sstate change %s -> %s [%u ms], context state %s [%s %s]",
			 delay_ms ? "delayed " : "",
			 tcpm_states[port->state], tcpm_states[state],
			 delay_ms, tcpm_states[port->enter_state],
			 pd_rev[port->negotiated_rev], tcpm_ams_str[port->ams]);
}
/* Record a message request and kick the state machine to send it. */
static void tcpm_queue_message(struct tcpm_port *port,
			       enum pd_msg_request message)
{
	port->queued_message = message;
	mod_tcpm_delayed_work(port, 0);
}
/* True when the currently active AMS is one carried out with VDMs. */
static bool tcpm_vdm_ams(struct tcpm_port *port)
{
	switch (port->ams) {
	case DISCOVER_IDENTITY:
	case SOURCE_STARTUP_CABLE_PLUG_DISCOVER_IDENTITY:
	case DISCOVER_SVIDS:
	case DISCOVER_MODES:
	case DFP_TO_UFP_ENTER_MODE:
	case DFP_TO_UFP_EXIT_MODE:
	case DFP_TO_CABLE_PLUG_ENTER_MODE:
	case DFP_TO_CABLE_PLUG_EXIT_MODE:
	case ATTENTION:
	case UNSTRUCTURED_VDMS:
	case STRUCTURED_VDMS:
		break;
	default:
		return false;
	}

	return true;
}
/*
 * Whether a new AMS may start now. The listed sequences are always
 * interruptible; any other AMS blocks a newcomer only once its first
 * message has been successfully sent (port->in_ams set).
 */
static bool tcpm_ams_interruptible(struct tcpm_port *port)
{
	switch (port->ams) {
	/* Interruptible AMS */
	case NONE_AMS:
	case SECURITY:
	case FIRMWARE_UPDATE:
	case DISCOVER_IDENTITY:
	case SOURCE_STARTUP_CABLE_PLUG_DISCOVER_IDENTITY:
	case DISCOVER_SVIDS:
	case DISCOVER_MODES:
	case DFP_TO_UFP_ENTER_MODE:
	case DFP_TO_UFP_EXIT_MODE:
	case DFP_TO_CABLE_PLUG_ENTER_MODE:
	case DFP_TO_CABLE_PLUG_EXIT_MODE:
	case UNSTRUCTURED_VDMS:
	case STRUCTURED_VDMS:
	case COUNTRY_INFO:
	case COUNTRY_CODES:
		break;
	/* Non-Interruptible AMS */
	default:
		if (port->in_ams)
			return false;
		break;
	}

	return true;
}
/*
 * Begin a new Atomic Message Sequence. Enforces interruptibility and
 * PD3.0 sink-side collision avoidance (SinkTxOk), adjusts the Rp
 * advertisement when we are the source, and moves the state machine to
 * the appropriate start state. Returns -EAGAIN when the AMS may not
 * start now, 0 otherwise.
 */
static int tcpm_ams_start(struct tcpm_port *port, enum tcpm_ams ams)
{
	int ret = 0;

	tcpm_log(port, "AMS %s start", tcpm_ams_str[ams]);

	/* Resets may always interrupt; everything else must ask first. */
	if (!tcpm_ams_interruptible(port) &&
	    !(ams == HARD_RESET || ams == SOFT_RESET_AMS)) {
		port->upcoming_state = INVALID_STATE;
		tcpm_log(port, "AMS %s not interruptible, aborting",
			 tcpm_ams_str[port->ams]);
		return -EAGAIN;
	}

	if (port->pwr_role == TYPEC_SOURCE) {
		enum typec_cc_status cc_req = port->cc_req;

		port->ams = ams;

		if (ams == HARD_RESET) {
			tcpm_set_cc(port, tcpm_rp_cc(port));
			tcpm_pd_transmit(port, TCPC_TX_HARD_RESET, NULL);
			tcpm_set_state(port, HARD_RESET_START, 0);
			return ret;
		} else if (ams == SOFT_RESET_AMS) {
			if (!port->explicit_contract)
				tcpm_set_cc(port, tcpm_rp_cc(port));
			tcpm_set_state(port, SOFT_RESET_SEND, 0);
			return ret;
		} else if (tcpm_vdm_ams(port)) {
			/* tSinkTx is enforced in vdm_run_state_machine */
			if (port->negotiated_rev >= PD_REV30)
				tcpm_set_cc(port, SINK_TX_NG);
			return ret;
		}

		/* PD3.0: advertise SinkTxNG so the sink stops transmitting. */
		if (port->negotiated_rev >= PD_REV30)
			tcpm_set_cc(port, SINK_TX_NG);

		switch (port->state) {
		case SRC_READY:
		case SRC_STARTUP:
		case SRC_SOFT_RESET_WAIT_SNK_TX:
		case SOFT_RESET:
		case SOFT_RESET_SEND:
			/* Wait tSinkTx if the sink was just allowed to transmit. */
			if (port->negotiated_rev >= PD_REV30)
				tcpm_set_state(port, AMS_START,
					       cc_req == SINK_TX_OK ?
					       PD_T_SINK_TX : 0);
			else
				tcpm_set_state(port, AMS_START, 0);
			break;
		default:
			if (port->negotiated_rev >= PD_REV30)
				tcpm_set_state(port, SRC_READY,
					       cc_req == SINK_TX_OK ?
					       PD_T_SINK_TX : 0);
			else
				tcpm_set_state(port, SRC_READY, 0);
			break;
		}
	} else {
		/* Sink: PD3.0 collision avoidance - transmit only on SinkTxOk. */
		if (port->negotiated_rev >= PD_REV30 &&
		    !tcpm_sink_tx_ok(port) &&
		    ams != SOFT_RESET_AMS &&
		    ams != HARD_RESET) {
			port->upcoming_state = INVALID_STATE;
			tcpm_log(port, "Sink TX No Go");
			return -EAGAIN;
		}

		port->ams = ams;

		if (ams == HARD_RESET) {
			tcpm_pd_transmit(port, TCPC_TX_HARD_RESET, NULL);
			tcpm_set_state(port, HARD_RESET_START, 0);
			return ret;
		} else if (tcpm_vdm_ams(port)) {
			return ret;
		}

		if (port->state == SNK_READY ||
		    port->state == SNK_SOFT_RESET)
			tcpm_set_state(port, AMS_START, 0);
		else
			tcpm_set_state(port, SNK_READY, 0);
	}

	return ret;
}
/*
 * VDM/VDO handling functions
 */

/*
 * Stage a VDM (header + @cnt data VDOs) for the VDM state machine to
 * transmit. Must be called with the port lock held. If a Discover
 * Identity we initiated is still in flight, it is rescheduled so the
 * incoming exchange is handled first.
 */
static void tcpm_queue_vdm(struct tcpm_port *port, const u32 header,
			   const u32 *data, int cnt)
{
	u32 vdo_hdr = port->vdo_data[0];

	WARN_ON(!mutex_is_locked(&port->lock));

	/* If is sending discover_identity, handle received message first */
	if (PD_VDO_SVDM(vdo_hdr) && PD_VDO_CMD(vdo_hdr) == CMD_DISCOVER_IDENT) {
		port->send_discover = true;
		mod_send_discover_delayed_work(port, SEND_DISCOVER_RETRY_MS);
	} else {
		/* Make sure we are not still processing a previous VDM packet */
		WARN_ON(port->vdm_state > VDM_STATE_DONE);
	}

	port->vdo_count = cnt + 1;
	port->vdo_data[0] = header;
	memcpy(&port->vdo_data[1], data, sizeof(u32) * cnt);

	/* Set ready, vdm state machine will actually send */
	port->vdm_retries = 0;
	port->vdm_state = VDM_STATE_READY;
	port->vdm_sm_running = true;

	mod_vdm_delayed_work(port, 0);
}
/* Wrapper around tcpm_queue_vdm() for callers not holding the port lock. */
static void tcpm_queue_vdm_unlocked(struct tcpm_port *port, const u32 header,
				    const u32 *data, int cnt)
{
	mutex_lock(&port->lock);
	tcpm_queue_vdm(port, header, data, cnt);
	mutex_unlock(&port->lock);
}
/*
 * Consume a Discover Identity ACK: reset discovery state, cache the
 * partner's ID header, cert stat and product VDOs, and push the
 * identity to the typec class.
 */
static void svdm_consume_identity(struct tcpm_port *port, const u32 *p, int cnt)
{
	u32 vdo = p[VDO_INDEX_IDH];
	u32 product = p[VDO_INDEX_PRODUCT];

	/* Starting a fresh discovery: clear previously collected mode data. */
	memset(&port->mode_data, 0, sizeof(port->mode_data));

	port->partner_ident.id_header = vdo;
	port->partner_ident.cert_stat = p[VDO_INDEX_CSTAT];
	port->partner_ident.product = product;

	typec_partner_set_identity(port->partner);

	tcpm_log(port, "Identity: %04x:%04x.%04x",
		 PD_IDH_VID(vdo),
		 PD_PRODUCT_PID(product), product & 0xffff);
}
/*
 * Consume a Discover SVIDs ACK into mode_data. Returns true when
 * another Discover SVIDs request should be sent (the response was
 * full), false when discovery is complete or invalid data was seen.
 */
static bool svdm_consume_svids(struct tcpm_port *port, const u32 *p, int cnt)
{
	struct pd_mode_data *pmdata = &port->mode_data;
	int i;

	for (i = 1; i < cnt; i++) {
		u16 svid;

		/* Each VDO carries two SVIDs, upper half first; 0 terminates. */
		svid = (p[i] >> 16) & 0xffff;
		if (!svid)
			return false;

		if (pmdata->nsvids >= SVID_DISCOVERY_MAX)
			goto abort;

		pmdata->svids[pmdata->nsvids++] = svid;
		tcpm_log(port, "SVID %d: 0x%x", pmdata->nsvids, svid);

		svid = p[i] & 0xffff;
		if (!svid)
			return false;

		if (pmdata->nsvids >= SVID_DISCOVERY_MAX)
			goto abort;

		pmdata->svids[pmdata->nsvids++] = svid;
		tcpm_log(port, "SVID %d: 0x%x", pmdata->nsvids, svid);
	}

	/*
	 * PD3.0 Spec 6.4.4.3.2: The SVIDs are returned 2 per VDO (see Table
	 * 6-43), and can be returned maximum 6 VDOs per response (see Figure
	 * 6-19). If the Responder supports 12 or more SVIDs then the Discover
	 * SVIDs Command Shall be executed multiple times until a Discover
	 * SVIDs VDO is returned ending either with a SVID value of 0x0000 in
	 * the last part of the last VDO or with a VDO containing two SVIDs
	 * with values of 0x0000.
	 *
	 * However, some odd docks support fewer than 12 SVIDs but without
	 * 0x0000 in the last VDO, so we need to break the Discover SVIDs
	 * request and return false here.
	 */
	return cnt == 7;
abort:
	tcpm_log(port, "SVID_DISCOVERY_MAX(%d) too low!", SVID_DISCOVERY_MAX);
	return false;
}
/*
 * Consume a Discover Modes ACK: record one altmode descriptor per mode
 * VDO for the SVID currently being queried (mode numbers start at 1).
 */
static void svdm_consume_modes(struct tcpm_port *port, const u32 *p, int cnt)
{
	struct pd_mode_data *pmdata = &port->mode_data;
	struct typec_altmode_desc *paltmode;
	int i;

	if (pmdata->altmodes >= ARRAY_SIZE(port->partner_altmode)) {
		/* Already logged in svdm_consume_svids() */
		return;
	}

	for (i = 1; i < cnt; i++) {
		paltmode = &pmdata->altmode_desc[pmdata->altmodes];
		memset(paltmode, 0, sizeof(*paltmode));

		paltmode->svid = pmdata->svids[pmdata->svid_index];
		paltmode->mode = i;
		paltmode->vdo = p[i];

		tcpm_log(port, " Alternate mode %d: SVID 0x%04x, VDO %d: 0x%08x",
			 pmdata->altmodes, paltmode->svid,
			 paltmode->mode, paltmode->vdo);

		pmdata->altmodes++;
	}
}
/*
 * Register all discovered partner alternate modes with the typec
 * class. A registration failure is logged and leaves a NULL slot.
 */
static void tcpm_register_partner_altmodes(struct tcpm_port *port)
{
	struct pd_mode_data *modep = &port->mode_data;
	struct typec_altmode *altmode;
	int i;

	for (i = 0; i < modep->altmodes; i++) {
		altmode = typec_partner_register_altmode(port->partner,
						&modep->altmode_desc[i]);
		if (IS_ERR(altmode)) {
			tcpm_log(port, "Failed to register partner SVID 0x%04x",
				 modep->altmode_desc[i].svid);
			altmode = NULL;
		}
		port->partner_altmode[i] = altmode;
	}
}
/* Partner advertises modal operation in its Discover Identity ID header. */
#define supports_modal(port)	PD_IDH_MODAL_SUPP((port)->partner_ident.id_header)

/*
 * Process one received structured VDM. Builds up to 'rlen' response
 * VDOs in @response (the return value is the count), and may select an
 * action for the matching altmode driver via @adev_action. Initiator
 * requests (CMDT_INIT) are answered with ACK/NAK/BUSY; responses
 * (ACK/NAK) drive the DFP discovery sequence Identity -> SVIDs ->
 * Modes.
 */
static int tcpm_pd_svdm(struct tcpm_port *port, struct typec_altmode *adev,
			const u32 *p, int cnt, u32 *response,
			enum adev_actions *adev_action)
{
	struct typec_port *typec = port->typec_port;
	struct typec_altmode *pdev;
	struct pd_mode_data *modep;
	int svdm_version;
	int rlen = 0;
	int cmd_type;
	int cmd;
	int i;

	cmd_type = PD_VDO_CMDT(p[0]);
	cmd = PD_VDO_CMD(p[0]);

	tcpm_log(port, "Rx VDM cmd 0x%x type %d cmd %d len %d",
		 p[0], cmd_type, cmd, cnt);

	modep = &port->mode_data;

	pdev = typec_match_altmode(port->partner_altmode, ALTMODE_DISCOVERY_MAX,
				   PD_VDO_VID(p[0]), PD_VDO_OPOS(p[0]));

	svdm_version = typec_get_negotiated_svdm_version(typec);
	if (svdm_version < 0)
		return 0;

	switch (cmd_type) {
	case CMDT_INIT:
		/* Partner-initiated request: decide on ACK payload below. */
		switch (cmd) {
		case CMD_DISCOVER_IDENT:
			if (PD_VDO_VID(p[0]) != USB_SID_PD)
				break;

			/* Fall back to the partner's lower SVDM version. */
			if (PD_VDO_SVDM_VER(p[0]) < svdm_version) {
				typec_partner_set_svdm_version(port->partner,
							       PD_VDO_SVDM_VER(p[0]));
				svdm_version = PD_VDO_SVDM_VER(p[0]);
			}

			port->ams = DISCOVER_IDENTITY;
			/*
			 * PD2.0 Spec 6.10.3: respond with NAK as DFP (data host)
			 * PD3.1 Spec 6.4.4.2.5.1: respond with NAK if "invalid field" or
			 * "wrong configuration" or "Unrecognized"
			 */
			if ((port->data_role == TYPEC_DEVICE || svdm_version >= SVDM_VER_2_0) &&
			    port->nr_snk_vdo) {
				if (svdm_version < SVDM_VER_2_0) {
					for (i = 0; i < port->nr_snk_vdo_v1; i++)
						response[i + 1] = port->snk_vdo_v1[i];
					rlen = port->nr_snk_vdo_v1 + 1;

				} else {
					for (i = 0; i < port->nr_snk_vdo; i++)
						response[i + 1] = port->snk_vdo[i];
					rlen = port->nr_snk_vdo + 1;
				}
			}
			break;
		case CMD_DISCOVER_SVID:
			port->ams = DISCOVER_SVIDS;
			break;
		case CMD_DISCOVER_MODES:
			port->ams = DISCOVER_MODES;
			break;
		case CMD_ENTER_MODE:
			port->ams = DFP_TO_UFP_ENTER_MODE;
			break;
		case CMD_EXIT_MODE:
			port->ams = DFP_TO_UFP_EXIT_MODE;
			break;
		case CMD_ATTENTION:
			/* Attention command does not have response */
			*adev_action = ADEV_ATTENTION;
			return 0;
		default:
			break;
		}
		/* rlen >= 1: ACK with payload; rlen == 0: NAK; else BUSY. */
		if (rlen >= 1) {
			response[0] = p[0] | VDO_CMDT(CMDT_RSP_ACK);
		} else if (rlen == 0) {
			response[0] = p[0] | VDO_CMDT(CMDT_RSP_NAK);
			rlen = 1;
		} else {
			response[0] = p[0] | VDO_CMDT(CMDT_RSP_BUSY);
			rlen = 1;
		}
		response[0] = (response[0] & ~VDO_SVDM_VERS_MASK) |
			      (VDO_SVDM_VERS(typec_get_negotiated_svdm_version(typec)));
		break;
	case CMDT_RSP_ACK:
		/* silently drop message if we are not connected */
		if (IS_ERR_OR_NULL(port->partner))
			break;

		tcpm_ams_finish(port);

		switch (cmd) {
		case CMD_DISCOVER_IDENT:
			if (PD_VDO_SVDM_VER(p[0]) < svdm_version)
				typec_partner_set_svdm_version(port->partner,
							       PD_VDO_SVDM_VER(p[0]));
			/* 6.4.4.3.1 */
			svdm_consume_identity(port, p, cnt);
			/* Identity done: continue discovery with Discover SVIDs. */
			response[0] = VDO(USB_SID_PD, 1, typec_get_negotiated_svdm_version(typec),
					  CMD_DISCOVER_SVID);
			rlen = 1;
			break;
		case CMD_DISCOVER_SVID:
			/* 6.4.4.3.2 */
			if (svdm_consume_svids(port, p, cnt)) {
				response[0] = VDO(USB_SID_PD, 1, svdm_version, CMD_DISCOVER_SVID);
				rlen = 1;
			} else if (modep->nsvids && supports_modal(port)) {
				response[0] = VDO(modep->svids[0], 1, svdm_version,
						  CMD_DISCOVER_MODES);
				rlen = 1;
			}
			break;
		case CMD_DISCOVER_MODES:
			/* 6.4.4.3.3 */
			svdm_consume_modes(port, p, cnt);
			modep->svid_index++;
			if (modep->svid_index < modep->nsvids) {
				/* More SVIDs to query modes for. */
				u16 svid = modep->svids[modep->svid_index];
				response[0] = VDO(svid, 1, svdm_version, CMD_DISCOVER_MODES);
				rlen = 1;
			} else {
				tcpm_register_partner_altmodes(port);
			}
			break;
		case CMD_ENTER_MODE:
			if (adev && pdev)
				*adev_action = ADEV_QUEUE_VDM_SEND_EXIT_MODE_ON_FAIL;
			return 0;
		case CMD_EXIT_MODE:
			if (adev && pdev) {
				/* Back to USB Operation */
				*adev_action = ADEV_NOTIFY_USB_AND_QUEUE_VDM;
				return 0;
			}
			break;
		case VDO_CMD_VENDOR(0) ... VDO_CMD_VENDOR(15):
			break;
		default:
			/* Unrecognized SVDM */
			response[0] = p[0] | VDO_CMDT(CMDT_RSP_NAK);
			rlen = 1;
			response[0] = (response[0] & ~VDO_SVDM_VERS_MASK) |
				      (VDO_SVDM_VERS(svdm_version));
			break;
		}
		break;
	case CMDT_RSP_NAK:
		tcpm_ams_finish(port);
		switch (cmd) {
		case CMD_DISCOVER_IDENT:
		case CMD_DISCOVER_SVID:
		case CMD_DISCOVER_MODES:
		case VDO_CMD_VENDOR(0) ... VDO_CMD_VENDOR(15):
			break;
		case CMD_ENTER_MODE:
			/* Back to USB Operation */
			*adev_action = ADEV_NOTIFY_USB_AND_QUEUE_VDM;
			return 0;
		default:
			/* Unrecognized SVDM */
			response[0] = p[0] | VDO_CMDT(CMDT_RSP_NAK);
			rlen = 1;
			response[0] = (response[0] & ~VDO_SVDM_VERS_MASK) |
				      (VDO_SVDM_VERS(svdm_version));
			break;
		}
		break;
	default:
		response[0] = p[0] | VDO_CMDT(CMDT_RSP_NAK);
		rlen = 1;
		response[0] = (response[0] & ~VDO_SVDM_VERS_MASK) |
			      (VDO_SVDM_VERS(svdm_version));
		break;
	}

	/* Informing the alternate mode drivers about everything */
	*adev_action = ADEV_QUEUE_VDM;
	return rlen;
}
static void tcpm_pd_handle_msg(struct tcpm_port *port,
			       enum pd_msg_request message,
			       enum tcpm_ams ams);

/*
 * Entry point for a received VDM. Decodes it with tcpm_pd_svdm(),
 * notifies the matching altmode driver with the port lock dropped (to
 * avoid an AB-BA inversion with the alt-mode lock, see below), and
 * finally queues any response VDM.
 */
static void tcpm_handle_vdm_request(struct tcpm_port *port,
				    const __le32 *payload, int cnt)
{
	enum adev_actions adev_action = ADEV_NONE;
	struct typec_altmode *adev;
	u32 p[PD_MAX_PAYLOAD];
	u32 response[8] = { };
	int i, rlen = 0;

	for (i = 0; i < cnt; i++)
		p[i] = le32_to_cpu(payload[i]);

	adev = typec_match_altmode(port->port_altmode, ALTMODE_DISCOVERY_MAX,
				   PD_VDO_VID(p[0]), PD_VDO_OPOS(p[0]));

	if (port->vdm_state == VDM_STATE_BUSY) {
		/* If UFP responded busy retry after timeout */
		if (PD_VDO_CMDT(p[0]) == CMDT_RSP_BUSY) {
			port->vdm_state = VDM_STATE_WAIT_RSP_BUSY;
			port->vdo_retry = (p[0] & ~VDO_CMDT_MASK) |
				CMDT_INIT;
			mod_vdm_delayed_work(port, PD_T_VDM_BUSY);
			return;
		}
		port->vdm_state = VDM_STATE_DONE;
	}

	if (PD_VDO_SVDM(p[0]) && (adev || tcpm_vdm_ams(port) || port->nr_snk_vdo)) {
		/*
		 * Here a SVDM is received (INIT or RSP or unknown). Set the vdm_sm_running in
		 * advance because we are dropping the lock but may send VDMs soon.
		 * For the cases of INIT received:
		 *  - If no response to send, it will be cleared later in this function.
		 *  - If there are responses to send, it will be cleared in the state machine.
		 * For the cases of RSP received:
		 *  - If no further INIT to send, it will be cleared later in this function.
		 *  - Otherwise, it will be cleared in the state machine if timeout or it will go
		 *    back here until no further INIT to send.
		 * For the cases of unknown type received:
		 *  - We will send NAK and the flag will be cleared in the state machine.
		 */
		port->vdm_sm_running = true;
		rlen = tcpm_pd_svdm(port, adev, p, cnt, response, &adev_action);
	} else {
		/* Non-SVDM or nothing to handle it: PD3.0 says Not_Supported. */
		if (port->negotiated_rev >= PD_REV30)
			tcpm_pd_handle_msg(port, PD_MSG_CTRL_NOT_SUPP, NONE_AMS);
	}

	/*
	 * We are done with any state stored in the port struct now, except
	 * for any port struct changes done by the tcpm_queue_vdm() call
	 * below, which is a separate operation.
	 *
	 * So we can safely release the lock here; and we MUST release the
	 * lock here to avoid an AB BA lock inversion:
	 *
	 * If we keep the lock here then the lock ordering in this path is:
	 *  1. tcpm_pd_rx_handler take the tcpm port lock
	 *  2. One of the typec_altmode_* calls below takes the alt-mode's lock
	 *
	 * And we also have this ordering:
	 *  1. alt-mode driver takes the alt-mode's lock
	 *  2. alt-mode driver calls tcpm_altmode_enter which takes the
	 *     tcpm port lock
	 *
	 * Dropping our lock here avoids this.
	 */
	mutex_unlock(&port->lock);

	if (adev) {
		switch (adev_action) {
		case ADEV_NONE:
			break;
		case ADEV_NOTIFY_USB_AND_QUEUE_VDM:
			WARN_ON(typec_altmode_notify(adev, TYPEC_STATE_USB, NULL));
			typec_altmode_vdm(adev, p[0], &p[1], cnt);
			break;
		case ADEV_QUEUE_VDM:
			typec_altmode_vdm(adev, p[0], &p[1], cnt);
			break;
		case ADEV_QUEUE_VDM_SEND_EXIT_MODE_ON_FAIL:
			if (typec_altmode_vdm(adev, p[0], &p[1], cnt)) {
				int svdm_version = typec_get_negotiated_svdm_version(
									port->typec_port);
				if (svdm_version < 0)
					break;

				/* Driver rejected the mode: ask the partner to exit it. */
				response[0] = VDO(adev->svid, 1, svdm_version,
						  CMD_EXIT_MODE);
				response[0] |= VDO_OPOS(adev->mode);
				rlen = 1;
			}
			break;
		case ADEV_ATTENTION:
			if (typec_altmode_attention(adev, p[1]))
				tcpm_log(port, "typec_altmode_attention no port partner altmode");
			break;
		}
	}

	/*
	 * We must re-take the lock here to balance the unlock in
	 * tcpm_pd_rx_handler, note that no changes, other then the
	 * tcpm_queue_vdm call, are made while the lock is held again.
	 * All that is done after the call is unwinding the call stack until
	 * we return to tcpm_pd_rx_handler and do the unlock there.
	 */
	mutex_lock(&port->lock);

	if (rlen > 0)
		tcpm_queue_vdm(port, response[0], &response[1], rlen - 1);
	else
		port->vdm_sm_running = false;
}
/* Build an SVDM header for @vid/@cmd and queue it with @data for transmit. */
static void tcpm_send_vdm(struct tcpm_port *port, u32 vid, int cmd,
			  const u32 *data, int count)
{
	u32 hdr;
	int svdm_version;

	svdm_version = typec_get_negotiated_svdm_version(port->typec_port);
	if (svdm_version < 0)
		return;

	/* Clamp the payload to what one VDM can carry (header + VDOs). */
	if (WARN_ON(count > VDO_MAX_SIZE - 1))
		count = VDO_MAX_SIZE - 1;

	/*
	 * Set VDM header with VID & CMD; the "structured" bit is forced for
	 * the PD SID, otherwise derived from the command range.
	 */
	hdr = VDO(vid, ((vid & USB_SID_PD) == USB_SID_PD) ?
		  1 : (PD_VDO_CMD(cmd) <= CMD_ATTENTION),
		  svdm_version, cmd);
	tcpm_queue_vdm(port, hdr, data, count);
}
/*
 * Return how long (in ms) to wait for the partner's reply to the VDM whose
 * header is @vdm_hdr before timing the exchange out.
 */
static unsigned int vdm_ready_timeout(u32 vdm_hdr)
{
	int cmd = PD_VDO_CMD(vdm_hdr);
	bool mode_op = cmd == CMD_ENTER_MODE || cmd == CMD_EXIT_MODE;

	/* Unstructured VDMs have their own fixed timeout. */
	if (!PD_VDO_SVDM(vdm_hdr))
		return PD_T_VDM_UNSTRUCTURED;

	/* Initiator side waits longer for mode entry/exit than other cmds. */
	if (PD_VDO_CMDT(vdm_hdr) == CMDT_INIT)
		return mode_op ? PD_T_VDM_WAIT_MODE_E : PD_T_VDM_SNDR_RSP;

	return mode_op ? PD_T_VDM_E_MODE : PD_T_VDM_RCVR_RSP;
}
/*
 * Advance the VDM transmit state machine one step.  Called with the port
 * lock held (see vdm_state_machine_work()); re-arms vdm_delayed_work for
 * time-driven transitions.
 */
static void vdm_run_state_machine(struct tcpm_port *port)
{
	struct pd_message msg;
	int i, res = 0;
	u32 vdo_hdr = port->vdo_data[0];

	switch (port->vdm_state) {
	case VDM_STATE_READY:
		/* Only transmit VDM if attached */
		if (!port->attached) {
			port->vdm_state = VDM_STATE_ERR_BUSY;
			break;
		}

		/*
		 * if there's traffic or we're not in PDO ready state don't send
		 * a VDM.
		 */
		if (port->state != SRC_READY && port->state != SNK_READY) {
			port->vdm_sm_running = false;
			break;
		}

		/* TODO: AMS operation for Unstructured VDM */
		if (PD_VDO_SVDM(vdo_hdr) && PD_VDO_CMDT(vdo_hdr) == CMDT_INIT) {
			/* Initiating an SVDM exchange starts the matching AMS. */
			switch (PD_VDO_CMD(vdo_hdr)) {
			case CMD_DISCOVER_IDENT:
				res = tcpm_ams_start(port, DISCOVER_IDENTITY);
				if (res == 0) {
					port->send_discover = false;
				} else if (res == -EAGAIN) {
					/* AMS cannot start now: drop the VDO, retry later. */
					port->vdo_data[0] = 0;
					mod_send_discover_delayed_work(port,
								       SEND_DISCOVER_RETRY_MS);
				}
				break;
			case CMD_DISCOVER_SVID:
				res = tcpm_ams_start(port, DISCOVER_SVIDS);
				break;
			case CMD_DISCOVER_MODES:
				res = tcpm_ams_start(port, DISCOVER_MODES);
				break;
			case CMD_ENTER_MODE:
				res = tcpm_ams_start(port, DFP_TO_UFP_ENTER_MODE);
				break;
			case CMD_EXIT_MODE:
				res = tcpm_ams_start(port, DFP_TO_UFP_EXIT_MODE);
				break;
			case CMD_ATTENTION:
				res = tcpm_ams_start(port, ATTENTION);
				break;
			case VDO_CMD_VENDOR(0) ... VDO_CMD_VENDOR(15):
				res = tcpm_ams_start(port, STRUCTURED_VDMS);
				break;
			default:
				res = -EOPNOTSUPP;
				break;
			}

			if (res < 0) {
				port->vdm_state = VDM_STATE_ERR_BUSY;
				return;
			}
		}

		port->vdm_state = VDM_STATE_SEND_MESSAGE;
		/*
		 * A PD 3.0 source initiating an SVDM exchange delays by
		 * PD_T_SINK_TX first; everyone else transmits immediately.
		 */
		mod_vdm_delayed_work(port, (port->negotiated_rev >= PD_REV30 &&
					    port->pwr_role == TYPEC_SOURCE &&
					    PD_VDO_SVDM(vdo_hdr) &&
					    PD_VDO_CMDT(vdo_hdr) == CMDT_INIT) ?
					   PD_T_SINK_TX : 0);
		break;
	case VDM_STATE_WAIT_RSP_BUSY:
		/* Partner was busy: restore the saved header and go again. */
		port->vdo_data[0] = port->vdo_retry;
		port->vdo_count = 1;
		port->vdm_state = VDM_STATE_READY;
		tcpm_ams_finish(port);
		break;
	case VDM_STATE_BUSY:
		/* Response timer expired while waiting for the partner. */
		port->vdm_state = VDM_STATE_ERR_TMOUT;
		if (port->ams != NONE_AMS)
			tcpm_ams_finish(port);
		break;
	case VDM_STATE_ERR_SEND:
		/*
		 * A partner which does not support USB PD will not reply,
		 * so this is not a fatal error. At the same time, some
		 * devices may not return GoodCRC under some circumstances,
		 * so we need to retry.
		 */
		if (port->vdm_retries < 3) {
			tcpm_log(port, "VDM Tx error, retry");
			port->vdm_retries++;
			port->vdm_state = VDM_STATE_READY;
			if (PD_VDO_SVDM(vdo_hdr) && PD_VDO_CMDT(vdo_hdr) == CMDT_INIT)
				tcpm_ams_finish(port);
		} else {
			tcpm_ams_finish(port);
		}
		break;
	case VDM_STATE_SEND_MESSAGE:
		/* Prepare and send VDM */
		memset(&msg, 0, sizeof(msg));
		msg.header = PD_HEADER_LE(PD_DATA_VENDOR_DEF,
					  port->pwr_role,
					  port->data_role,
					  port->negotiated_rev,
					  port->message_id, port->vdo_count);
		for (i = 0; i < port->vdo_count; i++)
			msg.payload[i] = cpu_to_le32(port->vdo_data[i]);
		res = tcpm_pd_transmit(port, TCPC_TX_SOP, &msg);
		if (res < 0) {
			port->vdm_state = VDM_STATE_ERR_SEND;
		} else {
			unsigned long timeout;

			/* Sent OK: arm the response timeout for this command. */
			port->vdm_retries = 0;
			port->vdo_data[0] = 0;
			port->vdm_state = VDM_STATE_BUSY;
			timeout = vdm_ready_timeout(vdo_hdr);
			mod_vdm_delayed_work(port, timeout);
		}
		break;
	default:
		break;
	}
}
static void vdm_state_machine_work(struct kthread_work *work)
{
struct tcpm_port *port = container_of(work, struct tcpm_port, vdm_state_machine);
enum vdm_states prev_state;
mutex_lock(&port->lock);
/*
* Continue running as long as the port is not busy and there was
* a state change.
*/
do {
prev_state = port->vdm_state;
vdm_run_state_machine(port);
} while (port->vdm_state != prev_state &&
port->vdm_state != VDM_STATE_BUSY &&
port->vdm_state != VDM_STATE_SEND_MESSAGE);
if (port->vdm_state < VDM_STATE_READY)
port->vdm_sm_running = false;
mutex_unlock(&port->lock);
}
/*
 * Validation results for a received source/sink capabilities list, produced
 * by tcpm_caps_err().  Non-zero values index pdo_err_msg[].
 */
enum pdo_err {
	PDO_NO_ERR,
	PDO_ERR_NO_VSAFE5V,
	PDO_ERR_VSAFE5V_NOT_FIRST,
	PDO_ERR_PDO_TYPE_NOT_IN_ORDER,
	PDO_ERR_FIXED_NOT_SORTED,
	PDO_ERR_VARIABLE_BATT_NOT_SORTED,
	PDO_ERR_DUPE_PDO,
	PDO_ERR_PPS_APDO_NOT_SORTED,
	PDO_ERR_DUPE_PPS_APDO,
};
/* Human-readable text for each pdo_err value, logged by tcpm_validate_caps(). */
static const char * const pdo_err_msg[] = {
	[PDO_ERR_NO_VSAFE5V] =
	" err: source/sink caps should at least have vSafe5V",
	[PDO_ERR_VSAFE5V_NOT_FIRST] =
	" err: vSafe5V Fixed Supply Object Shall always be the first object",
	[PDO_ERR_PDO_TYPE_NOT_IN_ORDER] =
	" err: PDOs should be in the following order: Fixed; Battery; Variable",
	[PDO_ERR_FIXED_NOT_SORTED] =
	" err: Fixed supply pdos should be in increasing order of their fixed voltage",
	[PDO_ERR_VARIABLE_BATT_NOT_SORTED] =
	" err: Variable/Battery supply pdos should be in increasing order of their minimum voltage",
	[PDO_ERR_DUPE_PDO] =
	" err: Variable/Batt supply pdos cannot have same min/max voltage",
	[PDO_ERR_PPS_APDO_NOT_SORTED] =
	" err: Programmable power supply apdos should be in increasing order of their maximum voltage",
	[PDO_ERR_DUPE_PPS_APDO] =
	" err: Programmable power supply apdos cannot have same min/max voltage and max current",
};
/*
 * Check a capabilities list against the PD ordering rules: vSafe5V fixed
 * supply first, PDOs grouped by type, each group sorted by voltage, no
 * duplicates.  Returns PDO_NO_ERR or the first rule violated.
 */
static enum pdo_err tcpm_caps_err(struct tcpm_port *port, const u32 *pdo,
				  unsigned int nr_pdo)
{
	unsigned int i;

	/* Should at least contain vSafe5v */
	if (nr_pdo < 1)
		return PDO_ERR_NO_VSAFE5V;

	/* The vSafe5V Fixed Supply Object Shall always be the first object */
	if (pdo_type(pdo[0]) != PDO_TYPE_FIXED ||
	    pdo_fixed_voltage(pdo[0]) != VSAFE5V)
		return PDO_ERR_VSAFE5V_NOT_FIRST;

	for (i = 1; i < nr_pdo; i++) {
		/* PDO types must appear in non-decreasing (grouped) order. */
		if (pdo_type(pdo[i]) < pdo_type(pdo[i - 1])) {
			return PDO_ERR_PDO_TYPE_NOT_IN_ORDER;
		} else if (pdo_type(pdo[i]) == pdo_type(pdo[i - 1])) {
			enum pd_pdo_type type = pdo_type(pdo[i]);

			switch (type) {
			/*
			 * The remaining Fixed Supply Objects, if
			 * present, shall be sent in voltage order;
			 * lowest to highest.
			 */
			case PDO_TYPE_FIXED:
				if (pdo_fixed_voltage(pdo[i]) <=
				    pdo_fixed_voltage(pdo[i - 1]))
					return PDO_ERR_FIXED_NOT_SORTED;
				break;
			/*
			 * The Battery Supply Objects and Variable
			 * supply, if present shall be sent in Minimum
			 * Voltage order; lowest to highest.
			 */
			case PDO_TYPE_VAR:
			case PDO_TYPE_BATT:
				if (pdo_min_voltage(pdo[i]) <
				    pdo_min_voltage(pdo[i - 1]))
					return PDO_ERR_VARIABLE_BATT_NOT_SORTED;
				else if ((pdo_min_voltage(pdo[i]) ==
					  pdo_min_voltage(pdo[i - 1])) &&
					 (pdo_max_voltage(pdo[i]) ==
					  pdo_max_voltage(pdo[i - 1])))
					return PDO_ERR_DUPE_PDO;
				break;
			/*
			 * The Programmable Power Supply APDOs, if present,
			 * shall be sent in Maximum Voltage order;
			 * lowest to highest.
			 */
			case PDO_TYPE_APDO:
				/* Only PPS APDOs are subject to these rules. */
				if (pdo_apdo_type(pdo[i]) != APDO_TYPE_PPS)
					break;

				if (pdo_pps_apdo_max_voltage(pdo[i]) <
				    pdo_pps_apdo_max_voltage(pdo[i - 1]))
					return PDO_ERR_PPS_APDO_NOT_SORTED;
				else if (pdo_pps_apdo_min_voltage(pdo[i]) ==
					 pdo_pps_apdo_min_voltage(pdo[i - 1]) &&
					 pdo_pps_apdo_max_voltage(pdo[i]) ==
					 pdo_pps_apdo_max_voltage(pdo[i - 1]) &&
					 pdo_pps_apdo_max_current(pdo[i]) ==
					 pdo_pps_apdo_max_current(pdo[i - 1]))
					return PDO_ERR_DUPE_PPS_APDO;
				break;
			default:
				tcpm_log_force(port, " Unknown pdo type");
			}
		}
	}

	return PDO_NO_ERR;
}
static int tcpm_validate_caps(struct tcpm_port *port, const u32 *pdo,
unsigned int nr_pdo)
{
enum pdo_err err_index = tcpm_caps_err(port, pdo, nr_pdo);
if (err_index != PDO_NO_ERR) {
tcpm_log_force(port, " %s", pdo_err_msg[err_index]);
return -EINVAL;
}
return 0;
}
/* typec_altmode_ops::enter - queue an Enter_Mode SVDM for @altmode. */
static int tcpm_altmode_enter(struct typec_altmode *altmode, u32 *vdo)
{
	struct tcpm_port *port = typec_altmode_get_drvdata(altmode);
	int svdm_version = typec_get_negotiated_svdm_version(port->typec_port);
	u32 hdr;

	if (svdm_version < 0)
		return svdm_version;

	/* One object for the header plus the optional mode VDO. */
	hdr = VDO(altmode->svid, vdo ? 2 : 1, svdm_version, CMD_ENTER_MODE) |
	      VDO_OPOS(altmode->mode);
	tcpm_queue_vdm_unlocked(port, hdr, vdo, vdo ? 1 : 0);

	return 0;
}
/* typec_altmode_ops::exit - queue an Exit_Mode SVDM for @altmode. */
static int tcpm_altmode_exit(struct typec_altmode *altmode)
{
	struct tcpm_port *port = typec_altmode_get_drvdata(altmode);
	int svdm_version = typec_get_negotiated_svdm_version(port->typec_port);
	u32 hdr;

	if (svdm_version < 0)
		return svdm_version;

	hdr = VDO(altmode->svid, 1, svdm_version, CMD_EXIT_MODE) |
	      VDO_OPOS(altmode->mode);
	tcpm_queue_vdm_unlocked(port, hdr, NULL, 0);

	return 0;
}
/* typec_altmode_ops::vdm - queue a raw VDM on behalf of an alt-mode driver. */
static int tcpm_altmode_vdm(struct typec_altmode *altmode,
			    u32 header, const u32 *data, int count)
{
	struct tcpm_port *port = typec_altmode_get_drvdata(altmode);

	/* @count includes the header word; the queue wants VDO count only. */
	tcpm_queue_vdm_unlocked(port, header, data, count - 1);

	return 0;
}
/* Operations the typec class invokes on behalf of alt-mode drivers. */
static const struct typec_altmode_ops tcpm_altmode_ops = {
	.enter = tcpm_altmode_enter,
	.exit = tcpm_altmode_exit,
	.vdm = tcpm_altmode_vdm,
};
/*
* PD (data, control) command handling functions
*/
/* Map the port's current power role to its corresponding ready state. */
static inline enum tcpm_state ready_state(struct tcpm_port *port)
{
	return port->pwr_role == TYPEC_SOURCE ? SRC_READY : SNK_READY;
}
/* Forward declaration; the definition follows the RX path below. */
static int tcpm_pd_send_control(struct tcpm_port *port,
				enum pd_ctrl_msg_type type);
/*
 * Handle an incoming Alert Message by requesting the partner's Status.
 * Untyped and battery-status alerts are answered with Not_Supported.
 */
static void tcpm_handle_alert(struct tcpm_port *port, const __le32 *payload,
			      int cnt)
{
	u32 ado = le32_to_cpu(payload[0]);
	unsigned int type = usb_pd_ado_type(ado);

	if (!type) {
		tcpm_log(port, "Alert message received with no type");
		tcpm_queue_message(port, PD_MSG_CTRL_NOT_SUPP);
		return;
	}

	/* Just handling non-battery alerts for now */
	if (type & USB_PD_ADO_TYPE_BATT_STATUS_CHANGE) {
		tcpm_queue_message(port, PD_MSG_CTRL_NOT_SUPP);
		return;
	}

	if (port->pwr_role == TYPEC_SOURCE) {
		port->upcoming_state = GET_STATUS_SEND;
		tcpm_ams_start(port, GETTING_SOURCE_SINK_STATUS);
	} else {
		/*
		 * Do not check SinkTxOk here in case the Source doesn't set its Rp to
		 * SinkTxOk in time.
		 */
		port->ams = GETTING_SOURCE_SINK_STATUS;
		tcpm_set_state(port, GET_STATUS_SEND, 0);
	}
}
/* Forward the discharge threshold to the TCPC, logging the outcome. */
static int tcpm_set_auto_vbus_discharge_threshold(struct tcpm_port *port,
						  enum typec_pwr_opmode mode, bool pps_active,
						  u32 requested_vbus_voltage)
{
	int ret;

	/* Optional TCPC hook - silently succeed when not implemented. */
	if (!port->tcpc->set_auto_vbus_discharge_threshold)
		return 0;

	ret = port->tcpc->set_auto_vbus_discharge_threshold(port->tcpc, mode,
							    pps_active,
							    requested_vbus_voltage);
	tcpm_log_force(port,
		       "set_auto_vbus_discharge_threshold mode:%d pps_active:%c vbus:%u ret:%d",
		       mode, pps_active ? 'y' : 'n', requested_vbus_voltage, ret);

	return ret;
}
/*
 * Transition toward @state in reaction to a received Message:
 * - in a ready state, start @ams and transition directly;
 * - during a power transition, the Message is a protocol error, so send a
 *   Hard Reset (PD 3.0 8.3.3.4.1.1 / 6.8.1);
 * - otherwise soft-reset if the ongoing AMS is not interruptible, else park
 *   the request in upcoming_state/next_ams and go back to ready first.
 */
static void tcpm_pd_handle_state(struct tcpm_port *port,
				 enum tcpm_state state,
				 enum tcpm_ams ams,
				 unsigned int delay_ms)
{
	switch (port->state) {
	case SRC_READY:
	case SNK_READY:
		port->ams = ams;
		tcpm_set_state(port, state, delay_ms);
		break;
	/* 8.3.3.4.1.1 and 6.8.1 power transitioning */
	case SNK_TRANSITION_SINK:
	case SNK_TRANSITION_SINK_VBUS:
	case SRC_TRANSITION_SUPPLY:
		tcpm_set_state(port, HARD_RESET_SEND, 0);
		break;
	default:
		if (!tcpm_ams_interruptible(port)) {
			tcpm_set_state(port, port->pwr_role == TYPEC_SOURCE ?
				       SRC_SOFT_RESET_WAIT_SNK_TX :
				       SNK_SOFT_RESET,
				       0);
		} else {
			/* process the Message 6.8.1 */
			port->upcoming_state = state;
			port->next_ams = ams;
			tcpm_set_state(port, ready_state(port), delay_ms);
		}
		break;
	}
}
/*
 * Queue @message in reaction to a received Message, with the same rules as
 * tcpm_pd_handle_state(): Hard Reset during power transitions, Soft Reset
 * when a non-interruptible AMS is in progress.
 */
static void tcpm_pd_handle_msg(struct tcpm_port *port,
			       enum pd_msg_request message,
			       enum tcpm_ams ams)
{
	switch (port->state) {
	case SRC_READY:
	case SNK_READY:
		port->ams = ams;
		tcpm_queue_message(port, message);
		break;
	/* PD 3.0 Spec 8.3.3.4.1.1 and 6.8.1 */
	case SNK_TRANSITION_SINK:
	case SNK_TRANSITION_SINK_VBUS:
	case SRC_TRANSITION_SUPPLY:
		tcpm_set_state(port, HARD_RESET_SEND, 0);
		break;
	default:
		if (!tcpm_ams_interruptible(port)) {
			tcpm_set_state(port, port->pwr_role == TYPEC_SOURCE ?
				       SRC_SOFT_RESET_WAIT_SNK_TX :
				       SNK_SOFT_RESET,
				       0);
		} else {
			port->next_ams = ams;
			tcpm_set_state(port, ready_state(port), 0);
			/* 6.8.1 process the Message */
			tcpm_queue_message(port, message);
		}
		break;
	}
}
/*
 * Register the partner's just-received Source Capabilities with the USB
 * Power Delivery class, creating the partner's usb_power_delivery object
 * on first use.
 *
 * Returns 0 on success or a negative errno from the registration calls.
 */
static int tcpm_register_source_caps(struct tcpm_port *port)
{
	struct usb_power_delivery_desc desc = { port->negotiated_rev };
	struct usb_power_delivery_capabilities_desc caps = { };
	struct usb_power_delivery_capabilities *cap;

	if (!port->partner_pd)
		port->partner_pd = usb_power_delivery_register(NULL, &desc);
	if (IS_ERR(port->partner_pd))
		return PTR_ERR(port->partner_pd);

	memcpy(caps.pdo, port->source_caps, sizeof(u32) * port->nr_source_caps);
	caps.role = TYPEC_SOURCE;

	/*
	 * Source Capabilities can be received again (e.g. when power
	 * negotiation restarts); unregister the previously registered
	 * capabilities first so re-registration neither fails nor leaks
	 * the stale object.
	 */
	if (port->partner_source_caps) {
		usb_power_delivery_unregister_capabilities(port->partner_source_caps);
		port->partner_source_caps = NULL;
	}

	cap = usb_power_delivery_register_capabilities(port->partner_pd, &caps);
	if (IS_ERR(cap))
		return PTR_ERR(cap);

	port->partner_source_caps = cap;

	return 0;
}
/*
 * Register the partner's just-received Sink Capabilities with the USB
 * Power Delivery class, creating the partner's usb_power_delivery object
 * on first use.
 *
 * Returns 0 on success or a negative errno from the registration calls.
 */
static int tcpm_register_sink_caps(struct tcpm_port *port)
{
	struct usb_power_delivery_desc desc = { port->negotiated_rev };
	struct usb_power_delivery_capabilities_desc caps = { };
	struct usb_power_delivery_capabilities *cap;

	if (!port->partner_pd)
		port->partner_pd = usb_power_delivery_register(NULL, &desc);
	if (IS_ERR(port->partner_pd))
		return PTR_ERR(port->partner_pd);

	memcpy(caps.pdo, port->sink_caps, sizeof(u32) * port->nr_sink_caps);
	caps.role = TYPEC_SINK;

	/*
	 * Sink Capabilities may be delivered more than once; unregister any
	 * previously registered capabilities first so re-registration
	 * neither fails nor leaks the stale object.
	 */
	if (port->partner_sink_caps) {
		usb_power_delivery_unregister_capabilities(port->partner_sink_caps);
		port->partner_sink_caps = NULL;
	}

	cap = usb_power_delivery_register_capabilities(port->partner_pd, &caps);
	if (IS_ERR(cap))
		return PTR_ERR(cap);

	port->partner_sink_caps = cap;

	return 0;
}
/*
 * Dispatch a received PD data message (Source_Capabilities, Request,
 * Sink_Capabilities, Vendor_Defined, BIST, Alert, ...).  Called from the
 * RX handler with the port lock held.
 */
static void tcpm_pd_data_request(struct tcpm_port *port,
				 const struct pd_message *msg)
{
	enum pd_data_msg_type type = pd_header_type_le(msg->header);
	unsigned int cnt = pd_header_cnt_le(msg->header);
	unsigned int rev = pd_header_rev_le(msg->header);
	unsigned int i;
	enum frs_typec_current partner_frs_current;
	bool frs_enable;
	int ret;

	/*
	 * Any non-VDM data message interrupts a VDM AMS in progress: abort
	 * the VDM state machine before handling the new message.
	 */
	if (tcpm_vdm_ams(port) && type != PD_DATA_VENDOR_DEF) {
		port->vdm_state = VDM_STATE_ERR_BUSY;
		tcpm_ams_finish(port);
		mod_vdm_delayed_work(port, 0);
	}

	switch (type) {
	case PD_DATA_SOURCE_CAP:
		/* Cache, log, sanity-check and register the partner's PDOs. */
		for (i = 0; i < cnt; i++)
			port->source_caps[i] = le32_to_cpu(msg->payload[i]);
		port->nr_source_caps = cnt;
		tcpm_log_source_caps(port);
		tcpm_validate_caps(port, port->source_caps,
				   port->nr_source_caps);
		tcpm_register_source_caps(port);

		/*
		 * Adjust revision in subsequent message headers, as required,
		 * to comply with 6.2.1.1.5 of the USB PD 3.0 spec. We don't
		 * support Rev 1.0 so just do nothing in that scenario.
		 */
		if (rev == PD_REV10) {
			if (port->ams == GET_SOURCE_CAPABILITIES)
				tcpm_ams_finish(port);
			break;
		}

		if (rev < PD_MAX_REV)
			port->negotiated_rev = rev;

		if (port->pwr_role == TYPEC_SOURCE) {
			if (port->ams == GET_SOURCE_CAPABILITIES)
				tcpm_pd_handle_state(port, SRC_READY, NONE_AMS, 0);
			/* Unexpected Source Capabilities */
			else
				tcpm_pd_handle_msg(port,
						   port->negotiated_rev < PD_REV30 ?
						   PD_MSG_CTRL_REJECT :
						   PD_MSG_CTRL_NOT_SUPP,
						   NONE_AMS);
		} else if (port->state == SNK_WAIT_CAPABILITIES) {
			/*
			 * This message may be received even if VBUS is not
			 * present. This is quite unexpected; see USB PD
			 * specification, sections 8.3.3.6.3.1 and 8.3.3.6.3.2.
			 * However, at the same time, we must be ready to
			 * receive this message and respond to it 15ms after
			 * receiving PS_RDY during power swap operations, no matter
			 * if VBUS is available or not (USB PD specification,
			 * section 6.5.9.2).
			 * So we need to accept the message either way,
			 * but be prepared to keep waiting for VBUS after it was
			 * handled.
			 */
			port->ams = POWER_NEGOTIATION;
			port->in_ams = true;
			tcpm_set_state(port, SNK_NEGOTIATE_CAPABILITIES, 0);
		} else {
			if (port->ams == GET_SOURCE_CAPABILITIES)
				tcpm_ams_finish(port);
			tcpm_pd_handle_state(port, SNK_NEGOTIATE_CAPABILITIES,
					     POWER_NEGOTIATION, 0);
		}
		break;
	case PD_DATA_REQUEST:
		/*
		 * Adjust revision in subsequent message headers, as required,
		 * to comply with 6.2.1.1.5 of the USB PD 3.0 spec. We don't
		 * support Rev 1.0 so just reject in that scenario.
		 */
		if (rev == PD_REV10) {
			tcpm_pd_handle_msg(port,
					   port->negotiated_rev < PD_REV30 ?
					   PD_MSG_CTRL_REJECT :
					   PD_MSG_CTRL_NOT_SUPP,
					   NONE_AMS);
			break;
		}

		if (rev < PD_MAX_REV)
			port->negotiated_rev = rev;

		/* Only a source can honor a Request, and it carries one RDO. */
		if (port->pwr_role != TYPEC_SOURCE || cnt != 1) {
			tcpm_pd_handle_msg(port,
					   port->negotiated_rev < PD_REV30 ?
					   PD_MSG_CTRL_REJECT :
					   PD_MSG_CTRL_NOT_SUPP,
					   NONE_AMS);
			break;
		}

		port->sink_request = le32_to_cpu(msg->payload[0]);

		/* Ask the sink to wait while a VDM exchange is in flight. */
		if (port->vdm_sm_running && port->explicit_contract) {
			tcpm_pd_handle_msg(port, PD_MSG_CTRL_WAIT, port->ams);
			break;
		}

		if (port->state == SRC_SEND_CAPABILITIES)
			tcpm_set_state(port, SRC_NEGOTIATE_CAPABILITIES, 0);
		else
			tcpm_pd_handle_state(port, SRC_NEGOTIATE_CAPABILITIES,
					     POWER_NEGOTIATION, 0);
		break;
	case PD_DATA_SINK_CAP:
		/* We don't do anything with this at the moment... */
		for (i = 0; i < cnt; i++)
			port->sink_caps[i] = le32_to_cpu(msg->payload[i]);

		/* Enable FRS only if the partner's requirement is within ours. */
		partner_frs_current = (port->sink_caps[0] & PDO_FIXED_FRS_CURR_MASK) >>
			PDO_FIXED_FRS_CURR_SHIFT;
		frs_enable = partner_frs_current && (partner_frs_current <=
						     port->new_source_frs_current);
		tcpm_log(port,
			 "Port partner FRS capable partner_frs_current:%u port_frs_current:%u enable:%c",
			 partner_frs_current, port->new_source_frs_current, frs_enable ? 'y' : 'n');
		if (frs_enable) {
			ret = port->tcpc->enable_frs(port->tcpc, true);
			tcpm_log(port, "Enable FRS %s, ret:%d\n", ret ? "fail" : "success", ret);
		}

		port->nr_sink_caps = cnt;
		port->sink_cap_done = true;
		tcpm_register_sink_caps(port);

		if (port->ams == GET_SINK_CAPABILITIES)
			tcpm_set_state(port, ready_state(port), 0);
		/* Unexpected Sink Capabilities */
		else
			tcpm_pd_handle_msg(port,
					   port->negotiated_rev < PD_REV30 ?
					   PD_MSG_CTRL_REJECT :
					   PD_MSG_CTRL_NOT_SUPP,
					   NONE_AMS);
		break;
	case PD_DATA_VENDOR_DEF:
		tcpm_handle_vdm_request(port, msg->payload, cnt);
		break;
	case PD_DATA_BIST:
		port->bist_request = le32_to_cpu(msg->payload[0]);
		tcpm_pd_handle_state(port, BIST_RX, BIST, 0);
		break;
	case PD_DATA_ALERT:
		/* Alerts are handled only in the ready states. */
		if (port->state != SRC_READY && port->state != SNK_READY)
			tcpm_pd_handle_state(port, port->pwr_role == TYPEC_SOURCE ?
					     SRC_SOFT_RESET_WAIT_SNK_TX : SNK_SOFT_RESET,
					     NONE_AMS, 0);
		else
			tcpm_handle_alert(port, msg->payload, cnt);
		break;
	case PD_DATA_BATT_STATUS:
	case PD_DATA_GET_COUNTRY_INFO:
		/* Currently unsupported */
		tcpm_pd_handle_msg(port, port->negotiated_rev < PD_REV30 ?
				   PD_MSG_CTRL_REJECT :
				   PD_MSG_CTRL_NOT_SUPP,
				   NONE_AMS);
		break;
	default:
		tcpm_pd_handle_msg(port, port->negotiated_rev < PD_REV30 ?
				   PD_MSG_CTRL_REJECT :
				   PD_MSG_CTRL_NOT_SUPP,
				   NONE_AMS);
		tcpm_log(port, "Unrecognized data message type %#x", type);
		break;
	}
}
/* Finish a pending PPS request, waking any waiter with @result. */
static void tcpm_pps_complete(struct tcpm_port *port, int result)
{
	if (!port->pps_pending)
		return;

	port->pps_status = result;
	port->pps_pending = false;
	complete(&port->pps_complete);
}
/*
 * Dispatch a received PD control message.  Called from the RX handler with
 * the port lock held; drives the main TCPM state machine.
 */
static void tcpm_pd_ctrl_request(struct tcpm_port *port,
				 const struct pd_message *msg)
{
	enum pd_ctrl_msg_type type = pd_header_type_le(msg->header);
	enum tcpm_state next_state;

	/*
	 * Stop VDM state machine if interrupted by other Messages while NOT_SUPP is allowed in
	 * VDM AMS if waiting for VDM responses and will be handled later.
	 */
	if (tcpm_vdm_ams(port) && type != PD_CTRL_NOT_SUPP && type != PD_CTRL_GOOD_CRC) {
		port->vdm_state = VDM_STATE_ERR_BUSY;
		tcpm_ams_finish(port);
		mod_vdm_delayed_work(port, 0);
	}

	switch (type) {
	case PD_CTRL_GOOD_CRC:
	case PD_CTRL_PING:
		break;
	case PD_CTRL_GET_SOURCE_CAP:
		tcpm_pd_handle_msg(port, PD_MSG_DATA_SOURCE_CAP, GET_SOURCE_CAPABILITIES);
		break;
	case PD_CTRL_GET_SINK_CAP:
		tcpm_pd_handle_msg(port, PD_MSG_DATA_SINK_CAP, GET_SINK_CAPABILITIES);
		break;
	case PD_CTRL_GOTO_MIN:
		break;
	case PD_CTRL_PS_RDY:
		/* What PS_RDY means depends on which transition we are in. */
		switch (port->state) {
		case SNK_TRANSITION_SINK:
			if (port->vbus_present) {
				tcpm_set_current_limit(port,
						       port->req_current_limit,
						       port->req_supply_voltage);
				port->explicit_contract = true;
				tcpm_set_auto_vbus_discharge_threshold(port,
								       TYPEC_PWR_MODE_PD,
								       port->pps_data.active,
								       port->supply_voltage);
				tcpm_set_state(port, SNK_READY, 0);
			} else {
				/*
				 * Seen after power swap. Keep waiting for VBUS
				 * in a transitional state.
				 */
				tcpm_set_state(port,
					       SNK_TRANSITION_SINK_VBUS, 0);
			}
			break;
		case PR_SWAP_SRC_SNK_SOURCE_OFF_CC_DEBOUNCED:
			tcpm_set_state(port, PR_SWAP_SRC_SNK_SINK_ON, 0);
			break;
		case PR_SWAP_SNK_SRC_SINK_OFF:
			tcpm_set_state(port, PR_SWAP_SNK_SRC_SOURCE_ON, 0);
			break;
		case VCONN_SWAP_WAIT_FOR_VCONN:
			tcpm_set_state(port, VCONN_SWAP_TURN_OFF_VCONN, 0);
			break;
		case FR_SWAP_SNK_SRC_TRANSITION_TO_OFF:
			tcpm_set_state(port, FR_SWAP_SNK_SRC_NEW_SINK_READY, 0);
			break;
		default:
			/* Unexpected PS_RDY: recover via Soft Reset. */
			tcpm_pd_handle_state(port,
					     port->pwr_role == TYPEC_SOURCE ?
					     SRC_SOFT_RESET_WAIT_SNK_TX :
					     SNK_SOFT_RESET,
					     NONE_AMS, 0);
			break;
		}
		break;
	case PD_CTRL_REJECT:
	case PD_CTRL_WAIT:
	case PD_CTRL_NOT_SUPP:
		/* Negative replies: unwind whichever request was pending. */
		switch (port->state) {
		case SNK_NEGOTIATE_CAPABILITIES:
			/* USB PD specification, Figure 8-43 */
			if (port->explicit_contract)
				next_state = SNK_READY;
			else
				next_state = SNK_WAIT_CAPABILITIES;

			/* Threshold was relaxed before sending Request. Restore it back. */
			tcpm_set_auto_vbus_discharge_threshold(port, TYPEC_PWR_MODE_PD,
							       port->pps_data.active,
							       port->supply_voltage);
			tcpm_set_state(port, next_state, 0);
			break;
		case SNK_NEGOTIATE_PPS_CAPABILITIES:
			/* Revert data back from any requested PPS updates */
			port->pps_data.req_out_volt = port->supply_voltage;
			port->pps_data.req_op_curr = port->current_limit;
			port->pps_status = (type == PD_CTRL_WAIT ?
					    -EAGAIN : -EOPNOTSUPP);

			/* Threshold was relaxed before sending Request. Restore it back. */
			tcpm_set_auto_vbus_discharge_threshold(port, TYPEC_PWR_MODE_PD,
							       port->pps_data.active,
							       port->supply_voltage);
			tcpm_set_state(port, SNK_READY, 0);
			break;
		case DR_SWAP_SEND:
			port->swap_status = (type == PD_CTRL_WAIT ?
					     -EAGAIN : -EOPNOTSUPP);
			tcpm_set_state(port, DR_SWAP_CANCEL, 0);
			break;
		case PR_SWAP_SEND:
			port->swap_status = (type == PD_CTRL_WAIT ?
					     -EAGAIN : -EOPNOTSUPP);
			tcpm_set_state(port, PR_SWAP_CANCEL, 0);
			break;
		case VCONN_SWAP_SEND:
			port->swap_status = (type == PD_CTRL_WAIT ?
					     -EAGAIN : -EOPNOTSUPP);
			tcpm_set_state(port, VCONN_SWAP_CANCEL, 0);
			break;
		case FR_SWAP_SEND:
			tcpm_set_state(port, FR_SWAP_CANCEL, 0);
			break;
		case GET_SINK_CAP:
			port->sink_cap_done = true;
			tcpm_set_state(port, ready_state(port), 0);
			break;
		/*
		 * Some port partners do not support GET_STATUS, avoid soft reset the link to
		 * prevent redundant power re-negotiation
		 */
		case GET_STATUS_SEND:
			tcpm_set_state(port, ready_state(port), 0);
			break;
		case SRC_READY:
		case SNK_READY:
			if (port->vdm_state > VDM_STATE_READY) {
				/* The reply answered our VDM: wind down the VDM AMS. */
				port->vdm_state = VDM_STATE_DONE;
				if (tcpm_vdm_ams(port))
					tcpm_ams_finish(port);
				mod_vdm_delayed_work(port, 0);
				break;
			}
			fallthrough;
		default:
			tcpm_pd_handle_state(port,
					     port->pwr_role == TYPEC_SOURCE ?
					     SRC_SOFT_RESET_WAIT_SNK_TX :
					     SNK_SOFT_RESET,
					     NONE_AMS, 0);
			break;
		}
		break;
	case PD_CTRL_ACCEPT:
		switch (port->state) {
		case SNK_NEGOTIATE_CAPABILITIES:
			port->pps_data.active = false;
			tcpm_set_state(port, SNK_TRANSITION_SINK, 0);
			break;
		case SNK_NEGOTIATE_PPS_CAPABILITIES:
			/* PPS contract accepted: commit the requested values. */
			port->pps_data.active = true;
			port->pps_data.min_volt = port->pps_data.req_min_volt;
			port->pps_data.max_volt = port->pps_data.req_max_volt;
			port->pps_data.max_curr = port->pps_data.req_max_curr;
			port->req_supply_voltage = port->pps_data.req_out_volt;
			port->req_current_limit = port->pps_data.req_op_curr;
			power_supply_changed(port->psy);
			tcpm_set_state(port, SNK_TRANSITION_SINK, 0);
			break;
		case SOFT_RESET_SEND:
			if (port->ams == SOFT_RESET_AMS)
				tcpm_ams_finish(port);
			/* After a soft reset the source restarts power negotiation. */
			if (port->pwr_role == TYPEC_SOURCE) {
				port->upcoming_state = SRC_SEND_CAPABILITIES;
				tcpm_ams_start(port, POWER_NEGOTIATION);
			} else {
				tcpm_set_state(port, SNK_WAIT_CAPABILITIES, 0);
			}
			break;
		case DR_SWAP_SEND:
			tcpm_set_state(port, DR_SWAP_CHANGE_DR, 0);
			break;
		case PR_SWAP_SEND:
			tcpm_set_state(port, PR_SWAP_START, 0);
			break;
		case VCONN_SWAP_SEND:
			tcpm_set_state(port, VCONN_SWAP_START, 0);
			break;
		case FR_SWAP_SEND:
			tcpm_set_state(port, FR_SWAP_SNK_SRC_TRANSITION_TO_OFF, 0);
			break;
		default:
			/* Accept with nothing outstanding: Soft Reset. */
			tcpm_pd_handle_state(port,
					     port->pwr_role == TYPEC_SOURCE ?
					     SRC_SOFT_RESET_WAIT_SNK_TX :
					     SNK_SOFT_RESET,
					     NONE_AMS, 0);
			break;
		}
		break;
	case PD_CTRL_SOFT_RESET:
		port->ams = SOFT_RESET_AMS;
		tcpm_set_state(port, SOFT_RESET, 0);
		break;
	case PD_CTRL_DR_SWAP:
		/*
		 * XXX
		 * 6.3.9: If an alternate mode is active, a request to swap
		 * alternate modes shall trigger a port reset.
		 */
		if (port->typec_caps.data != TYPEC_PORT_DRD) {
			tcpm_pd_handle_msg(port,
					   port->negotiated_rev < PD_REV30 ?
					   PD_MSG_CTRL_REJECT :
					   PD_MSG_CTRL_NOT_SUPP,
					   NONE_AMS);
		} else {
			/* Defer the swap while Discover Identity is pending. */
			if (port->send_discover) {
				tcpm_queue_message(port, PD_MSG_CTRL_WAIT);
				break;
			}

			tcpm_pd_handle_state(port, DR_SWAP_ACCEPT, DATA_ROLE_SWAP, 0);
		}
		break;
	case PD_CTRL_PR_SWAP:
		if (port->port_type != TYPEC_PORT_DRP) {
			tcpm_pd_handle_msg(port,
					   port->negotiated_rev < PD_REV30 ?
					   PD_MSG_CTRL_REJECT :
					   PD_MSG_CTRL_NOT_SUPP,
					   NONE_AMS);
		} else {
			/* Defer the swap while Discover Identity is pending. */
			if (port->send_discover) {
				tcpm_queue_message(port, PD_MSG_CTRL_WAIT);
				break;
			}

			tcpm_pd_handle_state(port, PR_SWAP_ACCEPT, POWER_ROLE_SWAP, 0);
		}
		break;
	case PD_CTRL_VCONN_SWAP:
		/* Defer the swap while Discover Identity is pending. */
		if (port->send_discover) {
			tcpm_queue_message(port, PD_MSG_CTRL_WAIT);
			break;
		}

		tcpm_pd_handle_state(port, VCONN_SWAP_ACCEPT, VCONN_SWAP, 0);
		break;
	case PD_CTRL_GET_SOURCE_CAP_EXT:
	case PD_CTRL_GET_STATUS:
	case PD_CTRL_FR_SWAP:
	case PD_CTRL_GET_PPS_STATUS:
	case PD_CTRL_GET_COUNTRY_CODES:
		/* Currently not supported */
		tcpm_pd_handle_msg(port,
				   port->negotiated_rev < PD_REV30 ?
				   PD_MSG_CTRL_REJECT :
				   PD_MSG_CTRL_NOT_SUPP,
				   NONE_AMS);
		break;
	default:
		tcpm_pd_handle_msg(port,
				   port->negotiated_rev < PD_REV30 ?
				   PD_MSG_CTRL_REJECT :
				   PD_MSG_CTRL_NOT_SUPP,
				   NONE_AMS);
		tcpm_log(port, "Unrecognized ctrl message type %#x", type);
		break;
	}
}
/*
 * Dispatch a received PD extended message.  Only chunked messages whose
 * data fits in a single chunk are handled; everything else is answered
 * with Not_Supported (or the chunking-unsupported path).
 */
static void tcpm_pd_ext_msg_request(struct tcpm_port *port,
				    const struct pd_message *msg)
{
	enum pd_ext_msg_type type = pd_header_type_le(msg->header);
	unsigned int data_size = pd_ext_header_data_size_le(msg->ext_msg.header);

	/* stopping VDM state machine if interrupted by other Messages */
	if (tcpm_vdm_ams(port)) {
		port->vdm_state = VDM_STATE_ERR_BUSY;
		tcpm_ams_finish(port);
		mod_vdm_delayed_work(port, 0);
	}

	if (!(le16_to_cpu(msg->ext_msg.header) & PD_EXT_HDR_CHUNKED)) {
		tcpm_pd_handle_msg(port, PD_MSG_CTRL_NOT_SUPP, NONE_AMS);
		tcpm_log(port, "Unchunked extended messages unsupported");
		return;
	}

	if (data_size > PD_EXT_MAX_CHUNK_DATA) {
		tcpm_pd_handle_state(port, CHUNK_NOT_SUPP, NONE_AMS, PD_T_CHUNK_NOT_SUPP);
		tcpm_log(port, "Chunk handling not yet supported");
		return;
	}

	switch (type) {
	case PD_EXT_STATUS:
	case PD_EXT_PPS_STATUS:
		if (port->ams == GETTING_SOURCE_SINK_STATUS) {
			/* Status we asked for: AMS is complete, back to ready. */
			tcpm_ams_finish(port);
			tcpm_set_state(port, ready_state(port), 0);
		} else {
			/* unexpected Status or PPS_Status Message */
			tcpm_pd_handle_state(port, port->pwr_role == TYPEC_SOURCE ?
					     SRC_SOFT_RESET_WAIT_SNK_TX : SNK_SOFT_RESET,
					     NONE_AMS, 0);
		}
		break;
	case PD_EXT_SOURCE_CAP_EXT:
	case PD_EXT_GET_BATT_CAP:
	case PD_EXT_GET_BATT_STATUS:
	case PD_EXT_BATT_CAP:
	case PD_EXT_GET_MANUFACTURER_INFO:
	case PD_EXT_MANUFACTURER_INFO:
	case PD_EXT_SECURITY_REQUEST:
	case PD_EXT_SECURITY_RESPONSE:
	case PD_EXT_FW_UPDATE_REQUEST:
	case PD_EXT_FW_UPDATE_RESPONSE:
	case PD_EXT_COUNTRY_INFO:
	case PD_EXT_COUNTRY_CODES:
		/* Known message types we do not implement. */
		tcpm_pd_handle_msg(port, PD_MSG_CTRL_NOT_SUPP, NONE_AMS);
		break;
	default:
		tcpm_pd_handle_msg(port, PD_MSG_CTRL_NOT_SUPP, NONE_AMS);
		tcpm_log(port, "Unrecognized extended message type %#x", type);
		break;
	}
}
/*
 * Worker that processes one received PD message on the port's kthread:
 * takes the port lock, drops MessageID retransmissions, checks for a data
 * role mismatch, then dispatches to the ext/data/ctrl handlers.  Frees the
 * RX event when done.
 */
static void tcpm_pd_rx_handler(struct kthread_work *work)
{
	struct pd_rx_event *event = container_of(work,
						 struct pd_rx_event, work);
	const struct pd_message *msg = &event->msg;
	unsigned int cnt = pd_header_cnt_le(msg->header);
	struct tcpm_port *port = event->port;

	mutex_lock(&port->lock);

	tcpm_log(port, "PD RX, header: %#x [%d]", le16_to_cpu(msg->header),
		 port->attached);

	if (port->attached) {
		enum pd_ctrl_msg_type type = pd_header_type_le(msg->header);
		unsigned int msgid = pd_header_msgid_le(msg->header);

		/*
		 * USB PD standard, 6.6.1.2:
		 * "... if MessageID value in a received Message is the
		 * same as the stored value, the receiver shall return a
		 * GoodCRC Message with that MessageID value and drop
		 * the Message (this is a retry of an already received
		 * Message). Note: this shall not apply to the Soft_Reset
		 * Message which always has a MessageID value of zero."
		 */
		if (msgid == port->rx_msgid && type != PD_CTRL_SOFT_RESET)
			goto done;
		port->rx_msgid = msgid;

		/*
		 * If both ends believe to be DFP/host, we have a data role
		 * mismatch.
		 */
		if (!!(le16_to_cpu(msg->header) & PD_HEADER_DATA_ROLE) ==
		    (port->data_role == TYPEC_HOST)) {
			tcpm_log(port,
				 "Data role mismatch, initiating error recovery");
			tcpm_set_state(port, ERROR_RECOVERY, 0);
		} else {
			/* Extended vs data vs control, by header fields. */
			if (le16_to_cpu(msg->header) & PD_HEADER_EXT_HDR)
				tcpm_pd_ext_msg_request(port, msg);
			else if (cnt)
				tcpm_pd_data_request(port, msg);
			else
				tcpm_pd_ctrl_request(port, msg);
		}
	}

done:
	mutex_unlock(&port->lock);
	kfree(event);
}
/*
 * Entry point for TCPC drivers delivering a received PD message.  The
 * message is copied (GFP_ATOMIC allocation) and processing is deferred to
 * the port's worker thread; on allocation failure the message is dropped.
 */
void tcpm_pd_receive(struct tcpm_port *port, const struct pd_message *msg)
{
	struct pd_rx_event *event = kzalloc(sizeof(*event), GFP_ATOMIC);

	if (!event)
		return;

	kthread_init_work(&event->work, tcpm_pd_rx_handler);
	event->port = port;
	memcpy(&event->msg, msg, sizeof(*msg));
	kthread_queue_work(port->wq, &event->work);
}
EXPORT_SYMBOL_GPL(tcpm_pd_receive);
static int tcpm_pd_send_control(struct tcpm_port *port,
enum pd_ctrl_msg_type type)
{
struct pd_message msg;
memset(&msg, 0, sizeof(msg));
msg.header = PD_HEADER_LE(type, port->pwr_role,
port->data_role,
port->negotiated_rev,
port->message_id, 0);
return tcpm_pd_transmit(port, TCPC_TX_SOP, &msg);
}
/*
 * Send queued message without affecting state.
 * Return true if state machine should go back to sleep,
 * false otherwise.
 */
static bool tcpm_send_queued_message(struct tcpm_port *port)
{
	enum pd_msg_request queued_message;
	int ret;

	do {
		/* Consume the slot first; the handlers below may queue anew. */
		queued_message = port->queued_message;
		port->queued_message = PD_MSG_NONE;

		switch (queued_message) {
		case PD_MSG_CTRL_WAIT:
			tcpm_pd_send_control(port, PD_CTRL_WAIT);
			break;
		case PD_MSG_CTRL_REJECT:
			tcpm_pd_send_control(port, PD_CTRL_REJECT);
			break;
		case PD_MSG_CTRL_NOT_SUPP:
			tcpm_pd_send_control(port, PD_CTRL_NOT_SUPP);
			break;
		case PD_MSG_DATA_SINK_CAP:
			ret = tcpm_pd_send_sink_caps(port);
			if (ret < 0) {
				tcpm_log(port, "Unable to send snk caps, ret=%d", ret);
				tcpm_set_state(port, SNK_SOFT_RESET, 0);
			}
			tcpm_ams_finish(port);
			break;
		case PD_MSG_DATA_SOURCE_CAP:
			ret = tcpm_pd_send_source_caps(port);
			if (ret < 0) {
				tcpm_log(port,
					 "Unable to send src caps, ret=%d",
					 ret);
				tcpm_set_state(port, SOFT_RESET_SEND, 0);
			} else if (port->pwr_role == TYPEC_SOURCE) {
				/* A source expects a Request within tSenderResponse. */
				tcpm_ams_finish(port);
				tcpm_set_state(port, HARD_RESET_SEND,
					       PD_T_SENDER_RESPONSE);
			} else {
				tcpm_ams_finish(port);
			}
			break;
		default:
			break;
		}
	} while (port->queued_message != PD_MSG_NONE);

	/* Re-arm any still-pending delayed state transition. */
	if (port->delayed_state != INVALID_STATE) {
		if (ktime_after(port->delayed_runtime, ktime_get())) {
			mod_tcpm_delayed_work(port, ktime_to_ms(ktime_sub(port->delayed_runtime,
									  ktime_get())));
			return true;
		}
		port->delayed_state = INVALID_STATE;
	}

	return false;
}
/*
 * Validate the Request Data Object the sink sent against the source PDO it
 * references.  Returns 0 when the request is acceptable, -EINVAL otherwise.
 */
static int tcpm_pd_check_request(struct tcpm_port *port)
{
	u32 rdo = port->sink_request;
	unsigned int index = rdo_index(rdo);
	unsigned int max, op, pdo_max;
	enum pd_pdo_type type;
	u32 pdo;

	/* Object positions are 1-based and must reference a published PDO. */
	if (!index || index > port->nr_src_pdo)
		return -EINVAL;

	pdo = port->src_pdo[index - 1];
	type = pdo_type(pdo);

	switch (type) {
	case PDO_TYPE_FIXED:
	case PDO_TYPE_VAR:
		max = rdo_max_current(rdo);
		op = rdo_op_current(rdo);
		pdo_max = pdo_max_current(pdo);

		/* Operating current may never exceed what the PDO offers. */
		if (op > pdo_max)
			return -EINVAL;
		/* Max current may exceed it only with Capability Mismatch set. */
		if (max > pdo_max && !(rdo & RDO_CAP_MISMATCH))
			return -EINVAL;

		if (type == PDO_TYPE_FIXED)
			tcpm_log(port,
				 "Requested %u mV, %u mA for %u / %u mA",
				 pdo_fixed_voltage(pdo), pdo_max, op, max);
		else
			tcpm_log(port,
				 "Requested %u -> %u mV, %u mA for %u / %u mA",
				 pdo_min_voltage(pdo), pdo_max_voltage(pdo),
				 pdo_max, op, max);
		break;
	case PDO_TYPE_BATT:
		max = rdo_max_power(rdo);
		op = rdo_op_power(rdo);
		pdo_max = pdo_max_power(pdo);

		if (op > pdo_max)
			return -EINVAL;
		if (max > pdo_max && !(rdo & RDO_CAP_MISMATCH))
			return -EINVAL;
		tcpm_log(port,
			 "Requested %u -> %u mV, %u mW for %u / %u mW",
			 pdo_min_voltage(pdo), pdo_max_voltage(pdo),
			 pdo_max, op, max);
		break;
	default:
		return -EINVAL;
	}

	/* PDO 1 is always the vSafe5V fixed supply object. */
	port->op_vsafe5v = index == 1;

	return 0;
}
/* Convenience helpers: the smaller of two PDOs' max power / max current. */
#define min_power(x, y) min(pdo_max_power(x), pdo_max_power(y))
#define min_current(x, y) min(pdo_max_current(x), pdo_max_current(y))
/*
 * Pick the (source PDO, sink PDO) pair delivering the most power, preferring
 * the higher voltage on power ties.  Writes the chosen indices to *src_pdo /
 * *sink_pdo and returns 0, or -EINVAL when nothing matches.  Also records
 * whether the source advertises PPS APDOs (pps_data.supported / usb_type).
 */
static int tcpm_pd_select_pdo(struct tcpm_port *port, int *sink_pdo,
			      int *src_pdo)
{
	unsigned int i, j, max_src_mv = 0, min_src_mv = 0, max_mw = 0,
		     max_mv = 0, src_mw = 0, src_ma = 0, max_snk_mv = 0,
		     min_snk_mv = 0;
	int ret = -EINVAL;

	port->pps_data.supported = false;
	port->usb_type = POWER_SUPPLY_USB_TYPE_PD;
	power_supply_changed(port->psy);

	/*
	 * Select the source PDO providing the most power which has a
	 * matchig sink cap.
	 */
	for (i = 0; i < port->nr_source_caps; i++) {
		u32 pdo = port->source_caps[i];
		enum pd_pdo_type type = pdo_type(pdo);

		/* First: the voltage window this source PDO covers. */
		switch (type) {
		case PDO_TYPE_FIXED:
			max_src_mv = pdo_fixed_voltage(pdo);
			min_src_mv = max_src_mv;
			break;
		case PDO_TYPE_BATT:
		case PDO_TYPE_VAR:
			max_src_mv = pdo_max_voltage(pdo);
			min_src_mv = pdo_min_voltage(pdo);
			break;
		case PDO_TYPE_APDO:
			/* PPS APDOs only flag support; they are not selected here. */
			if (pdo_apdo_type(pdo) == APDO_TYPE_PPS) {
				port->pps_data.supported = true;
				port->usb_type =
					POWER_SUPPLY_USB_TYPE_PD_PPS;
				power_supply_changed(port->psy);
			}
			continue;
		default:
			tcpm_log(port, "Invalid source PDO type, ignoring");
			continue;
		}

		/* Second: the power (mW) this source PDO can deliver. */
		switch (type) {
		case PDO_TYPE_FIXED:
		case PDO_TYPE_VAR:
			src_ma = pdo_max_current(pdo);
			src_mw = src_ma * min_src_mv / 1000;
			break;
		case PDO_TYPE_BATT:
			src_mw = pdo_max_power(pdo);
			break;
		case PDO_TYPE_APDO:
			continue;
		default:
			tcpm_log(port, "Invalid source PDO type, ignoring");
			continue;
		}

		/* Match against every sink PDO whose window contains it. */
		for (j = 0; j < port->nr_snk_pdo; j++) {
			pdo = port->snk_pdo[j];

			switch (pdo_type(pdo)) {
			case PDO_TYPE_FIXED:
				max_snk_mv = pdo_fixed_voltage(pdo);
				min_snk_mv = max_snk_mv;
				break;
			case PDO_TYPE_BATT:
			case PDO_TYPE_VAR:
				max_snk_mv = pdo_max_voltage(pdo);
				min_snk_mv = pdo_min_voltage(pdo);
				break;
			case PDO_TYPE_APDO:
				continue;
			default:
				tcpm_log(port, "Invalid sink PDO type, ignoring");
				continue;
			}

			if (max_src_mv <= max_snk_mv &&
			    min_src_mv >= min_snk_mv) {
				/* Prefer higher voltages if available */
				if ((src_mw == max_mw && min_src_mv > max_mv) ||
				    src_mw > max_mw) {
					*src_pdo = i;
					*sink_pdo = j;
					max_mw = src_mw;
					max_mv = min_src_mv;
					ret = 0;
				}
			}
		}
	}

	return ret;
}
/*
 * Pick the PPS APDO from the partner's source capabilities whose
 * voltage range covers the requested output voltage
 * (port->pps_data.req_out_volt) and which yields the highest operating
 * power at that voltage.
 *
 * Returns the source-cap index of the chosen APDO, or 0 if none is
 * suitable (index 0 is never a valid result: the scan starts at 1,
 * skipping the first source cap, which is the fixed vSafe5V PDO and
 * cannot be an APDO).  On success the req_min_volt/req_max_volt/
 * req_max_curr fields of port->pps_data are refreshed from the chosen
 * APDO and req_op_curr is clamped to its current limit.
 */
static unsigned int tcpm_pd_select_pps_apdo(struct tcpm_port *port)
{
	unsigned int i, src_ma, max_temp_mw = 0, max_op_ma, op_mw;
	unsigned int src_pdo = 0;
	u32 pdo, src;
	/* Skip index 0: the first source cap is the fixed vSafe5V PDO */
	for (i = 1; i < port->nr_source_caps; ++i) {
		pdo = port->source_caps[i];
		switch (pdo_type(pdo)) {
		case PDO_TYPE_APDO:
			if (pdo_apdo_type(pdo) != APDO_TYPE_PPS) {
				tcpm_log(port, "Not PPS APDO (source), ignoring");
				continue;
			}
			/* Requested voltage must lie within the APDO's range */
			if (port->pps_data.req_out_volt > pdo_pps_apdo_max_voltage(pdo) ||
			    port->pps_data.req_out_volt < pdo_pps_apdo_min_voltage(pdo))
				continue;
			src_ma = pdo_pps_apdo_max_current(pdo);
			max_op_ma = min(src_ma, port->pps_data.req_op_curr);
			op_mw = max_op_ma * port->pps_data.req_out_volt / 1000;
			/* Keep the candidate offering the most power */
			if (op_mw > max_temp_mw) {
				src_pdo = i;
				max_temp_mw = op_mw;
			}
			break;
		default:
			tcpm_log(port, "Not APDO type (source), ignoring");
			continue;
		}
	}
	if (src_pdo) {
		src = port->source_caps[src_pdo];
		port->pps_data.req_min_volt = pdo_pps_apdo_min_voltage(src);
		port->pps_data.req_max_volt = pdo_pps_apdo_max_voltage(src);
		port->pps_data.req_max_curr = pdo_pps_apdo_max_current(src);
		/* Clamp operating current to what the APDO can supply */
		port->pps_data.req_op_curr = min(port->pps_data.req_max_curr,
						 port->pps_data.req_op_curr);
	}
	return src_pdo;
}
/*
 * Build a Request Data Object (*rdo) for a standard (non-PPS) power
 * contract, using the PDO pair chosen by tcpm_pd_select_pdo().
 *
 * Also updates port->req_current_limit and port->req_supply_voltage
 * with the values being requested.  Returns 0 on success, or a
 * negative error code if no usable PDO could be selected.
 */
static int tcpm_pd_build_request(struct tcpm_port *port, u32 *rdo)
{
	unsigned int mv, ma, mw, flags;
	unsigned int max_ma, max_mw;
	enum pd_pdo_type type;
	u32 pdo, matching_snk_pdo;
	int src_pdo_index = 0;
	int snk_pdo_index = 0;
	int ret;
	ret = tcpm_pd_select_pdo(port, &snk_pdo_index, &src_pdo_index);
	if (ret < 0)
		return ret;
	pdo = port->source_caps[src_pdo_index];
	matching_snk_pdo = port->snk_pdo[snk_pdo_index];
	type = pdo_type(pdo);
	/* Request voltage: fixed PDOs have one voltage, ranges use the minimum */
	switch (type) {
	case PDO_TYPE_FIXED:
		mv = pdo_fixed_voltage(pdo);
		break;
	case PDO_TYPE_BATT:
	case PDO_TYPE_VAR:
		mv = pdo_min_voltage(pdo);
		break;
	default:
		tcpm_log(port, "Invalid PDO selected!");
		return -EINVAL;
	}
	/* Select maximum available current within the sink pdo's limit */
	if (type == PDO_TYPE_BATT) {
		mw = min_power(pdo, matching_snk_pdo);
		ma = 1000 * mw / mv;
	} else {
		ma = min_current(pdo, matching_snk_pdo);
		mw = ma * mv / 1000;
	}
	flags = RDO_USB_COMM | RDO_NO_SUSPEND;
	/* Set mismatch bit if offered power is less than operating power */
	max_ma = ma;
	max_mw = mw;
	if (mw < port->operating_snk_mw) {
		flags |= RDO_CAP_MISMATCH;
		/* Report our full sink limit where it exceeds the offer */
		if (type == PDO_TYPE_BATT &&
		    (pdo_max_power(matching_snk_pdo) > pdo_max_power(pdo)))
			max_mw = pdo_max_power(matching_snk_pdo);
		else if (pdo_max_current(matching_snk_pdo) >
			 pdo_max_current(pdo))
			max_ma = pdo_max_current(matching_snk_pdo);
	}
	tcpm_log(port, "cc=%d cc1=%d cc2=%d vbus=%d vconn=%s polarity=%d",
		 port->cc_req, port->cc1, port->cc2, port->vbus_source,
		 port->vconn_role == TYPEC_SOURCE ? "source" : "sink",
		 port->polarity);
	/* Battery PDOs are requested in mW, fixed/variable PDOs in mA */
	if (type == PDO_TYPE_BATT) {
		*rdo = RDO_BATT(src_pdo_index + 1, mw, max_mw, flags);
		tcpm_log(port, "Requesting PDO %d: %u mV, %u mW%s",
			 src_pdo_index, mv, mw,
			 flags & RDO_CAP_MISMATCH ? " [mismatch]" : "");
	} else {
		*rdo = RDO_FIXED(src_pdo_index + 1, ma, max_ma, flags);
		tcpm_log(port, "Requesting PDO %d: %u mV, %u mA%s",
			 src_pdo_index, mv, ma,
			 flags & RDO_CAP_MISMATCH ? " [mismatch]" : "");
	}
	port->req_current_limit = ma;
	port->req_supply_voltage = mv;
	return 0;
}
/*
 * Build a Request Data Object for the selected source PDO and transmit
 * it to the port partner as a PD_DATA_REQUEST message.
 */
static int tcpm_pd_send_request(struct tcpm_port *port)
{
	struct pd_message msg;
	u32 rdo;
	int err;

	err = tcpm_pd_build_request(port, &rdo);
	if (err < 0)
		return err;

	/*
	 * Relax the threshold as voltage will be adjusted after Accept
	 * Message plus tSrcTransition. It is safer to modify the threshold
	 * here.
	 */
	tcpm_set_auto_vbus_discharge_threshold(port, TYPEC_PWR_MODE_USB, false, 0);

	memset(&msg, 0, sizeof(msg));
	msg.header = PD_HEADER_LE(PD_DATA_REQUEST, port->pwr_role,
				  port->data_role, port->negotiated_rev,
				  port->message_id, 1);
	msg.payload[0] = cpu_to_le32(rdo);

	return tcpm_pd_transmit(port, TCPC_TX_SOP, &msg);
}
/*
 * Build a programmable (PPS) Request Data Object (*rdo) from the APDO
 * chosen by tcpm_pd_select_pps_apdo().
 *
 * If the initially computed operating power falls short of the sink's
 * requirement (port->operating_snk_mw), first the current and then the
 * output voltage are raised - rounded up to the RDO programming step
 * sizes - within the APDO's limits.  The final values are stored back
 * into port->pps_data.req_op_curr/req_out_volt.
 *
 * Returns 0 on success, -EOPNOTSUPP when no suitable APDO exists, or
 * -EINVAL when the chosen APDO cannot meet the power requirement.
 */
static int tcpm_pd_build_pps_request(struct tcpm_port *port, u32 *rdo)
{
	unsigned int out_mv, op_ma, op_mw, max_mv, max_ma, flags;
	unsigned int src_pdo_index;
	src_pdo_index = tcpm_pd_select_pps_apdo(port);
	if (!src_pdo_index)
		return -EOPNOTSUPP;
	max_mv = port->pps_data.req_max_volt;
	max_ma = port->pps_data.req_max_curr;
	out_mv = port->pps_data.req_out_volt;
	op_ma = port->pps_data.req_op_curr;
	flags = RDO_USB_COMM | RDO_NO_SUSPEND;
	op_mw = (op_ma * out_mv) / 1000;
	if (op_mw < port->operating_snk_mw) {
		/*
		 * Try raising current to meet power needs. If that's not enough
		 * then try upping the voltage. If that's still not enough
		 * then we've obviously chosen a PPS APDO which really isn't
		 * suitable so abandon ship.
		 */
		op_ma = (port->operating_snk_mw * 1000) / out_mv;
		/* Round up on remainder, then up to the next current step */
		if ((port->operating_snk_mw * 1000) % out_mv)
			++op_ma;
		op_ma += RDO_PROG_CURR_MA_STEP - (op_ma % RDO_PROG_CURR_MA_STEP);
		if (op_ma > max_ma) {
			/* Current is capped; raise the voltage instead */
			op_ma = max_ma;
			out_mv = (port->operating_snk_mw * 1000) / op_ma;
			if ((port->operating_snk_mw * 1000) % op_ma)
				++out_mv;
			out_mv += RDO_PROG_VOLT_MV_STEP -
				  (out_mv % RDO_PROG_VOLT_MV_STEP);
			if (out_mv > max_mv) {
				tcpm_log(port, "Invalid PPS APDO selected!");
				return -EINVAL;
			}
		}
	}
	tcpm_log(port, "cc=%d cc1=%d cc2=%d vbus=%d vconn=%s polarity=%d",
		 port->cc_req, port->cc1, port->cc2, port->vbus_source,
		 port->vconn_role == TYPEC_SOURCE ? "source" : "sink",
		 port->polarity);
	*rdo = RDO_PROG(src_pdo_index + 1, out_mv, op_ma, flags);
	tcpm_log(port, "Requesting APDO %d: %u mV, %u mA",
		 src_pdo_index, out_mv, op_ma);
	port->pps_data.req_op_curr = op_ma;
	port->pps_data.req_out_volt = out_mv;
	return 0;
}
/*
 * Build a PPS Request Data Object and transmit it to the port partner
 * as a PD_DATA_REQUEST message.
 */
static int tcpm_pd_send_pps_request(struct tcpm_port *port)
{
	struct pd_message msg;
	u32 rdo;
	int err;

	err = tcpm_pd_build_pps_request(port, &rdo);
	if (err < 0)
		return err;

	/* Relax the threshold as voltage will be adjusted right after Accept Message. */
	tcpm_set_auto_vbus_discharge_threshold(port, TYPEC_PWR_MODE_USB, false, 0);

	memset(&msg, 0, sizeof(msg));
	msg.header = PD_HEADER_LE(PD_DATA_REQUEST, port->pwr_role,
				  port->data_role, port->negotiated_rev,
				  port->message_id, 1);
	msg.payload[0] = cpu_to_le32(rdo);

	return tcpm_pd_transmit(port, TCPC_TX_SOP, &msg);
}
/*
 * Enable or disable VBUS sourcing via the TCPC and mirror the result in
 * port->vbus_source.  Refuses to source while VBUS sinking (charging)
 * is active.
 */
static int tcpm_set_vbus(struct tcpm_port *port, bool enable)
{
	int err;

	/* Sourcing VBUS while sinking from it is contradictory */
	if (enable && port->vbus_charge)
		return -EINVAL;

	tcpm_log(port, "vbus:=%d charge=%d", enable, port->vbus_charge);

	err = port->tcpc->set_vbus(port->tcpc, enable, port->vbus_charge);
	if (err < 0)
		return err;

	port->vbus_source = enable;
	return 0;
}
/*
 * Enable or disable sinking (charging) from VBUS via the TCPC and
 * mirror the result in port->vbus_charge.  Refuses to sink while we are
 * sourcing VBUS ourselves.
 */
static int tcpm_set_charge(struct tcpm_port *port, bool charge)
{
	int err;

	/* Cannot sink VBUS while we are sourcing it */
	if (charge && port->vbus_source)
		return -EINVAL;

	/* Only touch the TCPC when the setting actually changes */
	if (charge != port->vbus_charge) {
		tcpm_log(port, "vbus=%d charge:=%d", port->vbus_source, charge);
		err = port->tcpc->set_vbus(port->tcpc, port->vbus_source,
					   charge);
		if (err < 0)
			return err;
	}

	port->vbus_charge = charge;
	power_supply_changed(port->psy);
	return 0;
}
/*
 * Ask the TCPC to start hardware connection toggling with the given CC
 * pull.  Returns true when the (optional) hook exists and succeeded.
 */
static bool tcpm_start_toggling(struct tcpm_port *port, enum typec_cc_status cc)
{
	if (!port->tcpc->start_toggling)
		return false;

	tcpm_log_force(port, "Start toggling");
	return port->tcpc->start_toggling(port->tcpc, port->port_type, cc) == 0;
}
/*
 * Drive VBUS to its initial state (neither sourcing nor sinking) and
 * mirror that in the cached port flags.
 */
static int tcpm_init_vbus(struct tcpm_port *port)
{
	int err = port->tcpc->set_vbus(port->tcpc, false, false);

	port->vbus_source = false;
	port->vbus_charge = false;

	return err;
}
/*
 * Turn VCONN off initially and record that we are not the VCONN source.
 */
static int tcpm_init_vconn(struct tcpm_port *port)
{
	int err = port->tcpc->set_vconn(port->tcpc, false);

	port->vconn_role = TYPEC_SINK;

	return err;
}
/*
 * Register the attached partner with the Type-C class (idempotent: does
 * nothing when already connected).  Classifies the partner as a debug
 * or audio accessory where the CC state says so, and hands the partner
 * the USB PD object gathered so far.
 */
static void tcpm_typec_connect(struct tcpm_port *port)
{
	if (port->connected)
		return;

	/* Make sure we don't report stale identity information */
	memset(&port->partner_ident, 0, sizeof(port->partner_ident));
	port->partner_desc.usb_pd = port->pd_capable;

	if (tcpm_port_is_debug(port))
		port->partner_desc.accessory = TYPEC_ACCESSORY_DEBUG;
	else if (tcpm_port_is_audio(port))
		port->partner_desc.accessory = TYPEC_ACCESSORY_AUDIO;
	else
		port->partner_desc.accessory = TYPEC_ACCESSORY_NONE;

	port->partner = typec_register_partner(port->typec_port,
					       &port->partner_desc);
	port->connected = true;
	typec_partner_set_usb_power_delivery(port->partner, port->partner_pd);
}
/*
 * Bring the port up as a source after attach: set CC polarity,
 * configure power/data roles, enable PD receive, optionally source
 * VCONN, and drive VBUS.  On failure, resources acquired so far are
 * released in reverse order through the goto cleanup chain.
 *
 * Returns 0 on success (or when already attached), negative errno on
 * failure.
 */
static int tcpm_src_attach(struct tcpm_port *port)
{
	/* The CC pin presenting Rd determines the plug orientation */
	enum typec_cc_polarity polarity =
				port->cc2 == TYPEC_CC_RD ? TYPEC_POLARITY_CC2
							 : TYPEC_POLARITY_CC1;
	int ret;
	if (port->attached)
		return 0;
	ret = tcpm_set_polarity(port, polarity);
	if (ret < 0)
		return ret;
	tcpm_enable_auto_vbus_discharge(port, true);
	ret = tcpm_set_roles(port, true, TYPEC_SOURCE, tcpm_data_role_for_source(port));
	if (ret < 0)
		return ret;
	if (port->pd_supported) {
		ret = port->tcpc->set_pd_rx(port->tcpc, true);
		if (ret < 0)
			goto out_disable_mux;
	}
	/*
	 * USB Type-C specification, version 1.2,
	 * chapter 4.5.2.2.8.1 (Attached.SRC Requirements)
	 * Enable VCONN only if the non-RD port is set to RA.
	 */
	if ((polarity == TYPEC_POLARITY_CC1 && port->cc2 == TYPEC_CC_RA) ||
	    (polarity == TYPEC_POLARITY_CC2 && port->cc1 == TYPEC_CC_RA)) {
		ret = tcpm_set_vconn(port, true);
		if (ret < 0)
			goto out_disable_pd;
	}
	ret = tcpm_set_vbus(port, true);
	if (ret < 0)
		goto out_disable_vconn;
	port->pd_capable = false;
	port->partner = NULL;
	port->attached = true;
	port->send_discover = true;
	return 0;
out_disable_vconn:
	tcpm_set_vconn(port, false);
out_disable_pd:
	if (port->pd_supported)
		port->tcpc->set_pd_rx(port->tcpc, false);
out_disable_mux:
	tcpm_mux_set(port, TYPEC_STATE_SAFE, USB_ROLE_NONE,
		     TYPEC_ORIENTATION_NONE);
	return ret;
}
/*
 * Unregister the partner from the Type-C class (idempotent: does
 * nothing when not connected).  The PD object is detached from the
 * partner before unregistering it.
 */
static void tcpm_typec_disconnect(struct tcpm_port *port)
{
	if (!port->connected)
		return;

	typec_partner_set_usb_power_delivery(port->partner, NULL);
	typec_unregister_partner(port->partner);
	port->partner = NULL;
	port->connected = false;
}
/*
 * Unregister every partner alternate mode registered so far and clear
 * all cached mode-discovery state.
 */
static void tcpm_unregister_altmodes(struct tcpm_port *port)
{
	struct pd_mode_data *mdata = &port->mode_data;
	int idx;

	for (idx = 0; idx < mdata->altmodes; idx++) {
		typec_unregister_altmode(port->partner_altmode[idx]);
		port->partner_altmode[idx] = NULL;
	}

	memset(mdata, 0, sizeof(*mdata));
}
/*
 * Tell the TCPC (via its optional hook) whether the partner advertised
 * USB communication capability.
 */
static void tcpm_set_partner_usb_comm_capable(struct tcpm_port *port, bool capable)
{
	tcpm_log(port, "Setting usb_comm capable %s", capable ? "true" : "false");

	if (!port->tcpc->set_partner_usb_comm_capable)
		return;

	port->tcpc->set_partner_usb_comm_capable(port->tcpc, capable);
}
/*
 * Return the port to its detached default state: tear down altmodes and
 * the registered partner, disable VBUS/VCONN and PD receive, reset
 * polarity and mux, and drop all cached contract state and the
 * partner's registered PD capabilities.
 */
static void tcpm_reset_port(struct tcpm_port *port)
{
	tcpm_enable_auto_vbus_discharge(port, false);
	port->in_ams = false;
	port->ams = NONE_AMS;
	port->vdm_sm_running = false;
	tcpm_unregister_altmodes(port);
	tcpm_typec_disconnect(port);
	port->attached = false;
	port->pd_capable = false;
	port->pps_data.supported = false;
	tcpm_set_partner_usb_comm_capable(port, false);
	/*
	 * First Rx ID should be 0; set this to a sentinel of -1 so that
	 * we can check tcpm_pd_rx_handler() if we had seen it before.
	 */
	port->rx_msgid = -1;
	port->tcpc->set_pd_rx(port->tcpc, false);
	tcpm_init_vbus(port);	/* also disables charging */
	tcpm_init_vconn(port);
	tcpm_set_current_limit(port, 0, 0);
	tcpm_set_polarity(port, TYPEC_POLARITY_CC1);
	tcpm_mux_set(port, TYPEC_STATE_SAFE, USB_ROLE_NONE,
		     TYPEC_ORIENTATION_NONE);
	tcpm_set_attached_state(port, false);
	port->try_src_count = 0;
	port->try_snk_count = 0;
	port->usb_type = POWER_SUPPLY_USB_TYPE_C;
	power_supply_changed(port->psy);
	port->nr_sink_caps = 0;
	port->sink_cap_done = false;
	if (port->tcpc->enable_frs)
		port->tcpc->enable_frs(port->tcpc, false);
	/* Unregister and forget the partner's advertised PD capabilities */
	usb_power_delivery_unregister_capabilities(port->partner_sink_caps);
	port->partner_sink_caps = NULL;
	usb_power_delivery_unregister_capabilities(port->partner_source_caps);
	port->partner_source_caps = NULL;
	usb_power_delivery_unregister(port->partner_pd);
	port->partner_pd = NULL;
}
/*
 * Common detach handling: reset the try.SRC/try.SNK counters (and the
 * hard-reset counter when the port really is disconnected), leave BIST
 * test-data mode if the TCPC supports it, and fully reset the port if
 * it was attached.
 */
static void tcpm_detach(struct tcpm_port *port)
{
	if (tcpm_port_is_disconnected(port))
		port->hard_reset_count = 0;
	port->try_src_count = 0;
	port->try_snk_count = 0;
	if (!port->attached)
		return;
	if (port->tcpc->set_bist_data) {
		/* Leave BIST Test Data mode before resetting the port */
		tcpm_log(port, "disable BIST MODE TESTDATA");
		port->tcpc->set_bist_data(port->tcpc, false);
	}
	tcpm_reset_port(port);
}
/* Source-side detach: the common detach handling applies unchanged */
static void tcpm_src_detach(struct tcpm_port *port)
{
	tcpm_detach(port);
}
/*
 * Bring the port up as a sink after attach: set CC polarity from the
 * live CC pins, enable auto VBUS discharge, and configure the
 * power/data roles.  Returns 0 on success (or when already attached),
 * negative errno on failure.
 */
static int tcpm_snk_attach(struct tcpm_port *port)
{
	int ret;
	if (port->attached)
		return 0;
	/* The CC pin that is not open carries the connection */
	ret = tcpm_set_polarity(port, port->cc2 != TYPEC_CC_OPEN ?
				TYPEC_POLARITY_CC2 : TYPEC_POLARITY_CC1);
	if (ret < 0)
		return ret;
	tcpm_enable_auto_vbus_discharge(port, true);
	ret = tcpm_set_roles(port, true, TYPEC_SINK, tcpm_data_role_for_sink(port));
	if (ret < 0)
		return ret;
	port->pd_capable = false;
	port->partner = NULL;
	port->attached = true;
	port->send_discover = true;
	return 0;
}
/* Sink-side detach: the common detach handling applies unchanged */
static void tcpm_snk_detach(struct tcpm_port *port)
{
	tcpm_detach(port);
}
/*
 * Attach as a (debug/audio) accessory: configure source roles and
 * register the Type-C partner.  Returns 0 on success (or when already
 * attached), negative errno on failure.
 */
static int tcpm_acc_attach(struct tcpm_port *port)
{
	int err;

	if (port->attached)
		return 0;

	err = tcpm_set_roles(port, true, TYPEC_SOURCE,
			     tcpm_data_role_for_source(port));
	if (err < 0)
		return err;

	/* Accessories have no PD partner identity */
	port->partner = NULL;
	tcpm_typec_connect(port);
	port->attached = true;

	return 0;
}
/* Accessory detach: the common detach handling applies unchanged */
static void tcpm_acc_detach(struct tcpm_port *port)
{
	tcpm_detach(port);
}
/*
 * State to enter when a hard reset is called for: send the reset while
 * attempts remain; once exhausted, fall back depending on whether the
 * partner was PD capable and on our current role/state.
 */
static inline enum tcpm_state hard_reset_state(struct tcpm_port *port)
{
	if (port->hard_reset_count < PD_N_HARD_RESET_COUNT)
		return HARD_RESET_SEND;

	if (port->pd_capable)
		return ERROR_RECOVERY;

	if (port->pwr_role == TYPEC_SOURCE)
		return SRC_UNATTACHED;

	return port->state == SNK_WAIT_CAPABILITIES ? SNK_READY
						    : SNK_UNATTACHED;
}
/*
 * Unattached state matching the port type: DRP ports return to the
 * unattached state of their current power role, fixed ports to their
 * fixed role's state.
 */
static inline enum tcpm_state unattached_state(struct tcpm_port *port)
{
	switch (port->port_type) {
	case TYPEC_PORT_DRP:
		return port->pwr_role == TYPEC_SOURCE ? SRC_UNATTACHED
						      : SNK_UNATTACHED;
	case TYPEC_PORT_SRC:
		return SRC_UNATTACHED;
	default:
		return SNK_UNATTACHED;
	}
}
/*
 * Finish a pending role swap: record the result and wake the waiter
 * blocked on port->swap_complete.  No-op if no swap is pending.
 */
static void tcpm_swap_complete(struct tcpm_port *port, int result)
{
	if (!port->swap_pending)
		return;

	port->swap_status = result;
	port->swap_pending = false;
	port->non_pd_role_swap = false;
	complete(&port->swap_complete);
}
/*
 * Map an advertised CC pull-up (Rp) level to the corresponding Type-C
 * power operation mode.
 */
static enum typec_pwr_opmode tcpm_get_pwr_opmode(enum typec_cc_status cc)
{
	if (cc == TYPEC_CC_RP_1_5)
		return TYPEC_PWR_MODE_1_5A;
	if (cc == TYPEC_CC_RP_3_0)
		return TYPEC_PWR_MODE_3_0A;

	/* TYPEC_CC_RP_DEF and anything else: default USB power */
	return TYPEC_PWR_MODE_USB;
}
/*
 * Map a Type-C power operation mode to the CC pull-up (Rp) level a
 * source should advertise.
 */
static enum typec_cc_status tcpm_pwr_opmode_to_rp(enum typec_pwr_opmode opmode)
{
	if (opmode == TYPEC_PWR_MODE_USB)
		return TYPEC_CC_RP_DEF;
	if (opmode == TYPEC_PWR_MODE_1_5A)
		return TYPEC_CC_RP_1_5;

	/* TYPEC_PWR_MODE_3_0A, TYPEC_PWR_MODE_PD and anything else */
	return TYPEC_CC_RP_3_0;
}
/*
 * Align the partner's SVDM version with the negotiated Power Delivery
 * revision.  Under PD 3.0 the default (highest supported) SVDM version
 * is kept; everything else is limited to SVDM 1.0.
 */
static void tcpm_set_initial_svdm_version(struct tcpm_port *port)
{
	switch (port->negotiated_rev) {
	case PD_REV30:
		/* Keep the default SVDM version */
		break;
	/*
	 * 6.4.4.2.3 Structured VDM Version
	 * 2.0 states "At this time, there is only one version (1.0) defined.
	 * This field Shall be set to zero to indicate Version 1.0."
	 * 3.0 states "This field Shall be set to 01b to indicate Version 2.0."
	 * To ensure that we follow the Power Delivery revision we are currently
	 * operating on, downgrade the SVDM version to the highest one supported
	 * by the Power Delivery revision.
	 */
	case PD_REV20:
	default:
		typec_partner_set_svdm_version(port->partner, SVDM_VER_1_0);
		break;
	}
}
static void run_state_machine(struct tcpm_port *port)
{
int ret;
enum typec_pwr_opmode opmode;
unsigned int msecs;
enum tcpm_state upcoming_state;
if (port->tcpc->check_contaminant && port->state != CHECK_CONTAMINANT)
port->potential_contaminant = ((port->enter_state == SRC_ATTACH_WAIT &&
port->state == SRC_UNATTACHED) ||
(port->enter_state == SNK_ATTACH_WAIT &&
port->state == SNK_UNATTACHED));
port->enter_state = port->state;
switch (port->state) {
case TOGGLING:
break;
case CHECK_CONTAMINANT:
port->tcpc->check_contaminant(port->tcpc);
break;
/* SRC states */
case SRC_UNATTACHED:
if (!port->non_pd_role_swap)
tcpm_swap_complete(port, -ENOTCONN);
tcpm_src_detach(port);
if (port->potential_contaminant) {
tcpm_set_state(port, CHECK_CONTAMINANT, 0);
break;
}
if (tcpm_start_toggling(port, tcpm_rp_cc(port))) {
tcpm_set_state(port, TOGGLING, 0);
break;
}
tcpm_set_cc(port, tcpm_rp_cc(port));
if (port->port_type == TYPEC_PORT_DRP)
tcpm_set_state(port, SNK_UNATTACHED, PD_T_DRP_SNK);
break;
case SRC_ATTACH_WAIT:
if (tcpm_port_is_debug(port))
tcpm_set_state(port, DEBUG_ACC_ATTACHED,
PD_T_CC_DEBOUNCE);
else if (tcpm_port_is_audio(port))
tcpm_set_state(port, AUDIO_ACC_ATTACHED,
PD_T_CC_DEBOUNCE);
else if (tcpm_port_is_source(port) && port->vbus_vsafe0v)
tcpm_set_state(port,
tcpm_try_snk(port) ? SNK_TRY
: SRC_ATTACHED,
PD_T_CC_DEBOUNCE);
break;
case SNK_TRY:
port->try_snk_count++;
/*
* Requirements:
* - Do not drive vconn or vbus
* - Terminate CC pins (both) to Rd
* Action:
* - Wait for tDRPTry (PD_T_DRP_TRY).
* Until then, ignore any state changes.
*/
tcpm_set_cc(port, TYPEC_CC_RD);
tcpm_set_state(port, SNK_TRY_WAIT, PD_T_DRP_TRY);
break;
case SNK_TRY_WAIT:
if (tcpm_port_is_sink(port)) {
tcpm_set_state(port, SNK_TRY_WAIT_DEBOUNCE, 0);
} else {
tcpm_set_state(port, SRC_TRYWAIT, 0);
port->max_wait = 0;
}
break;
case SNK_TRY_WAIT_DEBOUNCE:
tcpm_set_state(port, SNK_TRY_WAIT_DEBOUNCE_CHECK_VBUS,
PD_T_TRY_CC_DEBOUNCE);
break;
case SNK_TRY_WAIT_DEBOUNCE_CHECK_VBUS:
if (port->vbus_present && tcpm_port_is_sink(port))
tcpm_set_state(port, SNK_ATTACHED, 0);
else
port->max_wait = 0;
break;
case SRC_TRYWAIT:
tcpm_set_cc(port, tcpm_rp_cc(port));
if (port->max_wait == 0) {
port->max_wait = jiffies +
msecs_to_jiffies(PD_T_DRP_TRY);
tcpm_set_state(port, SRC_TRYWAIT_UNATTACHED,
PD_T_DRP_TRY);
} else {
if (time_is_after_jiffies(port->max_wait))
tcpm_set_state(port, SRC_TRYWAIT_UNATTACHED,
jiffies_to_msecs(port->max_wait -
jiffies));
else
tcpm_set_state(port, SNK_UNATTACHED, 0);
}
break;
case SRC_TRYWAIT_DEBOUNCE:
tcpm_set_state(port, SRC_ATTACHED, PD_T_CC_DEBOUNCE);
break;
case SRC_TRYWAIT_UNATTACHED:
tcpm_set_state(port, SNK_UNATTACHED, 0);
break;
case SRC_ATTACHED:
ret = tcpm_src_attach(port);
tcpm_set_state(port, SRC_UNATTACHED,
ret < 0 ? 0 : PD_T_PS_SOURCE_ON);
break;
case SRC_STARTUP:
opmode = tcpm_get_pwr_opmode(tcpm_rp_cc(port));
typec_set_pwr_opmode(port->typec_port, opmode);
port->pwr_opmode = TYPEC_PWR_MODE_USB;
port->caps_count = 0;
port->negotiated_rev = PD_MAX_REV;
port->message_id = 0;
port->rx_msgid = -1;
port->explicit_contract = false;
/* SNK -> SRC POWER/FAST_ROLE_SWAP finished */
if (port->ams == POWER_ROLE_SWAP ||
port->ams == FAST_ROLE_SWAP)
tcpm_ams_finish(port);
if (!port->pd_supported) {
tcpm_set_state(port, SRC_READY, 0);
break;
}
port->upcoming_state = SRC_SEND_CAPABILITIES;
tcpm_ams_start(port, POWER_NEGOTIATION);
break;
case SRC_SEND_CAPABILITIES:
port->caps_count++;
if (port->caps_count > PD_N_CAPS_COUNT) {
tcpm_set_state(port, SRC_READY, 0);
break;
}
ret = tcpm_pd_send_source_caps(port);
if (ret < 0) {
tcpm_set_state(port, SRC_SEND_CAPABILITIES,
PD_T_SEND_SOURCE_CAP);
} else {
/*
* Per standard, we should clear the reset counter here.
* However, that can result in state machine hang-ups.
* Reset it only in READY state to improve stability.
*/
/* port->hard_reset_count = 0; */
port->caps_count = 0;
port->pd_capable = true;
tcpm_set_state_cond(port, SRC_SEND_CAPABILITIES_TIMEOUT,
PD_T_SEND_SOURCE_CAP);
}
break;
case SRC_SEND_CAPABILITIES_TIMEOUT:
/*
* Error recovery for a PD_DATA_SOURCE_CAP reply timeout.
*
* PD 2.0 sinks are supposed to accept src-capabilities with a
* 3.0 header and simply ignore any src PDOs which the sink does
* not understand such as PPS but some 2.0 sinks instead ignore
* the entire PD_DATA_SOURCE_CAP message, causing contract
* negotiation to fail.
*
* After PD_N_HARD_RESET_COUNT hard-reset attempts, we try
* sending src-capabilities with a lower PD revision to
* make these broken sinks work.
*/
if (port->hard_reset_count < PD_N_HARD_RESET_COUNT) {
tcpm_set_state(port, HARD_RESET_SEND, 0);
} else if (port->negotiated_rev > PD_REV20) {
port->negotiated_rev--;
port->hard_reset_count = 0;
tcpm_set_state(port, SRC_SEND_CAPABILITIES, 0);
} else {
tcpm_set_state(port, hard_reset_state(port), 0);
}
break;
case SRC_NEGOTIATE_CAPABILITIES:
ret = tcpm_pd_check_request(port);
if (ret < 0) {
tcpm_pd_send_control(port, PD_CTRL_REJECT);
if (!port->explicit_contract) {
tcpm_set_state(port,
SRC_WAIT_NEW_CAPABILITIES, 0);
} else {
tcpm_set_state(port, SRC_READY, 0);
}
} else {
tcpm_pd_send_control(port, PD_CTRL_ACCEPT);
tcpm_set_partner_usb_comm_capable(port,
!!(port->sink_request & RDO_USB_COMM));
tcpm_set_state(port, SRC_TRANSITION_SUPPLY,
PD_T_SRC_TRANSITION);
}
break;
case SRC_TRANSITION_SUPPLY:
/* XXX: regulator_set_voltage(vbus, ...) */
tcpm_pd_send_control(port, PD_CTRL_PS_RDY);
port->explicit_contract = true;
typec_set_pwr_opmode(port->typec_port, TYPEC_PWR_MODE_PD);
port->pwr_opmode = TYPEC_PWR_MODE_PD;
tcpm_set_state_cond(port, SRC_READY, 0);
break;
case SRC_READY:
#if 1
port->hard_reset_count = 0;
#endif
port->try_src_count = 0;
tcpm_swap_complete(port, 0);
tcpm_typec_connect(port);
if (port->ams != NONE_AMS)
tcpm_ams_finish(port);
if (port->next_ams != NONE_AMS) {
port->ams = port->next_ams;
port->next_ams = NONE_AMS;
}
/*
* If previous AMS is interrupted, switch to the upcoming
* state.
*/
if (port->upcoming_state != INVALID_STATE) {
upcoming_state = port->upcoming_state;
port->upcoming_state = INVALID_STATE;
tcpm_set_state(port, upcoming_state, 0);
break;
}
/*
* 6.4.4.3.1 Discover Identity
* "The Discover Identity Command Shall only be sent to SOP when there is an
* Explicit Contract."
* For now, this driver only supports SOP for DISCOVER_IDENTITY, thus using
* port->explicit_contract to decide whether to send the command.
*/
if (port->explicit_contract) {
tcpm_set_initial_svdm_version(port);
mod_send_discover_delayed_work(port, 0);
} else {
port->send_discover = false;
}
/*
* 6.3.5
* Sending ping messages is not necessary if
* - the source operates at vSafe5V
* or
* - The system is not operating in PD mode
* or
* - Both partners are connected using a Type-C connector
*
* There is no actual need to send PD messages since the local
* port type-c and the spec does not clearly say whether PD is
* possible when type-c is connected to Type-A/B
*/
break;
case SRC_WAIT_NEW_CAPABILITIES:
/* Nothing to do... */
break;
/* SNK states */
case SNK_UNATTACHED:
if (!port->non_pd_role_swap)
tcpm_swap_complete(port, -ENOTCONN);
tcpm_pps_complete(port, -ENOTCONN);
tcpm_snk_detach(port);
if (port->potential_contaminant) {
tcpm_set_state(port, CHECK_CONTAMINANT, 0);
break;
}
if (tcpm_start_toggling(port, TYPEC_CC_RD)) {
tcpm_set_state(port, TOGGLING, 0);
break;
}
tcpm_set_cc(port, TYPEC_CC_RD);
if (port->port_type == TYPEC_PORT_DRP)
tcpm_set_state(port, SRC_UNATTACHED, PD_T_DRP_SRC);
break;
case SNK_ATTACH_WAIT:
if ((port->cc1 == TYPEC_CC_OPEN &&
port->cc2 != TYPEC_CC_OPEN) ||
(port->cc1 != TYPEC_CC_OPEN &&
port->cc2 == TYPEC_CC_OPEN))
tcpm_set_state(port, SNK_DEBOUNCED,
PD_T_CC_DEBOUNCE);
else if (tcpm_port_is_disconnected(port))
tcpm_set_state(port, SNK_UNATTACHED,
PD_T_PD_DEBOUNCE);
break;
case SNK_DEBOUNCED:
if (tcpm_port_is_disconnected(port))
tcpm_set_state(port, SNK_UNATTACHED,
PD_T_PD_DEBOUNCE);
else if (port->vbus_present)
tcpm_set_state(port,
tcpm_try_src(port) ? SRC_TRY
: SNK_ATTACHED,
0);
break;
case SRC_TRY:
port->try_src_count++;
tcpm_set_cc(port, tcpm_rp_cc(port));
port->max_wait = 0;
tcpm_set_state(port, SRC_TRY_WAIT, 0);
break;
case SRC_TRY_WAIT:
if (port->max_wait == 0) {
port->max_wait = jiffies +
msecs_to_jiffies(PD_T_DRP_TRY);
msecs = PD_T_DRP_TRY;
} else {
if (time_is_after_jiffies(port->max_wait))
msecs = jiffies_to_msecs(port->max_wait -
jiffies);
else
msecs = 0;
}
tcpm_set_state(port, SNK_TRYWAIT, msecs);
break;
case SRC_TRY_DEBOUNCE:
tcpm_set_state(port, SRC_ATTACHED, PD_T_PD_DEBOUNCE);
break;
case SNK_TRYWAIT:
tcpm_set_cc(port, TYPEC_CC_RD);
tcpm_set_state(port, SNK_TRYWAIT_VBUS, PD_T_CC_DEBOUNCE);
break;
case SNK_TRYWAIT_VBUS:
/*
* TCPM stays in this state indefinitely until VBUS
* is detected as long as Rp is not detected for
* more than a time period of tPDDebounce.
*/
if (port->vbus_present && tcpm_port_is_sink(port)) {
tcpm_set_state(port, SNK_ATTACHED, 0);
break;
}
if (!tcpm_port_is_sink(port))
tcpm_set_state(port, SNK_TRYWAIT_DEBOUNCE, 0);
break;
case SNK_TRYWAIT_DEBOUNCE:
tcpm_set_state(port, SNK_UNATTACHED, PD_T_PD_DEBOUNCE);
break;
case SNK_ATTACHED:
ret = tcpm_snk_attach(port);
if (ret < 0)
tcpm_set_state(port, SNK_UNATTACHED, 0);
else
tcpm_set_state(port, SNK_STARTUP, 0);
break;
case SNK_STARTUP:
opmode = tcpm_get_pwr_opmode(port->polarity ?
port->cc2 : port->cc1);
typec_set_pwr_opmode(port->typec_port, opmode);
port->pwr_opmode = TYPEC_PWR_MODE_USB;
port->negotiated_rev = PD_MAX_REV;
port->message_id = 0;
port->rx_msgid = -1;
port->explicit_contract = false;
if (port->ams == POWER_ROLE_SWAP ||
port->ams == FAST_ROLE_SWAP)
/* SRC -> SNK POWER/FAST_ROLE_SWAP finished */
tcpm_ams_finish(port);
tcpm_set_state(port, SNK_DISCOVERY, 0);
break;
case SNK_DISCOVERY:
if (port->vbus_present) {
u32 current_lim = tcpm_get_current_limit(port);
if (port->slow_charger_loop && (current_lim > PD_P_SNK_STDBY_MW / 5))
current_lim = PD_P_SNK_STDBY_MW / 5;
tcpm_set_current_limit(port, current_lim, 5000);
/* Not sink vbus if operational current is 0mA */
tcpm_set_charge(port, !!pdo_max_current(port->snk_pdo[0]));
if (!port->pd_supported)
tcpm_set_state(port, SNK_READY, 0);
else
tcpm_set_state(port, SNK_WAIT_CAPABILITIES, 0);
break;
}
/*
* For DRP, timeouts differ. Also, handling is supposed to be
* different and much more complex (dead battery detection;
* see USB power delivery specification, section 8.3.3.6.1.5.1).
*/
tcpm_set_state(port, hard_reset_state(port),
port->port_type == TYPEC_PORT_DRP ?
PD_T_DB_DETECT : PD_T_NO_RESPONSE);
break;
case SNK_DISCOVERY_DEBOUNCE:
tcpm_set_state(port, SNK_DISCOVERY_DEBOUNCE_DONE,
PD_T_CC_DEBOUNCE);
break;
case SNK_DISCOVERY_DEBOUNCE_DONE:
if (!tcpm_port_is_disconnected(port) &&
tcpm_port_is_sink(port) &&
ktime_after(port->delayed_runtime, ktime_get())) {
tcpm_set_state(port, SNK_DISCOVERY,
ktime_to_ms(ktime_sub(port->delayed_runtime, ktime_get())));
break;
}
tcpm_set_state(port, unattached_state(port), 0);
break;
case SNK_WAIT_CAPABILITIES:
ret = port->tcpc->set_pd_rx(port->tcpc, true);
if (ret < 0) {
tcpm_set_state(port, SNK_READY, 0);
break;
}
/*
* If VBUS has never been low, and we time out waiting
* for source cap, try a soft reset first, in case we
* were already in a stable contract before this boot.
* Do this only once.
*/
if (port->vbus_never_low) {
port->vbus_never_low = false;
tcpm_set_state(port, SNK_SOFT_RESET,
PD_T_SINK_WAIT_CAP);
} else {
tcpm_set_state(port, hard_reset_state(port),
PD_T_SINK_WAIT_CAP);
}
break;
case SNK_NEGOTIATE_CAPABILITIES:
port->pd_capable = true;
tcpm_set_partner_usb_comm_capable(port,
!!(port->source_caps[0] & PDO_FIXED_USB_COMM));
port->hard_reset_count = 0;
ret = tcpm_pd_send_request(port);
if (ret < 0) {
/* Restore back to the original state */
tcpm_set_auto_vbus_discharge_threshold(port, TYPEC_PWR_MODE_PD,
port->pps_data.active,
port->supply_voltage);
/* Let the Source send capabilities again. */
tcpm_set_state(port, SNK_WAIT_CAPABILITIES, 0);
} else {
tcpm_set_state_cond(port, hard_reset_state(port),
PD_T_SENDER_RESPONSE);
}
break;
case SNK_NEGOTIATE_PPS_CAPABILITIES:
ret = tcpm_pd_send_pps_request(port);
if (ret < 0) {
/* Restore back to the original state */
tcpm_set_auto_vbus_discharge_threshold(port, TYPEC_PWR_MODE_PD,
port->pps_data.active,
port->supply_voltage);
port->pps_status = ret;
/*
* If this was called due to updates to sink
* capabilities, and pps is no longer valid, we should
* safely fall back to a standard PDO.
*/
if (port->update_sink_caps)
tcpm_set_state(port, SNK_NEGOTIATE_CAPABILITIES, 0);
else
tcpm_set_state(port, SNK_READY, 0);
} else {
tcpm_set_state_cond(port, hard_reset_state(port),
PD_T_SENDER_RESPONSE);
}
break;
case SNK_TRANSITION_SINK:
/* From the USB PD spec:
* "The Sink Shall transition to Sink Standby before a positive or
* negative voltage transition of VBUS. During Sink Standby
* the Sink Shall reduce its power draw to pSnkStdby."
*
* This is not applicable to PPS though as the port can continue
* to draw negotiated power without switching to standby.
*/
if (port->supply_voltage != port->req_supply_voltage && !port->pps_data.active &&
port->current_limit * port->supply_voltage / 1000 > PD_P_SNK_STDBY_MW) {
u32 stdby_ma = PD_P_SNK_STDBY_MW * 1000 / port->supply_voltage;
tcpm_log(port, "Setting standby current %u mV @ %u mA",
port->supply_voltage, stdby_ma);
tcpm_set_current_limit(port, stdby_ma, port->supply_voltage);
}
fallthrough;
case SNK_TRANSITION_SINK_VBUS:
tcpm_set_state(port, hard_reset_state(port),
PD_T_PS_TRANSITION);
break;
case SNK_READY:
port->try_snk_count = 0;
port->update_sink_caps = false;
if (port->explicit_contract) {
typec_set_pwr_opmode(port->typec_port,
TYPEC_PWR_MODE_PD);
port->pwr_opmode = TYPEC_PWR_MODE_PD;
}
if (!port->pd_capable && port->slow_charger_loop)
tcpm_set_current_limit(port, tcpm_get_current_limit(port), 5000);
tcpm_swap_complete(port, 0);
tcpm_typec_connect(port);
mod_enable_frs_delayed_work(port, 0);
tcpm_pps_complete(port, port->pps_status);
if (port->ams != NONE_AMS)
tcpm_ams_finish(port);
if (port->next_ams != NONE_AMS) {
port->ams = port->next_ams;
port->next_ams = NONE_AMS;
}
/*
* If previous AMS is interrupted, switch to the upcoming
* state.
*/
if (port->upcoming_state != INVALID_STATE) {
upcoming_state = port->upcoming_state;
port->upcoming_state = INVALID_STATE;
tcpm_set_state(port, upcoming_state, 0);
break;
}
/*
* 6.4.4.3.1 Discover Identity
* "The Discover Identity Command Shall only be sent to SOP when there is an
* Explicit Contract."
* For now, this driver only supports SOP for DISCOVER_IDENTITY, thus using
* port->explicit_contract.
*/
if (port->explicit_contract) {
tcpm_set_initial_svdm_version(port);
mod_send_discover_delayed_work(port, 0);
} else {
port->send_discover = false;
}
power_supply_changed(port->psy);
break;
/* Accessory states */
case ACC_UNATTACHED:
tcpm_acc_detach(port);
tcpm_set_state(port, SRC_UNATTACHED, 0);
break;
case DEBUG_ACC_ATTACHED:
case AUDIO_ACC_ATTACHED:
ret = tcpm_acc_attach(port);
if (ret < 0)
tcpm_set_state(port, ACC_UNATTACHED, 0);
break;
case AUDIO_ACC_DEBOUNCE:
tcpm_set_state(port, ACC_UNATTACHED, PD_T_CC_DEBOUNCE);
break;
/* Hard_Reset states */
case HARD_RESET_SEND:
if (port->ams != NONE_AMS)
tcpm_ams_finish(port);
/*
* State machine will be directed to HARD_RESET_START,
* thus set upcoming_state to INVALID_STATE.
*/
port->upcoming_state = INVALID_STATE;
tcpm_ams_start(port, HARD_RESET);
break;
case HARD_RESET_START:
port->sink_cap_done = false;
if (port->tcpc->enable_frs)
port->tcpc->enable_frs(port->tcpc, false);
port->hard_reset_count++;
port->tcpc->set_pd_rx(port->tcpc, false);
tcpm_unregister_altmodes(port);
port->nr_sink_caps = 0;
port->send_discover = true;
if (port->pwr_role == TYPEC_SOURCE)
tcpm_set_state(port, SRC_HARD_RESET_VBUS_OFF,
PD_T_PS_HARD_RESET);
else
tcpm_set_state(port, SNK_HARD_RESET_SINK_OFF, 0);
break;
case SRC_HARD_RESET_VBUS_OFF:
/*
* 7.1.5 Response to Hard Resets
* Hard Reset Signaling indicates a communication failure has occurred and the
* Source Shall stop driving VCONN, Shall remove Rp from the VCONN pin and Shall
* drive VBUS to vSafe0V as shown in Figure 7-9.
*/
tcpm_set_vconn(port, false);
tcpm_set_vbus(port, false);
tcpm_set_roles(port, port->self_powered, TYPEC_SOURCE,
tcpm_data_role_for_source(port));
/*
* If tcpc fails to notify vbus off, TCPM will wait for PD_T_SAFE_0V +
* PD_T_SRC_RECOVER before turning vbus back on.
* From Table 7-12 Sequence Description for a Source Initiated Hard Reset:
* 4. Policy Engine waits tPSHardReset after sending Hard Reset Signaling and then
* tells the Device Policy Manager to instruct the power supply to perform a
* Hard Reset. The transition to vSafe0V Shall occur within tSafe0V (t2).
* 5. After tSrcRecover the Source applies power to VBUS in an attempt to
* re-establish communication with the Sink and resume USB Default Operation.
* The transition to vSafe5V Shall occur within tSrcTurnOn(t4).
*/
tcpm_set_state(port, SRC_HARD_RESET_VBUS_ON, PD_T_SAFE_0V + PD_T_SRC_RECOVER);
break;
case SRC_HARD_RESET_VBUS_ON:
tcpm_set_vconn(port, true);
tcpm_set_vbus(port, true);
if (port->ams == HARD_RESET)
tcpm_ams_finish(port);
if (port->pd_supported)
port->tcpc->set_pd_rx(port->tcpc, true);
tcpm_set_attached_state(port, true);
tcpm_set_state(port, SRC_UNATTACHED, PD_T_PS_SOURCE_ON);
break;
case SNK_HARD_RESET_SINK_OFF:
/* Do not discharge/disconnect during hard reseet */
tcpm_set_auto_vbus_discharge_threshold(port, TYPEC_PWR_MODE_USB, false, 0);
memset(&port->pps_data, 0, sizeof(port->pps_data));
tcpm_set_vconn(port, false);
if (port->pd_capable)
tcpm_set_charge(port, false);
tcpm_set_roles(port, port->self_powered, TYPEC_SINK,
tcpm_data_role_for_sink(port));
/*
* VBUS may or may not toggle, depending on the adapter.
* If it doesn't toggle, transition to SNK_HARD_RESET_SINK_ON
* directly after timeout.
*/
tcpm_set_state(port, SNK_HARD_RESET_SINK_ON, PD_T_SAFE_0V);
break;
case SNK_HARD_RESET_WAIT_VBUS:
if (port->ams == HARD_RESET)
tcpm_ams_finish(port);
/* Assume we're disconnected if VBUS doesn't come back. */
tcpm_set_state(port, SNK_UNATTACHED,
PD_T_SRC_RECOVER_MAX + PD_T_SRC_TURN_ON);
break;
case SNK_HARD_RESET_SINK_ON:
/* Note: There is no guarantee that VBUS is on in this state */
/*
* XXX:
* The specification suggests that dual mode ports in sink
* mode should transition to state PE_SRC_Transition_to_default.
* See USB power delivery specification chapter 8.3.3.6.1.3.
* This would mean to
* - turn off VCONN, reset power supply
* - request hardware reset
* - turn on VCONN
* - Transition to state PE_Src_Startup
* SNK only ports shall transition to state Snk_Startup
* (see chapter 8.3.3.3.8).
* Similar, dual-mode ports in source mode should transition
* to PE_SNK_Transition_to_default.
*/
if (port->pd_capable) {
tcpm_set_current_limit(port,
tcpm_get_current_limit(port),
5000);
/* Not sink vbus if operational current is 0mA */
tcpm_set_charge(port, !!pdo_max_current(port->snk_pdo[0]));
}
if (port->ams == HARD_RESET)
tcpm_ams_finish(port);
tcpm_set_attached_state(port, true);
tcpm_set_auto_vbus_discharge_threshold(port, TYPEC_PWR_MODE_USB, false, VSAFE5V);
tcpm_set_state(port, SNK_STARTUP, 0);
break;
/* Soft_Reset states */
case SOFT_RESET:
port->message_id = 0;
port->rx_msgid = -1;
/* remove existing capabilities */
usb_power_delivery_unregister_capabilities(port->partner_source_caps);
port->partner_source_caps = NULL;
tcpm_pd_send_control(port, PD_CTRL_ACCEPT);
tcpm_ams_finish(port);
if (port->pwr_role == TYPEC_SOURCE) {
port->upcoming_state = SRC_SEND_CAPABILITIES;
tcpm_ams_start(port, POWER_NEGOTIATION);
} else {
tcpm_set_state(port, SNK_WAIT_CAPABILITIES, 0);
}
break;
case SRC_SOFT_RESET_WAIT_SNK_TX:
case SNK_SOFT_RESET:
if (port->ams != NONE_AMS)
tcpm_ams_finish(port);
port->upcoming_state = SOFT_RESET_SEND;
tcpm_ams_start(port, SOFT_RESET_AMS);
break;
case SOFT_RESET_SEND:
port->message_id = 0;
port->rx_msgid = -1;
/* remove existing capabilities */
usb_power_delivery_unregister_capabilities(port->partner_source_caps);
port->partner_source_caps = NULL;
if (tcpm_pd_send_control(port, PD_CTRL_SOFT_RESET))
tcpm_set_state_cond(port, hard_reset_state(port), 0);
else
tcpm_set_state_cond(port, hard_reset_state(port),
PD_T_SENDER_RESPONSE);
break;
/* DR_Swap states */
case DR_SWAP_SEND:
tcpm_pd_send_control(port, PD_CTRL_DR_SWAP);
if (port->data_role == TYPEC_DEVICE || port->negotiated_rev > PD_REV20)
port->send_discover = true;
tcpm_set_state_cond(port, DR_SWAP_SEND_TIMEOUT,
PD_T_SENDER_RESPONSE);
break;
case DR_SWAP_ACCEPT:
tcpm_pd_send_control(port, PD_CTRL_ACCEPT);
if (port->data_role == TYPEC_DEVICE || port->negotiated_rev > PD_REV20)
port->send_discover = true;
tcpm_set_state_cond(port, DR_SWAP_CHANGE_DR, 0);
break;
case DR_SWAP_SEND_TIMEOUT:
tcpm_swap_complete(port, -ETIMEDOUT);
port->send_discover = false;
tcpm_ams_finish(port);
tcpm_set_state(port, ready_state(port), 0);
break;
case DR_SWAP_CHANGE_DR:
tcpm_unregister_altmodes(port);
if (port->data_role == TYPEC_HOST)
tcpm_set_roles(port, true, port->pwr_role,
TYPEC_DEVICE);
else
tcpm_set_roles(port, true, port->pwr_role,
TYPEC_HOST);
tcpm_ams_finish(port);
tcpm_set_state(port, ready_state(port), 0);
break;
case FR_SWAP_SEND:
if (tcpm_pd_send_control(port, PD_CTRL_FR_SWAP)) {
tcpm_set_state(port, ERROR_RECOVERY, 0);
break;
}
tcpm_set_state_cond(port, FR_SWAP_SEND_TIMEOUT, PD_T_SENDER_RESPONSE);
break;
case FR_SWAP_SEND_TIMEOUT:
tcpm_set_state(port, ERROR_RECOVERY, 0);
break;
case FR_SWAP_SNK_SRC_TRANSITION_TO_OFF:
tcpm_set_state(port, ERROR_RECOVERY, PD_T_PS_SOURCE_OFF);
break;
case FR_SWAP_SNK_SRC_NEW_SINK_READY:
if (port->vbus_source)
tcpm_set_state(port, FR_SWAP_SNK_SRC_SOURCE_VBUS_APPLIED, 0);
else
tcpm_set_state(port, ERROR_RECOVERY, PD_T_RECEIVER_RESPONSE);
break;
case FR_SWAP_SNK_SRC_SOURCE_VBUS_APPLIED:
tcpm_set_pwr_role(port, TYPEC_SOURCE);
if (tcpm_pd_send_control(port, PD_CTRL_PS_RDY)) {
tcpm_set_state(port, ERROR_RECOVERY, 0);
break;
}
tcpm_set_cc(port, tcpm_rp_cc(port));
tcpm_set_state(port, SRC_STARTUP, PD_T_SWAP_SRC_START);
break;
/* PR_Swap states */
case PR_SWAP_ACCEPT:
tcpm_pd_send_control(port, PD_CTRL_ACCEPT);
tcpm_set_state(port, PR_SWAP_START, 0);
break;
case PR_SWAP_SEND:
tcpm_pd_send_control(port, PD_CTRL_PR_SWAP);
tcpm_set_state_cond(port, PR_SWAP_SEND_TIMEOUT,
PD_T_SENDER_RESPONSE);
break;
case PR_SWAP_SEND_TIMEOUT:
tcpm_swap_complete(port, -ETIMEDOUT);
tcpm_set_state(port, ready_state(port), 0);
break;
case PR_SWAP_START:
tcpm_apply_rc(port);
if (port->pwr_role == TYPEC_SOURCE)
tcpm_set_state(port, PR_SWAP_SRC_SNK_TRANSITION_OFF,
PD_T_SRC_TRANSITION);
else
tcpm_set_state(port, PR_SWAP_SNK_SRC_SINK_OFF, 0);
break;
case PR_SWAP_SRC_SNK_TRANSITION_OFF:
/*
* Prevent vbus discharge circuit from turning on during PR_SWAP
* as this is not a disconnect.
*/
tcpm_set_vbus(port, false);
port->explicit_contract = false;
/* allow time for Vbus discharge, must be < tSrcSwapStdby */
tcpm_set_state(port, PR_SWAP_SRC_SNK_SOURCE_OFF,
PD_T_SRCSWAPSTDBY);
break;
case PR_SWAP_SRC_SNK_SOURCE_OFF:
tcpm_set_cc(port, TYPEC_CC_RD);
/* allow CC debounce */
tcpm_set_state(port, PR_SWAP_SRC_SNK_SOURCE_OFF_CC_DEBOUNCED,
PD_T_CC_DEBOUNCE);
break;
case PR_SWAP_SRC_SNK_SOURCE_OFF_CC_DEBOUNCED:
/*
* USB-PD standard, 6.2.1.4, Port Power Role:
* "During the Power Role Swap Sequence, for the initial Source
* Port, the Port Power Role field shall be set to Sink in the
* PS_RDY Message indicating that the initial Source’s power
* supply is turned off"
*/
tcpm_set_pwr_role(port, TYPEC_SINK);
if (tcpm_pd_send_control(port, PD_CTRL_PS_RDY)) {
tcpm_set_state(port, ERROR_RECOVERY, 0);
break;
}
tcpm_set_state(port, ERROR_RECOVERY, PD_T_PS_SOURCE_ON_PRS);
break;
case PR_SWAP_SRC_SNK_SINK_ON:
tcpm_enable_auto_vbus_discharge(port, true);
/* Set the vbus disconnect threshold for implicit contract */
tcpm_set_auto_vbus_discharge_threshold(port, TYPEC_PWR_MODE_USB, false, VSAFE5V);
tcpm_set_state(port, SNK_STARTUP, 0);
break;
case PR_SWAP_SNK_SRC_SINK_OFF:
/* will be source, remove existing capabilities */
usb_power_delivery_unregister_capabilities(port->partner_source_caps);
port->partner_source_caps = NULL;
/*
* Prevent vbus discharge circuit from turning on during PR_SWAP
* as this is not a disconnect.
*/
tcpm_set_auto_vbus_discharge_threshold(port, TYPEC_PWR_MODE_USB,
port->pps_data.active, 0);
tcpm_set_charge(port, false);
tcpm_set_state(port, hard_reset_state(port),
PD_T_PS_SOURCE_OFF);
break;
case PR_SWAP_SNK_SRC_SOURCE_ON:
tcpm_enable_auto_vbus_discharge(port, true);
tcpm_set_cc(port, tcpm_rp_cc(port));
tcpm_set_vbus(port, true);
/*
* allow time VBUS ramp-up, must be < tNewSrc
* Also, this window overlaps with CC debounce as well.
* So, Wait for the max of two which is PD_T_NEWSRC
*/
tcpm_set_state(port, PR_SWAP_SNK_SRC_SOURCE_ON_VBUS_RAMPED_UP,
PD_T_NEWSRC);
break;
case PR_SWAP_SNK_SRC_SOURCE_ON_VBUS_RAMPED_UP:
/*
* USB PD standard, 6.2.1.4:
* "Subsequent Messages initiated by the Policy Engine,
* such as the PS_RDY Message sent to indicate that Vbus
* is ready, will have the Port Power Role field set to
* Source."
*/
tcpm_set_pwr_role(port, TYPEC_SOURCE);
tcpm_pd_send_control(port, PD_CTRL_PS_RDY);
tcpm_set_state(port, SRC_STARTUP, PD_T_SWAP_SRC_START);
break;
case VCONN_SWAP_ACCEPT:
tcpm_pd_send_control(port, PD_CTRL_ACCEPT);
tcpm_ams_finish(port);
tcpm_set_state(port, VCONN_SWAP_START, 0);
break;
case VCONN_SWAP_SEND:
tcpm_pd_send_control(port, PD_CTRL_VCONN_SWAP);
tcpm_set_state(port, VCONN_SWAP_SEND_TIMEOUT,
PD_T_SENDER_RESPONSE);
break;
case VCONN_SWAP_SEND_TIMEOUT:
tcpm_swap_complete(port, -ETIMEDOUT);
tcpm_set_state(port, ready_state(port), 0);
break;
case VCONN_SWAP_START:
if (port->vconn_role == TYPEC_SOURCE)
tcpm_set_state(port, VCONN_SWAP_WAIT_FOR_VCONN, 0);
else
tcpm_set_state(port, VCONN_SWAP_TURN_ON_VCONN, 0);
break;
case VCONN_SWAP_WAIT_FOR_VCONN:
tcpm_set_state(port, hard_reset_state(port),
PD_T_VCONN_SOURCE_ON);
break;
case VCONN_SWAP_TURN_ON_VCONN:
tcpm_set_vconn(port, true);
tcpm_pd_send_control(port, PD_CTRL_PS_RDY);
tcpm_set_state(port, ready_state(port), 0);
break;
case VCONN_SWAP_TURN_OFF_VCONN:
tcpm_set_vconn(port, false);
tcpm_set_state(port, ready_state(port), 0);
break;
case DR_SWAP_CANCEL:
case PR_SWAP_CANCEL:
case VCONN_SWAP_CANCEL:
tcpm_swap_complete(port, port->swap_status);
if (port->pwr_role == TYPEC_SOURCE)
tcpm_set_state(port, SRC_READY, 0);
else
tcpm_set_state(port, SNK_READY, 0);
break;
case FR_SWAP_CANCEL:
if (port->pwr_role == TYPEC_SOURCE)
tcpm_set_state(port, SRC_READY, 0);
else
tcpm_set_state(port, SNK_READY, 0);
break;
case BIST_RX:
switch (BDO_MODE_MASK(port->bist_request)) {
case BDO_MODE_CARRIER2:
tcpm_pd_transmit(port, TCPC_TX_BIST_MODE_2, NULL);
tcpm_set_state(port, unattached_state(port),
PD_T_BIST_CONT_MODE);
break;
case BDO_MODE_TESTDATA:
if (port->tcpc->set_bist_data) {
tcpm_log(port, "Enable BIST MODE TESTDATA");
port->tcpc->set_bist_data(port->tcpc, true);
}
break;
default:
break;
}
break;
case GET_STATUS_SEND:
tcpm_pd_send_control(port, PD_CTRL_GET_STATUS);
tcpm_set_state(port, GET_STATUS_SEND_TIMEOUT,
PD_T_SENDER_RESPONSE);
break;
case GET_STATUS_SEND_TIMEOUT:
tcpm_set_state(port, ready_state(port), 0);
break;
case GET_PPS_STATUS_SEND:
tcpm_pd_send_control(port, PD_CTRL_GET_PPS_STATUS);
tcpm_set_state(port, GET_PPS_STATUS_SEND_TIMEOUT,
PD_T_SENDER_RESPONSE);
break;
case GET_PPS_STATUS_SEND_TIMEOUT:
tcpm_set_state(port, ready_state(port), 0);
break;
case GET_SINK_CAP:
tcpm_pd_send_control(port, PD_CTRL_GET_SINK_CAP);
tcpm_set_state(port, GET_SINK_CAP_TIMEOUT, PD_T_SENDER_RESPONSE);
break;
case GET_SINK_CAP_TIMEOUT:
port->sink_cap_done = true;
tcpm_set_state(port, ready_state(port), 0);
break;
case ERROR_RECOVERY:
tcpm_swap_complete(port, -EPROTO);
tcpm_pps_complete(port, -EPROTO);
tcpm_set_state(port, PORT_RESET, 0);
break;
case PORT_RESET:
tcpm_reset_port(port);
tcpm_set_cc(port, tcpm_default_state(port) == SNK_UNATTACHED ?
TYPEC_CC_RD : tcpm_rp_cc(port));
tcpm_set_state(port, PORT_RESET_WAIT_OFF,
PD_T_ERROR_RECOVERY);
break;
case PORT_RESET_WAIT_OFF:
tcpm_set_state(port,
tcpm_default_state(port),
port->vbus_present ? PD_T_PS_SOURCE_OFF : 0);
break;
/* AMS intermediate state */
case AMS_START:
if (port->upcoming_state == INVALID_STATE) {
tcpm_set_state(port, port->pwr_role == TYPEC_SOURCE ?
SRC_READY : SNK_READY, 0);
break;
}
upcoming_state = port->upcoming_state;
port->upcoming_state = INVALID_STATE;
tcpm_set_state(port, upcoming_state, 0);
break;
/* Chunk state */
case CHUNK_NOT_SUPP:
tcpm_pd_send_control(port, PD_CTRL_NOT_SUPP);
tcpm_set_state(port, port->pwr_role == TYPEC_SOURCE ? SRC_READY : SNK_READY, 0);
break;
default:
WARN(1, "Unexpected port state %d\n", port->state);
break;
}
}
/*
 * State machine worker, run on the port's kthread worker.
 *
 * Flushes any queued PD message first, applies a pending delayed state
 * transition, then repeatedly invokes run_state_machine() until the state
 * stops changing or the next change is a delayed one (that delayed change
 * will re-queue this work later).  Everything runs under port->lock.
 */
static void tcpm_state_machine_work(struct kthread_work *work)
{
	struct tcpm_port *port = container_of(work, struct tcpm_port, state_machine);
	enum tcpm_state prev_state;

	mutex_lock(&port->lock);
	/* Flag that the state machine is active while we hold the lock */
	port->state_machine_running = true;

	/* A fully-sent queued message means nothing further to do this pass */
	if (port->queued_message && tcpm_send_queued_message(port))
		goto done;

	/* If we were queued due to a delayed state change, update it now */
	if (port->delayed_state) {
		tcpm_log(port, "state change %s -> %s [delayed %ld ms]",
			 tcpm_states[port->state],
			 tcpm_states[port->delayed_state], port->delay_ms);
		port->prev_state = port->state;
		port->state = port->delayed_state;
		port->delayed_state = INVALID_STATE;
	}

	/*
	 * Continue running as long as we have (non-delayed) state changes
	 * to make.
	 */
	do {
		prev_state = port->state;
		run_state_machine(port);
		if (port->queued_message)
			tcpm_send_queued_message(port);
	} while (port->state != prev_state && !port->delayed_state);

done:
	port->state_machine_running = false;
	mutex_unlock(&port->lock);
}
/*
 * Handle a CC1/CC2 status change reported by the TCPC.
 *
 * Caches the new CC values and dispatches on the current port state to
 * decide whether the change means attach, detach, a debounce (re)start,
 * or expected noise to be ignored (e.g. during PR_Swap, FR_Swap or port
 * reset).  Called from tcpm_pd_event_handler() with port->lock held.
 */
static void _tcpm_cc_change(struct tcpm_port *port, enum typec_cc_status cc1,
			    enum typec_cc_status cc2)
{
	enum typec_cc_status old_cc1, old_cc2;
	enum tcpm_state new_state;

	old_cc1 = port->cc1;
	old_cc2 = port->cc2;
	port->cc1 = cc1;
	port->cc2 = cc2;

	tcpm_log_force(port,
		       "CC1: %u -> %u, CC2: %u -> %u [state %s, polarity %d, %s]",
		       old_cc1, cc1, old_cc2, cc2, tcpm_states[port->state],
		       port->polarity,
		       tcpm_port_is_disconnected(port) ? "disconnected"
						       : "connected");

	switch (port->state) {
	case TOGGLING:
		if (tcpm_port_is_debug(port) || tcpm_port_is_audio(port) ||
		    tcpm_port_is_source(port))
			tcpm_set_state(port, SRC_ATTACH_WAIT, 0);
		else if (tcpm_port_is_sink(port))
			tcpm_set_state(port, SNK_ATTACH_WAIT, 0);
		break;
	case CHECK_CONTAMINANT:
		/* Wait for Toggling to be resumed */
		break;
	case SRC_UNATTACHED:
	case ACC_UNATTACHED:
		if (tcpm_port_is_debug(port) || tcpm_port_is_audio(port) ||
		    tcpm_port_is_source(port))
			tcpm_set_state(port, SRC_ATTACH_WAIT, 0);
		break;
	case SRC_ATTACH_WAIT:
		if (tcpm_port_is_disconnected(port) ||
		    tcpm_port_is_audio_detached(port))
			tcpm_set_state(port, SRC_UNATTACHED, 0);
		else if (cc1 != old_cc1 || cc2 != old_cc2)
			/* CC changed again: restart the attach-wait debounce */
			tcpm_set_state(port, SRC_ATTACH_WAIT, 0);
		break;
	case SRC_ATTACHED:
	case SRC_STARTUP:
	case SRC_SEND_CAPABILITIES:
	case SRC_READY:
		if (tcpm_port_is_disconnected(port) ||
		    !tcpm_port_is_source(port)) {
			if (port->port_type == TYPEC_PORT_SRC)
				tcpm_set_state(port, SRC_UNATTACHED, tcpm_wait_for_discharge(port));
			else
				tcpm_set_state(port, SNK_UNATTACHED, tcpm_wait_for_discharge(port));
		}
		break;
	case SNK_UNATTACHED:
		if (tcpm_port_is_sink(port))
			tcpm_set_state(port, SNK_ATTACH_WAIT, 0);
		break;
	case SNK_ATTACH_WAIT:
		/* Exactly one CC open + one non-open means debounced sink attach */
		if ((port->cc1 == TYPEC_CC_OPEN &&
		     port->cc2 != TYPEC_CC_OPEN) ||
		    (port->cc1 != TYPEC_CC_OPEN &&
		     port->cc2 == TYPEC_CC_OPEN))
			new_state = SNK_DEBOUNCED;
		else if (tcpm_port_is_disconnected(port))
			new_state = SNK_UNATTACHED;
		else
			break;
		/* Restart the debounce only if the target state changed */
		if (new_state != port->delayed_state)
			tcpm_set_state(port, SNK_ATTACH_WAIT, 0);
		break;
	case SNK_DEBOUNCED:
		if (tcpm_port_is_disconnected(port))
			new_state = SNK_UNATTACHED;
		else if (port->vbus_present)
			new_state = tcpm_try_src(port) ? SRC_TRY : SNK_ATTACHED;
		else
			new_state = SNK_UNATTACHED;
		if (new_state != port->delayed_state)
			tcpm_set_state(port, SNK_DEBOUNCED, 0);
		break;
	case SNK_READY:
		/*
		 * EXIT condition is based primarily on vbus disconnect and CC is secondary.
		 * "A port that has entered into USB PD communications with the Source and
		 * has seen the CC voltage exceed vRd-USB may monitor the CC pin to detect
		 * cable disconnect in addition to monitoring VBUS.
		 *
		 * A port that is monitoring the CC voltage for disconnect (but is not in
		 * the process of a USB PD PR_Swap or USB PD FR_Swap) shall transition to
		 * Unattached.SNK within tSinkDisconnect after the CC voltage remains below
		 * vRd-USB for tPDDebounce."
		 *
		 * When set_auto_vbus_discharge_threshold is enabled, CC pins go
		 * away before vbus decays to disconnect threshold. Allow
		 * disconnect to be driven by vbus disconnect when auto vbus
		 * discharge is enabled.
		 */
		if (!port->auto_vbus_discharge_enabled && tcpm_port_is_disconnected(port))
			tcpm_set_state(port, unattached_state(port), 0);
		else if (!port->pd_capable &&
			 (cc1 != old_cc1 || cc2 != old_cc2))
			/* Non-PD partner: re-read the Rp-advertised current limit */
			tcpm_set_current_limit(port,
					       tcpm_get_current_limit(port),
					       5000);
		break;
	case AUDIO_ACC_ATTACHED:
		if (cc1 == TYPEC_CC_OPEN || cc2 == TYPEC_CC_OPEN)
			tcpm_set_state(port, AUDIO_ACC_DEBOUNCE, 0);
		break;
	case AUDIO_ACC_DEBOUNCE:
		if (tcpm_port_is_audio(port))
			tcpm_set_state(port, AUDIO_ACC_ATTACHED, 0);
		break;
	case DEBUG_ACC_ATTACHED:
		if (cc1 == TYPEC_CC_OPEN || cc2 == TYPEC_CC_OPEN)
			tcpm_set_state(port, ACC_UNATTACHED, 0);
		break;
	case SNK_TRY:
		/* Do nothing, waiting for timeout */
		break;
	case SNK_DISCOVERY:
		/* CC line is unstable, wait for debounce */
		if (tcpm_port_is_disconnected(port))
			tcpm_set_state(port, SNK_DISCOVERY_DEBOUNCE, 0);
		break;
	case SNK_DISCOVERY_DEBOUNCE:
		break;
	case SRC_TRYWAIT:
		/* Hand over to state machine if needed */
		if (!port->vbus_present && tcpm_port_is_source(port))
			tcpm_set_state(port, SRC_TRYWAIT_DEBOUNCE, 0);
		break;
	case SRC_TRYWAIT_DEBOUNCE:
		if (port->vbus_present || !tcpm_port_is_source(port))
			tcpm_set_state(port, SRC_TRYWAIT, 0);
		break;
	case SNK_TRY_WAIT_DEBOUNCE:
		if (!tcpm_port_is_sink(port)) {
			port->max_wait = 0;
			tcpm_set_state(port, SRC_TRYWAIT, 0);
		}
		break;
	case SRC_TRY_WAIT:
		if (tcpm_port_is_source(port))
			tcpm_set_state(port, SRC_TRY_DEBOUNCE, 0);
		break;
	case SRC_TRY_DEBOUNCE:
		tcpm_set_state(port, SRC_TRY_WAIT, 0);
		break;
	case SNK_TRYWAIT_DEBOUNCE:
		if (tcpm_port_is_sink(port))
			tcpm_set_state(port, SNK_TRYWAIT_VBUS, 0);
		break;
	case SNK_TRYWAIT_VBUS:
		if (!tcpm_port_is_sink(port))
			tcpm_set_state(port, SNK_TRYWAIT_DEBOUNCE, 0);
		break;
	case SNK_TRY_WAIT_DEBOUNCE_CHECK_VBUS:
		if (!tcpm_port_is_sink(port))
			tcpm_set_state(port, SRC_TRYWAIT, PD_T_TRY_CC_DEBOUNCE);
		else
			tcpm_set_state(port, SNK_TRY_WAIT_DEBOUNCE_CHECK_VBUS, 0);
		break;
	case SNK_TRYWAIT:
		/* Do nothing, waiting for tCCDebounce */
		break;
	case PR_SWAP_SNK_SRC_SINK_OFF:
	case PR_SWAP_SRC_SNK_TRANSITION_OFF:
	case PR_SWAP_SRC_SNK_SOURCE_OFF:
	case PR_SWAP_SRC_SNK_SOURCE_OFF_CC_DEBOUNCED:
	case PR_SWAP_SNK_SRC_SOURCE_ON:
		/*
		 * CC state change is expected in PR_SWAP
		 * Ignore it.
		 */
		break;
	case FR_SWAP_SEND:
	case FR_SWAP_SEND_TIMEOUT:
	case FR_SWAP_SNK_SRC_TRANSITION_TO_OFF:
	case FR_SWAP_SNK_SRC_NEW_SINK_READY:
	case FR_SWAP_SNK_SRC_SOURCE_VBUS_APPLIED:
		/* Do nothing, CC change expected */
		break;
	case PORT_RESET:
	case PORT_RESET_WAIT_OFF:
		/*
		 * State set back to default mode once the timer completes.
		 * Ignore CC changes here.
		 */
		break;
	default:
		/*
		 * While acting as sink and auto vbus discharge is enabled, Allow disconnect
		 * to be driven by vbus disconnect.
		 */
		if (tcpm_port_is_disconnected(port) && !(port->pwr_role == TYPEC_SINK &&
							 port->auto_vbus_discharge_enabled))
			tcpm_set_state(port, unattached_state(port), 0);
		break;
	}
}
/*
 * Handle a "VBUS present" notification from the TCPC.
 *
 * Marks VBUS as present (and therefore not at vSafe0V) and advances any
 * state that was waiting for VBUS to appear.  Called with port->lock held
 * from tcpm_pd_event_handler().
 */
static void _tcpm_pd_vbus_on(struct tcpm_port *port)
{
	tcpm_log_force(port, "VBUS on");
	port->vbus_present = true;
	/*
	 * When vbus_present is true i.e. Voltage at VBUS is greater than VSAFE5V implicitly
	 * states that vbus is not at VSAFE0V, hence clear the vbus_vsafe0v flag here.
	 */
	port->vbus_vsafe0v = false;

	switch (port->state) {
	case SNK_TRANSITION_SINK_VBUS:
		port->explicit_contract = true;
		tcpm_set_state(port, SNK_READY, 0);
		break;
	case SNK_DISCOVERY:
		/* Re-enter SNK_DISCOVERY now that VBUS is present */
		tcpm_set_state(port, SNK_DISCOVERY, 0);
		break;
	case SNK_DEBOUNCED:
		tcpm_set_state(port, tcpm_try_src(port) ? SRC_TRY
							: SNK_ATTACHED,
			       0);
		break;
	case SNK_HARD_RESET_WAIT_VBUS:
		tcpm_set_state(port, SNK_HARD_RESET_SINK_ON, 0);
		break;
	case SRC_ATTACHED:
		tcpm_set_state(port, SRC_STARTUP, 0);
		break;
	case SRC_HARD_RESET_VBUS_ON:
		tcpm_set_state(port, SRC_STARTUP, 0);
		break;
	case SNK_TRY:
		/* Do nothing, waiting for timeout */
		break;
	case SRC_TRYWAIT:
		/* Do nothing, Waiting for Rd to be detected */
		break;
	case SRC_TRYWAIT_DEBOUNCE:
		tcpm_set_state(port, SRC_TRYWAIT, 0);
		break;
	case SNK_TRY_WAIT_DEBOUNCE:
		/* Do nothing, waiting for PD_DEBOUNCE to be done */
		break;
	case SNK_TRYWAIT:
		/* Do nothing, waiting for tCCDebounce */
		break;
	case SNK_TRYWAIT_VBUS:
		if (tcpm_port_is_sink(port))
			tcpm_set_state(port, SNK_ATTACHED, 0);
		break;
	case SNK_TRYWAIT_DEBOUNCE:
		/* Do nothing, waiting for Rp */
		break;
	case SNK_TRY_WAIT_DEBOUNCE_CHECK_VBUS:
		if (port->vbus_present && tcpm_port_is_sink(port))
			tcpm_set_state(port, SNK_ATTACHED, 0);
		break;
	case SRC_TRY_WAIT:
	case SRC_TRY_DEBOUNCE:
		/* Do nothing, waiting for sink detection */
		break;
	case FR_SWAP_SEND:
	case FR_SWAP_SEND_TIMEOUT:
	case FR_SWAP_SNK_SRC_TRANSITION_TO_OFF:
	case FR_SWAP_SNK_SRC_SOURCE_VBUS_APPLIED:
		/* Let the TCPC know it is now autonomously sourcing VBUS (FRS) */
		if (port->tcpc->frs_sourcing_vbus)
			port->tcpc->frs_sourcing_vbus(port->tcpc);
		break;
	case FR_SWAP_SNK_SRC_NEW_SINK_READY:
		if (port->tcpc->frs_sourcing_vbus)
			port->tcpc->frs_sourcing_vbus(port->tcpc);
		tcpm_set_state(port, FR_SWAP_SNK_SRC_SOURCE_VBUS_APPLIED, 0);
		break;
	case PORT_RESET:
	case PORT_RESET_WAIT_OFF:
		/*
		 * State set back to default mode once the timer completes.
		 * Ignore vbus changes here.
		 */
		break;
	default:
		break;
	}
}
/*
 * Handle a "VBUS absent" notification from the TCPC.
 *
 * Marks VBUS as gone and advances any state that was waiting for VBUS to
 * drop; in states where the drop is unexpected the port is forced back to
 * an unattached state.  Called with port->lock held from
 * tcpm_pd_event_handler().
 */
static void _tcpm_pd_vbus_off(struct tcpm_port *port)
{
	tcpm_log_force(port, "VBUS off");
	port->vbus_present = false;
	port->vbus_never_low = false;

	switch (port->state) {
	case SNK_HARD_RESET_SINK_OFF:
		tcpm_set_state(port, SNK_HARD_RESET_WAIT_VBUS, 0);
		break;
	case HARD_RESET_SEND:
		break;
	case SNK_TRY:
		/* Do nothing, waiting for timeout */
		break;
	case SRC_TRYWAIT:
		/* Hand over to state machine if needed */
		if (tcpm_port_is_source(port))
			tcpm_set_state(port, SRC_TRYWAIT_DEBOUNCE, 0);
		break;
	case SNK_TRY_WAIT_DEBOUNCE:
		/* Do nothing, waiting for PD_DEBOUNCE to be done */
		break;
	case SNK_TRYWAIT:
	case SNK_TRYWAIT_VBUS:
	case SNK_TRYWAIT_DEBOUNCE:
		break;
	case SNK_ATTACH_WAIT:
	case SNK_DEBOUNCED:
		/* Do nothing, as TCPM is still waiting for vbus to reach VSAFE5V to connect */
		break;
	case SNK_NEGOTIATE_CAPABILITIES:
		break;
	case PR_SWAP_SRC_SNK_TRANSITION_OFF:
		tcpm_set_state(port, PR_SWAP_SRC_SNK_SOURCE_OFF, 0);
		break;
	case PR_SWAP_SNK_SRC_SINK_OFF:
		/* Do nothing, expected */
		break;
	case PR_SWAP_SNK_SRC_SOURCE_ON:
		/*
		 * Do nothing when vbus off notification is received.
		 * TCPM can wait for PD_T_NEWSRC in PR_SWAP_SNK_SRC_SOURCE_ON
		 * for the vbus source to ramp up.
		 */
		break;
	case PORT_RESET_WAIT_OFF:
		tcpm_set_state(port, tcpm_default_state(port), 0);
		break;
	case SRC_TRY_WAIT:
	case SRC_TRY_DEBOUNCE:
		/* Do nothing, waiting for sink detection */
		break;
	case SRC_STARTUP:
	case SRC_SEND_CAPABILITIES:
	case SRC_SEND_CAPABILITIES_TIMEOUT:
	case SRC_NEGOTIATE_CAPABILITIES:
	case SRC_TRANSITION_SUPPLY:
	case SRC_READY:
	case SRC_WAIT_NEW_CAPABILITIES:
		/*
		 * Force to unattached state to re-initiate connection.
		 * DRP port should move to Unattached.SNK instead of Unattached.SRC if
		 * sink removed. Although sink removal here is due to source's vbus collapse,
		 * treat it the same way for consistency.
		 */
		if (port->port_type == TYPEC_PORT_SRC)
			tcpm_set_state(port, SRC_UNATTACHED, tcpm_wait_for_discharge(port));
		else
			tcpm_set_state(port, SNK_UNATTACHED, tcpm_wait_for_discharge(port));
		break;
	case PORT_RESET:
		/*
		 * State set back to default mode once the timer completes.
		 * Ignore vbus changes here.
		 */
		break;
	case FR_SWAP_SEND:
	case FR_SWAP_SEND_TIMEOUT:
	case FR_SWAP_SNK_SRC_TRANSITION_TO_OFF:
	case FR_SWAP_SNK_SRC_NEW_SINK_READY:
	case FR_SWAP_SNK_SRC_SOURCE_VBUS_APPLIED:
		/* Do nothing, vbus drop expected */
		break;
	case SNK_HARD_RESET_WAIT_VBUS:
		/* Do nothing, its OK to receive vbus off events */
		break;
	default:
		/* Attached sink losing VBUS means the source went away */
		if (port->pwr_role == TYPEC_SINK && port->attached)
			tcpm_set_state(port, SNK_UNATTACHED, tcpm_wait_for_discharge(port));
		break;
	}
}
/*
 * Handle a "VBUS reached vSafe0V" notification from the TCPC.
 *
 * Records the vSafe0V condition and advances states that were waiting for
 * VBUS to fully discharge (hard reset recovery, source attach after Rd
 * detection, auto-discharge driven disconnect).  Called with port->lock
 * held from tcpm_pd_event_handler().
 */
static void _tcpm_pd_vbus_vsafe0v(struct tcpm_port *port)
{
	tcpm_log_force(port, "VBUS VSAFE0V");
	port->vbus_vsafe0v = true;

	switch (port->state) {
	case SRC_HARD_RESET_VBUS_OFF:
		/*
		 * After establishing the vSafe0V voltage condition on VBUS, the Source Shall wait
		 * tSrcRecover before re-applying VCONN and restoring VBUS to vSafe5V.
		 */
		tcpm_set_state(port, SRC_HARD_RESET_VBUS_ON, PD_T_SRC_RECOVER);
		break;
	case SRC_ATTACH_WAIT:
		if (tcpm_port_is_source(port))
			tcpm_set_state(port, tcpm_try_snk(port) ? SNK_TRY : SRC_ATTACHED,
				       PD_T_CC_DEBOUNCE);
		break;
	case SRC_STARTUP:
	case SRC_SEND_CAPABILITIES:
	case SRC_SEND_CAPABILITIES_TIMEOUT:
	case SRC_NEGOTIATE_CAPABILITIES:
	case SRC_TRANSITION_SUPPLY:
	case SRC_READY:
	case SRC_WAIT_NEW_CAPABILITIES:
		/* With auto discharge, vSafe0V while sourcing means detach */
		if (port->auto_vbus_discharge_enabled) {
			if (port->port_type == TYPEC_PORT_SRC)
				tcpm_set_state(port, SRC_UNATTACHED, 0);
			else
				tcpm_set_state(port, SNK_UNATTACHED, 0);
		}
		break;
	case PR_SWAP_SNK_SRC_SINK_OFF:
	case PR_SWAP_SNK_SRC_SOURCE_ON:
		/* Do nothing, vsafe0v is expected during transition */
		break;
	case SNK_ATTACH_WAIT:
	case SNK_DEBOUNCED:
		/* Do nothing, still waiting for VSAFE5V for connect */
		break;
	case SNK_HARD_RESET_WAIT_VBUS:
		/* Do nothing, its OK to receive vbus off events */
		break;
	default:
		if (port->pwr_role == TYPEC_SINK && port->auto_vbus_discharge_enabled)
			tcpm_set_state(port, SNK_UNATTACHED, 0);
		break;
	}
}
/*
 * Handle reception of Hard Reset Signaling (TCPM_RESET_EVENT).
 *
 * Leaves BIST test-data mode if it was active, records whether a
 * HARD_RESET AMS is still in progress, and kicks the state machine into
 * HARD_RESET_START — or ERROR_RECOVERY once PD_N_HARD_RESET_COUNT hard
 * resets have already been attempted.  Called with port->lock held from
 * tcpm_pd_event_handler().
 */
static void _tcpm_pd_hard_reset(struct tcpm_port *port)
{
	tcpm_log_force(port, "Received hard reset");

	/* Disable BIST test-data mode if the TCPC had it enabled */
	if (port->bist_request == BDO_MODE_TESTDATA && port->tcpc->set_bist_data)
		port->tcpc->set_bist_data(port->tcpc, false);

	/*
	 * A hard reset aborts any in-progress AMS.  Track a HARD_RESET AMS
	 * only while we still intend to execute it (retry budget left).
	 * (Original code cleared the field behind a redundant
	 * "!= NONE_AMS" guard before conditionally overwriting it; the
	 * single conditional assignment below is equivalent.)
	 */
	port->ams = port->hard_reset_count < PD_N_HARD_RESET_COUNT ?
			HARD_RESET : NONE_AMS;

	/*
	 * If we keep receiving hard reset requests, executing the hard reset
	 * must have failed. Revert to error recovery if that happens.
	 */
	tcpm_set_state(port,
		       port->hard_reset_count < PD_N_HARD_RESET_COUNT ?
				HARD_RESET_START : ERROR_RECOVERY,
		       0);
}
/*
 * Event worker: drains the pd_events bitmask posted by the notifier
 * functions (tcpm_cc_change(), tcpm_vbus_change(), ...).
 *
 * port->lock (mutex) is held for the whole run; pd_event_lock (spinlock)
 * is only held while snapshotting/clearing the bitmask, and dropped while
 * the events are processed so notifiers can post new ones concurrently.
 * The outer while loop re-checks for events posted during processing.
 */
static void tcpm_pd_event_handler(struct kthread_work *work)
{
	struct tcpm_port *port = container_of(work, struct tcpm_port,
					      event_work);
	u32 events;

	mutex_lock(&port->lock);

	spin_lock(&port->pd_event_lock);
	while (port->pd_events) {
		/* Snapshot and clear pending events atomically */
		events = port->pd_events;
		port->pd_events = 0;
		spin_unlock(&port->pd_event_lock);

		if (events & TCPM_RESET_EVENT)
			_tcpm_pd_hard_reset(port);
		if (events & TCPM_VBUS_EVENT) {
			bool vbus;

			vbus = port->tcpc->get_vbus(port->tcpc);
			if (vbus) {
				_tcpm_pd_vbus_on(port);
			} else {
				_tcpm_pd_vbus_off(port);
				/*
				 * When TCPC does not support detecting vsafe0v voltage level,
				 * treat vbus absent as vsafe0v. Else invoke is_vbus_vsafe0v
				 * to see if vbus has discharge to VSAFE0V.
				 */
				if (!port->tcpc->is_vbus_vsafe0v ||
				    port->tcpc->is_vbus_vsafe0v(port->tcpc))
					_tcpm_pd_vbus_vsafe0v(port);
			}
		}
		if (events & TCPM_CC_EVENT) {
			enum typec_cc_status cc1, cc2;

			if (port->tcpc->get_cc(port->tcpc, &cc1, &cc2) == 0)
				_tcpm_cc_change(port, cc1, cc2);
		}
		if (events & TCPM_FRS_EVENT) {
			/* Fast Role Swap signal: only valid while in SNK_READY */
			if (port->state == SNK_READY) {
				int ret;

				port->upcoming_state = FR_SWAP_SEND;
				ret = tcpm_ams_start(port, FAST_ROLE_SWAP);
				if (ret == -EAGAIN)
					port->upcoming_state = INVALID_STATE;
			} else {
				tcpm_log(port, "Discarding FRS_SIGNAL! Not in sink ready");
			}
		}
		if (events & TCPM_SOURCING_VBUS) {
			tcpm_log(port, "sourcing vbus");
			/*
			 * In fast role swap case TCPC autonomously sources vbus. Set vbus_source
			 * true as TCPM wouldn't have called tcpm_set_vbus.
			 *
			 * When vbus is sourced on the command on TCPM i.e. TCPM called
			 * tcpm_set_vbus to source vbus, vbus_source would already be true.
			 */
			port->vbus_source = true;
			_tcpm_pd_vbus_on(port);
		}
		if (events & TCPM_PORT_CLEAN) {
			tcpm_log(port, "port clean");
			if (port->state == CHECK_CONTAMINANT) {
				if (tcpm_start_toggling(port, tcpm_rp_cc(port)))
					tcpm_set_state(port, TOGGLING, 0);
				else
					tcpm_set_state(port, tcpm_default_state(port), 0);
			}
		}

		spin_lock(&port->pd_event_lock);
	}
	spin_unlock(&port->pd_event_lock);
	mutex_unlock(&port->lock);
}
/*
 * tcpm_cc_change - notify TCPM of a CC line status change
 * @port: tcpm port
 *
 * Records a TCPM_CC_EVENT under pd_event_lock and queues the event
 * worker, which reads the new CC state via tcpc->get_cc().
 */
void tcpm_cc_change(struct tcpm_port *port)
{
	spin_lock(&port->pd_event_lock);
	port->pd_events |= TCPM_CC_EVENT;
	spin_unlock(&port->pd_event_lock);
	kthread_queue_work(port->wq, &port->event_work);
}
EXPORT_SYMBOL_GPL(tcpm_cc_change);
/*
 * tcpm_vbus_change - notify TCPM of a VBUS level change
 * @port: tcpm port
 *
 * Records a TCPM_VBUS_EVENT under pd_event_lock and queues the event
 * worker, which reads the new VBUS state via tcpc->get_vbus().
 */
void tcpm_vbus_change(struct tcpm_port *port)
{
	spin_lock(&port->pd_event_lock);
	port->pd_events |= TCPM_VBUS_EVENT;
	spin_unlock(&port->pd_event_lock);
	kthread_queue_work(port->wq, &port->event_work);
}
EXPORT_SYMBOL_GPL(tcpm_vbus_change);
/*
 * tcpm_pd_hard_reset - notify TCPM that Hard Reset Signaling was received
 * @port: tcpm port
 *
 * NOTE(review): unlike the other notifiers this assigns with '=' rather
 * than '|=', so any events already pending are dropped — presumably
 * intentional since a hard reset supersedes them; confirm before changing.
 */
void tcpm_pd_hard_reset(struct tcpm_port *port)
{
	spin_lock(&port->pd_event_lock);
	port->pd_events = TCPM_RESET_EVENT;
	spin_unlock(&port->pd_event_lock);
	kthread_queue_work(port->wq, &port->event_work);
}
EXPORT_SYMBOL_GPL(tcpm_pd_hard_reset);
/*
 * tcpm_sink_frs - notify TCPM that a Fast Role Swap signal was detected
 * @port: tcpm port
 *
 * Records a TCPM_FRS_EVENT under pd_event_lock and queues the event
 * worker; the event is acted on only if the port is in SNK_READY.
 */
void tcpm_sink_frs(struct tcpm_port *port)
{
	spin_lock(&port->pd_event_lock);
	port->pd_events |= TCPM_FRS_EVENT;
	spin_unlock(&port->pd_event_lock);
	kthread_queue_work(port->wq, &port->event_work);
}
EXPORT_SYMBOL_GPL(tcpm_sink_frs);
/*
 * tcpm_sourcing_vbus - notify TCPM that the TCPC started sourcing VBUS
 * @port: tcpm port
 *
 * Records a TCPM_SOURCING_VBUS event under pd_event_lock and queues the
 * event worker (used e.g. when the TCPC sources VBUS autonomously on FRS).
 */
void tcpm_sourcing_vbus(struct tcpm_port *port)
{
	spin_lock(&port->pd_event_lock);
	port->pd_events |= TCPM_SOURCING_VBUS;
	spin_unlock(&port->pd_event_lock);
	kthread_queue_work(port->wq, &port->event_work);
}
EXPORT_SYMBOL_GPL(tcpm_sourcing_vbus);
/*
 * tcpm_port_clean - notify TCPM that the port is free of contaminant
 * @port: tcpm port
 *
 * Records a TCPM_PORT_CLEAN event under pd_event_lock and queues the
 * event worker, which resumes toggling if the port was in
 * CHECK_CONTAMINANT.
 */
void tcpm_port_clean(struct tcpm_port *port)
{
	spin_lock(&port->pd_event_lock);
	port->pd_events |= TCPM_PORT_CLEAN;
	spin_unlock(&port->pd_event_lock);
	kthread_queue_work(port->wq, &port->event_work);
}
EXPORT_SYMBOL_GPL(tcpm_port_clean);
bool tcpm_port_is_toggling(struct tcpm_port *port)
{
return port->port_type == TYPEC_PORT_DRP && port->state == TOGGLING;
}
EXPORT_SYMBOL_GPL(tcpm_port_is_toggling);
/*
 * Delayed worker that queries the partner's sink capabilities so FRS
 * (Fast Role Swap) support can be evaluated/enabled.
 *
 * Bails out permanently if the port cannot do FRS (not a connected DRP,
 * not in PD power mode, TCPC lacks enable_frs, caps already queried, or
 * PD revision < 3.0).  If the state machine is busy, reschedules itself
 * after GET_SINK_CAP_RETRY_MS; otherwise starts a GET_SINK_CAPABILITIES
 * AMS via the GET_SINK_CAP state.
 */
static void tcpm_enable_frs_work(struct kthread_work *work)
{
	struct tcpm_port *port = container_of(work, struct tcpm_port, enable_frs);
	int ret;

	mutex_lock(&port->lock);
	/* Not FRS capable */
	if (!port->connected || port->port_type != TYPEC_PORT_DRP ||
	    port->pwr_opmode != TYPEC_PWR_MODE_PD ||
	    !port->tcpc->enable_frs ||
	    /* Sink caps queried */
	    port->sink_cap_done || port->negotiated_rev < PD_REV30)
		goto unlock;

	/* Send when the state machine is idle */
	if (port->state != SNK_READY || port->vdm_sm_running || port->send_discover)
		goto resched;

	port->upcoming_state = GET_SINK_CAP;
	ret = tcpm_ams_start(port, GET_SINK_CAPABILITIES);
	if (ret == -EAGAIN) {
		/* AMS could not start now; retry via resched below */
		port->upcoming_state = INVALID_STATE;
	} else {
		port->sink_cap_done = true;
		goto unlock;
	}

resched:
	mod_enable_frs_delayed_work(port, GET_SINK_CAP_RETRY_MS);
unlock:
	mutex_unlock(&port->lock);
}
/*
 * Delayed worker that sends the Discover Identity VDM once the port is
 * idle in SRC_READY/SNK_READY.
 *
 * Skips sending when send_discover has been cleared, or when acting as a
 * PD 2.0 UFP (only the DFP initiates discovery before PD 3.0).  If the
 * port is busy it reschedules itself after SEND_DISCOVER_RETRY_MS.
 */
static void tcpm_send_discover_work(struct kthread_work *work)
{
	struct tcpm_port *port = container_of(work, struct tcpm_port, send_discover_work);

	mutex_lock(&port->lock);
	/* No need to send DISCOVER_IDENTITY anymore */
	if (!port->send_discover)
		goto unlock;

	/* Before PD 3.0 only the host/DFP side sends Discover Identity */
	if (port->data_role == TYPEC_DEVICE && port->negotiated_rev < PD_REV30) {
		port->send_discover = false;
		goto unlock;
	}

	/* Retry if the port is not idle */
	if ((port->state != SRC_READY && port->state != SNK_READY) || port->vdm_sm_running) {
		mod_send_discover_delayed_work(port, SEND_DISCOVER_RETRY_MS);
		goto unlock;
	}

	tcpm_send_vdm(port, USB_SID_PD, CMD_DISCOVER_IDENT, NULL, 0);

unlock:
	mutex_unlock(&port->lock);
}
/*
 * tcpm_dr_set - typec class callback to request a data role swap
 * @p: typec port
 * @data: requested data role (host/device)
 *
 * Validates that the port is dual-data capable and idle, then either
 * starts a DR_Swap AMS (PD-capable partner) or resets the port to force a
 * role change (non-PD partner, only possible when the configured preferred
 * role matches the request).  Blocks up to PD_ROLE_SWAP_TIMEOUT waiting
 * for the swap to complete; swap_lock serializes concurrent swap requests.
 *
 * Return: 0 on success, -EINVAL / -EAGAIN on rejection, -ETIMEDOUT on
 * timeout, or the swap's completion status.
 */
static int tcpm_dr_set(struct typec_port *p, enum typec_data_role data)
{
	struct tcpm_port *port = typec_get_drvdata(p);
	int ret;

	mutex_lock(&port->swap_lock);
	mutex_lock(&port->lock);

	if (port->typec_caps.data != TYPEC_PORT_DRD) {
		ret = -EINVAL;
		goto port_unlock;
	}
	if (port->state != SRC_READY && port->state != SNK_READY) {
		ret = -EAGAIN;
		goto port_unlock;
	}

	if (port->data_role == data) {
		ret = 0;
		goto port_unlock;
	}

	/*
	 * XXX
	 * 6.3.9: If an alternate mode is active, a request to swap
	 * alternate modes shall trigger a port reset.
	 * Reject data role swap request in this case.
	 */

	if (!port->pd_capable) {
		/*
		 * If the partner is not PD capable, reset the port to
		 * trigger a role change. This can only work if a preferred
		 * role is configured, and if it matches the requested role.
		 */
		if (port->try_role == TYPEC_NO_PREFERRED_ROLE ||
		    port->try_role == port->pwr_role) {
			ret = -EINVAL;
			goto port_unlock;
		}
		port->non_pd_role_swap = true;
		tcpm_set_state(port, PORT_RESET, 0);
	} else {
		port->upcoming_state = DR_SWAP_SEND;
		ret = tcpm_ams_start(port, DATA_ROLE_SWAP);
		if (ret == -EAGAIN) {
			port->upcoming_state = INVALID_STATE;
			goto port_unlock;
		}
	}

	port->swap_status = 0;
	port->swap_pending = true;
	reinit_completion(&port->swap_complete);
	/* Drop port->lock so the state machine can run while we wait */
	mutex_unlock(&port->lock);

	if (!wait_for_completion_timeout(&port->swap_complete,
				msecs_to_jiffies(PD_ROLE_SWAP_TIMEOUT)))
		ret = -ETIMEDOUT;
	else
		ret = port->swap_status;

	port->non_pd_role_swap = false;
	goto swap_unlock;

port_unlock:
	mutex_unlock(&port->lock);
swap_unlock:
	mutex_unlock(&port->swap_lock);
	return ret;
}
/*
 * tcpm_pr_set - typec class callback to change the power role (source/sink).
 *
 * Only valid on a DRP port with a PD-ready partner.  Same locking/wait
 * pattern as tcpm_dr_set(): swap_lock serializes swaps, port->lock is
 * released while waiting for completion.
 */
static int tcpm_pr_set(struct typec_port *p, enum typec_role role)
{
	struct tcpm_port *port = typec_get_drvdata(p);
	int ret;

	mutex_lock(&port->swap_lock);
	mutex_lock(&port->lock);
	if (port->port_type != TYPEC_PORT_DRP) {
		ret = -EINVAL;
		goto port_unlock;
	}
	if (port->state != SRC_READY && port->state != SNK_READY) {
		ret = -EAGAIN;
		goto port_unlock;
	}
	if (role == port->pwr_role) {
		/* Already in the requested role */
		ret = 0;
		goto port_unlock;
	}
	port->upcoming_state = PR_SWAP_SEND;
	ret = tcpm_ams_start(port, POWER_ROLE_SWAP);
	if (ret == -EAGAIN) {
		port->upcoming_state = INVALID_STATE;
		goto port_unlock;
	}
	port->swap_status = 0;
	port->swap_pending = true;
	reinit_completion(&port->swap_complete);
	mutex_unlock(&port->lock);
	if (!wait_for_completion_timeout(&port->swap_complete,
				msecs_to_jiffies(PD_ROLE_SWAP_TIMEOUT)))
		ret = -ETIMEDOUT;
	else
		ret = port->swap_status;
	goto swap_unlock;
port_unlock:
	mutex_unlock(&port->lock);
swap_unlock:
	mutex_unlock(&port->swap_lock);
	return ret;
}
/*
 * tcpm_vconn_set - typec class callback to change which side sources VCONN.
 *
 * Starts a VCONN_SWAP AMS and waits (without port->lock held) for the state
 * machine to signal completion via swap_complete.
 */
static int tcpm_vconn_set(struct typec_port *p, enum typec_role role)
{
	struct tcpm_port *port = typec_get_drvdata(p);
	int ret;

	mutex_lock(&port->swap_lock);
	mutex_lock(&port->lock);
	if (port->state != SRC_READY && port->state != SNK_READY) {
		ret = -EAGAIN;
		goto port_unlock;
	}
	if (role == port->vconn_role) {
		/* Already sourcing (or not sourcing) VCONN as requested */
		ret = 0;
		goto port_unlock;
	}
	port->upcoming_state = VCONN_SWAP_SEND;
	ret = tcpm_ams_start(port, VCONN_SWAP);
	if (ret == -EAGAIN) {
		port->upcoming_state = INVALID_STATE;
		goto port_unlock;
	}
	port->swap_status = 0;
	port->swap_pending = true;
	reinit_completion(&port->swap_complete);
	mutex_unlock(&port->lock);
	if (!wait_for_completion_timeout(&port->swap_complete,
				msecs_to_jiffies(PD_ROLE_SWAP_TIMEOUT)))
		ret = -ETIMEDOUT;
	else
		ret = port->swap_status;
	goto swap_unlock;
port_unlock:
	mutex_unlock(&port->lock);
swap_unlock:
	mutex_unlock(&port->swap_lock);
	return ret;
}
/*
 * tcpm_try_role - typec class callback to set the preferred role
 * (Try.SRC / Try.SNK behaviour).
 *
 * Forwards the preference to the low-level TCPC if it implements
 * try_role, records it on success, and restarts the try counters.
 */
static int tcpm_try_role(struct typec_port *p, int role)
{
	struct tcpm_port *port = typec_get_drvdata(p);
	struct tcpc_dev *tcpc = port->tcpc;
	int err = 0;

	mutex_lock(&port->lock);
	if (tcpc->try_role)
		err = tcpc->try_role(tcpc, role);
	if (err == 0)
		port->try_role = role;
	/* Restart the Try.SRC/Try.SNK bookkeeping from scratch */
	port->try_src_count = 0;
	port->try_snk_count = 0;
	mutex_unlock(&port->lock);

	return err;
}
/*
 * tcpm_pps_set_op_curr - request a new PPS operating current (mA).
 *
 * Validates that a PPS contract is active, the port is a ready sink, and
 * the requested current stays within the advertised maximum while still
 * meeting the configured minimum sink power.  Triggers a new PPS
 * capabilities negotiation and waits for its result.
 */
static int tcpm_pps_set_op_curr(struct tcpm_port *port, u16 req_op_curr)
{
	unsigned int target_mw;
	int ret;

	mutex_lock(&port->swap_lock);
	mutex_lock(&port->lock);
	if (!port->pps_data.active) {
		ret = -EOPNOTSUPP;
		goto port_unlock;
	}
	if (port->state != SNK_READY) {
		ret = -EAGAIN;
		goto port_unlock;
	}
	if (req_op_curr > port->pps_data.max_curr) {
		ret = -EINVAL;
		goto port_unlock;
	}
	/* mA * mV / 1000 == mW; must still satisfy the sink's minimum power */
	target_mw = (req_op_curr * port->supply_voltage) / 1000;
	if (target_mw < port->operating_snk_mw) {
		ret = -EINVAL;
		goto port_unlock;
	}
	port->upcoming_state = SNK_NEGOTIATE_PPS_CAPABILITIES;
	ret = tcpm_ams_start(port, POWER_NEGOTIATION);
	if (ret == -EAGAIN) {
		port->upcoming_state = INVALID_STATE;
		goto port_unlock;
	}
	/* Round down operating current to align with PPS valid steps */
	req_op_curr = req_op_curr - (req_op_curr % RDO_PROG_CURR_MA_STEP);
	reinit_completion(&port->pps_complete);
	port->pps_data.req_op_curr = req_op_curr;
	port->pps_status = 0;
	port->pps_pending = true;
	mutex_unlock(&port->lock);
	if (!wait_for_completion_timeout(&port->pps_complete,
				msecs_to_jiffies(PD_PPS_CTRL_TIMEOUT)))
		ret = -ETIMEDOUT;
	else
		ret = port->pps_status;
	goto swap_unlock;
port_unlock:
	mutex_unlock(&port->lock);
swap_unlock:
	mutex_unlock(&port->swap_lock);
	return ret;
}
/*
 * tcpm_pps_set_out_volt - request a new PPS output voltage (mV).
 *
 * Mirror image of tcpm_pps_set_op_curr(): checks that PPS is active, the
 * sink is ready and the resulting power still meets operating_snk_mw, then
 * kicks off a PPS renegotiation and waits for the outcome.
 */
static int tcpm_pps_set_out_volt(struct tcpm_port *port, u16 req_out_volt)
{
	unsigned int target_mw;
	int ret;

	mutex_lock(&port->swap_lock);
	mutex_lock(&port->lock);
	if (!port->pps_data.active) {
		ret = -EOPNOTSUPP;
		goto port_unlock;
	}
	if (port->state != SNK_READY) {
		ret = -EAGAIN;
		goto port_unlock;
	}
	/* mA * mV / 1000 == mW; must still satisfy the sink's minimum power */
	target_mw = (port->current_limit * req_out_volt) / 1000;
	if (target_mw < port->operating_snk_mw) {
		ret = -EINVAL;
		goto port_unlock;
	}
	port->upcoming_state = SNK_NEGOTIATE_PPS_CAPABILITIES;
	ret = tcpm_ams_start(port, POWER_NEGOTIATION);
	if (ret == -EAGAIN) {
		port->upcoming_state = INVALID_STATE;
		goto port_unlock;
	}
	/* Round down output voltage to align with PPS valid steps */
	req_out_volt = req_out_volt - (req_out_volt % RDO_PROG_VOLT_MV_STEP);
	reinit_completion(&port->pps_complete);
	port->pps_data.req_out_volt = req_out_volt;
	port->pps_status = 0;
	port->pps_pending = true;
	mutex_unlock(&port->lock);
	if (!wait_for_completion_timeout(&port->pps_complete,
				msecs_to_jiffies(PD_PPS_CTRL_TIMEOUT)))
		ret = -ETIMEDOUT;
	else
		ret = port->pps_status;
	goto swap_unlock;
port_unlock:
	mutex_unlock(&port->lock);
swap_unlock:
	mutex_unlock(&port->swap_lock);
	return ret;
}
/*
 * tcpm_pps_activate - switch between a PPS (APDO) contract and a standard
 * fixed-PDO contract.
 *
 * @activate: true to enter PPS mode, false to fall back to a fixed PDO.
 * Returns 0 on success, -EOPNOTSUPP when the partner offers no PPS,
 * -EAGAIN when the sink is not ready, or the negotiation status/timeout.
 */
static int tcpm_pps_activate(struct tcpm_port *port, bool activate)
{
	int ret = 0;

	mutex_lock(&port->swap_lock);
	mutex_lock(&port->lock);
	if (!port->pps_data.supported) {
		ret = -EOPNOTSUPP;
		goto port_unlock;
	}
	/* Trying to deactivate PPS when already deactivated so just bail */
	if (!port->pps_data.active && !activate)
		goto port_unlock;
	if (port->state != SNK_READY) {
		ret = -EAGAIN;
		goto port_unlock;
	}
	if (activate)
		port->upcoming_state = SNK_NEGOTIATE_PPS_CAPABILITIES;
	else
		port->upcoming_state = SNK_NEGOTIATE_CAPABILITIES;
	ret = tcpm_ams_start(port, POWER_NEGOTIATION);
	if (ret == -EAGAIN) {
		port->upcoming_state = INVALID_STATE;
		goto port_unlock;
	}
	reinit_completion(&port->pps_complete);
	port->pps_status = 0;
	port->pps_pending = true;
	/* Trigger PPS request or move back to standard PDO contract */
	if (activate) {
		/* Start from the currently negotiated voltage/current */
		port->pps_data.req_out_volt = port->supply_voltage;
		port->pps_data.req_op_curr = port->current_limit;
	}
	mutex_unlock(&port->lock);
	if (!wait_for_completion_timeout(&port->pps_complete,
				msecs_to_jiffies(PD_PPS_CTRL_TIMEOUT)))
		ret = -ETIMEDOUT;
	else
		ret = port->pps_status;
	goto swap_unlock;
port_unlock:
	mutex_unlock(&port->lock);
swap_unlock:
	mutex_unlock(&port->swap_lock);
	return ret;
}
/*
 * tcpm_init - (re)initialize the TCPC hardware and the port state machine.
 *
 * Caller must hold port->lock.  Resets the port, samples VBUS/vsafe0v and
 * the CC lines, and finally forces a PORT_RESET so the partner sees a
 * clean disconnect before normal operation begins.
 */
static void tcpm_init(struct tcpm_port *port)
{
	enum typec_cc_status cc1, cc2;

	port->tcpc->init(port->tcpc);
	tcpm_reset_port(port);
	/*
	 * XXX
	 * Should possibly wait for VBUS to settle if it was enabled locally
	 * since tcpm_reset_port() will disable VBUS.
	 */
	port->vbus_present = port->tcpc->get_vbus(port->tcpc);
	if (port->vbus_present)
		port->vbus_never_low = true;
	/*
	 * 1. When vbus_present is true, voltage on VBUS is already at VSAFE5V.
	 * So implicitly vbus_vsafe0v = false.
	 *
	 * 2. When vbus_present is false and TCPC does NOT support querying
	 * vsafe0v status, then, it's best to assume vbus is at VSAFE0V i.e.
	 * vbus_vsafe0v is true.
	 *
	 * 3. When vbus_present is false and TCPC does support querying vsafe0v,
	 * then, query tcpc for vsafe0v status.
	 */
	if (port->vbus_present)
		port->vbus_vsafe0v = false;
	else if (!port->tcpc->is_vbus_vsafe0v)
		port->vbus_vsafe0v = true;
	else
		port->vbus_vsafe0v = port->tcpc->is_vbus_vsafe0v(port->tcpc);
	tcpm_set_state(port, tcpm_default_state(port), 0);
	if (port->tcpc->get_cc(port->tcpc, &cc1, &cc2) == 0)
		_tcpm_cc_change(port, cc1, cc2);
	/*
	 * Some adapters need a clean slate at startup, and won't recover
	 * otherwise. So do not try to be fancy and force a clean disconnect.
	 */
	tcpm_set_state(port, PORT_RESET, 0);
}
/*
 * tcpm_port_type_set - typec class callback to change the port type
 * (DRP / source-only / sink-only).
 *
 * If the new fixed type contradicts the roles of a live connection, a
 * PORT_RESET is scheduled so the connection is renegotiated.
 */
static int tcpm_port_type_set(struct typec_port *p, enum typec_port_type type)
{
	struct tcpm_port *port = typec_get_drvdata(p);

	mutex_lock(&port->lock);
	if (type == port->port_type)
		goto port_unlock;
	port->port_type = type;
	if (!port->connected) {
		/* No partner: simply restart toggling with the new type */
		tcpm_set_state(port, PORT_RESET, 0);
	} else if (type == TYPEC_PORT_SNK) {
		if (!(port->pwr_role == TYPEC_SINK &&
		      port->data_role == TYPEC_DEVICE))
			tcpm_set_state(port, PORT_RESET, 0);
	} else if (type == TYPEC_PORT_SRC) {
		if (!(port->pwr_role == TYPEC_SOURCE &&
		      port->data_role == TYPEC_HOST))
			tcpm_set_state(port, PORT_RESET, 0);
	}
port_unlock:
	mutex_unlock(&port->lock);
	return 0;
}
/* Operations backing the Type-C class (sysfs role/type controls) */
static const struct typec_operations tcpm_ops = {
	.try_role = tcpm_try_role,
	.dr_set = tcpm_dr_set,
	.pr_set = tcpm_pr_set,
	.vconn_set = tcpm_vconn_set,
	.port_type_set = tcpm_port_type_set
};
/*
 * tcpm_tcpc_reset - re-run full TCPC + port initialization.
 *
 * Called by low-level drivers after the TCPC has been reset; any active
 * PD connection is lost.
 */
void tcpm_tcpc_reset(struct tcpm_port *port)
{
	mutex_lock(&port->lock);
	/* XXX: Maintain PD connection if possible? */
	tcpm_init(port);
	mutex_unlock(&port->lock);
}
EXPORT_SYMBOL_GPL(tcpm_tcpc_reset);
/*
 * Unregister the port's PD object and its source/sink capabilities.
 * Pointers are cleared afterwards, so a second call only touches what is
 * still registered (NOTE(review): relies on the unregister helpers
 * accepting NULL/error pointers - confirm against the PD core).
 */
static void tcpm_port_unregister_pd(struct tcpm_port *port)
{
	usb_power_delivery_unregister_capabilities(port->port_sink_caps);
	port->port_sink_caps = NULL;
	usb_power_delivery_unregister_capabilities(port->port_source_caps);
	port->port_source_caps = NULL;
	usb_power_delivery_unregister(port->pd);
	port->pd = NULL;
}
/*
 * tcpm_port_register_pd - expose the port's PDOs through the USB PD class.
 *
 * Registers a usb_power_delivery object plus source and/or sink capability
 * sets built from src_pdo/snk_pdo.  A port with no PDOs at all is a valid
 * no-op.  On any failure, everything registered so far is torn down.
 */
static int tcpm_port_register_pd(struct tcpm_port *port)
{
	struct usb_power_delivery_desc desc = { port->typec_caps.pd_revision };
	struct usb_power_delivery_capabilities_desc caps = { };
	struct usb_power_delivery_capabilities *cap;
	int ret;

	if (!port->nr_src_pdo && !port->nr_snk_pdo)
		return 0;
	port->pd = usb_power_delivery_register(port->dev, &desc);
	if (IS_ERR(port->pd)) {
		ret = PTR_ERR(port->pd);
		goto err_unregister;
	}
	if (port->nr_src_pdo) {
		/* Copy the PDOs and zero-fill the unused tail of caps.pdo */
		memcpy_and_pad(caps.pdo, sizeof(caps.pdo), port->src_pdo,
			       port->nr_src_pdo * sizeof(u32), 0);
		caps.role = TYPEC_SOURCE;
		cap = usb_power_delivery_register_capabilities(port->pd, &caps);
		if (IS_ERR(cap)) {
			ret = PTR_ERR(cap);
			goto err_unregister;
		}
		port->port_source_caps = cap;
	}
	if (port->nr_snk_pdo) {
		memcpy_and_pad(caps.pdo, sizeof(caps.pdo), port->snk_pdo,
			       port->nr_snk_pdo * sizeof(u32), 0);
		caps.role = TYPEC_SINK;
		cap = usb_power_delivery_register_capabilities(port->pd, &caps);
		if (IS_ERR(cap)) {
			ret = PTR_ERR(cap);
			goto err_unregister;
		}
		port->port_sink_caps = cap;
	}
	return 0;
err_unregister:
	tcpm_port_unregister_pd(port);
	return ret;
}
/*
 * tcpm_fw_get_caps - parse the port's capabilities from firmware (DT/ACPI).
 *
 * Fills in port type, PD support, source/sink PDOs (or the non-PD Rp
 * value), operating sink power, FRS current and optional sink VDOs.
 * Returns 0 on success or a negative errno on malformed/missing
 * properties.
 */
static int tcpm_fw_get_caps(struct tcpm_port *port,
			    struct fwnode_handle *fwnode)
{
	const char *opmode_str;
	int ret;
	u32 mw, frs_current;

	if (!fwnode)
		return -EINVAL;
	/*
	 * This fwnode has a "compatible" property, but is never populated as a
	 * struct device. Instead we simply parse it to read the properties.
	 * This it breaks fw_devlink=on. To maintain backward compatibility
	 * with existing DT files, we work around this by deleting any
	 * fwnode_links to/from this fwnode.
	 */
	fw_devlink_purge_absent_suppliers(fwnode);
	ret = typec_get_fw_cap(&port->typec_caps, fwnode);
	if (ret < 0)
		return ret;
	port->port_type = port->typec_caps.type;
	port->pd_supported = !fwnode_property_read_bool(fwnode, "pd-disable");
	port->slow_charger_loop = fwnode_property_read_bool(fwnode, "slow-charger-loop");
	if (port->port_type == TYPEC_PORT_SNK)
		goto sink;
	/* Get Source PDOs for the PD port or Source Rp value for the non-PD port */
	if (port->pd_supported) {
		ret = fwnode_property_count_u32(fwnode, "source-pdos");
		if (ret == 0)
			return -EINVAL;
		else if (ret < 0)
			return ret;
		port->nr_src_pdo = min(ret, PDO_MAX_OBJECTS);
		ret = fwnode_property_read_u32_array(fwnode, "source-pdos",
						     port->src_pdo, port->nr_src_pdo);
		if (ret)
			return ret;
		ret = tcpm_validate_caps(port, port->src_pdo, port->nr_src_pdo);
		if (ret)
			return ret;
	} else {
		/* Non-PD source: derive the Rp pull-up from the opmode string */
		ret = fwnode_property_read_string(fwnode, "typec-power-opmode", &opmode_str);
		if (ret)
			return ret;
		ret = typec_find_pwr_opmode(opmode_str);
		if (ret < 0)
			return ret;
		port->src_rp = tcpm_pwr_opmode_to_rp(ret);
	}
	if (port->port_type == TYPEC_PORT_SRC)
		return 0;
sink:
	port->self_powered = fwnode_property_read_bool(fwnode, "self-powered");
	if (!port->pd_supported)
		return 0;
	/* Get sink pdos */
	ret = fwnode_property_count_u32(fwnode, "sink-pdos");
	if (ret <= 0)
		return -EINVAL;
	port->nr_snk_pdo = min(ret, PDO_MAX_OBJECTS);
	ret = fwnode_property_read_u32_array(fwnode, "sink-pdos",
					     port->snk_pdo, port->nr_snk_pdo);
	if ((ret < 0) || tcpm_validate_caps(port, port->snk_pdo,
					    port->nr_snk_pdo))
		return -EINVAL;
	if (fwnode_property_read_u32(fwnode, "op-sink-microwatt", &mw) < 0)
		return -EINVAL;
	port->operating_snk_mw = mw / 1000;
	/* FRS can only be supported by DRP ports */
	if (port->port_type == TYPEC_PORT_DRP) {
		ret = fwnode_property_read_u32(fwnode, "new-source-frs-typec-current",
					       &frs_current);
		if (ret >= 0 && frs_current <= FRS_5V_3A)
			port->new_source_frs_current = frs_current;
	}
	/* sink-vdos is optional */
	ret = fwnode_property_count_u32(fwnode, "sink-vdos");
	if (ret < 0)
		ret = 0;
	port->nr_snk_vdo = min(ret, VDO_MAX_OBJECTS);
	if (port->nr_snk_vdo) {
		ret = fwnode_property_read_u32_array(fwnode, "sink-vdos",
						     port->snk_vdo,
						     port->nr_snk_vdo);
		if (ret < 0)
			return ret;
	}
	/* If sink-vdos is found, sink-vdos-v1 is expected for backward compatibility. */
	if (port->nr_snk_vdo) {
		ret = fwnode_property_count_u32(fwnode, "sink-vdos-v1");
		if (ret < 0)
			return ret;
		else if (ret == 0)
			return -ENODATA;
		port->nr_snk_vdo_v1 = min(ret, VDO_MAX_OBJECTS);
		ret = fwnode_property_read_u32_array(fwnode, "sink-vdos-v1",
						     port->snk_vdo_v1,
						     port->nr_snk_vdo_v1);
		if (ret < 0)
			return ret;
	}
	return 0;
}
/* Power Supply access to expose source power information */
/* Values reported through POWER_SUPPLY_PROP_ONLINE */
enum tcpm_psy_online_states {
	TCPM_PSY_OFFLINE = 0,		/* no charge path active */
	TCPM_PSY_FIXED_ONLINE,		/* charging from a fixed PDO contract */
	TCPM_PSY_PROG_ONLINE,		/* charging from a PPS (programmable) contract */
};
/* Properties exposed by the tcpm source power supply */
static enum power_supply_property tcpm_psy_props[] = {
	POWER_SUPPLY_PROP_USB_TYPE,
	POWER_SUPPLY_PROP_ONLINE,
	POWER_SUPPLY_PROP_VOLTAGE_MIN,
	POWER_SUPPLY_PROP_VOLTAGE_MAX,
	POWER_SUPPLY_PROP_VOLTAGE_NOW,
	POWER_SUPPLY_PROP_CURRENT_MAX,
	POWER_SUPPLY_PROP_CURRENT_NOW,
};
/*
 * Report the online state: offline when not charging from VBUS,
 * otherwise fixed vs. programmable depending on the active contract.
 */
static int tcpm_psy_get_online(struct tcpm_port *port,
			       union power_supply_propval *val)
{
	int online = TCPM_PSY_OFFLINE;

	if (port->vbus_charge)
		online = port->pps_data.active ? TCPM_PSY_PROG_ONLINE
					       : TCPM_PSY_FIXED_ONLINE;
	val->intval = online;
	return 0;
}
/* Minimum voltage in uV: PPS range minimum, else the fixed supply voltage */
static int tcpm_psy_get_voltage_min(struct tcpm_port *port,
				    union power_supply_propval *val)
{
	unsigned int mv = port->pps_data.active ? port->pps_data.min_volt
						: port->supply_voltage;

	val->intval = mv * 1000;
	return 0;
}
/* Maximum voltage in uV: PPS range maximum, else the fixed supply voltage */
static int tcpm_psy_get_voltage_max(struct tcpm_port *port,
				    union power_supply_propval *val)
{
	unsigned int mv = port->pps_data.active ? port->pps_data.max_volt
						: port->supply_voltage;

	val->intval = mv * 1000;
	return 0;
}
/* Currently negotiated supply voltage, reported in microvolts */
static int tcpm_psy_get_voltage_now(struct tcpm_port *port,
				    union power_supply_propval *val)
{
	unsigned int uv = port->supply_voltage * 1000;

	val->intval = uv;
	return 0;
}
/* Maximum current in uA: PPS maximum if active, else the contract limit */
static int tcpm_psy_get_current_max(struct tcpm_port *port,
				    union power_supply_propval *val)
{
	unsigned int ma = port->pps_data.active ? port->pps_data.max_curr
						: port->current_limit;

	val->intval = ma * 1000;
	return 0;
}
/* Currently negotiated current limit, reported in microamps */
static int tcpm_psy_get_current_now(struct tcpm_port *port,
				    union power_supply_propval *val)
{
	unsigned int ua = port->current_limit * 1000;

	val->intval = ua;
	return 0;
}
/*
 * Report the highest power (uW) advertised by any of the partner's
 * fixed PDOs (mV * mA == uW).
 */
static int tcpm_psy_get_input_power_limit(struct tcpm_port *port,
					  union power_supply_propval *val)
{
	unsigned int best_uw = 0;
	unsigned int i;

	for (i = 0; i < port->nr_source_caps; i++) {
		u32 pdo = port->source_caps[i];
		unsigned int uw;

		if (pdo_type(pdo) != PDO_TYPE_FIXED)
			continue;
		uw = pdo_fixed_voltage(pdo) * pdo_max_current(pdo);
		if (uw > best_uw)
			best_uw = uw;
	}
	val->intval = best_uw;
	return 0;
}
/*
 * power_supply get_property callback: dispatch to the per-property
 * helpers above.  Unknown properties return -EINVAL.
 */
static int tcpm_psy_get_prop(struct power_supply *psy,
			     enum power_supply_property psp,
			     union power_supply_propval *val)
{
	struct tcpm_port *port = power_supply_get_drvdata(psy);
	int ret = 0;

	switch (psp) {
	case POWER_SUPPLY_PROP_USB_TYPE:
		val->intval = port->usb_type;
		break;
	case POWER_SUPPLY_PROP_ONLINE:
		ret = tcpm_psy_get_online(port, val);
		break;
	case POWER_SUPPLY_PROP_VOLTAGE_MIN:
		ret = tcpm_psy_get_voltage_min(port, val);
		break;
	case POWER_SUPPLY_PROP_VOLTAGE_MAX:
		ret = tcpm_psy_get_voltage_max(port, val);
		break;
	case POWER_SUPPLY_PROP_VOLTAGE_NOW:
		ret = tcpm_psy_get_voltage_now(port, val);
		break;
	case POWER_SUPPLY_PROP_CURRENT_MAX:
		ret = tcpm_psy_get_current_max(port, val);
		break;
	case POWER_SUPPLY_PROP_CURRENT_NOW:
		ret = tcpm_psy_get_current_now(port, val);
		break;
	case POWER_SUPPLY_PROP_INPUT_POWER_LIMIT:
		/* Helper always returns 0, so its result is deliberately unused */
		tcpm_psy_get_input_power_limit(port, val);
		break;
	default:
		ret = -EINVAL;
		break;
	}
	return ret;
}
/*
 * Writing ONLINE toggles between a fixed-PDO contract (FIXED_ONLINE)
 * and a PPS contract (PROG_ONLINE); any other value is rejected.
 */
static int tcpm_psy_set_online(struct tcpm_port *port,
			       const union power_supply_propval *val)
{
	if (val->intval == TCPM_PSY_FIXED_ONLINE)
		return tcpm_pps_activate(port, false);
	if (val->intval == TCPM_PSY_PROG_ONLINE)
		return tcpm_pps_activate(port, true);

	return -EINVAL;
}
/*
 * power_supply set_property callback: user-space control of the PPS
 * contract (online state, output voltage, operating current).
 * Voltage/current values arrive in uV/uA and are converted to mV/mA.
 */
static int tcpm_psy_set_prop(struct power_supply *psy,
			     enum power_supply_property psp,
			     const union power_supply_propval *val)
{
	struct tcpm_port *port = power_supply_get_drvdata(psy);
	int ret;

	/*
	 * All the properties below are related to USB PD. The check needs to be
	 * property specific when a non-pd related property is added.
	 */
	if (!port->pd_supported)
		return -EOPNOTSUPP;
	switch (psp) {
	case POWER_SUPPLY_PROP_ONLINE:
		ret = tcpm_psy_set_online(port, val);
		break;
	case POWER_SUPPLY_PROP_VOLTAGE_NOW:
		ret = tcpm_pps_set_out_volt(port, val->intval / 1000);
		break;
	case POWER_SUPPLY_PROP_CURRENT_NOW:
		if (val->intval > port->pps_data.max_curr * 1000)
			ret = -EINVAL;
		else
			ret = tcpm_pps_set_op_curr(port, val->intval / 1000);
		break;
	default:
		ret = -EINVAL;
		break;
	}
	/* NOTE(review): uevent is emitted even when the set failed - confirm intended */
	power_supply_changed(port->psy);
	return ret;
}
/* Only the PPS control knobs may be written from user space */
static int tcpm_psy_prop_writeable(struct power_supply *psy,
				   enum power_supply_property psp)
{
	return psp == POWER_SUPPLY_PROP_ONLINE ||
	       psp == POWER_SUPPLY_PROP_VOLTAGE_NOW ||
	       psp == POWER_SUPPLY_PROP_CURRENT_NOW;
}
/* USB charger types the tcpm power supply can report */
static enum power_supply_usb_type tcpm_psy_usb_types[] = {
	POWER_SUPPLY_USB_TYPE_C,
	POWER_SUPPLY_USB_TYPE_PD,
	POWER_SUPPLY_USB_TYPE_PD_PPS,
};

/* Prefix for the per-port power supply name ("tcpm-source-psy-<dev>") */
static const char *tcpm_psy_name_prefix = "tcpm-source-psy-";
/*
 * devm_tcpm_psy_register - register the per-port source power supply.
 *
 * Builds the supply name from the prefix and the device name, fills in the
 * power_supply_desc callbacks and registers it device-managed (so it is
 * torn down automatically with the device).
 */
static int devm_tcpm_psy_register(struct tcpm_port *port)
{
	struct power_supply_config psy_cfg = {};
	const char *port_dev_name = dev_name(port->dev);
	size_t psy_name_len = strlen(tcpm_psy_name_prefix) +
				     strlen(port_dev_name) + 1;
	char *psy_name;

	psy_cfg.drv_data = port;
	psy_cfg.fwnode = dev_fwnode(port->dev);
	psy_name = devm_kzalloc(port->dev, psy_name_len, GFP_KERNEL);
	if (!psy_name)
		return -ENOMEM;
	snprintf(psy_name, psy_name_len, "%s%s", tcpm_psy_name_prefix,
		 port_dev_name);
	port->psy_desc.name = psy_name;
	port->psy_desc.type = POWER_SUPPLY_TYPE_USB;
	port->psy_desc.usb_types = tcpm_psy_usb_types;
	port->psy_desc.num_usb_types = ARRAY_SIZE(tcpm_psy_usb_types);
	port->psy_desc.properties = tcpm_psy_props;
	port->psy_desc.num_properties = ARRAY_SIZE(tcpm_psy_props);
	port->psy_desc.get_property = tcpm_psy_get_prop;
	port->psy_desc.set_property = tcpm_psy_set_prop;
	port->psy_desc.property_is_writeable = tcpm_psy_prop_writeable;
	/* Until a PD contract exists, report plain Type-C charging */
	port->usb_type = POWER_SUPPLY_USB_TYPE_C;
	port->psy = devm_power_supply_register(port->dev, &port->psy_desc,
					       &psy_cfg);
	return PTR_ERR_OR_ZERO(port->psy);
}
/* hrtimer callback: kick the main state machine worker, if still registered */
static enum hrtimer_restart state_machine_timer_handler(struct hrtimer *timer)
{
	struct tcpm_port *tcpm = container_of(timer, struct tcpm_port, state_machine_timer);

	if (!tcpm->registered)
		return HRTIMER_NORESTART;

	kthread_queue_work(tcpm->wq, &tcpm->state_machine);
	return HRTIMER_NORESTART;
}
/* hrtimer callback: kick the VDM state machine worker, if still registered */
static enum hrtimer_restart vdm_state_machine_timer_handler(struct hrtimer *timer)
{
	struct tcpm_port *tcpm = container_of(timer, struct tcpm_port, vdm_state_machine_timer);

	if (!tcpm->registered)
		return HRTIMER_NORESTART;

	kthread_queue_work(tcpm->wq, &tcpm->vdm_state_machine);
	return HRTIMER_NORESTART;
}
/* hrtimer callback: queue the FRS-enable retry work, if still registered */
static enum hrtimer_restart enable_frs_timer_handler(struct hrtimer *timer)
{
	struct tcpm_port *tcpm = container_of(timer, struct tcpm_port, enable_frs_timer);

	if (!tcpm->registered)
		return HRTIMER_NORESTART;

	kthread_queue_work(tcpm->wq, &tcpm->enable_frs);
	return HRTIMER_NORESTART;
}
/* hrtimer callback: queue the Discover Identity retry work, if registered */
static enum hrtimer_restart send_discover_timer_handler(struct hrtimer *timer)
{
	struct tcpm_port *tcpm = container_of(timer, struct tcpm_port, send_discover_timer);

	if (!tcpm->registered)
		return HRTIMER_NORESTART;

	kthread_queue_work(tcpm->wq, &tcpm->send_discover_work);
	return HRTIMER_NORESTART;
}
/*
 * tcpm_register_port - create and start a TCPM port for a low-level TCPC.
 *
 * Validates the mandatory tcpc_dev callbacks, allocates the port, sets up
 * the worker thread, works, timers and completions, parses firmware
 * capabilities, registers the power supply / PD objects / Type-C port,
 * and finally runs the first tcpm_init().  On failure the partially
 * created resources are unwound in reverse order.
 *
 * Returns the new port or an ERR_PTR().
 */
struct tcpm_port *tcpm_register_port(struct device *dev, struct tcpc_dev *tcpc)
{
	struct tcpm_port *port;
	int err;

	/* All of these TCPC callbacks are mandatory */
	if (!dev || !tcpc ||
	    !tcpc->get_vbus || !tcpc->set_cc || !tcpc->get_cc ||
	    !tcpc->set_polarity || !tcpc->set_vconn || !tcpc->set_vbus ||
	    !tcpc->set_pd_rx || !tcpc->set_roles || !tcpc->pd_transmit)
		return ERR_PTR(-EINVAL);
	port = devm_kzalloc(dev, sizeof(*port), GFP_KERNEL);
	if (!port)
		return ERR_PTR(-ENOMEM);
	port->dev = dev;
	port->tcpc = tcpc;
	mutex_init(&port->lock);
	mutex_init(&port->swap_lock);
	port->wq = kthread_create_worker(0, dev_name(dev));
	if (IS_ERR(port->wq))
		return ERR_CAST(port->wq);
	/* PD timing is tight: run the worker with realtime (FIFO) priority */
	sched_set_fifo(port->wq->task);
	kthread_init_work(&port->state_machine, tcpm_state_machine_work);
	kthread_init_work(&port->vdm_state_machine, vdm_state_machine_work);
	kthread_init_work(&port->event_work, tcpm_pd_event_handler);
	kthread_init_work(&port->enable_frs, tcpm_enable_frs_work);
	kthread_init_work(&port->send_discover_work, tcpm_send_discover_work);
	hrtimer_init(&port->state_machine_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	port->state_machine_timer.function = state_machine_timer_handler;
	hrtimer_init(&port->vdm_state_machine_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	port->vdm_state_machine_timer.function = vdm_state_machine_timer_handler;
	hrtimer_init(&port->enable_frs_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	port->enable_frs_timer.function = enable_frs_timer_handler;
	hrtimer_init(&port->send_discover_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	port->send_discover_timer.function = send_discover_timer_handler;
	spin_lock_init(&port->pd_event_lock);
	init_completion(&port->tx_complete);
	init_completion(&port->swap_complete);
	init_completion(&port->pps_complete);
	tcpm_debugfs_init(port);
	err = tcpm_fw_get_caps(port, tcpc->fwnode);
	if (err < 0)
		goto out_destroy_wq;
	port->try_role = port->typec_caps.prefer_role;
	port->typec_caps.fwnode = tcpc->fwnode;
	port->typec_caps.revision = 0x0120;	/* Type-C spec release 1.2 */
	port->typec_caps.pd_revision = 0x0300;	/* USB-PD spec release 3.0 */
	port->typec_caps.svdm_version = SVDM_VER_2_0;
	port->typec_caps.driver_data = port;
	port->typec_caps.ops = &tcpm_ops;
	port->typec_caps.orientation_aware = 1;
	port->partner_desc.identity = &port->partner_ident;
	port->port_type = port->typec_caps.type;
	port->role_sw = usb_role_switch_get(port->dev);
	if (!port->role_sw)
		port->role_sw = fwnode_usb_role_switch_get(tcpc->fwnode);
	if (IS_ERR(port->role_sw)) {
		err = PTR_ERR(port->role_sw);
		goto out_destroy_wq;
	}
	err = devm_tcpm_psy_register(port);
	if (err)
		goto out_role_sw_put;
	power_supply_changed(port->psy);
	err = tcpm_port_register_pd(port);
	if (err)
		goto out_role_sw_put;
	port->typec_caps.pd = port->pd;
	port->typec_port = typec_register_port(port->dev, &port->typec_caps);
	if (IS_ERR(port->typec_port)) {
		err = PTR_ERR(port->typec_port);
		goto out_unregister_pd;
	}
	typec_port_register_altmodes(port->typec_port,
				     &tcpm_altmode_ops, port,
				     port->port_altmode, ALTMODE_DISCOVERY_MAX);
	/* From here on the timer handlers may queue work */
	port->registered = true;
	mutex_lock(&port->lock);
	tcpm_init(port);
	mutex_unlock(&port->lock);
	tcpm_log(port, "%s: registered", dev_name(dev));
	return port;
out_unregister_pd:
	tcpm_port_unregister_pd(port);
out_role_sw_put:
	usb_role_switch_put(port->role_sw);
out_destroy_wq:
	tcpm_debugfs_exit(port);
	kthread_destroy_worker(port->wq);
	return ERR_PTR(err);
}
EXPORT_SYMBOL_GPL(tcpm_register_port);
/*
 * tcpm_unregister_port - tear down a port created by tcpm_register_port().
 *
 * Clears 'registered' first so the hrtimer handlers stop queueing work,
 * then destroys the worker before cancelling the timers, resets the port
 * hardware state and unregisters the PD/Type-C/role-switch objects.
 */
void tcpm_unregister_port(struct tcpm_port *port)
{
	int i;

	port->registered = false;
	kthread_destroy_worker(port->wq);
	hrtimer_cancel(&port->send_discover_timer);
	hrtimer_cancel(&port->enable_frs_timer);
	hrtimer_cancel(&port->vdm_state_machine_timer);
	hrtimer_cancel(&port->state_machine_timer);
	tcpm_reset_port(port);
	tcpm_port_unregister_pd(port);
	for (i = 0; i < ARRAY_SIZE(port->port_altmode); i++)
		typec_unregister_altmode(port->port_altmode[i]);
	typec_unregister_port(port->typec_port);
	usb_role_switch_put(port->role_sw);
	tcpm_debugfs_exit(port);
}
EXPORT_SYMBOL_GPL(tcpm_unregister_port);
MODULE_AUTHOR("Guenter Roeck <[email protected]>");
MODULE_DESCRIPTION("USB Type-C Port Manager");
MODULE_LICENSE("GPL");
| linux-master | drivers/usb/typec/tcpm/tcpm.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (c) 2023, Linaro Ltd. All rights reserved.
*/
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/regulator/consumer.h>
#include <linux/slab.h>
#include <linux/usb/pd.h>
#include <linux/usb/tcpm.h>
#include "qcom_pmic_typec_pdphy.h"
/* Per-interrupt cookie: maps the Linux IRQ back to its owner and chip index */
struct pmic_typec_pdphy_irq_data {
	int virq;				/* chip-level IRQ index (PMIC_PDPHY_*_IRQ) */
	int irq;				/* Linux IRQ number */
	struct pmic_typec_pdphy *pmic_typec_pdphy;
};

/* Driver state for one PMIC PD PHY block */
struct pmic_typec_pdphy {
	struct device *dev;
	struct tcpm_port *tcpm_port;		/* TCPM port we report events to */
	struct regmap *regmap;
	u32 base;				/* register block base offset */
	unsigned int nr_irqs;
	struct pmic_typec_pdphy_irq_data *irq_data;
	struct work_struct reset_work;
	struct work_struct receive_work;
	struct regulator *vdd_pdphy;		/* PHY supply */
	spinlock_t lock; /* Register atomicity */
};
/*
 * First half of a PHY reset: stop any transmission and clear the RX
 * frame filter.  Errors are only logged; the caller cannot recover.
 */
static void qcom_pmic_typec_pdphy_reset_on(struct pmic_typec_pdphy *pmic_typec_pdphy)
{
	struct device *dev = pmic_typec_pdphy->dev;
	int ret;

	/* Terminate TX */
	ret = regmap_write(pmic_typec_pdphy->regmap,
			   pmic_typec_pdphy->base + USB_PDPHY_TX_CONTROL_REG, 0);
	if (ret)
		goto err;
	ret = regmap_write(pmic_typec_pdphy->regmap,
			   pmic_typec_pdphy->base + USB_PDPHY_FRAME_FILTER_REG, 0);
	if (ret)
		goto err;
	return;
err:
	dev_err(dev, "pd_reset_on error\n");
}
/*
 * Second half of a PHY reset: re-enable reception of SOP and Hard Reset
 * frames in the RX frame filter.
 */
static void qcom_pmic_typec_pdphy_reset_off(struct pmic_typec_pdphy *pmic_typec_pdphy)
{
	struct device *dev = pmic_typec_pdphy->dev;
	int ret;

	ret = regmap_write(pmic_typec_pdphy->regmap,
			   pmic_typec_pdphy->base + USB_PDPHY_FRAME_FILTER_REG,
			   FRAME_FILTER_EN_SOP | FRAME_FILTER_EN_HARD_RESET);
	if (ret)
		dev_err(dev, "pd_reset_off error\n");
}
/*
 * Work scheduled from the SIG_RX (Hard Reset received) interrupt:
 * cycle the PHY reset under the register lock, then tell TCPM that a
 * Hard Reset arrived.
 */
static void qcom_pmic_typec_pdphy_sig_reset_work(struct work_struct *work)
{
	struct pmic_typec_pdphy *pmic_typec_pdphy = container_of(work, struct pmic_typec_pdphy,
								 reset_work);
	unsigned long flags;

	spin_lock_irqsave(&pmic_typec_pdphy->lock, flags);
	qcom_pmic_typec_pdphy_reset_on(pmic_typec_pdphy);
	qcom_pmic_typec_pdphy_reset_off(pmic_typec_pdphy);
	spin_unlock_irqrestore(&pmic_typec_pdphy->lock, flags);
	tcpm_pd_hard_reset(pmic_typec_pdphy->tcpm_port);
}
/*
 * Clear the TX control register and read it back.  Caller must hold
 * pmic_typec_pdphy->lock.  Returns 0 on success or a regmap error.
 */
static int
qcom_pmic_typec_pdphy_clear_tx_control_reg(struct pmic_typec_pdphy *pmic_typec_pdphy)
{
	struct device *dev = pmic_typec_pdphy->dev;
	unsigned int val;
	int ret;

	/* Clear TX control register */
	ret = regmap_write(pmic_typec_pdphy->regmap,
			   pmic_typec_pdphy->base + USB_PDPHY_TX_CONTROL_REG, 0);
	if (ret)
		goto done;
	/* Perform readback to ensure sufficient delay for command to latch */
	ret = regmap_read(pmic_typec_pdphy->regmap,
			  pmic_typec_pdphy->base + USB_PDPHY_TX_CONTROL_REG, &val);
done:
	if (ret)
		dev_err(dev, "pd_clear_tx_control_reg: clear tx flag\n");
	return ret;
}
/*
 * Transmit a PD signal (e.g. Hard Reset / Cable Reset) rather than a
 * message payload.  Retry count depends on the negotiated PD revision
 * (nRetryCount is 2 in PD3.0, 3 before that).
 */
static int
qcom_pmic_typec_pdphy_pd_transmit_signal(struct pmic_typec_pdphy *pmic_typec_pdphy,
					 enum tcpm_transmit_type type,
					 unsigned int negotiated_rev)
{
	struct device *dev = pmic_typec_pdphy->dev;
	unsigned int val;
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&pmic_typec_pdphy->lock, flags);
	/* Clear TX control register */
	ret = qcom_pmic_typec_pdphy_clear_tx_control_reg(pmic_typec_pdphy);
	if (ret)
		goto done;
	val = TX_CONTROL_SEND_SIGNAL;
	if (negotiated_rev == PD_REV30)
		val |= TX_CONTROL_RETRY_COUNT(2);
	else
		val |= TX_CONTROL_RETRY_COUNT(3);
	/* Reset signals use a distinct frame type */
	if (type == TCPC_TX_CABLE_RESET || type == TCPC_TX_HARD_RESET)
		val |= TX_CONTROL_FRAME_TYPE(1);
	ret = regmap_write(pmic_typec_pdphy->regmap,
			   pmic_typec_pdphy->base + USB_PDPHY_TX_CONTROL_REG, val);
done:
	spin_unlock_irqrestore(&pmic_typec_pdphy->lock, flags);
	dev_vdbg(dev, "pd_transmit_signal: type %d negotiate_rev %d send %d\n",
		 type, negotiated_rev, ret);
	return ret;
}
/*
 * Transmit a PD message: stage the header and payload in the TX buffer,
 * program the total size, then kick off the send with the revision-
 * dependent retry count.  Fails with -EBUSY while an unacknowledged RX
 * message is pending.
 *
 * Fix: hdr_len/txbuf_len/txsize_len are now computed before the first
 * possible "goto done" - previously an early regmap failure reached the
 * dev_err() below with hdr_len/txbuf_len still uninitialized.
 */
static int
qcom_pmic_typec_pdphy_pd_transmit_payload(struct pmic_typec_pdphy *pmic_typec_pdphy,
					  enum tcpm_transmit_type type,
					  const struct pd_message *msg,
					  unsigned int negotiated_rev)
{
	struct device *dev = pmic_typec_pdphy->dev;
	unsigned int val, hdr_len, txbuf_len, txsize_len;
	unsigned long flags;
	int ret;

	/* Length bookkeeping is pure; do it before any fallible register I/O */
	hdr_len = sizeof(msg->header);
	txbuf_len = pd_header_cnt_le(msg->header) * 4;
	txsize_len = hdr_len + txbuf_len - 1;

	spin_lock_irqsave(&pmic_typec_pdphy->lock, flags);

	ret = regmap_read(pmic_typec_pdphy->regmap,
			  pmic_typec_pdphy->base + USB_PDPHY_RX_ACKNOWLEDGE_REG,
			  &val);
	if (ret)
		goto done;

	if (val) {
		dev_err(dev, "pd_transmit_payload: RX message pending\n");
		ret = -EBUSY;
		goto done;
	}

	/* Clear TX control register */
	ret = qcom_pmic_typec_pdphy_clear_tx_control_reg(pmic_typec_pdphy);
	if (ret)
		goto done;

	/* Write message header sizeof(u16) to USB_PDPHY_TX_BUFFER_HDR_REG */
	ret = regmap_bulk_write(pmic_typec_pdphy->regmap,
				pmic_typec_pdphy->base + USB_PDPHY_TX_BUFFER_HDR_REG,
				&msg->header, hdr_len);
	if (ret)
		goto done;

	/* Write payload to USB_PDPHY_TX_BUFFER_DATA_REG for txbuf_len */
	if (txbuf_len) {
		ret = regmap_bulk_write(pmic_typec_pdphy->regmap,
					pmic_typec_pdphy->base + USB_PDPHY_TX_BUFFER_DATA_REG,
					&msg->payload, txbuf_len);
		if (ret)
			goto done;
	}

	/* Write total length ((header + data) - 1) to USB_PDPHY_TX_SIZE_REG */
	ret = regmap_write(pmic_typec_pdphy->regmap,
			   pmic_typec_pdphy->base + USB_PDPHY_TX_SIZE_REG,
			   txsize_len);
	if (ret)
		goto done;

	/* Clear TX control register */
	ret = qcom_pmic_typec_pdphy_clear_tx_control_reg(pmic_typec_pdphy);
	if (ret)
		goto done;

	/* Initiate transmit with retry count as indicated by PD revision */
	val = TX_CONTROL_FRAME_TYPE(type) | TX_CONTROL_SEND_MSG;
	if (pd_header_rev(msg->header) == PD_REV30)
		val |= TX_CONTROL_RETRY_COUNT(2);
	else
		val |= TX_CONTROL_RETRY_COUNT(3);

	ret = regmap_write(pmic_typec_pdphy->regmap,
			   pmic_typec_pdphy->base + USB_PDPHY_TX_CONTROL_REG, val);

done:
	spin_unlock_irqrestore(&pmic_typec_pdphy->lock, flags);

	if (ret) {
		dev_err(dev, "pd_transmit_payload: hdr %*ph data %*ph ret %d\n",
			hdr_len, &msg->header, txbuf_len, &msg->payload, ret);
	}

	return ret;
}
/*
 * Entry point used by the TCPM glue: a non-NULL @msg is sent as a message
 * payload, a NULL @msg means a bare signal (e.g. Hard Reset).
 */
int qcom_pmic_typec_pdphy_pd_transmit(struct pmic_typec_pdphy *pmic_typec_pdphy,
				      enum tcpm_transmit_type type,
				      const struct pd_message *msg,
				      unsigned int negotiated_rev)
{
	struct device *dev = pmic_typec_pdphy->dev;
	int ret;

	ret = msg ?
		qcom_pmic_typec_pdphy_pd_transmit_payload(pmic_typec_pdphy,
							  type, msg,
							  negotiated_rev) :
		qcom_pmic_typec_pdphy_pd_transmit_signal(pmic_typec_pdphy,
							 type,
							 negotiated_rev);
	if (ret)
		dev_dbg(dev, "pd_transmit: type %x result %d\n", type, ret);

	return ret;
}
/*
 * Drain one received PD message from the RX buffer and hand it to TCPM.
 * The hardware size register is off by one; out-of-range sizes are
 * silently dropped.  The RX buffer is handed back to the hardware by
 * writing 0 to the acknowledge register.
 */
static void qcom_pmic_typec_pdphy_pd_receive(struct pmic_typec_pdphy *pmic_typec_pdphy)
{
	struct device *dev = pmic_typec_pdphy->dev;
	struct pd_message msg;
	unsigned int size, rx_status;
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&pmic_typec_pdphy->lock, flags);
	ret = regmap_read(pmic_typec_pdphy->regmap,
			  pmic_typec_pdphy->base + USB_PDPHY_RX_SIZE_REG, &size);
	if (ret)
		goto done;
	/* Hardware requires +1 of the real read value to be passed */
	if (size < 1 || size > sizeof(msg.payload) + 1) {
		dev_dbg(dev, "pd_receive: invalid size %d\n", size);
		goto done;
	}
	size += 1;
	ret = regmap_read(pmic_typec_pdphy->regmap,
			  pmic_typec_pdphy->base + USB_PDPHY_RX_STATUS_REG,
			  &rx_status);
	if (ret)
		goto done;
	ret = regmap_bulk_read(pmic_typec_pdphy->regmap,
			       pmic_typec_pdphy->base + USB_PDPHY_RX_BUFFER_REG,
			       (u8 *)&msg, size);
	if (ret)
		goto done;
	/* Return ownership of RX buffer to hardware */
	ret = regmap_write(pmic_typec_pdphy->regmap,
			   pmic_typec_pdphy->base + USB_PDPHY_RX_ACKNOWLEDGE_REG, 0);
done:
	spin_unlock_irqrestore(&pmic_typec_pdphy->lock, flags);
	if (!ret) {
		dev_vdbg(dev, "pd_receive: handing %d bytes to tcpm\n", size);
		tcpm_pd_receive(pmic_typec_pdphy->tcpm_port, &msg);
	}
}
/*
 * Threaded IRQ handler for all PDPHY interrupt sources.  Each registered IRQ
 * carries its own irq_data with a driver-local virq identifier, so a single
 * handler can demultiplex on irq_data->virq.
 */
static irqreturn_t qcom_pmic_typec_pdphy_isr(int irq, void *dev_id)
{
	struct pmic_typec_pdphy_irq_data *irq_data = dev_id;
	struct pmic_typec_pdphy *pmic_typec_pdphy = irq_data->pmic_typec_pdphy;
	struct device *dev = pmic_typec_pdphy->dev;

	switch (irq_data->virq) {
	case PMIC_PDPHY_SIG_TX_IRQ:
		/* NOTE(review): logged at error level; looks informational — confirm */
		dev_err(dev, "isr: tx_sig\n");
		break;
	case PMIC_PDPHY_SIG_RX_IRQ:
		/* Received reset signalling: re-initialize the PHY from process context */
		schedule_work(&pmic_typec_pdphy->reset_work);
		break;
	case PMIC_PDPHY_MSG_TX_IRQ:
		tcpm_pd_transmit_complete(pmic_typec_pdphy->tcpm_port,
					  TCPC_TX_SUCCESS);
		break;
	case PMIC_PDPHY_MSG_RX_IRQ:
		/* Pull the message out of the RX buffer and forward it to TCPM */
		qcom_pmic_typec_pdphy_pd_receive(pmic_typec_pdphy);
		break;
	case PMIC_PDPHY_MSG_TX_FAIL_IRQ:
		tcpm_pd_transmit_complete(pmic_typec_pdphy->tcpm_port,
					  TCPC_TX_FAILED);
		break;
	case PMIC_PDPHY_MSG_TX_DISCARD_IRQ:
		tcpm_pd_transmit_complete(pmic_typec_pdphy->tcpm_port,
					  TCPC_TX_DISCARDED);
		break;
	}

	return IRQ_HANDLED;
}
/*
 * qcom_pmic_typec_pdphy_set_pd_rx - enable or disable PD message reception.
 *
 * Writing 0 to the RX acknowledge register hands the RX buffer to the
 * hardware (reception enabled); writing 1 keeps it owned by software.
 */
int qcom_pmic_typec_pdphy_set_pd_rx(struct pmic_typec_pdphy *pmic_typec_pdphy, bool on)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&pmic_typec_pdphy->lock, flags);
	ret = regmap_write(pmic_typec_pdphy->regmap,
			   pmic_typec_pdphy->base + USB_PDPHY_RX_ACKNOWLEDGE_REG,
			   on ? 0 : 1);
	spin_unlock_irqrestore(&pmic_typec_pdphy->lock, flags);

	dev_dbg(pmic_typec_pdphy->dev, "set_pd_rx: %s\n", on ? "on" : "off");

	return ret;
}
/*
 * qcom_pmic_typec_pdphy_set_roles - program the current data/power role into
 * the message-config register so outgoing PD headers carry the right role
 * bits.
 *
 * The shifts place the booleans into the MSG_CONFIG_PORT_DATA_ROLE /
 * MSG_CONFIG_PORT_POWER_ROLE fields (presumably bits 3 and 2 — the mask
 * definitions live in the header; confirm there).
 */
int qcom_pmic_typec_pdphy_set_roles(struct pmic_typec_pdphy *pmic_typec_pdphy,
				    bool data_role_host, bool power_role_src)
{
	struct device *dev = pmic_typec_pdphy->dev;
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&pmic_typec_pdphy->lock, flags);
	ret = regmap_update_bits(pmic_typec_pdphy->regmap,
				 pmic_typec_pdphy->base + USB_PDPHY_MSG_CONFIG_REG,
				 MSG_CONFIG_PORT_DATA_ROLE |
				 MSG_CONFIG_PORT_POWER_ROLE,
				 data_role_host << 3 | power_role_src << 2);
	spin_unlock_irqrestore(&pmic_typec_pdphy->lock, flags);

	dev_dbg(dev, "pdphy_set_roles: data_role_host=%d power_role_src=%d\n",
		data_role_host, power_role_src);

	return ret;
}
/*
 * Power up and initialize the PDPHY block:
 *  1. enable its supply,
 *  2. select PD 2.0 framing in MSG_CONFIG,
 *  3. pulse the enable control (write 0, then CONTROL_ENABLE),
 *  4. take the block out of reset.
 * On any failure the regulator is released again.
 */
static int qcom_pmic_typec_pdphy_enable(struct pmic_typec_pdphy *pmic_typec_pdphy)
{
	struct device *dev = pmic_typec_pdphy->dev;
	int ret;

	ret = regulator_enable(pmic_typec_pdphy->vdd_pdphy);
	if (ret)
		return ret;

	/* PD 2.0, DR=TYPEC_DEVICE, PR=TYPEC_SINK */
	ret = regmap_update_bits(pmic_typec_pdphy->regmap,
				 pmic_typec_pdphy->base + USB_PDPHY_MSG_CONFIG_REG,
				 MSG_CONFIG_SPEC_REV_MASK, PD_REV20);
	if (ret)
		goto done;

	ret = regmap_write(pmic_typec_pdphy->regmap,
			   pmic_typec_pdphy->base + USB_PDPHY_EN_CONTROL_REG, 0);
	if (ret)
		goto done;

	ret = regmap_write(pmic_typec_pdphy->regmap,
			   pmic_typec_pdphy->base + USB_PDPHY_EN_CONTROL_REG,
			   CONTROL_ENABLE);
	if (ret)
		goto done;

	qcom_pmic_typec_pdphy_reset_off(pmic_typec_pdphy);
done:
	if (ret) {
		/* Undo the regulator_enable() from above on any failure */
		regulator_disable(pmic_typec_pdphy->vdd_pdphy);
		dev_err(dev, "pdphy_enable fail %d\n", ret);
	}

	return ret;
}
static int qcom_pmic_typec_pdphy_disable(struct pmic_typec_pdphy *pmic_typec_pdphy)
{
int ret;
qcom_pmic_typec_pdphy_reset_on(pmic_typec_pdphy);
ret = regmap_write(pmic_typec_pdphy->regmap,
pmic_typec_pdphy->base + USB_PDPHY_EN_CONTROL_REG, 0);
regulator_disable(pmic_typec_pdphy->vdd_pdphy);
return ret;
}
static int pmic_typec_pdphy_reset(struct pmic_typec_pdphy *pmic_typec_pdphy)
{
int ret;
ret = qcom_pmic_typec_pdphy_disable(pmic_typec_pdphy);
if (ret)
goto done;
usleep_range(400, 500);
ret = qcom_pmic_typec_pdphy_enable(pmic_typec_pdphy);
done:
return ret;
}
int qcom_pmic_typec_pdphy_start(struct pmic_typec_pdphy *pmic_typec_pdphy,
struct tcpm_port *tcpm_port)
{
int i;
int ret;
pmic_typec_pdphy->tcpm_port = tcpm_port;
ret = pmic_typec_pdphy_reset(pmic_typec_pdphy);
if (ret)
return ret;
for (i = 0; i < pmic_typec_pdphy->nr_irqs; i++)
enable_irq(pmic_typec_pdphy->irq_data[i].irq);
return 0;
}
void qcom_pmic_typec_pdphy_stop(struct pmic_typec_pdphy *pmic_typec_pdphy)
{
int i;
for (i = 0; i < pmic_typec_pdphy->nr_irqs; i++)
disable_irq(pmic_typec_pdphy->irq_data[i].irq);
qcom_pmic_typec_pdphy_reset_on(pmic_typec_pdphy);
}
struct pmic_typec_pdphy *qcom_pmic_typec_pdphy_alloc(struct device *dev)
{
return devm_kzalloc(dev, sizeof(struct pmic_typec_pdphy), GFP_KERNEL);
}
/*
 * qcom_pmic_typec_pdphy_probe - wire a PDPHY instance to its regmap window
 * and request (but do not yet enable) all of its interrupts.
 * @pdev: owning platform device
 * @pmic_typec_pdphy: instance from qcom_pmic_typec_pdphy_alloc()
 * @res: per-SoC IRQ name/virq table
 * @regmap: parent PMIC regmap
 * @base: register window base inside @regmap
 *
 * IRQs are requested with IRQF_NO_AUTOEN; qcom_pmic_typec_pdphy_start()
 * enables them once a tcpm_port exists.  Returns 0 or a negative errno.
 */
int qcom_pmic_typec_pdphy_probe(struct platform_device *pdev,
				struct pmic_typec_pdphy *pmic_typec_pdphy,
				struct pmic_typec_pdphy_resources *res,
				struct regmap *regmap,
				u32 base)
{
	struct device *dev = &pdev->dev;
	struct pmic_typec_pdphy_irq_data *irq_data;
	int i, ret, irq;

	if (!res->nr_irqs || res->nr_irqs > PMIC_PDPHY_MAX_IRQS)
		return -EINVAL;

	/*
	 * devm_kcalloc() instead of an open-coded size multiply inside
	 * devm_kzalloc(): the count * size product is overflow-checked.
	 */
	irq_data = devm_kcalloc(dev, res->nr_irqs, sizeof(*irq_data),
				GFP_KERNEL);
	if (!irq_data)
		return -ENOMEM;

	pmic_typec_pdphy->vdd_pdphy = devm_regulator_get(dev, "vdd-pdphy");
	if (IS_ERR(pmic_typec_pdphy->vdd_pdphy))
		return PTR_ERR(pmic_typec_pdphy->vdd_pdphy);

	pmic_typec_pdphy->dev = dev;
	pmic_typec_pdphy->base = base;
	pmic_typec_pdphy->regmap = regmap;
	pmic_typec_pdphy->nr_irqs = res->nr_irqs;
	pmic_typec_pdphy->irq_data = irq_data;
	spin_lock_init(&pmic_typec_pdphy->lock);
	INIT_WORK(&pmic_typec_pdphy->reset_work, qcom_pmic_typec_pdphy_sig_reset_work);

	for (i = 0; i < res->nr_irqs; i++, irq_data++) {
		irq = platform_get_irq_byname(pdev, res->irq_params[i].irq_name);
		if (irq < 0)
			return irq;

		irq_data->pmic_typec_pdphy = pmic_typec_pdphy;
		irq_data->irq = irq;
		irq_data->virq = res->irq_params[i].virq;
		/* Left disabled (IRQF_NO_AUTOEN) until _start() runs */
		ret = devm_request_threaded_irq(dev, irq, NULL,
						qcom_pmic_typec_pdphy_isr,
						IRQF_ONESHOT | IRQF_NO_AUTOEN,
						res->irq_params[i].irq_name,
						irq_data);
		if (ret)
			return ret;
	}

	return 0;
}
| linux-master | drivers/usb/typec/tcpm/qcom/qcom_pmic_typec_pdphy.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (c) 2023, Linaro Ltd. All rights reserved.
*/
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/regulator/consumer.h>
#include <linux/slab.h>
#include <linux/usb/tcpm.h>
#include <linux/usb/typec_mux.h>
#include <linux/workqueue.h>
#include "qcom_pmic_typec_port.h"
/* Per-IRQ cookie: maps a Linux IRQ back to its driver-local identifier. */
struct pmic_typec_port_irq_data {
	int virq;				/* PMIC_TYPEC_*_IRQ identifier */
	int irq;				/* Linux IRQ number */
	struct pmic_typec_port *pmic_typec_port;
};

/* State for one PMIC Type-C port block. */
struct pmic_typec_port {
	struct device *dev;
	struct tcpm_port *tcpm_port;		/* set by _start() */
	struct regmap *regmap;
	u32 base;				/* register window base */
	unsigned int nr_irqs;
	struct pmic_typec_port_irq_data *irq_data;

	struct regulator *vdd_vbus;		/* VBUS source supply */

	int cc;					/* last value passed to set_cc() */
	bool debouncing_cc;			/* CC readings currently unreliable */
	struct delayed_work cc_debounce_dwork;

	spinlock_t lock;	/* Register atomicity */
};

/* Human-readable names for enum typec_cc_status, used in trace output. */
static const char * const typec_cc_status_name[] = {
	[TYPEC_CC_OPEN]		= "Open",
	[TYPEC_CC_RA]		= "Ra",
	[TYPEC_CC_RD]		= "Rd",
	[TYPEC_CC_RP_DEF]	= "Rp-def",
	[TYPEC_CC_RP_1_5]	= "Rp-1.5",
	[TYPEC_CC_RP_3_0]	= "Rp-3.0",
};

static const char *rp_unknown = "unknown";

/* Name for a CC status value, or "unknown" when out of table range. */
static const char *cc_to_name(enum typec_cc_status cc)
{
	if (cc > TYPEC_CC_RP_3_0)
		return rp_unknown;

	return typec_cc_status_name[cc];
}

/* Names for the Rp current-source selector values written to CURRSRC_CFG. */
static const char * const rp_sel_name[] = {
	[TYPEC_SRC_RP_SEL_80UA]		= "Rp-def-80uA",
	[TYPEC_SRC_RP_SEL_180UA]	= "Rp-1.5-180uA",
	[TYPEC_SRC_RP_SEL_330UA]	= "Rp-3.0-330uA",
};

/* Name for an Rp selector value, or "unknown" when out of table range. */
static const char *rp_sel_to_name(int rp_sel)
{
	if (rp_sel > TYPEC_SRC_RP_SEL_330UA)
		return rp_unknown;

	return rp_sel_name[rp_sel];
}
/*
 * Decode the active CC line / VCONN line from TYPEC_MISC_STATUS_REG:
 * CC_ORIENTATION set means CC1 carries the connection, so VCONN rides CC2.
 *
 * Fix: the parameter was misspelled "msic" while the body referenced "misc",
 * so the macros only worked by capturing a local named "misc" at every
 * expansion site; also parenthesize the argument and the whole expansion.
 */
#define misc_to_cc(misc)	(!!((misc) & CC_ORIENTATION) ? "cc1" : "cc2")
#define misc_to_vconn(misc)	(!!((misc) & CC_ORIENTATION) ? "cc2" : "cc1")
/* Delayed work: end of the CC debounce window — readings are trusted again. */
static void qcom_pmic_typec_port_cc_debounce(struct work_struct *work)
{
	struct pmic_typec_port *port =
		container_of(work, struct pmic_typec_port, cc_debounce_dwork.work);
	unsigned long flags;

	spin_lock_irqsave(&port->lock, flags);
	port->debouncing_cc = false;
	spin_unlock_irqrestore(&port->lock, flags);

	dev_dbg(port->dev, "Debounce cc complete\n");
}
/*
 * Threaded IRQ handler for the Type-C port block.  Translates hardware
 * events into TCPM notifications; the TCPM calls happen outside the
 * spinlock because they may in turn call back into this driver.
 */
static irqreturn_t pmic_typec_port_isr(int irq, void *dev_id)
{
	struct pmic_typec_port_irq_data *irq_data = dev_id;
	struct pmic_typec_port *pmic_typec_port = irq_data->pmic_typec_port;
	u32 misc_stat;
	bool vbus_change = false;
	bool cc_change = false;
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&pmic_typec_port->lock, flags);

	ret = regmap_read(pmic_typec_port->regmap,
			  pmic_typec_port->base + TYPEC_MISC_STATUS_REG,
			  &misc_stat);
	if (ret)
		goto done;

	switch (irq_data->virq) {
	case PMIC_TYPEC_VBUS_IRQ:
		vbus_change = true;
		break;
	case PMIC_TYPEC_CC_STATE_IRQ:
	case PMIC_TYPEC_ATTACH_DETACH_IRQ:
		/* Suppress CC notifications during the debounce window */
		if (!pmic_typec_port->debouncing_cc)
			cc_change = true;
		break;
	}

done:
	spin_unlock_irqrestore(&pmic_typec_port->lock, flags);

	if (vbus_change)
		tcpm_vbus_change(pmic_typec_port->tcpm_port);

	if (cc_change)
		tcpm_cc_change(pmic_typec_port->tcpm_port);

	return IRQ_HANDLED;
}
int qcom_pmic_typec_port_get_vbus(struct pmic_typec_port *pmic_typec_port)
{
struct device *dev = pmic_typec_port->dev;
unsigned int misc;
int ret;
ret = regmap_read(pmic_typec_port->regmap,
pmic_typec_port->base + TYPEC_MISC_STATUS_REG,
&misc);
if (ret)
misc = 0;
dev_dbg(dev, "get_vbus: 0x%08x detect %d\n", misc, !!(misc & TYPEC_VBUS_DETECT));
return !!(misc & TYPEC_VBUS_DETECT);
}
/*
 * qcom_pmic_typec_port_set_vbus - switch the VBUS supply and wait for the
 * hardware state machine to reach vSafe5V (on) or vSafe0V (off).
 *
 * The poll is best-effort: a timeout only produces a warning and the
 * function still returns 0, so regulator failures are the only hard errors.
 */
int qcom_pmic_typec_port_set_vbus(struct pmic_typec_port *pmic_typec_port, bool on)
{
	u32 sm_stat;
	u32 val;
	int ret;

	if (on) {
		ret = regulator_enable(pmic_typec_port->vdd_vbus);
		if (ret)
			return ret;
		val = TYPEC_SM_VBUS_VSAFE5V;
	} else {
		ret = regulator_disable(pmic_typec_port->vdd_vbus);
		if (ret)
			return ret;
		val = TYPEC_SM_VBUS_VSAFE0V;
	}

	/* Poll waiting for transition to required vSafe5V or vSafe0V */
	ret = regmap_read_poll_timeout(pmic_typec_port->regmap,
				       pmic_typec_port->base + TYPEC_SM_STATUS_REG,
				       sm_stat, sm_stat & val,
				       100, 250000);
	if (ret)
		dev_warn(pmic_typec_port->dev, "vbus vsafe%dv fail\n", on ? 5 : 0);

	/* Deliberately ignore the poll result — see comment above */
	return 0;
}
int qcom_pmic_typec_port_get_cc(struct pmic_typec_port *pmic_typec_port,
enum typec_cc_status *cc1,
enum typec_cc_status *cc2)
{
struct device *dev = pmic_typec_port->dev;
unsigned int misc, val;
bool attached;
int ret = 0;
ret = regmap_read(pmic_typec_port->regmap,
pmic_typec_port->base + TYPEC_MISC_STATUS_REG, &misc);
if (ret)
goto done;
attached = !!(misc & CC_ATTACHED);
if (pmic_typec_port->debouncing_cc) {
ret = -EBUSY;
goto done;
}
*cc1 = TYPEC_CC_OPEN;
*cc2 = TYPEC_CC_OPEN;
if (!attached)
goto done;
if (misc & SNK_SRC_MODE) {
ret = regmap_read(pmic_typec_port->regmap,
pmic_typec_port->base + TYPEC_SRC_STATUS_REG,
&val);
if (ret)
goto done;
switch (val & DETECTED_SRC_TYPE_MASK) {
case AUDIO_ACCESS_RA_RA:
val = TYPEC_CC_RA;
*cc1 = TYPEC_CC_RA;
*cc2 = TYPEC_CC_RA;
break;
case SRC_RD_OPEN:
val = TYPEC_CC_RD;
break;
case SRC_RD_RA_VCONN:
val = TYPEC_CC_RD;
*cc1 = TYPEC_CC_RA;
*cc2 = TYPEC_CC_RA;
break;
default:
dev_warn(dev, "unexpected src status %.2x\n", val);
val = TYPEC_CC_RD;
break;
}
} else {
ret = regmap_read(pmic_typec_port->regmap,
pmic_typec_port->base + TYPEC_SNK_STATUS_REG,
&val);
if (ret)
goto done;
switch (val & DETECTED_SNK_TYPE_MASK) {
case SNK_RP_STD:
val = TYPEC_CC_RP_DEF;
break;
case SNK_RP_1P5:
val = TYPEC_CC_RP_1_5;
break;
case SNK_RP_3P0:
val = TYPEC_CC_RP_3_0;
break;
default:
dev_warn(dev, "unexpected snk status %.2x\n", val);
val = TYPEC_CC_RP_DEF;
break;
}
val = TYPEC_CC_RP_DEF;
}
if (misc & CC_ORIENTATION)
*cc2 = val;
else
*cc1 = val;
done:
dev_dbg(dev, "get_cc: misc 0x%08x cc1 0x%08x %s cc2 0x%08x %s attached %d cc=%s\n",
misc, *cc1, cc_to_name(*cc1), *cc2, cc_to_name(*cc2), attached,
misc_to_cc(misc));
return ret;
}
/* Open a ~2 ms window during which CC readings/IRQs are ignored. */
static void qcom_pmic_set_cc_debounce(struct pmic_typec_port *pmic_typec_port)
{
	pmic_typec_port->debouncing_cc = true;

	schedule_delayed_work(&pmic_typec_port->cc_debounce_dwork,
			      msecs_to_jiffies(2));
}
/*
 * qcom_pmic_typec_port_set_cc - apply a CC termination requested by TCPM.
 * Rp-* values select the source current advertisement; TYPEC_CC_RD selects
 * sink-only mode.  Starts a debounce window after reconfiguring.
 */
int qcom_pmic_typec_port_set_cc(struct pmic_typec_port *pmic_typec_port,
				enum typec_cc_status cc)
{
	struct device *dev = pmic_typec_port->dev;
	/*
	 * Fix: mode/currsrc/misc are initialized so the trace message at
	 * "done" never reads indeterminate values on the early error paths.
	 */
	unsigned int mode = 0, currsrc = 0;
	unsigned int misc = 0;
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&pmic_typec_port->lock, flags);

	ret = regmap_read(pmic_typec_port->regmap,
			  pmic_typec_port->base + TYPEC_MISC_STATUS_REG,
			  &misc);
	if (ret)
		goto done;

	mode = EN_SRC_ONLY;

	switch (cc) {
	case TYPEC_CC_OPEN:
	case TYPEC_CC_RP_DEF:
		currsrc = TYPEC_SRC_RP_SEL_80UA;
		break;
	case TYPEC_CC_RP_1_5:
		currsrc = TYPEC_SRC_RP_SEL_180UA;
		break;
	case TYPEC_CC_RP_3_0:
		currsrc = TYPEC_SRC_RP_SEL_330UA;
		break;
	case TYPEC_CC_RD:
		currsrc = TYPEC_SRC_RP_SEL_80UA;
		mode = EN_SNK_ONLY;
		break;
	default:
		dev_warn(dev, "unexpected set_cc %d\n", cc);
		ret = -EINVAL;
		goto done;
	}

	/* The current-source selector only matters when sourcing */
	if (mode == EN_SRC_ONLY) {
		ret = regmap_write(pmic_typec_port->regmap,
				   pmic_typec_port->base + TYPEC_CURRSRC_CFG_REG,
				   currsrc);
		if (ret)
			goto done;
	}

	pmic_typec_port->cc = cc;
	qcom_pmic_set_cc_debounce(pmic_typec_port);
	ret = 0;

done:
	spin_unlock_irqrestore(&pmic_typec_port->lock, flags);

	dev_dbg(dev, "set_cc: currsrc=%x %s mode %s debounce %d attached %d cc=%s\n",
		currsrc, rp_sel_to_name(currsrc),
		mode == EN_SRC_ONLY ? "EN_SRC_ONLY" : "EN_SNK_ONLY",
		pmic_typec_port->debouncing_cc, !!(misc & CC_ATTACHED),
		misc_to_cc(misc));

	return ret;
}
/*
 * qcom_pmic_typec_port_set_vconn - source or stop sourcing VCONN on the
 * CC line that is *not* carrying the connection.
 */
int qcom_pmic_typec_port_set_vconn(struct pmic_typec_port *pmic_typec_port, bool on)
{
	struct device *dev = pmic_typec_port->dev;
	/*
	 * Fix: orientation/misc/value are initialized so the trace message
	 * at "done" never reads indeterminate values when the initial
	 * register read fails.
	 */
	unsigned int orientation = 0, misc = 0, mask, value = 0;
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&pmic_typec_port->lock, flags);

	ret = regmap_read(pmic_typec_port->regmap,
			  pmic_typec_port->base + TYPEC_MISC_STATUS_REG, &misc);
	if (ret)
		goto done;

	/* Set VCONN on the inversion of the active CC channel */
	orientation = (misc & CC_ORIENTATION) ? 0 : VCONN_EN_ORIENTATION;
	if (on) {
		mask = VCONN_EN_ORIENTATION | VCONN_EN_VALUE;
		value = orientation | VCONN_EN_VALUE | VCONN_EN_SRC;
	} else {
		mask = VCONN_EN_VALUE;
		value = 0;
	}

	ret = regmap_update_bits(pmic_typec_port->regmap,
				 pmic_typec_port->base + TYPEC_VCONN_CONTROL_REG,
				 mask, value);
done:
	spin_unlock_irqrestore(&pmic_typec_port->lock, flags);

	dev_dbg(dev, "set_vconn: orientation %d control 0x%08x state %s cc %s vconn %s\n",
		orientation, value, on ? "on" : "off", misc_to_vconn(misc), misc_to_cc(misc));

	return ret;
}
/*
 * qcom_pmic_typec_port_start_toggling - (re)start the hardware's DRP/role
 * toggling state machine for the requested port type.  The mode register is
 * first written with TYPEC_DISABLE_CMD so the machine toggles at least once
 * even if the mode is unchanged.
 */
int qcom_pmic_typec_port_start_toggling(struct pmic_typec_port *pmic_typec_port,
					enum typec_port_type port_type,
					enum typec_cc_status cc)
{
	struct device *dev = pmic_typec_port->dev;
	unsigned int misc;
	u8 mode = 0;
	unsigned long flags;
	int ret;

	switch (port_type) {
	case TYPEC_PORT_SRC:
		mode = EN_SRC_ONLY;
		break;
	case TYPEC_PORT_SNK:
		mode = EN_SNK_ONLY;
		break;
	case TYPEC_PORT_DRP:
		mode = EN_TRY_SNK;
		break;
	}

	spin_lock_irqsave(&pmic_typec_port->lock, flags);

	ret = regmap_read(pmic_typec_port->regmap,
			  pmic_typec_port->base + TYPEC_MISC_STATUS_REG, &misc);
	if (ret)
		goto done;

	dev_dbg(dev, "start_toggling: misc 0x%08x attached %d port_type %d current cc %d new %d\n",
		misc, !!(misc & CC_ATTACHED), port_type, pmic_typec_port->cc, cc);

	/* Ignore CC chatter while the machine restarts */
	qcom_pmic_set_cc_debounce(pmic_typec_port);

	/* force it to toggle at least once */
	ret = regmap_write(pmic_typec_port->regmap,
			   pmic_typec_port->base + TYPEC_MODE_CFG_REG,
			   TYPEC_DISABLE_CMD);
	if (ret)
		goto done;

	ret = regmap_write(pmic_typec_port->regmap,
			   pmic_typec_port->base + TYPEC_MODE_CFG_REG,
			   mode);
done:
	spin_unlock_irqrestore(&pmic_typec_port->lock, flags);

	return ret;
}

/* Interrupt sources unmasked in TYPEC_INTERRUPT_EN_CFG_1/2 at _start() time. */
#define TYPEC_INTR_EN_CFG_1_MASK		  \
	(TYPEC_LEGACY_CABLE_INT_EN		| \
	 TYPEC_NONCOMPLIANT_LEGACY_CABLE_INT_EN	| \
	 TYPEC_TRYSOURCE_DETECT_INT_EN		| \
	 TYPEC_TRYSINK_DETECT_INT_EN		| \
	 TYPEC_CCOUT_DETACH_INT_EN		| \
	 TYPEC_CCOUT_ATTACH_INT_EN		| \
	 TYPEC_VBUS_DEASSERT_INT_EN		| \
	 TYPEC_VBUS_ASSERT_INT_EN)

#define TYPEC_INTR_EN_CFG_2_MASK \
	(TYPEC_STATE_MACHINE_CHANGE_INT_EN | TYPEC_VBUS_ERROR_INT_EN | \
	 TYPEC_DEBOUNCE_DONE_INT_EN)

/*
 * qcom_pmic_typec_port_start - configure interrupt sources, initial mode and
 * VCONN/CC thresholds, then enable all port IRQs.  Must run after a
 * tcpm_port exists since the IRQ handlers notify it.
 */
int qcom_pmic_typec_port_start(struct pmic_typec_port *pmic_typec_port,
			       struct tcpm_port *tcpm_port)
{
	int i;
	int mask;
	int ret;

	/* Configure interrupt sources */
	ret = regmap_write(pmic_typec_port->regmap,
			   pmic_typec_port->base + TYPEC_INTERRUPT_EN_CFG_1_REG,
			   TYPEC_INTR_EN_CFG_1_MASK);
	if (ret)
		goto done;

	ret = regmap_write(pmic_typec_port->regmap,
			   pmic_typec_port->base + TYPEC_INTERRUPT_EN_CFG_2_REG,
			   TYPEC_INTR_EN_CFG_2_MASK);
	if (ret)
		goto done;

	/* start in TRY_SNK mode */
	ret = regmap_write(pmic_typec_port->regmap,
			   pmic_typec_port->base + TYPEC_MODE_CFG_REG, EN_TRY_SNK);
	if (ret)
		goto done;

	/* Configure VCONN for software control */
	ret = regmap_update_bits(pmic_typec_port->regmap,
				 pmic_typec_port->base + TYPEC_VCONN_CONTROL_REG,
				 VCONN_EN_SRC | VCONN_EN_VALUE, VCONN_EN_SRC);
	if (ret)
		goto done;

	/* Set CC threshold to 1.6 Volts | tPDdebounce = 10-20ms */
	mask = SEL_SRC_UPPER_REF | USE_TPD_FOR_EXITING_ATTACHSRC;
	ret = regmap_update_bits(pmic_typec_port->regmap,
				 pmic_typec_port->base + TYPEC_EXIT_STATE_CFG_REG,
				 mask, mask);
	if (ret)
		goto done;

	pmic_typec_port->tcpm_port = tcpm_port;

	for (i = 0; i < pmic_typec_port->nr_irqs; i++)
		enable_irq(pmic_typec_port->irq_data[i].irq);

done:
	return ret;
}
void qcom_pmic_typec_port_stop(struct pmic_typec_port *pmic_typec_port)
{
int i;
for (i = 0; i < pmic_typec_port->nr_irqs; i++)
disable_irq(pmic_typec_port->irq_data[i].irq);
}
struct pmic_typec_port *qcom_pmic_typec_port_alloc(struct device *dev)
{
return devm_kzalloc(dev, sizeof(struct pmic_typec_port), GFP_KERNEL);
}
/*
 * qcom_pmic_typec_port_probe - bind a port instance to its regmap window,
 * grab the VBUS supply and request (but not yet enable) all port IRQs.
 *
 * IRQs use IRQF_NO_AUTOEN; qcom_pmic_typec_port_start() enables them once
 * a tcpm_port exists.  Returns 0 or a negative errno.
 */
int qcom_pmic_typec_port_probe(struct platform_device *pdev,
			       struct pmic_typec_port *pmic_typec_port,
			       struct pmic_typec_port_resources *res,
			       struct regmap *regmap,
			       u32 base)
{
	struct device *dev = &pdev->dev;
	struct pmic_typec_port_irq_data *irq_data;
	int i, ret, irq;

	if (!res->nr_irqs || res->nr_irqs > PMIC_TYPEC_MAX_IRQS)
		return -EINVAL;

	/*
	 * devm_kcalloc() instead of an open-coded size multiply inside
	 * devm_kzalloc(): the count * size product is overflow-checked.
	 */
	irq_data = devm_kcalloc(dev, res->nr_irqs, sizeof(*irq_data),
				GFP_KERNEL);
	if (!irq_data)
		return -ENOMEM;

	pmic_typec_port->vdd_vbus = devm_regulator_get(dev, "vdd-vbus");
	if (IS_ERR(pmic_typec_port->vdd_vbus))
		return PTR_ERR(pmic_typec_port->vdd_vbus);

	pmic_typec_port->dev = dev;
	pmic_typec_port->base = base;
	pmic_typec_port->regmap = regmap;
	pmic_typec_port->nr_irqs = res->nr_irqs;
	pmic_typec_port->irq_data = irq_data;
	spin_lock_init(&pmic_typec_port->lock);
	INIT_DELAYED_WORK(&pmic_typec_port->cc_debounce_dwork,
			  qcom_pmic_typec_port_cc_debounce);

	/*
	 * Fix: dropped the redundant platform_get_irq(pdev, 0) lookup whose
	 * result was never used — every IRQ is resolved by name below.
	 */
	for (i = 0; i < res->nr_irqs; i++, irq_data++) {
		irq = platform_get_irq_byname(pdev,
					      res->irq_params[i].irq_name);
		if (irq < 0)
			return irq;

		irq_data->pmic_typec_port = pmic_typec_port;
		irq_data->irq = irq;
		irq_data->virq = res->irq_params[i].virq;
		/* Left disabled (IRQF_NO_AUTOEN) until _start() runs */
		ret = devm_request_threaded_irq(dev, irq, NULL, pmic_typec_port_isr,
						IRQF_ONESHOT | IRQF_NO_AUTOEN,
						res->irq_params[i].irq_name,
						irq_data);
		if (ret)
			return ret;
	}

	return 0;
}
| linux-master | drivers/usb/typec/tcpm/qcom/qcom_pmic_typec_port.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (c) 2023, Linaro Ltd. All rights reserved.
*/
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_graph.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/regulator/consumer.h>
#include <linux/slab.h>
#include <linux/usb/role.h>
#include <linux/usb/tcpm.h>
#include <linux/usb/typec_mux.h>
#include <drm/drm_bridge.h>
#include "qcom_pmic_typec_pdphy.h"
#include "qcom_pmic_typec_port.h"
/* Per-SoC resource bundle: IRQ tables for the PDPHY and port sub-blocks. */
struct pmic_typec_resources {
	struct pmic_typec_pdphy_resources	*pdphy_res;
	struct pmic_typec_port_resources	*port_res;
};

/* Top-level driver state tying the PDPHY and port halves to one tcpc_dev. */
struct pmic_typec {
	struct device		*dev;
	struct tcpm_port	*tcpm_port;
	struct tcpc_dev		tcpc;		/* embedded; see tcpc_to_tcpm() */
	struct pmic_typec_pdphy	*pmic_typec_pdphy;
	struct pmic_typec_port	*pmic_typec_port;
	bool			vbus_enabled;	/* sourcing VBUS ourselves */
	struct mutex		lock;		/* VBUS state serialization */

	struct drm_bridge	bridge;		/* DP altmode hand-off (CONFIG_DRM) */
};

/* Recover the pmic_typec from the embedded tcpc_dev passed by TCPM. */
#define tcpc_to_tcpm(_tcpc_) container_of(_tcpc_, struct pmic_typec, tcpc)
/*
 * TCPM callback: VBUS present if we are sourcing it ourselves, or if the
 * port block detects an external supply.
 */
static int qcom_pmic_typec_get_vbus(struct tcpc_dev *tcpc)
{
	struct pmic_typec *tcpm = tcpc_to_tcpm(tcpc);
	int vbus;

	mutex_lock(&tcpm->lock);
	if (tcpm->vbus_enabled)
		vbus = 1;
	else
		vbus = qcom_pmic_typec_port_get_vbus(tcpm->pmic_typec_port);
	mutex_unlock(&tcpm->lock);

	return vbus;
}
/*
 * TCPM callback: switch our VBUS sourcing on/off.  The cached state makes
 * repeated identical requests a no-op; a successful change is reported back
 * to TCPM via tcpm_vbus_change().
 */
static int qcom_pmic_typec_set_vbus(struct tcpc_dev *tcpc, bool on, bool sink)
{
	struct pmic_typec *tcpm = tcpc_to_tcpm(tcpc);
	int ret = 0;

	mutex_lock(&tcpm->lock);
	if (tcpm->vbus_enabled == on)
		goto done;

	ret = qcom_pmic_typec_port_set_vbus(tcpm->pmic_typec_port, on);
	if (ret)
		goto done;

	tcpm->vbus_enabled = on;
	tcpm_vbus_change(tcpm->tcpm_port);

done:
	dev_dbg(tcpm->dev, "set_vbus set: %d result %d\n", on, ret);
	mutex_unlock(&tcpm->lock);

	return ret;
}
/* TCPM callback: forward VCONN on/off to the port block. */
static int qcom_pmic_typec_set_vconn(struct tcpc_dev *tcpc, bool on)
{
	struct pmic_typec *pmic = tcpc_to_tcpm(tcpc);

	return qcom_pmic_typec_port_set_vconn(pmic->pmic_typec_port, on);
}

/* TCPM callback: read the current CC pin states from the port block. */
static int qcom_pmic_typec_get_cc(struct tcpc_dev *tcpc,
				  enum typec_cc_status *cc1,
				  enum typec_cc_status *cc2)
{
	struct pmic_typec *pmic = tcpc_to_tcpm(tcpc);

	return qcom_pmic_typec_port_get_cc(pmic->pmic_typec_port, cc1, cc2);
}

/* TCPM callback: apply the requested CC termination via the port block. */
static int qcom_pmic_typec_set_cc(struct tcpc_dev *tcpc,
				  enum typec_cc_status cc)
{
	struct pmic_typec *pmic = tcpc_to_tcpm(tcpc);

	return qcom_pmic_typec_port_set_cc(pmic->pmic_typec_port, cc);
}

/* TCPM callback: nothing to do here. */
static int qcom_pmic_typec_set_polarity(struct tcpc_dev *tcpc,
					enum typec_cc_polarity pol)
{
	/* Polarity is set separately by phy-qcom-qmp.c */
	return 0;
}

/* TCPM callback: restart hardware role toggling for the given port type. */
static int qcom_pmic_typec_start_toggling(struct tcpc_dev *tcpc,
					  enum typec_port_type port_type,
					  enum typec_cc_status cc)
{
	struct pmic_typec *pmic = tcpc_to_tcpm(tcpc);

	return qcom_pmic_typec_port_start_toggling(pmic->pmic_typec_port,
						   port_type, cc);
}
/*
 * TCPM callback: program the negotiated data/power roles into the PDPHY.
 *
 * qcom_pmic_typec_pdphy_set_roles() takes booleans (data_role_host,
 * power_role_src).  Fix: compare against the enum constants explicitly
 * instead of relying on the enums' implicit conversion to bool
 * (TYPEC_HOST == 1, TYPEC_SOURCE == 1).
 */
static int qcom_pmic_typec_set_roles(struct tcpc_dev *tcpc, bool attached,
				     enum typec_role power_role,
				     enum typec_data_role data_role)
{
	struct pmic_typec *tcpm = tcpc_to_tcpm(tcpc);

	return qcom_pmic_typec_pdphy_set_roles(tcpm->pmic_typec_pdphy,
					       data_role == TYPEC_HOST,
					       power_role == TYPEC_SOURCE);
}
/* TCPM callback: gate PD message reception in the PDPHY. */
static int qcom_pmic_typec_set_pd_rx(struct tcpc_dev *tcpc, bool on)
{
	struct pmic_typec *pmic = tcpc_to_tcpm(tcpc);

	return qcom_pmic_typec_pdphy_set_pd_rx(pmic->pmic_typec_pdphy, on);
}

/* TCPM callback: hand a PD message (or signal) to the PDPHY transmitter. */
static int qcom_pmic_typec_pd_transmit(struct tcpc_dev *tcpc,
				       enum tcpm_transmit_type type,
				       const struct pd_message *msg,
				       unsigned int negotiated_rev)
{
	struct pmic_typec *pmic = tcpc_to_tcpm(tcpc);

	return qcom_pmic_typec_pdphy_pd_transmit(pmic->pmic_typec_pdphy, type,
						 msg, negotiated_rev);
}

/* TCPM callback: no extra controller init required. */
static int qcom_pmic_typec_init(struct tcpc_dev *tcpc)
{
	return 0;
}
#if IS_ENABLED(CONFIG_DRM)
/*
 * Minimal drm_bridge so the DisplayPort altmode can be chained into a DRM
 * pipeline.  Only connector-less attachment is supported.
 */
static int qcom_pmic_typec_attach(struct drm_bridge *bridge,
				     enum drm_bridge_attach_flags flags)
{
	return flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR ? 0 : -EINVAL;
}

static const struct drm_bridge_funcs qcom_pmic_typec_bridge_funcs = {
	.attach = qcom_pmic_typec_attach,
};

/* Register the device-managed DRM bridge for the "connector" DT node. */
static int qcom_pmic_typec_init_drm(struct pmic_typec *tcpm)
{
	tcpm->bridge.funcs = &qcom_pmic_typec_bridge_funcs;
#ifdef CONFIG_OF
	tcpm->bridge.of_node = of_get_child_by_name(tcpm->dev->of_node, "connector");
#endif
	tcpm->bridge.ops = DRM_BRIDGE_OP_HPD;
	tcpm->bridge.type = DRM_MODE_CONNECTOR_DisplayPort;

	return devm_drm_bridge_add(tcpm->dev, &tcpm->bridge);
}
#else
/* No DRM support built in: nothing to register. */
static int qcom_pmic_typec_init_drm(struct pmic_typec *tcpm)
{
	return 0;
}
#endif
static int qcom_pmic_typec_probe(struct platform_device *pdev)
{
struct pmic_typec *tcpm;
struct device *dev = &pdev->dev;
struct device_node *np = dev->of_node;
const struct pmic_typec_resources *res;
struct regmap *regmap;
u32 base[2];
int ret;
res = of_device_get_match_data(dev);
if (!res)
return -ENODEV;
tcpm = devm_kzalloc(dev, sizeof(*tcpm), GFP_KERNEL);
if (!tcpm)
return -ENOMEM;
tcpm->dev = dev;
tcpm->tcpc.init = qcom_pmic_typec_init;
tcpm->tcpc.get_vbus = qcom_pmic_typec_get_vbus;
tcpm->tcpc.set_vbus = qcom_pmic_typec_set_vbus;
tcpm->tcpc.set_cc = qcom_pmic_typec_set_cc;
tcpm->tcpc.get_cc = qcom_pmic_typec_get_cc;
tcpm->tcpc.set_polarity = qcom_pmic_typec_set_polarity;
tcpm->tcpc.set_vconn = qcom_pmic_typec_set_vconn;
tcpm->tcpc.start_toggling = qcom_pmic_typec_start_toggling;
tcpm->tcpc.set_pd_rx = qcom_pmic_typec_set_pd_rx;
tcpm->tcpc.set_roles = qcom_pmic_typec_set_roles;
tcpm->tcpc.pd_transmit = qcom_pmic_typec_pd_transmit;
regmap = dev_get_regmap(dev->parent, NULL);
if (!regmap) {
dev_err(dev, "Failed to get regmap\n");
return -ENODEV;
}
ret = of_property_read_u32_array(np, "reg", base, 2);
if (ret)
return ret;
tcpm->pmic_typec_port = qcom_pmic_typec_port_alloc(dev);
if (IS_ERR(tcpm->pmic_typec_port))
return PTR_ERR(tcpm->pmic_typec_port);
tcpm->pmic_typec_pdphy = qcom_pmic_typec_pdphy_alloc(dev);
if (IS_ERR(tcpm->pmic_typec_pdphy))
return PTR_ERR(tcpm->pmic_typec_pdphy);
ret = qcom_pmic_typec_port_probe(pdev, tcpm->pmic_typec_port,
res->port_res, regmap, base[0]);
if (ret)
return ret;
ret = qcom_pmic_typec_pdphy_probe(pdev, tcpm->pmic_typec_pdphy,
res->pdphy_res, regmap, base[1]);
if (ret)
return ret;
mutex_init(&tcpm->lock);
platform_set_drvdata(pdev, tcpm);
ret = qcom_pmic_typec_init_drm(tcpm);
if (ret)
return ret;
tcpm->tcpc.fwnode = device_get_named_child_node(tcpm->dev, "connector");
if (!tcpm->tcpc.fwnode)
return -EINVAL;
tcpm->tcpm_port = tcpm_register_port(tcpm->dev, &tcpm->tcpc);
if (IS_ERR(tcpm->tcpm_port)) {
ret = PTR_ERR(tcpm->tcpm_port);
goto fwnode_remove;
}
ret = qcom_pmic_typec_port_start(tcpm->pmic_typec_port,
tcpm->tcpm_port);
if (ret)
goto fwnode_remove;
ret = qcom_pmic_typec_pdphy_start(tcpm->pmic_typec_pdphy,
tcpm->tcpm_port);
if (ret)
goto fwnode_remove;
return 0;
fwnode_remove:
fwnode_remove_software_node(tcpm->tcpc.fwnode);
return ret;
}
/*
 * Teardown in reverse of probe: quiesce both interrupt producers before
 * unregistering the TCPM port they notify, then drop the fwnode.
 */
static void qcom_pmic_typec_remove(struct platform_device *pdev)
{
	struct pmic_typec *tcpm = platform_get_drvdata(pdev);

	qcom_pmic_typec_pdphy_stop(tcpm->pmic_typec_pdphy);
	qcom_pmic_typec_port_stop(tcpm->pmic_typec_port);
	tcpm_unregister_port(tcpm->tcpm_port);
	fwnode_remove_software_node(tcpm->tcpc.fwnode);
}
/*
 * PM8150B PDPHY interrupts, looked up by name from DT.  nr_irqs must match
 * the number of populated entries (bounded by PMIC_PDPHY_MAX_IRQS).
 */
static struct pmic_typec_pdphy_resources pm8150b_pdphy_res = {
	.irq_params = {
		{
			.virq = PMIC_PDPHY_SIG_TX_IRQ,
			.irq_name = "sig-tx",
		},
		{
			.virq = PMIC_PDPHY_SIG_RX_IRQ,
			.irq_name = "sig-rx",
		},
		{
			.virq = PMIC_PDPHY_MSG_TX_IRQ,
			.irq_name = "msg-tx",
		},
		{
			.virq = PMIC_PDPHY_MSG_RX_IRQ,
			.irq_name = "msg-rx",
		},
		{
			.virq = PMIC_PDPHY_MSG_TX_FAIL_IRQ,
			.irq_name = "msg-tx-failed",
		},
		{
			.virq = PMIC_PDPHY_MSG_TX_DISCARD_IRQ,
			.irq_name = "msg-tx-discarded",
		},
		{
			.virq = PMIC_PDPHY_MSG_RX_DISCARD_IRQ,
			.irq_name = "msg-rx-discarded",
		},
	},
	.nr_irqs = 7,
};

/* PM8150B Type-C port interrupts, looked up by name from DT. */
static struct pmic_typec_port_resources pm8150b_port_res = {
	.irq_params = {
		{
			.irq_name = "vpd-detect",
			.virq = PMIC_TYPEC_VPD_IRQ,
		},

		{
			.irq_name = "cc-state-change",
			.virq = PMIC_TYPEC_CC_STATE_IRQ,
		},
		{
			.irq_name = "vconn-oc",
			.virq = PMIC_TYPEC_VCONN_OC_IRQ,
		},

		{
			.irq_name = "vbus-change",
			.virq = PMIC_TYPEC_VBUS_IRQ,
		},

		{
			.irq_name = "attach-detach",
			.virq = PMIC_TYPEC_ATTACH_DETACH_IRQ,
		},
		{
			.irq_name = "legacy-cable-detect",
			.virq = PMIC_TYPEC_LEGACY_CABLE_IRQ,
		},

		{
			.irq_name = "try-snk-src-detect",
			.virq = PMIC_TYPEC_TRY_SNK_SRC_IRQ,
		},
	},
	.nr_irqs = 7,
};

static struct pmic_typec_resources pm8150b_typec_res = {
	.pdphy_res = &pm8150b_pdphy_res,
	.port_res = &pm8150b_port_res,
};

static const struct of_device_id qcom_pmic_typec_table[] = {
	{ .compatible = "qcom,pm8150b-typec", .data = &pm8150b_typec_res },
	{ }
};
MODULE_DEVICE_TABLE(of, qcom_pmic_typec_table);

static struct platform_driver qcom_pmic_typec_driver = {
	.driver = {
		.name = "qcom,pmic-typec",
		.of_match_table = qcom_pmic_typec_table,
	},
	.probe = qcom_pmic_typec_probe,
	.remove_new = qcom_pmic_typec_remove,
};

module_platform_driver(qcom_pmic_typec_driver);

MODULE_DESCRIPTION("QCOM PMIC USB Type-C Port Manager Driver");
MODULE_LICENSE("GPL");
| linux-master | drivers/usb/typec/tcpm/qcom/qcom_pmic_typec.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2022 Linaro Ltd.
*/
#include <linux/device.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/gpio/consumer.h>
#include <linux/platform_device.h>
#include <linux/usb/typec_dp.h>
#include <linux/usb/typec_mux.h>
/*
 * Two-GPIO SBU crossbar: "enable" connects/disconnects the SBU lines,
 * "select" picks straight-through vs swapped routing.
 */
struct gpio_sbu_mux {
	struct gpio_desc *enable_gpio;
	struct gpio_desc *select_gpio;

	struct typec_switch_dev *sw;	/* orientation switch handle */
	struct typec_mux_dev *mux;	/* altmode mux handle */

	struct mutex lock; /* protect enabled and swapped */
	bool enabled;	/* cached state of enable_gpio */
	bool swapped;	/* cached state of select_gpio */
};
/*
 * Orientation callback: NONE disconnects the SBU lines, NORMAL/REVERSE
 * pick the routing.  GPIOs are only driven when their state changes.
 */
static int gpio_sbu_switch_set(struct typec_switch_dev *sw,
			       enum typec_orientation orientation)
{
	struct gpio_sbu_mux *sbu_mux = typec_switch_get_drvdata(sw);
	bool new_enabled, new_swapped;

	mutex_lock(&sbu_mux->lock);

	new_enabled = sbu_mux->enabled;
	new_swapped = sbu_mux->swapped;

	switch (orientation) {
	case TYPEC_ORIENTATION_NONE:
		new_enabled = false;
		break;
	case TYPEC_ORIENTATION_NORMAL:
		new_swapped = false;
		break;
	case TYPEC_ORIENTATION_REVERSE:
		new_swapped = true;
		break;
	}

	if (new_enabled != sbu_mux->enabled)
		gpiod_set_value(sbu_mux->enable_gpio, new_enabled);

	if (new_swapped != sbu_mux->swapped)
		gpiod_set_value(sbu_mux->select_gpio, new_swapped);

	sbu_mux->enabled = new_enabled;
	sbu_mux->swapped = new_swapped;

	mutex_unlock(&sbu_mux->lock);

	return 0;
}
/*
 * Mux callback: the SBU lines are only connected for the DP pin assignments
 * that actually use them (C/D/E); SAFE and plain USB leave them open.
 * Unrecognized modes keep the previous state.
 */
static int gpio_sbu_mux_set(struct typec_mux_dev *mux,
			    struct typec_mux_state *state)
{
	struct gpio_sbu_mux *sbu_mux = typec_mux_get_drvdata(mux);

	mutex_lock(&sbu_mux->lock);

	switch (state->mode) {
	case TYPEC_STATE_SAFE:
	case TYPEC_STATE_USB:
		sbu_mux->enabled = false;
		break;
	case TYPEC_DP_STATE_C:
	case TYPEC_DP_STATE_D:
	case TYPEC_DP_STATE_E:
		sbu_mux->enabled = true;
		break;
	default:
		break;
	}

	/* Written unconditionally; harmless when the state is unchanged */
	gpiod_set_value(sbu_mux->enable_gpio, sbu_mux->enabled);

	mutex_unlock(&sbu_mux->lock);

	return 0;
}

/*
 * Acquire both GPIOs (driven low, i.e. disconnected/straight, initially)
 * and register the orientation switch plus the mode mux.  The switch is
 * torn down manually if mux registration fails; both are unregistered in
 * gpio_sbu_mux_remove().
 */
static int gpio_sbu_mux_probe(struct platform_device *pdev)
{
	struct typec_switch_desc sw_desc = { };
	struct typec_mux_desc mux_desc = { };
	struct device *dev = &pdev->dev;
	struct gpio_sbu_mux *sbu_mux;

	sbu_mux = devm_kzalloc(dev, sizeof(*sbu_mux), GFP_KERNEL);
	if (!sbu_mux)
		return -ENOMEM;

	mutex_init(&sbu_mux->lock);

	sbu_mux->enable_gpio = devm_gpiod_get(dev, "enable", GPIOD_OUT_LOW);
	if (IS_ERR(sbu_mux->enable_gpio))
		return dev_err_probe(dev, PTR_ERR(sbu_mux->enable_gpio),
				     "unable to acquire enable gpio\n");

	sbu_mux->select_gpio = devm_gpiod_get(dev, "select", GPIOD_OUT_LOW);
	if (IS_ERR(sbu_mux->select_gpio))
		return dev_err_probe(dev, PTR_ERR(sbu_mux->select_gpio),
				     "unable to acquire select gpio\n");

	sw_desc.drvdata = sbu_mux;
	sw_desc.fwnode = dev_fwnode(dev);
	sw_desc.set = gpio_sbu_switch_set;

	sbu_mux->sw = typec_switch_register(dev, &sw_desc);
	if (IS_ERR(sbu_mux->sw))
		return dev_err_probe(dev, PTR_ERR(sbu_mux->sw),
				     "failed to register typec switch\n");

	mux_desc.drvdata = sbu_mux;
	mux_desc.fwnode = dev_fwnode(dev);
	mux_desc.set = gpio_sbu_mux_set;

	sbu_mux->mux = typec_mux_register(dev, &mux_desc);
	if (IS_ERR(sbu_mux->mux)) {
		typec_switch_unregister(sbu_mux->sw);
		return dev_err_probe(dev, PTR_ERR(sbu_mux->mux),
				     "failed to register typec mux\n");
	}

	platform_set_drvdata(pdev, sbu_mux);

	return 0;
}
/* Disconnect the SBU lines, then drop both typec registrations. */
static void gpio_sbu_mux_remove(struct platform_device *pdev)
{
	struct gpio_sbu_mux *smux = platform_get_drvdata(pdev);

	gpiod_set_value(smux->enable_gpio, 0);

	typec_mux_unregister(smux->mux);
	typec_switch_unregister(smux->sw);
}
/* Generic binding: any "gpio-sbu-mux" compatible node. */
static const struct of_device_id gpio_sbu_mux_match[] = {
	{ .compatible = "gpio-sbu-mux", },
	{}
};
MODULE_DEVICE_TABLE(of, gpio_sbu_mux_match);

static struct platform_driver gpio_sbu_mux_driver = {
	.probe = gpio_sbu_mux_probe,
	.remove_new = gpio_sbu_mux_remove,
	.driver = {
		.name = "gpio_sbu_mux",
		.of_match_table = gpio_sbu_mux_match,
	},
};
module_platform_driver(gpio_sbu_mux_driver);

MODULE_DESCRIPTION("GPIO based SBU mux driver");
MODULE_LICENSE("GPL");
| linux-master | drivers/usb/typec/mux/gpio-sbu-mux.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* Pericom PI3USB30532 Type-C cross switch / mux driver
*
* Copyright (c) 2017-2018 Hans de Goede <[email protected]>
*/
#include <linux/i2c.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/usb/typec_dp.h>
#include <linux/usb/typec_mux.h>
#define PI3USB30532_CONF 0x00
#define PI3USB30532_CONF_OPEN 0x00
#define PI3USB30532_CONF_SWAP 0x01
#define PI3USB30532_CONF_4LANE_DP 0x02
#define PI3USB30532_CONF_USB3 0x04
#define PI3USB30532_CONF_USB3_AND_2LANE_DP 0x06
/* Driver state; "conf" caches the last value written to PI3USB30532_CONF. */
struct pi3usb30532 {
	struct i2c_client *client;
	struct mutex lock; /* protects the cached conf register */
	struct typec_switch_dev *sw;	/* orientation switch handle */
	struct typec_mux_dev *mux;	/* altmode mux handle */
	u8 conf;			/* cached CONF register value */
};
/*
 * Write the CONF register over I2C, skipping the transfer when the cached
 * value already matches.  The cache is only updated on success.
 */
static int pi3usb30532_set_conf(struct pi3usb30532 *pi, u8 new_conf)
{
	int ret;

	if (new_conf == pi->conf)
		return 0;

	ret = i2c_smbus_write_byte_data(pi->client, PI3USB30532_CONF, new_conf);
	if (ret) {
		dev_err(&pi->client->dev, "Error writing conf: %d\n", ret);
		return ret;
	}

	pi->conf = new_conf;
	return 0;
}

/*
 * Orientation callback: NONE opens the switch entirely; NORMAL/REVERSE
 * clear/set the lane-swap bit while keeping the current mode bits.
 */
static int pi3usb30532_sw_set(struct typec_switch_dev *sw,
			      enum typec_orientation orientation)
{
	struct pi3usb30532 *pi = typec_switch_get_drvdata(sw);
	u8 conf;
	int ret;

	mutex_lock(&pi->lock);

	conf = pi->conf;
	switch (orientation) {
	case TYPEC_ORIENTATION_NONE:
		conf = PI3USB30532_CONF_OPEN;
		break;
	case TYPEC_ORIENTATION_NORMAL:
		conf &= ~PI3USB30532_CONF_SWAP;
		break;
	case TYPEC_ORIENTATION_REVERSE:
		conf |= PI3USB30532_CONF_SWAP;
		break;
	}
	ret = pi3usb30532_set_conf(pi, conf);

	mutex_unlock(&pi->lock);

	return ret;
}
static int
pi3usb30532_mux_set(struct typec_mux_dev *mux, struct typec_mux_state *state)
{
struct pi3usb30532 *pi = typec_mux_get_drvdata(mux);
u8 new_conf;
int ret;
mutex_lock(&pi->lock);
new_conf = pi->conf;
switch (state->mode) {
case TYPEC_STATE_SAFE:
new_conf = (new_conf & PI3USB30532_CONF_SWAP) |
PI3USB30532_CONF_OPEN;
break;
case TYPEC_STATE_USB:
new_conf = (new_conf & PI3USB30532_CONF_SWAP) |
PI3USB30532_CONF_USB3;
break;
case TYPEC_DP_STATE_C:
case TYPEC_DP_STATE_E:
new_conf = (new_conf & PI3USB30532_CONF_SWAP) |
PI3USB30532_CONF_4LANE_DP;
break;
case TYPEC_DP_STATE_D:
new_conf = (new_conf & PI3USB30532_CONF_SWAP) |
PI3USB30532_CONF_USB3_AND_2LANE_DP;
break;
default:
break;
}
ret = pi3usb30532_set_conf(pi, new_conf);
mutex_unlock(&pi->lock);
return ret;
}
/*
 * Probe: read the current configuration from the chip to seed the cache,
 * then register the orientation switch and the mode mux with the typec
 * class.  If the mux registration fails, the already-registered switch is
 * unregistered before returning.
 */
static int pi3usb30532_probe(struct i2c_client *client)
{
	struct device *dev = &client->dev;
	struct typec_switch_desc sw_desc = { };
	struct typec_mux_desc mux_desc = { };
	struct pi3usb30532 *pi;
	int ret;

	pi = devm_kzalloc(dev, sizeof(*pi), GFP_KERNEL);
	if (!pi)
		return -ENOMEM;

	pi->client = client;
	mutex_init(&pi->lock);

	/* Seed the register cache with the chip's current configuration. */
	ret = i2c_smbus_read_byte_data(client, PI3USB30532_CONF);
	if (ret < 0) {
		dev_err(dev, "Error reading config register %d\n", ret);
		return ret;
	}
	pi->conf = ret;

	sw_desc.drvdata = pi;
	sw_desc.fwnode = dev->fwnode;
	sw_desc.set = pi3usb30532_sw_set;

	pi->sw = typec_switch_register(dev, &sw_desc);
	if (IS_ERR(pi->sw)) {
		dev_err(dev, "Error registering typec switch: %ld\n",
			PTR_ERR(pi->sw));
		return PTR_ERR(pi->sw);
	}

	mux_desc.drvdata = pi;
	mux_desc.fwnode = dev->fwnode;
	mux_desc.set = pi3usb30532_mux_set;

	pi->mux = typec_mux_register(dev, &mux_desc);
	if (IS_ERR(pi->mux)) {
		/* Undo the switch registration on the error path. */
		typec_switch_unregister(pi->sw);
		dev_err(dev, "Error registering typec mux: %ld\n",
			PTR_ERR(pi->mux));
		return PTR_ERR(pi->mux);
	}

	i2c_set_clientdata(client, pi);
	return 0;
}
/* Remove: unregister the typec mux and switch in reverse probe order. */
static void pi3usb30532_remove(struct i2c_client *client)
{
	struct pi3usb30532 *pi = i2c_get_clientdata(client);

	typec_mux_unregister(pi->mux);
	typec_switch_unregister(pi->sw);
}
/* I2C device ID table; the chip has no ACPI/OF specific binding here. */
static const struct i2c_device_id pi3usb30532_table[] = {
	{ "pi3usb30532" },
	{ }
};
MODULE_DEVICE_TABLE(i2c, pi3usb30532_table);
static struct i2c_driver pi3usb30532_driver = {
	.driver = {
		.name = "pi3usb30532",
	},
	.probe = pi3usb30532_probe,
	.remove = pi3usb30532_remove,
	.id_table = pi3usb30532_table,
};
module_i2c_driver(pi3usb30532_driver);
MODULE_AUTHOR("Hans de Goede <[email protected]>");
MODULE_DESCRIPTION("Pericom PI3USB30532 Type-C mux driver");
MODULE_LICENSE("GPL");
/* linux-master: drivers/usb/typec/mux/pi3usb30532.c */
// SPDX-License-Identifier: GPL-2.0
/*
* Driver for Intel PMC USB mux control
*
* Copyright (C) 2020 Intel Corporation
* Author: Heikki Krogerus <[email protected]>
*/
#include <linux/acpi.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/property.h>
#include <linux/usb/pd.h>
#include <linux/usb/role.h>
#include <linux/usb/typec_mux.h>
#include <linux/usb/typec_dp.h>
#include <linux/usb/typec_tbt.h>
#include <linux/debugfs.h>
#include <linux/usb.h>
#include <asm/intel_scu_ipc.h>
#define PMC_USBC_CMD 0xa7
/* Response status bits */
#define PMC_USB_RESP_STATUS_FAILURE BIT(0)
#define PMC_USB_RESP_STATUS_FATAL BIT(1)
/* "Usage" OOB Message field values */
enum {
PMC_USB_CONNECT,
PMC_USB_DISCONNECT,
PMC_USB_SAFE_MODE,
PMC_USB_ALT_MODE,
PMC_USB_DP_HPD,
};
#define PMC_USB_MSG_USB2_PORT_SHIFT 0
#define PMC_USB_MSG_USB3_PORT_SHIFT 4
#define PMC_USB_MSG_UFP_SHIFT 4
#define PMC_USB_MSG_ORI_HSL_SHIFT 5
#define PMC_USB_MSG_ORI_AUX_SHIFT 6
/* Alt Mode Request */
struct altmode_req {
u8 usage;
u8 mode_type;
u8 mode_id;
u8 reserved;
u32 mode_data;
} __packed;
#define PMC_USB_MODE_TYPE_SHIFT 4
enum {
PMC_USB_MODE_TYPE_USB,
PMC_USB_MODE_TYPE_DP,
PMC_USB_MODE_TYPE_TBT,
};
/* Common Mode Data bits */
#define PMC_USB_ALTMODE_RETIMER_CABLE BIT(2)
#define PMC_USB_ALTMODE_ORI_SHIFT 1
#define PMC_USB_ALTMODE_UFP_SHIFT 3
/* DP specific Mode Data bits */
#define PMC_USB_ALTMODE_DP_MODE_SHIFT 8
/* TBT specific Mode Data bits */
#define PMC_USB_ALTMODE_TBT_TYPE BIT(17)
#define PMC_USB_ALTMODE_CABLE_TYPE BIT(18)
#define PMC_USB_ALTMODE_ACTIVE_LINK BIT(20)
#define PMC_USB_ALTMODE_ACTIVE_CABLE BIT(22)
#define PMC_USB_ALTMODE_FORCE_LSR BIT(23)
#define PMC_USB_ALTMODE_CABLE_SPD(_s_) (((_s_) & GENMASK(2, 0)) << 25)
#define PMC_USB_ALTMODE_CABLE_USB31 1
#define PMC_USB_ALTMODE_CABLE_10GPS 2
#define PMC_USB_ALTMODE_CABLE_20GPS 3
#define PMC_USB_ALTMODE_TBT_GEN(_g_) (((_g_) & GENMASK(1, 0)) << 28)
/* Display HPD Request bits */
#define PMC_USB_DP_HPD_LVL BIT(4)
#define PMC_USB_DP_HPD_IRQ BIT(5)
/*
* Input Output Manager (IOM) PORT STATUS
*/
#define IOM_PORT_STATUS_ACTIVITY_TYPE_MASK GENMASK(9, 6)
#define IOM_PORT_STATUS_ACTIVITY_TYPE_SHIFT 6
#define IOM_PORT_STATUS_ACTIVITY_TYPE_USB 0x03
/* activity type: Safe Mode */
#define IOM_PORT_STATUS_ACTIVITY_TYPE_SAFE_MODE 0x04
/* activity type: Display Port */
#define IOM_PORT_STATUS_ACTIVITY_TYPE_DP 0x05
/* activity type: Display Port Multi Function Device */
#define IOM_PORT_STATUS_ACTIVITY_TYPE_DP_MFD 0x06
/* activity type: Thunderbolt */
#define IOM_PORT_STATUS_ACTIVITY_TYPE_TBT 0x07
#define IOM_PORT_STATUS_ACTIVITY_TYPE_ALT_MODE_USB 0x0c
#define IOM_PORT_STATUS_ACTIVITY_TYPE_ALT_MODE_TBT_USB 0x0d
/* Upstream Facing Port Information */
#define IOM_PORT_STATUS_UFP BIT(10)
/* Display Port Hot Plug Detect status */
#define IOM_PORT_STATUS_DHPD_HPD_STATUS_MASK GENMASK(13, 12)
#define IOM_PORT_STATUS_DHPD_HPD_STATUS_SHIFT 12
#define IOM_PORT_STATUS_DHPD_HPD_STATUS_ASSERT 0x01
#define IOM_PORT_STATUS_DHPD_HPD_SOURCE_TBT BIT(14)
#define IOM_PORT_STATUS_CONNECTED BIT(31)
#define IOM_PORT_ACTIVITY_IS(_status_, _type_) \
((((_status_) & IOM_PORT_STATUS_ACTIVITY_TYPE_MASK) >> \
IOM_PORT_STATUS_ACTIVITY_TYPE_SHIFT) == \
(IOM_PORT_STATUS_ACTIVITY_TYPE_##_type_))
#define IOM_PORT_HPD_ASSERTED(_status_) \
((((_status_) & IOM_PORT_STATUS_DHPD_HPD_STATUS_MASK) >> \
IOM_PORT_STATUS_DHPD_HPD_STATUS_SHIFT) & \
IOM_PORT_STATUS_DHPD_HPD_STATUS_ASSERT)
/* IOM port status register */
#define IOM_PORT_STATUS_REGS(_offset_, _size_) ((_offset_) | (_size_))
#define IOM_PORT_STATUS_REGS_SZ_MASK BIT(0)
#define IOM_PORT_STATUS_REGS_SZ_4 0
#define IOM_PORT_STATUS_REGS_SZ_8 1
#define IOM_PORT_STATUS_REGS_OFFSET(_d_) \
((_d_) & ~IOM_PORT_STATUS_REGS_SZ_MASK)
#define IOM_PORT_STATUS_REGS_SIZE(_d_) \
(4 << ((_d_) & IOM_PORT_STATUS_REGS_SZ_MASK))
struct pmc_usb;
struct pmc_usb_port {
int num;
u32 iom_status;
struct pmc_usb *pmc;
struct typec_mux_dev *typec_mux;
struct typec_switch_dev *typec_sw;
struct usb_role_switch *usb_sw;
enum typec_orientation orientation;
enum usb_role role;
u8 usb2_port;
u8 usb3_port;
enum typec_orientation sbu_orientation;
enum typec_orientation hsl_orientation;
};
struct pmc_usb {
u8 num_ports;
struct device *dev;
struct intel_scu_ipc_dev *ipc;
struct pmc_usb_port *port;
struct acpi_device *iom_adev;
void __iomem *iom_base;
u32 iom_port_status_offset;
u8 iom_port_status_size;
struct dentry *dentry;
};
static struct dentry *pmc_mux_debugfs_root;
/*
 * Refresh the cached IOM status word for one port by reading its status
 * register.  The register array is indexed by the zero-based USB3 port
 * number, with the per-platform offset and stride discovered in
 * pmc_usb_probe_iom().
 */
static void update_port_status(struct pmc_usb_port *port)
{
	u8 port_num;

	/* SoC expects the USB Type-C port numbers to start with 0 */
	port_num = port->usb3_port - 1;

	port->iom_status = readl(port->pmc->iom_base +
				 port->pmc->iom_port_status_offset +
				 port_num * port->pmc->iom_port_status_size);
}
static int sbu_orientation(struct pmc_usb_port *port)
{
if (port->sbu_orientation)
return port->sbu_orientation - 1;
return port->orientation - 1;
}
static int hsl_orientation(struct pmc_usb_port *port)
{
if (port->hsl_orientation)
return port->hsl_orientation - 1;
return port->orientation - 1;
}
/*
 * Send one USBC command to the PMC over SCU IPC and decode the status
 * byte of the response.  Returns 0 on success, -EBUSY if the PMC reports
 * a retryable failure, -EIO for a fatal one, or the IPC layer's error.
 */
static int pmc_usb_send_command(struct intel_scu_ipc_dev *ipc, u8 *msg, u32 len)
{
	u8 response[4];
	u8 status_res;
	int ret;

	/*
	 * Error bit will always be 0 with the USBC command.
	 * Status can be checked from the response message if the
	 * function intel_scu_ipc_dev_command succeeds.
	 */
	ret = intel_scu_ipc_dev_command(ipc, PMC_USBC_CMD, 0, msg,
					len, response, sizeof(response));
	if (ret)
		return ret;

	/*
	 * The status byte position depends on the message "usage": connect/
	 * disconnect responses carry it in byte 2, safe-mode and later
	 * usages in byte 1 (per the PMC response layout this code targets).
	 */
	status_res = (msg[0] & 0xf) < PMC_USB_SAFE_MODE ?
		     response[2] : response[1];

	if (status_res & PMC_USB_RESP_STATUS_FAILURE) {
		if (status_res & PMC_USB_RESP_STATUS_FATAL)
			return -EIO;

		return -EBUSY;
	}

	return 0;
}
/*
 * Send a command to the PMC, retrying a couple of times while the PMC
 * reports itself busy (-EBUSY).  Any other result, success or failure,
 * ends the loop immediately.
 */
static int pmc_usb_command(struct pmc_usb_port *port, u8 *msg, u32 len)
{
	int attempt;
	int ret = 0;

	/* Up to three attempts in total, identical to the original policy. */
	for (attempt = 0; attempt < 3; attempt++) {
		ret = pmc_usb_send_command(port->pmc->ipc, msg, len);
		if (ret != -EBUSY)
			break;
	}

	return ret;
}
/*
 * Forward DisplayPort HPD level/IRQ state to the PMC.  When level and IRQ
 * arrive together while HPD is not yet asserted, the level is sent in a
 * separate message first so the PMC sees the assert before the IRQ.
 */
static int
pmc_usb_mux_dp_hpd(struct pmc_usb_port *port, struct typec_displayport_data *dp)
{
	u8 msg[2] = { };
	int ret;

	msg[0] = PMC_USB_DP_HPD;
	msg[0] |= port->usb3_port << PMC_USB_MSG_USB3_PORT_SHIFT;

	/* Configure HPD first if HPD,IRQ comes together */
	if (!IOM_PORT_HPD_ASSERTED(port->iom_status) &&
	    dp->status & DP_STATUS_IRQ_HPD &&
	    dp->status & DP_STATUS_HPD_STATE) {
		msg[1] = PMC_USB_DP_HPD_LVL;
		ret = pmc_usb_command(port, msg, sizeof(msg));
		if (ret)
			return ret;
	}

	if (dp->status & DP_STATUS_IRQ_HPD)
		msg[1] = PMC_USB_DP_HPD_IRQ;

	if (dp->status & DP_STATUS_HPD_STATE)
		msg[1] |= PMC_USB_DP_HPD_LVL;

	return pmc_usb_command(port, msg, sizeof(msg));
}
/*
 * Enter (or update) DisplayPort alternate mode.  If the IOM already shows
 * the port in a DP state, only the HPD status is forwarded; otherwise a
 * full alt-mode request is sent, followed by HPD if the partner reported
 * it.
 */
static int
pmc_usb_mux_dp(struct pmc_usb_port *port, struct typec_mux_state *state)
{
	struct typec_displayport_data *data = state->data;
	struct altmode_req req = { };
	int ret;

	if (IOM_PORT_ACTIVITY_IS(port->iom_status, DP) ||
	    IOM_PORT_ACTIVITY_IS(port->iom_status, DP_MFD)) {
		/* Level already asserted with no pending IRQ: nothing to do. */
		if (IOM_PORT_HPD_ASSERTED(port->iom_status) &&
		    (!(data->status & DP_STATUS_IRQ_HPD) &&
		     data->status & DP_STATUS_HPD_STATE))
			return 0;

		return pmc_usb_mux_dp_hpd(port, state->data);
	}

	req.usage = PMC_USB_ALT_MODE;
	req.usage |= port->usb3_port << PMC_USB_MSG_USB3_PORT_SHIFT;
	req.mode_type = PMC_USB_MODE_TYPE_DP << PMC_USB_MODE_TYPE_SHIFT;

	/* Orientation, data role and DP pin assignment for the request. */
	req.mode_data = (port->orientation - 1) << PMC_USB_ALTMODE_ORI_SHIFT;
	req.mode_data |= (port->role - 1) << PMC_USB_ALTMODE_UFP_SHIFT;

	req.mode_data |= (state->mode - TYPEC_STATE_MODAL) <<
			 PMC_USB_ALTMODE_DP_MODE_SHIFT;

	ret = pmc_usb_command(port, (void *)&req, sizeof(req));
	if (ret)
		return ret;

	if (data->status & (DP_STATUS_IRQ_HPD | DP_STATUS_HPD_STATE))
		return pmc_usb_mux_dp_hpd(port, state->data);

	return 0;
}
/*
 * Enter Thunderbolt alternate mode.  Builds the alt-mode request from the
 * partner's TBT discovery data (cable speed, rounded-data support, cable
 * type, link training) and the port orientation/role, then sends it to
 * the PMC.  A no-op if the IOM already shows the port in a TBT state.
 */
static int
pmc_usb_mux_tbt(struct pmc_usb_port *port, struct typec_mux_state *state)
{
	struct typec_thunderbolt_data *data = state->data;
	u8 cable_rounded = TBT_CABLE_ROUNDED_SUPPORT(data->cable_mode);
	u8 cable_speed = TBT_CABLE_SPEED(data->cable_mode);
	struct altmode_req req = { };

	if (IOM_PORT_ACTIVITY_IS(port->iom_status, TBT) ||
	    IOM_PORT_ACTIVITY_IS(port->iom_status, ALT_MODE_TBT_USB))
		return 0;

	req.usage = PMC_USB_ALT_MODE;
	req.usage |= port->usb3_port << PMC_USB_MSG_USB3_PORT_SHIFT;
	req.mode_type = PMC_USB_MODE_TYPE_TBT << PMC_USB_MODE_TYPE_SHIFT;

	req.mode_data = (port->orientation - 1) << PMC_USB_ALTMODE_ORI_SHIFT;
	req.mode_data |= (port->role - 1) << PMC_USB_ALTMODE_UFP_SHIFT;

	if (TBT_ADAPTER(data->device_mode) == TBT_ADAPTER_TBT3)
		req.mode_data |= PMC_USB_ALTMODE_TBT_TYPE;

	if (data->cable_mode & TBT_CABLE_OPTICAL)
		req.mode_data |= PMC_USB_ALTMODE_CABLE_TYPE;

	if (data->cable_mode & TBT_CABLE_LINK_TRAINING)
		req.mode_data |= PMC_USB_ALTMODE_ACTIVE_LINK;

	/*
	 * TGL (INTC1072) and ADL (INTC1079) use a single retimer-cable bit
	 * for both active and retimer cables; other platforms report them
	 * with separate bits.
	 */
	if (acpi_dev_hid_uid_match(port->pmc->iom_adev, "INTC1072", NULL) ||
	    acpi_dev_hid_uid_match(port->pmc->iom_adev, "INTC1079", NULL)) {
		if ((data->enter_vdo & TBT_ENTER_MODE_ACTIVE_CABLE) ||
		    (data->cable_mode & TBT_CABLE_RETIMER))
			req.mode_data |= PMC_USB_ALTMODE_RETIMER_CABLE;
	} else {
		if (data->enter_vdo & TBT_ENTER_MODE_ACTIVE_CABLE)
			req.mode_data |= PMC_USB_ALTMODE_ACTIVE_CABLE;

		if (data->cable_mode & TBT_CABLE_RETIMER)
			req.mode_data |= PMC_USB_ALTMODE_RETIMER_CABLE;
	}

	req.mode_data |= PMC_USB_ALTMODE_CABLE_SPD(cable_speed);

	req.mode_data |= PMC_USB_ALTMODE_TBT_GEN(cable_rounded);

	return pmc_usb_command(port, (void *)&req, sizeof(req));
}
/*
 * Enter USB4 mode.  Builds the alt-mode request from the partner's Enter
 * USB Data Object (cable type and speed, link training) and the port's
 * orientation/role, then sends it to the PMC.  A no-op if the IOM already
 * shows the port in a TBT/USB4 state.
 */
static int
pmc_usb_mux_usb4(struct pmc_usb_port *port, struct typec_mux_state *state)
{
	struct enter_usb_data *data = state->data;
	struct altmode_req req = { };
	u8 cable_speed;

	if (IOM_PORT_ACTIVITY_IS(port->iom_status, TBT) ||
	    IOM_PORT_ACTIVITY_IS(port->iom_status, ALT_MODE_TBT_USB))
		return 0;

	req.usage = PMC_USB_ALT_MODE;
	req.usage |= port->usb3_port << PMC_USB_MSG_USB3_PORT_SHIFT;
	req.mode_type = PMC_USB_MODE_TYPE_TBT << PMC_USB_MODE_TYPE_SHIFT;

	/* USB4 Mode */
	req.mode_data = PMC_USB_ALTMODE_FORCE_LSR;

	if (data->active_link_training)
		req.mode_data |= PMC_USB_ALTMODE_ACTIVE_LINK;

	req.mode_data |= (port->orientation - 1) << PMC_USB_ALTMODE_ORI_SHIFT;
	req.mode_data |= (port->role - 1) << PMC_USB_ALTMODE_UFP_SHIFT;

	switch ((data->eudo & EUDO_CABLE_TYPE_MASK) >> EUDO_CABLE_TYPE_SHIFT) {
	case EUDO_CABLE_TYPE_PASSIVE:
		break;
	case EUDO_CABLE_TYPE_OPTICAL:
		req.mode_data |= PMC_USB_ALTMODE_CABLE_TYPE;
		fallthrough;
	case EUDO_CABLE_TYPE_RE_TIMER:
		/*
		 * On platforms other than TGL (INTC1072) and ADL (INTC1079)
		 * the retimer-cable bit is set here; on TGL/ADL the default
		 * case below (reached via fallthrough) sets the same bit.
		 * Note: this condition previously used "||", which was a
		 * tautology since a device matches at most one HID - the
		 * resulting bit pattern was nevertheless identical, so this
		 * is a clarity fix, not a behavior change.
		 */
		if (!acpi_dev_hid_uid_match(port->pmc->iom_adev, "INTC1072", NULL) &&
		    !acpi_dev_hid_uid_match(port->pmc->iom_adev, "INTC1079", NULL))
			req.mode_data |= PMC_USB_ALTMODE_RETIMER_CABLE;
		fallthrough;
	default:
		if (acpi_dev_hid_uid_match(port->pmc->iom_adev, "INTC1072", NULL) ||
		    acpi_dev_hid_uid_match(port->pmc->iom_adev, "INTC1079", NULL))
			req.mode_data |= PMC_USB_ALTMODE_RETIMER_CABLE;
		else
			req.mode_data |= PMC_USB_ALTMODE_ACTIVE_CABLE;

		/* Configure data rate to rounded in the case of Active TBT3
		 * and USB4 cables.
		 */
		req.mode_data |= PMC_USB_ALTMODE_TBT_GEN(1);
		break;
	}

	cable_speed = (data->eudo & EUDO_CABLE_SPEED_MASK) >> EUDO_CABLE_SPEED_SHIFT;
	req.mode_data |= PMC_USB_ALTMODE_CABLE_SPD(cable_speed);

	return pmc_usb_command(port, (void *)&req, sizeof(req));
}
/*
 * Request safe state from the PMC.  Skipped when the IOM already reports
 * safe mode, or when the port is in the same alternate mode (DP or TBT)
 * that the caller is transitioning within - entering safe state in that
 * case would needlessly drop the link.
 */
static int pmc_usb_mux_safe_state(struct pmc_usb_port *port,
				  struct typec_mux_state *state)
{
	u8 msg;

	if (IOM_PORT_ACTIVITY_IS(port->iom_status, SAFE_MODE))
		return 0;

	if ((IOM_PORT_ACTIVITY_IS(port->iom_status, DP) ||
	     IOM_PORT_ACTIVITY_IS(port->iom_status, DP_MFD)) &&
	     state->alt && state->alt->svid == USB_TYPEC_DP_SID)
		return 0;

	if ((IOM_PORT_ACTIVITY_IS(port->iom_status, TBT) ||
	     IOM_PORT_ACTIVITY_IS(port->iom_status, ALT_MODE_TBT_USB)) &&
	     state->alt && state->alt->svid == USB_TYPEC_TBT_SID)
		return 0;

	msg = PMC_USB_SAFE_MODE;
	msg |= port->usb3_port << PMC_USB_MSG_USB3_PORT_SHIFT;

	return pmc_usb_command(port, &msg, sizeof(msg));
}
/*
 * Send a disconnect request for the port.  A no-op if the IOM does not
 * report the port as connected.  Any asserted DP HPD level is deasserted
 * first so the display stack sees the unplug.
 */
static int pmc_usb_disconnect(struct pmc_usb_port *port)
{
	struct typec_displayport_data data = { };
	u8 msg[2];

	if (!(port->iom_status & IOM_PORT_STATUS_CONNECTED))
		return 0;

	/* Clear DisplayPort HPD if it's still asserted. */
	if (IOM_PORT_HPD_ASSERTED(port->iom_status))
		pmc_usb_mux_dp_hpd(port, &data);

	msg[0] = PMC_USB_DISCONNECT;
	msg[0] |= port->usb3_port << PMC_USB_MSG_USB3_PORT_SHIFT;

	msg[1] = port->usb2_port << PMC_USB_MSG_USB2_PORT_SHIFT;

	return pmc_usb_command(port, msg, sizeof(msg));
}
/*
 * Send a connect request for the port with the given data role.  If the
 * port is already connected with a different role, a disconnect is issued
 * first (role swap).  Requires a known cable orientation.
 */
static int pmc_usb_connect(struct pmc_usb_port *port, enum usb_role role)
{
	u8 ufp = role == USB_ROLE_DEVICE ? 1 : 0;
	u8 msg[2];
	int ret;

	if (port->orientation == TYPEC_ORIENTATION_NONE)
		return -EINVAL;

	if (port->iom_status & IOM_PORT_STATUS_CONNECTED) {
		/* Already connected with this (or no) role: nothing to do. */
		if (port->role == role || port->role == USB_ROLE_NONE)
			return 0;

		/* Role swap */
		ret = pmc_usb_disconnect(port);
		if (ret)
			return ret;
	}

	msg[0] = PMC_USB_CONNECT;
	msg[0] |= port->usb3_port << PMC_USB_MSG_USB3_PORT_SHIFT;

	msg[1] = port->usb2_port << PMC_USB_MSG_USB2_PORT_SHIFT;
	msg[1] |= ufp << PMC_USB_MSG_UFP_SHIFT;
	msg[1] |= hsl_orientation(port) << PMC_USB_MSG_ORI_HSL_SHIFT;
	msg[1] |= sbu_orientation(port) << PMC_USB_MSG_ORI_AUX_SHIFT;

	return pmc_usb_command(port, msg, sizeof(msg));
}
/*
 * typec mux callback: dispatch the requested state (safe, USB, DP alt
 * mode, TBT alt mode, USB4) to the matching PMC request helper.  States
 * requested before orientation/role are known are silently accepted, and
 * unsupported combinations yield -EOPNOTSUPP.
 */
static int
pmc_usb_mux_set(struct typec_mux_dev *mux, struct typec_mux_state *state)
{
	struct pmc_usb_port *port = typec_mux_get_drvdata(mux);

	update_port_status(port);

	/* Mux changes are deferred until orientation and role are set. */
	if (port->orientation == TYPEC_ORIENTATION_NONE || port->role == USB_ROLE_NONE)
		return 0;

	if (state->mode == TYPEC_STATE_SAFE)
		return pmc_usb_mux_safe_state(port, state);
	if (state->mode == TYPEC_STATE_USB)
		return pmc_usb_connect(port, port->role);

	if (state->alt) {
		switch (state->alt->svid) {
		case USB_TYPEC_TBT_SID:
			return pmc_usb_mux_tbt(port, state);
		case USB_TYPEC_DP_SID:
			return pmc_usb_mux_dp(port, state);
		}
	} else {
		switch (state->mode) {
		case TYPEC_MODE_USB2:
			/* REVISIT: Try with usb3_port set to 0? */
			break;
		case TYPEC_MODE_USB3:
			return pmc_usb_connect(port, port->role);
		case TYPEC_MODE_USB4:
			return pmc_usb_mux_usb4(port, state);
		}
	}

	return -EOPNOTSUPP;
}
/*
 * typec orientation-switch callback: cache the orientation for use by the
 * next connect/mux request.  No PMC command is sent here - the value is
 * applied when pmc_usb_connect()/pmc_usb_mux_set() runs.
 */
static int pmc_usb_set_orientation(struct typec_switch_dev *sw,
				   enum typec_orientation orientation)
{
	struct pmc_usb_port *port = typec_switch_get_drvdata(sw);

	update_port_status(port);

	port->orientation = orientation;

	return 0;
}
/*
 * USB role-switch callback: issue a disconnect for USB_ROLE_NONE, a
 * connect for any real role, and cache the new role either way.
 */
static int pmc_usb_set_role(struct usb_role_switch *sw, enum usb_role role)
{
	struct pmc_usb_port *port = usb_role_switch_get_drvdata(sw);
	int ret;

	update_port_status(port);

	ret = (role == USB_ROLE_NONE) ? pmc_usb_disconnect(port)
				      : pmc_usb_connect(port, role);

	/* Cache the role even when the PMC command failed. */
	port->role = role;

	return ret;
}
/*
 * Register one connector: read its USB2/USB3 port numbers and optional
 * fixed SBU/HSL orientations from the firmware node, then register the
 * typec switch, typec mux and USB role switch for it.  On failure, any
 * already-registered pieces are unregistered in reverse order.
 */
static int pmc_usb_register_port(struct pmc_usb *pmc, int index,
				 struct fwnode_handle *fwnode)
{
	struct pmc_usb_port *port = &pmc->port[index];
	struct usb_role_switch_desc desc = { };
	struct typec_switch_desc sw_desc = { };
	struct typec_mux_desc mux_desc = { };
	const char *str;
	int ret;

	ret = fwnode_property_read_u8(fwnode, "usb2-port-number", &port->usb2_port);
	if (ret)
		return ret;

	ret = fwnode_property_read_u8(fwnode, "usb3-port-number", &port->usb3_port);
	if (ret)
		return ret;

	/* Optional fixed orientations; absence means "follow the cable". */
	ret = fwnode_property_read_string(fwnode, "sbu-orientation", &str);
	if (!ret)
		port->sbu_orientation = typec_find_orientation(str);

	ret = fwnode_property_read_string(fwnode, "hsl-orientation", &str);
	if (!ret)
		port->hsl_orientation = typec_find_orientation(str);

	port->num = index;
	port->pmc = pmc;

	sw_desc.fwnode = fwnode;
	sw_desc.drvdata = port;
	sw_desc.name = fwnode_get_name(fwnode);
	sw_desc.set = pmc_usb_set_orientation;

	port->typec_sw = typec_switch_register(pmc->dev, &sw_desc);
	if (IS_ERR(port->typec_sw))
		return PTR_ERR(port->typec_sw);

	mux_desc.fwnode = fwnode;
	mux_desc.drvdata = port;
	mux_desc.name = fwnode_get_name(fwnode);
	mux_desc.set = pmc_usb_mux_set;

	port->typec_mux = typec_mux_register(pmc->dev, &mux_desc);
	if (IS_ERR(port->typec_mux)) {
		ret = PTR_ERR(port->typec_mux);
		goto err_unregister_switch;
	}

	desc.fwnode = fwnode;
	desc.driver_data = port;
	desc.name = fwnode_get_name(fwnode);
	desc.set = pmc_usb_set_role;

	port->usb_sw = usb_role_switch_register(pmc->dev, &desc);
	if (IS_ERR(port->usb_sw)) {
		ret = PTR_ERR(port->usb_sw);
		goto err_unregister_mux;
	}

	return 0;

err_unregister_mux:
	typec_mux_unregister(port->typec_mux);

err_unregister_switch:
	typec_switch_unregister(port->typec_sw);

	return ret;
}
/* IOM ACPI IDs and IOM_PORT_STATUS_OFFSET */
static const struct acpi_device_id iom_acpi_ids[] = {
/* TigerLake */
{ "INTC1072", IOM_PORT_STATUS_REGS(0x560, IOM_PORT_STATUS_REGS_SZ_4) },
/* AlderLake */
{ "INTC1079", IOM_PORT_STATUS_REGS(0x160, IOM_PORT_STATUS_REGS_SZ_4) },
/* Meteor Lake */
{ "INTC107A", IOM_PORT_STATUS_REGS(0x160, IOM_PORT_STATUS_REGS_SZ_4) },
/* Lunar Lake */
{ "INTC10EA", IOM_PORT_STATUS_REGS(0x150, IOM_PORT_STATUS_REGS_SZ_8) },
{}
};
/*
 * Locate the platform's IOM ACPI device and map its port-status register
 * bank.  The matched table entry's driver_data encodes the register
 * offset and stride for that platform.  On success a reference to the
 * ACPI device is held in pmc->iom_adev (dropped on remove / probe error).
 *
 * Fix: the table-iterator pointer was declared with function-local
 * "static" storage, needlessly persisting state between calls; it is
 * assigned before use on every call, so plain automatic storage is
 * equivalent and correct.
 */
static int pmc_usb_probe_iom(struct pmc_usb *pmc)
{
	struct list_head resource_list;
	struct resource_entry *rentry;
	const struct acpi_device_id *dev_id;
	struct acpi_device *adev = NULL;
	int ret;

	for (dev_id = &iom_acpi_ids[0]; dev_id->id[0]; dev_id++) {
		adev = acpi_dev_get_first_match_dev(dev_id->id, NULL, -1);
		if (adev)
			break;
	}
	if (!adev)
		return -ENODEV;

	pmc->iom_port_status_offset = IOM_PORT_STATUS_REGS_OFFSET(dev_id->driver_data);
	pmc->iom_port_status_size = IOM_PORT_STATUS_REGS_SIZE(dev_id->driver_data);

	INIT_LIST_HEAD(&resource_list);
	ret = acpi_dev_get_memory_resources(adev, &resource_list);
	if (ret < 0) {
		acpi_dev_put(adev);
		return ret;
	}

	rentry = list_first_entry_or_null(&resource_list, struct resource_entry, node);
	if (rentry)
		pmc->iom_base = devm_ioremap_resource(pmc->dev, rentry->res);

	acpi_dev_free_resource_list(&resource_list);

	/* No memory resource at all: iom_base was never assigned. */
	if (!pmc->iom_base) {
		acpi_dev_put(adev);
		return -ENOMEM;
	}

	if (IS_ERR(pmc->iom_base)) {
		acpi_dev_put(adev);
		return PTR_ERR(pmc->iom_base);
	}

	pmc->iom_adev = adev;

	return 0;
}
/* debugfs read handler: dump the port's current raw IOM status word. */
static int port_iom_status_show(struct seq_file *s, void *unused)
{
	struct pmc_usb_port *port = s->private;

	update_port_status(port);
	seq_printf(s, "0x%08x\n", port->iom_status);

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(port_iom_status);
/*
 * Create the per-port debugfs directory ("portN", N zero-based) with a
 * read-only "iom_status" file.  debugfs failures are intentionally not
 * checked - the debugfs API degrades gracefully.
 *
 * Fix: the name buffer was 6 bytes, which only fits a single-digit port
 * number; a firmware-provided two-digit usb3_port would have been
 * truncated by snprintf into a colliding "portN" name.
 */
static void pmc_mux_port_debugfs_init(struct pmc_usb_port *port)
{
	struct dentry *debugfs_dir;
	char name[8];

	snprintf(name, sizeof(name), "port%d", port->usb3_port - 1);
	debugfs_dir = debugfs_create_dir(name, port->pmc->dentry);
	debugfs_create_file("iom_status", 0400, debugfs_dir, port,
			    &port_iom_status_fops);
}
/*
 * Probe: count connector child nodes (IOM supports at most 4), acquire
 * the SCU IPC handle, map the IOM status registers, then register each
 * connector's typec switch/mux and role switch plus its debugfs entries.
 * On a per-port failure all ports registered so far are torn down.
 */
static int pmc_usb_probe(struct platform_device *pdev)
{
	struct fwnode_handle *fwnode = NULL;
	struct pmc_usb *pmc;
	int i = 0;
	int ret;

	pmc = devm_kzalloc(&pdev->dev, sizeof(*pmc), GFP_KERNEL);
	if (!pmc)
		return -ENOMEM;

	device_for_each_child_node(&pdev->dev, fwnode)
		pmc->num_ports++;

	/* The IOM microcontroller has a limitation of max 4 ports. */
	if (pmc->num_ports > 4) {
		dev_err(&pdev->dev, "driver limited to 4 ports\n");
		return -ERANGE;
	}

	pmc->port = devm_kcalloc(&pdev->dev, pmc->num_ports,
				 sizeof(struct pmc_usb_port), GFP_KERNEL);
	if (!pmc->port)
		return -ENOMEM;

	pmc->ipc = devm_intel_scu_ipc_dev_get(&pdev->dev);
	if (!pmc->ipc)
		return -ENODEV;

	pmc->dev = &pdev->dev;

	ret = pmc_usb_probe_iom(pmc);
	if (ret)
		return ret;

	pmc->dentry = debugfs_create_dir(dev_name(pmc->dev), pmc_mux_debugfs_root);

	/*
	 * For every physical USB connector (USB2 and USB3 combo) there is a
	 * child ACPI device node under the PMC mux ACPI device object.
	 */
	for (i = 0; i < pmc->num_ports; i++) {
		fwnode = device_get_next_child_node(pmc->dev, fwnode);
		if (!fwnode)
			break;

		ret = pmc_usb_register_port(pmc, i, fwnode);
		if (ret) {
			fwnode_handle_put(fwnode);
			goto err_remove_ports;
		}

		pmc_mux_port_debugfs_init(&pmc->port[i]);
	}

	platform_set_drvdata(pdev, pmc);

	return 0;

err_remove_ports:
	/* Unregistered slots hold NULL handles, which these calls tolerate. */
	for (i = 0; i < pmc->num_ports; i++) {
		typec_switch_unregister(pmc->port[i].typec_sw);
		typec_mux_unregister(pmc->port[i].typec_mux);
		usb_role_switch_unregister(pmc->port[i].usb_sw);
	}

	acpi_dev_put(pmc->iom_adev);

	debugfs_remove(pmc->dentry);

	return ret;
}
/*
 * Remove: unregister every port's typec/role interfaces, drop the IOM
 * ACPI device reference taken at probe, and delete the debugfs tree.
 */
static void pmc_usb_remove(struct platform_device *pdev)
{
	struct pmc_usb *pmc = platform_get_drvdata(pdev);
	int i;

	for (i = 0; i < pmc->num_ports; i++) {
		typec_switch_unregister(pmc->port[i].typec_sw);
		typec_mux_unregister(pmc->port[i].typec_mux);
		usb_role_switch_unregister(pmc->port[i].usb_sw);
	}

	acpi_dev_put(pmc->iom_adev);

	debugfs_remove(pmc->dentry);
}
/* ACPI ID of the PMC mux device itself (IOM IDs are matched separately). */
static const struct acpi_device_id pmc_usb_acpi_ids[] = {
	{ "INTC105C", },
	{ }
};
MODULE_DEVICE_TABLE(acpi, pmc_usb_acpi_ids);
static struct platform_driver pmc_usb_driver = {
	.driver = {
		.name = "intel_pmc_usb",
		.acpi_match_table = ACPI_PTR(pmc_usb_acpi_ids),
	},
	.probe = pmc_usb_probe,
	.remove_new = pmc_usb_remove,
};
/* Custom init/exit so the shared debugfs root outlives driver binds. */
static int __init pmc_usb_init(void)
{
	pmc_mux_debugfs_root = debugfs_create_dir("intel_pmc_mux", usb_debug_root);

	return platform_driver_register(&pmc_usb_driver);
}
module_init(pmc_usb_init);
static void __exit pmc_usb_exit(void)
{
	platform_driver_unregister(&pmc_usb_driver);
	debugfs_remove(pmc_mux_debugfs_root);
}
module_exit(pmc_usb_exit);
MODULE_AUTHOR("Heikki Krogerus <[email protected]>");
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Intel PMC USB mux control");
/* linux-master: drivers/usb/typec/mux/intel_pmc_mux.c */
// SPDX-License-Identifier: GPL-2.0+
/*
* OnSemi NB7VPQ904M Type-C driver
*
* Copyright (C) 2023 Dmitry Baryshkov <[email protected]>
*/
#include <linux/i2c.h>
#include <linux/mutex.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/regmap.h>
#include <linux/bitfield.h>
#include <linux/of_graph.h>
#include <drm/drm_bridge.h>
#include <linux/usb/typec_dp.h>
#include <linux/usb/typec_mux.h>
#include <linux/usb/typec_retimer.h>
#include <linux/gpio/consumer.h>
#include <linux/regulator/consumer.h>
#define NB7_CHNA 0
#define NB7_CHNB 1
#define NB7_CHNC 2
#define NB7_CHND 3
#define NB7_IS_CHAN_AD(channel) (channel == NB7_CHNA || channel == NB7_CHND)
#define GEN_DEV_SET_REG 0x00
#define GEN_DEV_SET_CHIP_EN BIT(0)
#define GEN_DEV_SET_CHNA_EN BIT(4)
#define GEN_DEV_SET_CHNB_EN BIT(5)
#define GEN_DEV_SET_CHNC_EN BIT(6)
#define GEN_DEV_SET_CHND_EN BIT(7)
#define GEN_DEV_SET_OP_MODE_MASK GENMASK(3, 1)
#define GEN_DEV_SET_OP_MODE_DP_CC2 0
#define GEN_DEV_SET_OP_MODE_DP_CC1 1
#define GEN_DEV_SET_OP_MODE_DP_4LANE 2
#define GEN_DEV_SET_OP_MODE_USB 5
#define EQ_SETTING_REG_BASE 0x01
#define EQ_SETTING_REG(n) (EQ_SETTING_REG_BASE + (n) * 2)
#define EQ_SETTING_MASK GENMASK(3, 1)
#define OUTPUT_COMPRESSION_AND_POL_REG_BASE 0x02
#define OUTPUT_COMPRESSION_AND_POL_REG(n) (OUTPUT_COMPRESSION_AND_POL_REG_BASE + (n) * 2)
#define OUTPUT_COMPRESSION_MASK GENMASK(2, 1)
#define FLAT_GAIN_REG_BASE 0x18
#define FLAT_GAIN_REG(n) (FLAT_GAIN_REG_BASE + (n) * 2)
#define FLAT_GAIN_MASK GENMASK(1, 0)
#define LOSS_MATCH_REG_BASE 0x19
#define LOSS_MATCH_REG(n) (LOSS_MATCH_REG_BASE + (n) * 2)
#define LOSS_MATCH_MASK GENMASK(1, 0)
#define AUX_CC_REG 0x09
#define CHIP_VERSION_REG 0x17
/* Per-device state for one NB7VPQ904M redriver/retimer. */
struct nb7vpq904m {
	struct i2c_client *client;
	struct gpio_desc *enable_gpio;		/* chip enable line */
	struct regulator *vcc_supply;		/* VCC regulator */
	struct regmap *regmap;
	struct typec_switch_dev *sw;		/* orientation switch we expose */
	struct typec_retimer *retimer;		/* retimer interface we expose */
	bool swap_data_lanes;			/* from "data-lanes" DT mapping */
	struct typec_switch *typec_switch;	/* downstream switch we forward to */
	struct drm_bridge bridge;
	struct mutex lock; /* protect non-concurrent retimer & switch */
	enum typec_orientation orientation;	/* last orientation from the switch */
	unsigned long mode;			/* last typec state from the retimer */
	unsigned int svid;			/* SVID of the active alt mode, 0 if none */
};
/*
 * Program one channel's analog tuning (equalization, output compression,
 * flat gain, loss matching) for either DP or USB signalling.  Channels
 * A/D get different values than B/C in several cases.  The specific
 * register values are chip tuning constants; presumably taken from the
 * vendor datasheet - not derivable from this code.
 */
static void nb7vpq904m_set_channel(struct nb7vpq904m *nb7, unsigned int channel, bool dp)
{
	u8 eq, out_comp, flat_gain, loss_match;

	if (dp) {
		eq = NB7_IS_CHAN_AD(channel) ? 0x6 : 0x4;
		out_comp = 0x3;
		flat_gain = NB7_IS_CHAN_AD(channel) ? 0x2 : 0x1;
		loss_match = 0x3;
	} else {
		eq = 0x4;
		out_comp = 0x3;
		flat_gain = NB7_IS_CHAN_AD(channel) ? 0x3 : 0x1;
		loss_match = NB7_IS_CHAN_AD(channel) ? 0x1 : 0x3;
	}

	regmap_update_bits(nb7->regmap, EQ_SETTING_REG(channel),
			   EQ_SETTING_MASK, FIELD_PREP(EQ_SETTING_MASK, eq));
	regmap_update_bits(nb7->regmap, OUTPUT_COMPRESSION_AND_POL_REG(channel),
			   OUTPUT_COMPRESSION_MASK, FIELD_PREP(OUTPUT_COMPRESSION_MASK, out_comp));
	regmap_update_bits(nb7->regmap, FLAT_GAIN_REG(channel),
			   FLAT_GAIN_MASK, FIELD_PREP(FLAT_GAIN_MASK, flat_gain));
	regmap_update_bits(nb7->regmap, LOSS_MATCH_REG(channel),
			   LOSS_MATCH_MASK, FIELD_PREP(LOSS_MATCH_MASK, loss_match));
}
/*
 * Apply the cached mode/orientation/SVID to the chip: select the
 * operating mode in GEN_DEV_SET_REG, tune each enabled channel for USB or
 * DP, and route AUX/CC.  Caller must hold nb7->lock.  Returns 0 on
 * success, -EINVAL for a non-DP alt mode, -EOPNOTSUPP for an unsupported
 * DP pin assignment.
 */
static int nb7vpq904m_set(struct nb7vpq904m *nb7)
{
	bool reverse = (nb7->orientation == TYPEC_ORIENTATION_REVERSE);

	switch (nb7->mode) {
	case TYPEC_STATE_SAFE:
		/* Safe state: all four channels enabled in USB mode. */
		regmap_write(nb7->regmap, GEN_DEV_SET_REG,
			     GEN_DEV_SET_CHIP_EN |
			     GEN_DEV_SET_CHNA_EN |
			     GEN_DEV_SET_CHNB_EN |
			     GEN_DEV_SET_CHNC_EN |
			     GEN_DEV_SET_CHND_EN |
			     FIELD_PREP(GEN_DEV_SET_OP_MODE_MASK,
					GEN_DEV_SET_OP_MODE_USB));
		nb7vpq904m_set_channel(nb7, NB7_CHNA, false);
		nb7vpq904m_set_channel(nb7, NB7_CHNB, false);
		nb7vpq904m_set_channel(nb7, NB7_CHNC, false);
		nb7vpq904m_set_channel(nb7, NB7_CHND, false);
		regmap_write(nb7->regmap, AUX_CC_REG, 0x2);
		return 0;
	case TYPEC_STATE_USB:
		/*
		 * Normal Orientation (CC1)
		 * A -> USB RX
		 * B -> USB TX
		 * C -> X
		 * D -> X
		 * Flipped Orientation (CC2)
		 * A -> X
		 * B -> X
		 * C -> USB TX
		 * D -> USB RX
		 *
		 * Reversed if data lanes are swapped
		 */
		if (reverse ^ nb7->swap_data_lanes) {
			regmap_write(nb7->regmap, GEN_DEV_SET_REG,
				     GEN_DEV_SET_CHIP_EN |
				     GEN_DEV_SET_CHNA_EN |
				     GEN_DEV_SET_CHNB_EN |
				     FIELD_PREP(GEN_DEV_SET_OP_MODE_MASK,
						GEN_DEV_SET_OP_MODE_USB));
			nb7vpq904m_set_channel(nb7, NB7_CHNA, false);
			nb7vpq904m_set_channel(nb7, NB7_CHNB, false);
		} else {
			regmap_write(nb7->regmap, GEN_DEV_SET_REG,
				     GEN_DEV_SET_CHIP_EN |
				     GEN_DEV_SET_CHNC_EN |
				     GEN_DEV_SET_CHND_EN |
				     FIELD_PREP(GEN_DEV_SET_OP_MODE_MASK,
						GEN_DEV_SET_OP_MODE_USB));
			nb7vpq904m_set_channel(nb7, NB7_CHNC, false);
			nb7vpq904m_set_channel(nb7, NB7_CHND, false);
		}
		regmap_write(nb7->regmap, AUX_CC_REG, 0x2);
		return 0;
	default:
		/* Only the DisplayPort alt mode is handled below. */
		if (nb7->svid != USB_TYPEC_DP_SID)
			return -EINVAL;
		break;
	}
	/* DP Altmode Setup */
	regmap_write(nb7->regmap, AUX_CC_REG, reverse ? 0x1 : 0x0);
	switch (nb7->mode) {
	case TYPEC_DP_STATE_C:
	case TYPEC_DP_STATE_E:
		/*
		 * Normal Orientation (CC1)
		 * A -> DP3
		 * B -> DP2
		 * C -> DP1
		 * D -> DP0
		 * Flipped Orientation (CC2)
		 * A -> DP0
		 * B -> DP1
		 * C -> DP2
		 * D -> DP3
		 */
		regmap_write(nb7->regmap, GEN_DEV_SET_REG,
			     GEN_DEV_SET_CHIP_EN |
			     GEN_DEV_SET_CHNA_EN |
			     GEN_DEV_SET_CHNB_EN |
			     GEN_DEV_SET_CHNC_EN |
			     GEN_DEV_SET_CHND_EN |
			     FIELD_PREP(GEN_DEV_SET_OP_MODE_MASK,
					GEN_DEV_SET_OP_MODE_DP_4LANE));
		nb7vpq904m_set_channel(nb7, NB7_CHNA, true);
		nb7vpq904m_set_channel(nb7, NB7_CHNB, true);
		nb7vpq904m_set_channel(nb7, NB7_CHNC, true);
		nb7vpq904m_set_channel(nb7, NB7_CHND, true);
		break;
	case TYPEC_DP_STATE_D:
	case TYPEC_DP_STATE_F:
		regmap_write(nb7->regmap, GEN_DEV_SET_REG,
			     GEN_DEV_SET_CHIP_EN |
			     GEN_DEV_SET_CHNA_EN |
			     GEN_DEV_SET_CHNB_EN |
			     GEN_DEV_SET_CHNC_EN |
			     GEN_DEV_SET_CHND_EN |
			     FIELD_PREP(GEN_DEV_SET_OP_MODE_MASK,
					reverse ^ nb7->swap_data_lanes ?
						GEN_DEV_SET_OP_MODE_DP_CC2
						: GEN_DEV_SET_OP_MODE_DP_CC1));
		/*
		 * Normal Orientation (CC1)
		 * A -> USB RX
		 * B -> USB TX
		 * C -> DP1
		 * D -> DP0
		 * Flipped Orientation (CC2)
		 * A -> DP0
		 * B -> DP1
		 * C -> USB TX
		 * D -> USB RX
		 *
		 * Reversed if data lanes are swapped
		 */
		if (nb7->swap_data_lanes) {
			nb7vpq904m_set_channel(nb7, NB7_CHNA, !reverse);
			nb7vpq904m_set_channel(nb7, NB7_CHNB, !reverse);
			nb7vpq904m_set_channel(nb7, NB7_CHNC, reverse);
			nb7vpq904m_set_channel(nb7, NB7_CHND, reverse);
		} else {
			nb7vpq904m_set_channel(nb7, NB7_CHNA, reverse);
			nb7vpq904m_set_channel(nb7, NB7_CHNB, reverse);
			nb7vpq904m_set_channel(nb7, NB7_CHNC, !reverse);
			nb7vpq904m_set_channel(nb7, NB7_CHND, !reverse);
		}
		break;
	default:
		return -EOPNOTSUPP;
	}
	return 0;
}
/*
 * typec orientation-switch callback: forward the orientation to the
 * downstream switch first, then reprogram the chip if the orientation
 * actually changed.
 */
static int nb7vpq904m_sw_set(struct typec_switch_dev *sw, enum typec_orientation orientation)
{
	struct nb7vpq904m *nb7 = typec_switch_get_drvdata(sw);
	int ret;

	ret = typec_switch_set(nb7->typec_switch, orientation);
	if (ret)
		return ret;

	mutex_lock(&nb7->lock);

	if (nb7->orientation != orientation) {
		nb7->orientation = orientation;

		ret = nb7vpq904m_set(nb7);
	}

	mutex_unlock(&nb7->lock);

	return ret;
}
/*
 * typec retimer callback: cache the new mode and the alt-mode SVID (0
 * when there is no alt mode), then reprogram the chip if the mode
 * changed.
 */
static int nb7vpq904m_retimer_set(struct typec_retimer *retimer, struct typec_retimer_state *state)
{
	struct nb7vpq904m *nb7 = typec_retimer_get_drvdata(retimer);
	int ret = 0;

	mutex_lock(&nb7->lock);

	if (nb7->mode != state->mode) {
		nb7->mode = state->mode;

		if (state->alt)
			nb7->svid = state->alt->svid;
		else
			nb7->svid = 0; // No SVID

		ret = nb7vpq904m_set(nb7);
	}

	mutex_unlock(&nb7->lock);

	return ret;
}
#if IS_ENABLED(CONFIG_OF) && IS_ENABLED(CONFIG_DRM_PANEL_BRIDGE)
/*
 * DRM bridge attach: chain to the next bridge found at OF graph port 0.
 * Only NO_CONNECTOR attachment is supported.
 */
static int nb7vpq904m_bridge_attach(struct drm_bridge *bridge,
				    enum drm_bridge_attach_flags flags)
{
	struct nb7vpq904m *nb7 = container_of(bridge, struct nb7vpq904m, bridge);
	struct drm_bridge *next_bridge;

	if (!(flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR))
		return -EINVAL;

	next_bridge = devm_drm_of_get_bridge(&nb7->client->dev, nb7->client->dev.of_node, 0, 0);
	if (IS_ERR(next_bridge)) {
		dev_err(&nb7->client->dev, "failed to acquire drm_bridge: %pe\n", next_bridge);
		return PTR_ERR(next_bridge);
	}

	return drm_bridge_attach(bridge->encoder, next_bridge, bridge,
				 DRM_BRIDGE_ATTACH_NO_CONNECTOR);
}
static const struct drm_bridge_funcs nb7vpq904m_bridge_funcs = {
	.attach	= nb7vpq904m_bridge_attach,
};
/* Register this device as a pass-through DRM bridge. */
static int nb7vpq904m_register_bridge(struct nb7vpq904m *nb7)
{
	nb7->bridge.funcs = &nb7vpq904m_bridge_funcs;
	nb7->bridge.of_node = nb7->client->dev.of_node;

	return devm_drm_bridge_add(&nb7->client->dev, &nb7->bridge);
}
#else
/* Without OF + panel-bridge support the bridge is simply not registered. */
static int nb7vpq904m_register_bridge(struct nb7vpq904m *nb7)
{
	return 0;
}
#endif
/* 8-bit registers; 0x1f is the highest register address. */
static const struct regmap_config nb7_regmap = {
	.max_register = 0x1f,
	.reg_bits = 8,
	.val_bits = 8,
};

/* Supported "data-lanes" orders: straight through or fully inverted. */
enum {
	NORMAL_LANE_MAPPING,
	INVERT_LANE_MAPPING,
};

#define DATA_LANES_COUNT 4

static const int supported_data_lane_mapping[][DATA_LANES_COUNT] = {
	[NORMAL_LANE_MAPPING] = { 0, 1, 2, 3 },
	[INVERT_LANE_MAPPING] = { 3, 2, 1, 0 },
};
/*
 * Parse the optional "data-lanes" property of port 1, endpoint 0 and detect
 * whether the board wires the four lanes straight or inverted.
 *
 * Returns 0 on success — including a missing endpoint or property, which
 * both mean the default straight mapping — or a negative errno when the
 * property is present but malformed.
 */
static int nb7vpq904m_parse_data_lanes_mapping(struct nb7vpq904m *nb7)
{
	struct device_node *ep;
	u32 data_lanes[4];
	int ret, i, j;

	ep = of_graph_get_endpoint_by_regs(nb7->client->dev.of_node, 1, 0);
	if (ep) {
		ret = of_property_count_u32_elems(ep, "data-lanes");
		if (ret == -EINVAL)
			/* Property isn't here, consider default mapping */
			goto out_done;
		if (ret < 0)
			goto out_error;

		if (ret != DATA_LANES_COUNT) {
			dev_err(&nb7->client->dev, "expected 4 data lanes\n");
			ret = -EINVAL;
			goto out_error;
		}

		ret = of_property_read_u32_array(ep, "data-lanes", data_lanes, DATA_LANES_COUNT);
		if (ret)
			goto out_error;

		/* Compare the property against each supported mapping. */
		for (i = 0; i < ARRAY_SIZE(supported_data_lane_mapping); i++) {
			for (j = 0; j < DATA_LANES_COUNT; j++) {
				if (data_lanes[j] != supported_data_lane_mapping[i][j])
					break;
			}

			/* All four lanes matched mapping i. */
			if (j == DATA_LANES_COUNT)
				break;
		}

		switch (i) {
		case NORMAL_LANE_MAPPING:
			break;
		case INVERT_LANE_MAPPING:
			nb7->swap_data_lanes = true;
			dev_info(&nb7->client->dev, "using inverted data lanes mapping\n");
			break;
		default:
			dev_err(&nb7->client->dev, "invalid data lanes mapping\n");
			ret = -EINVAL;
			goto out_error;
		}
	}

out_done:
	ret = 0;

out_error:
	/* of_node_put(NULL) is a no-op, so the !ep path is safe here. */
	of_node_put(ep);

	return ret;
}
static int nb7vpq904m_probe(struct i2c_client *client)
{
struct device *dev = &client->dev;
struct typec_switch_desc sw_desc = { };
struct typec_retimer_desc retimer_desc = { };
struct nb7vpq904m *nb7;
int ret;
nb7 = devm_kzalloc(dev, sizeof(*nb7), GFP_KERNEL);
if (!nb7)
return -ENOMEM;
nb7->client = client;
nb7->regmap = devm_regmap_init_i2c(client, &nb7_regmap);
if (IS_ERR(nb7->regmap)) {
dev_err(&client->dev, "Failed to allocate register map\n");
return PTR_ERR(nb7->regmap);
}
nb7->mode = TYPEC_STATE_SAFE;
nb7->orientation = TYPEC_ORIENTATION_NONE;
mutex_init(&nb7->lock);
nb7->enable_gpio = devm_gpiod_get_optional(dev, "enable", GPIOD_OUT_LOW);
if (IS_ERR(nb7->enable_gpio))
return dev_err_probe(dev, PTR_ERR(nb7->enable_gpio),
"unable to acquire enable gpio\n");
nb7->vcc_supply = devm_regulator_get_optional(dev, "vcc");
if (IS_ERR(nb7->vcc_supply))
return PTR_ERR(nb7->vcc_supply);
nb7->typec_switch = fwnode_typec_switch_get(dev->fwnode);
if (IS_ERR(nb7->typec_switch))
return dev_err_probe(dev, PTR_ERR(nb7->typec_switch),
"failed to acquire orientation-switch\n");
ret = nb7vpq904m_parse_data_lanes_mapping(nb7);
if (ret)
return ret;
ret = regulator_enable(nb7->vcc_supply);
if (ret)
dev_warn(dev, "Failed to enable vcc: %d\n", ret);
gpiod_set_value(nb7->enable_gpio, 1);
ret = nb7vpq904m_register_bridge(nb7);
if (ret)
goto err_disable_gpio;
sw_desc.drvdata = nb7;
sw_desc.fwnode = dev->fwnode;
sw_desc.set = nb7vpq904m_sw_set;
nb7->sw = typec_switch_register(dev, &sw_desc);
if (IS_ERR(nb7->sw)) {
ret = dev_err_probe(dev, PTR_ERR(nb7->sw),
"Error registering typec switch\n");
goto err_disable_gpio;
}
retimer_desc.drvdata = nb7;
retimer_desc.fwnode = dev->fwnode;
retimer_desc.set = nb7vpq904m_retimer_set;
nb7->retimer = typec_retimer_register(dev, &retimer_desc);
if (IS_ERR(nb7->retimer)) {
ret = dev_err_probe(dev, PTR_ERR(nb7->retimer),
"Error registering typec retimer\n");
goto err_switch_unregister;
}
return 0;
err_switch_unregister:
typec_switch_unregister(nb7->sw);
err_disable_gpio:
gpiod_set_value(nb7->enable_gpio, 0);
regulator_disable(nb7->vcc_supply);
return ret;
}
/*
 * Undo probe: unregister the typec devices, then power the chip down.
 * NOTE(review): relies on probe having called i2c_set_clientdata() —
 * verify, otherwise nb7 is NULL here.
 */
static void nb7vpq904m_remove(struct i2c_client *client)
{
	struct nb7vpq904m *nb7 = i2c_get_clientdata(client);

	typec_retimer_unregister(nb7->retimer);
	typec_switch_unregister(nb7->sw);

	gpiod_set_value(nb7->enable_gpio, 0);

	regulator_disable(nb7->vcc_supply);
}
/* Device match tables and driver registration. */
static const struct i2c_device_id nb7vpq904m_table[] = {
	{ "nb7vpq904m" },
	{ }
};
MODULE_DEVICE_TABLE(i2c, nb7vpq904m_table);

static const struct of_device_id nb7vpq904m_of_table[] = {
	{ .compatible = "onnn,nb7vpq904m" },
	{ }
};
MODULE_DEVICE_TABLE(of, nb7vpq904m_of_table);

static struct i2c_driver nb7vpq904m_driver = {
	.driver = {
		.name = "nb7vpq904m",
		.of_match_table = nb7vpq904m_of_table,
	},
	.probe = nb7vpq904m_probe,
	.remove = nb7vpq904m_remove,
	.id_table = nb7vpq904m_table,
};
module_i2c_driver(nb7vpq904m_driver);

MODULE_AUTHOR("Dmitry Baryshkov <[email protected]>");
MODULE_DESCRIPTION("OnSemi NB7VPQ904M Type-C driver");
MODULE_LICENSE("GPL");
| linux-master | drivers/usb/typec/mux/nb7vpq904m.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2021-2022 Linaro Ltd.
* Copyright (C) 2018-2020 The Linux Foundation
*/
#include <linux/bits.h>
#include <linux/i2c.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/regmap.h>
#include <linux/usb/typec_dp.h>
#include <linux/usb/typec_mux.h>
/* FSA4480 register addresses */
#define FSA4480_SWITCH_ENABLE	0x04
#define FSA4480_SWITCH_SELECT	0x05
#define FSA4480_SWITCH_STATUS1	0x07
#define FSA4480_SLOW_L		0x08
#define FSA4480_SLOW_R		0x09
#define FSA4480_SLOW_MIC	0x0a
#define FSA4480_SLOW_SENSE	0x0b
#define FSA4480_SLOW_GND	0x0c
#define FSA4480_DELAY_L_R	0x0d
#define FSA4480_DELAY_L_MIC	0x0e
#define FSA4480_DELAY_L_SENSE	0x0f
#define FSA4480_DELAY_L_AGND	0x10
#define FSA4480_FUNCTION_ENABLE	0x12
#define FSA4480_RESET		0x1e
#define FSA4480_MAX_REGISTER	0x1f

/* FSA4480_SWITCH_ENABLE bits */
#define FSA4480_ENABLE_DEVICE	BIT(7)
#define FSA4480_ENABLE_SBU	GENMASK(6, 5)
#define FSA4480_ENABLE_USB	GENMASK(4, 3)
#define FSA4480_ENABLE_SENSE	BIT(2)
#define FSA4480_ENABLE_MIC	BIT(1)
#define FSA4480_ENABLE_AGND	BIT(0)

/* FSA4480_SWITCH_SELECT bits */
#define FSA4480_SEL_SBU_REVERSE	GENMASK(6, 5)
#define FSA4480_SEL_USB		GENMASK(4, 3)
#define FSA4480_SEL_SENSE	BIT(2)
#define FSA4480_SEL_MIC		BIT(1)
#define FSA4480_SEL_AGND	BIT(0)

/* FSA4480_FUNCTION_ENABLE bits */
#define FSA4480_ENABLE_AUTO_JACK_DETECT	BIT(0)
/* Per-device state for the FSA4480 USB/audio analog switch. */
struct fsa4480 {
	struct i2c_client *client;
	/* used to serialize concurrent change requests */
	struct mutex lock;
	struct typec_switch_dev *sw;
	struct typec_mux_dev *mux;
	struct regmap *regmap;
	/* Cached state, guarded by @lock, consumed by fsa4480_set(). */
	enum typec_orientation orientation;
	unsigned long mode;
	unsigned int svid;
	u8 cur_enable;	/* last value written to FSA4480_SWITCH_ENABLE */
};

/* 8-bit registers; serialization is done by fsa4480->lock, not regmap. */
static const struct regmap_config fsa4480_regmap_config = {
	.reg_bits = 8,
	.val_bits = 8,
	.max_register = FSA4480_MAX_REGISTER,
	/* Accesses only done under fsa4480->lock */
	.disable_locking = true,
};
/*
 * Program the switch according to the cached mode/orientation/svid.
 * Caller must hold fsa->lock.
 *
 * Returns 0, or -EOPNOTSUPP for mode combinations the FSA4480 cannot mux.
 */
static int fsa4480_set(struct fsa4480 *fsa)
{
	bool reverse = (fsa->orientation == TYPEC_ORIENTATION_REVERSE);
	u8 enable = FSA4480_ENABLE_DEVICE;
	u8 sel = 0;

	/* USB Mode */
	if (fsa->mode < TYPEC_STATE_MODAL ||
	    (!fsa->svid && (fsa->mode == TYPEC_MODE_USB2 ||
			    fsa->mode == TYPEC_MODE_USB3))) {
		enable |= FSA4480_ENABLE_USB;
		sel = FSA4480_SEL_USB;
	} else if (fsa->svid) {
		switch (fsa->mode) {
		/* DP Only */
		case TYPEC_DP_STATE_C:
		case TYPEC_DP_STATE_E:
			enable |= FSA4480_ENABLE_SBU;
			if (reverse)
				sel = FSA4480_SEL_SBU_REVERSE;
			break;
		/* DP + USB */
		case TYPEC_DP_STATE_D:
		case TYPEC_DP_STATE_F:
			enable |= FSA4480_ENABLE_USB | FSA4480_ENABLE_SBU;
			sel = FSA4480_SEL_USB;
			if (reverse)
				sel |= FSA4480_SEL_SBU_REVERSE;
			break;
		default:
			return -EOPNOTSUPP;
		}
	} else if (fsa->mode == TYPEC_MODE_AUDIO) {
		/* Audio Accessory Mode, setup to auto Jack Detection */
		enable |= FSA4480_ENABLE_USB | FSA4480_ENABLE_AGND;
	} else
		return -EOPNOTSUPP;

	if (fsa->cur_enable & FSA4480_ENABLE_SBU) {
		/* Disable SBU output while re-configuring the switch */
		regmap_write(fsa->regmap, FSA4480_SWITCH_ENABLE,
			     fsa->cur_enable & ~FSA4480_ENABLE_SBU);

		/* 35us to allow the SBU switch to turn off */
		usleep_range(35, 1000);
	}

	/* Select first, then enable the new paths. */
	regmap_write(fsa->regmap, FSA4480_SWITCH_SELECT, sel);
	regmap_write(fsa->regmap, FSA4480_SWITCH_ENABLE, enable);

	/* Start AUDIO JACK DETECTION to setup MIC, AGND & Sense muxes */
	if (enable & FSA4480_ENABLE_AGND)
		regmap_write(fsa->regmap, FSA4480_FUNCTION_ENABLE,
			     FSA4480_ENABLE_AUTO_JACK_DETECT);

	if (enable & FSA4480_ENABLE_SBU) {
		/* 15us to allow the SBU switch to turn on again */
		usleep_range(15, 1000);
	}

	fsa->cur_enable = enable;

	return 0;
}
/*
 * Type-C orientation switch callback: cache the new orientation and
 * reprogram the switch only when it actually changed.
 */
static int fsa4480_switch_set(struct typec_switch_dev *sw,
			      enum typec_orientation orientation)
{
	struct fsa4480 *fsa = typec_switch_get_drvdata(sw);
	int status = 0;

	mutex_lock(&fsa->lock);
	if (orientation != fsa->orientation) {
		fsa->orientation = orientation;
		status = fsa4480_set(fsa);
	}
	mutex_unlock(&fsa->lock);

	return status;
}
/*
 * Type-C mux callback: cache the requested mode/SVID and reprogram the
 * switch only when the mode actually changed.
 */
static int fsa4480_mux_set(struct typec_mux_dev *mux, struct typec_mux_state *state)
{
	struct fsa4480 *fsa = typec_mux_get_drvdata(mux);
	int status = 0;

	mutex_lock(&fsa->lock);
	if (state->mode != fsa->mode) {
		fsa->mode = state->mode;
		/* No alternate mode means no SVID. */
		fsa->svid = state->alt ? state->alt->svid : 0;
		status = fsa4480_set(fsa);
	}
	mutex_unlock(&fsa->lock);

	return status;
}
/*
 * I2C probe: initialize the regmap, program safe power-on defaults
 * (USB path selected) and register the Type-C switch and mux.
 */
static int fsa4480_probe(struct i2c_client *client)
{
	struct device *dev = &client->dev;
	struct typec_switch_desc sw_desc = { };
	struct typec_mux_desc mux_desc = { };
	struct fsa4480 *fsa;

	fsa = devm_kzalloc(dev, sizeof(*fsa), GFP_KERNEL);
	if (!fsa)
		return -ENOMEM;

	fsa->client = client;
	mutex_init(&fsa->lock);

	fsa->regmap = devm_regmap_init_i2c(client, &fsa4480_regmap_config);
	if (IS_ERR(fsa->regmap))
		return dev_err_probe(dev, PTR_ERR(fsa->regmap), "failed to initialize regmap\n");

	/* Safe mode */
	fsa->cur_enable = FSA4480_ENABLE_DEVICE | FSA4480_ENABLE_USB;
	fsa->mode = TYPEC_STATE_SAFE;
	fsa->orientation = TYPEC_ORIENTATION_NONE;

	/* set default settings */
	regmap_write(fsa->regmap, FSA4480_SLOW_L, 0x00);
	regmap_write(fsa->regmap, FSA4480_SLOW_R, 0x00);
	regmap_write(fsa->regmap, FSA4480_SLOW_MIC, 0x00);
	regmap_write(fsa->regmap, FSA4480_SLOW_SENSE, 0x00);
	regmap_write(fsa->regmap, FSA4480_SLOW_GND, 0x00);
	regmap_write(fsa->regmap, FSA4480_DELAY_L_R, 0x00);
	regmap_write(fsa->regmap, FSA4480_DELAY_L_MIC, 0x00);
	regmap_write(fsa->regmap, FSA4480_DELAY_L_SENSE, 0x00);
	/* Non-zero AGND delay; value taken as-is — presumably per datasheet. */
	regmap_write(fsa->regmap, FSA4480_DELAY_L_AGND, 0x09);
	regmap_write(fsa->regmap, FSA4480_SWITCH_SELECT, FSA4480_SEL_USB);
	regmap_write(fsa->regmap, FSA4480_SWITCH_ENABLE, fsa->cur_enable);

	sw_desc.drvdata = fsa;
	sw_desc.fwnode = dev_fwnode(dev);
	sw_desc.set = fsa4480_switch_set;

	fsa->sw = typec_switch_register(dev, &sw_desc);
	if (IS_ERR(fsa->sw))
		return dev_err_probe(dev, PTR_ERR(fsa->sw), "failed to register typec switch\n");

	mux_desc.drvdata = fsa;
	mux_desc.fwnode = dev_fwnode(dev);
	mux_desc.set = fsa4480_mux_set;

	fsa->mux = typec_mux_register(dev, &mux_desc);
	if (IS_ERR(fsa->mux)) {
		typec_switch_unregister(fsa->sw);
		return dev_err_probe(dev, PTR_ERR(fsa->mux), "failed to register typec mux\n");
	}

	i2c_set_clientdata(client, fsa);
	return 0;
}

/* Undo probe: unregister the typec mux and switch. */
static void fsa4480_remove(struct i2c_client *client)
{
	struct fsa4480 *fsa = i2c_get_clientdata(client);

	typec_mux_unregister(fsa->mux);
	typec_switch_unregister(fsa->sw);
}
/* Device match tables and driver registration. */
static const struct i2c_device_id fsa4480_table[] = {
	{ "fsa4480" },
	{ }
};
MODULE_DEVICE_TABLE(i2c, fsa4480_table);

static const struct of_device_id fsa4480_of_table[] = {
	{ .compatible = "fcs,fsa4480" },
	{ }
};
MODULE_DEVICE_TABLE(of, fsa4480_of_table);

static struct i2c_driver fsa4480_driver = {
	.driver = {
		.name = "fsa4480",
		.of_match_table = fsa4480_of_table,
	},
	.probe = fsa4480_probe,
	.remove = fsa4480_remove,
	.id_table = fsa4480_table,
};
module_i2c_driver(fsa4480_driver);

MODULE_DESCRIPTION("ON Semiconductor FSA4480 driver");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/usb/typec/mux/fsa4480.c |
// SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause
/*
* UCSI driver for STMicroelectronics STM32G0 Type-C PD controller
*
* Copyright (C) 2022, STMicroelectronics - All Rights Reserved
* Author: Fabrice Gasnier <[email protected]>.
*/
#include <linux/delay.h>
#include <linux/firmware.h>
#include <linux/i2c.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <asm/unaligned.h>
#include "ucsi.h"
/* STM32G0 I2C bootloader addr: 0b1010001x (See AN2606) */
#define STM32G0_I2C_BL_ADDR	(0xa2 >> 1)

/* STM32G0 I2C bootloader max data size */
#define STM32G0_I2C_BL_SZ	256

/* STM32 I2C bootloader commands (See AN4221) */
#define STM32_CMD_GVR		0x01	/* Gets the bootloader version */
#define STM32_CMD_GVR_LEN	1
#define STM32_CMD_RM		0x11	/* Read memory */
#define STM32_CMD_WM		0x31	/* Write memory */
#define STM32_CMD_ADDR_LEN	5	/* Address len for go, mem write... */
#define STM32_CMD_ERASE		0x44	/* Erase page, bank or all */
#define STM32_CMD_ERASE_SPECIAL_LEN	3
#define STM32_CMD_GLOBAL_MASS_ERASE	0xffff	/* All-bank erase */

/* STM32 I2C bootloader answer status */
#define STM32G0_I2C_BL_ACK	0x79
#define STM32G0_I2C_BL_NACK	0x1f
#define STM32G0_I2C_BL_BUSY	0x76

/* STM32G0 flash definitions */
#define STM32G0_USER_OPTION_BYTES	0x1fff7800
#define STM32G0_USER_OB_NBOOT0		BIT(26)
#define STM32G0_USER_OB_NBOOT_SEL	BIT(24)
#define STM32G0_USER_OB_BOOT_MAIN	(STM32G0_USER_OB_NBOOT0 | STM32G0_USER_OB_NBOOT_SEL)
#define STM32G0_MAIN_MEM_ADDR		0x08000000

/* STM32 Firmware definitions: additional commands */
#define STM32G0_FW_GETVER	0x00	/* Gets the firmware version */
#define STM32G0_FW_GETVER_LEN	4
#define STM32G0_FW_RSTGOBL	0x21	/* Reset and go to bootloader */
#define STM32G0_FW_KEYWORD	0xa56959a6

/* ucsi_stm32g0_fw_info located at the end of the firmware */
struct ucsi_stm32g0_fw_info {
	u32 version;
	u32 keyword;	/* must equal STM32G0_FW_KEYWORD for a valid image */
};
/* Driver instance data. */
struct ucsi_stm32g0 {
	struct i2c_client *client;	/* UCSI firmware I2C address */
	struct i2c_client *i2c_bl;	/* dummy client at the bootloader address */
	bool in_bootloader;		/* true while the G0 runs its ROM bootloader */
	u8 bl_version;
	struct completion complete;	/* completed from the alert IRQ handler */
	struct device *dev;
	unsigned long flags;		/* COMMAND_PENDING bit */
	const char *fw_name;		/* optional firmware image to flash */
	struct ucsi *ucsi;
	bool suspended;
	bool wakeup_event;		/* IRQ fired while suspended */
};
/*
* Bootloader commands helpers:
* - send command (2 bytes)
* - check ack
* Then either:
* - receive data
* - receive data + check ack
* - send data + check ack
 * These operations depend on the command and have various lengths.
*/
/*
 * Read the single answer byte from the bootloader and map it to an errno:
 * 0 (ACK), -ENOENT (NACK), -EBUSY (busy) or -EINVAL (unexpected byte).
 */
static int ucsi_stm32g0_bl_check_ack(struct ucsi *ucsi)
{
	struct ucsi_stm32g0 *g0 = ucsi_get_drvdata(ucsi);
	struct i2c_client *client = g0->i2c_bl;
	unsigned char ack;
	struct i2c_msg msg[] = {
		{
			.addr = client->addr,
			.flags = I2C_M_RD,
			.len = 1,
			.buf = &ack,
		},
	};
	int ret;

	ret = i2c_transfer(client->adapter, msg, ARRAY_SIZE(msg));
	if (ret != ARRAY_SIZE(msg)) {
		dev_err(g0->dev, "i2c bl ack (%02x), error: %d\n", client->addr, ret);

		return ret < 0 ? ret : -EIO;
	}

	/* The 'ack' byte should contain bootloader answer: ack/nack/busy */
	switch (ack) {
	case STM32G0_I2C_BL_ACK:
		return 0;
	case STM32G0_I2C_BL_NACK:
		return -ENOENT;
	case STM32G0_I2C_BL_BUSY:
		return -EBUSY;
	default:
		dev_err(g0->dev, "i2c bl ack (%02x), invalid byte: %02x\n",
			client->addr, ack);

		return -EINVAL;
	}
}
/*
 * Send a two-byte bootloader command (code + complemented code) and
 * optionally read back the ack byte.
 */
static int ucsi_stm32g0_bl_cmd_check_ack(struct ucsi *ucsi, unsigned int cmd, bool check_ack)
{
	struct ucsi_stm32g0 *g0 = ucsi_get_drvdata(ucsi);
	struct i2c_client *client = g0->i2c_bl;
	unsigned char buf[2];
	struct i2c_msg msg[] = {
		{
			.addr = client->addr,
			.flags = 0,
			.len = sizeof(buf),
			.buf = buf,
		},
	};
	int ret;

	/*
	 * Send STM32 bootloader command format is two bytes:
	 * - command code
	 * - XOR'ed command code
	 */
	buf[0] = cmd;
	buf[1] = cmd ^ 0xff;

	ret = i2c_transfer(client->adapter, msg, ARRAY_SIZE(msg));
	if (ret != ARRAY_SIZE(msg)) {
		dev_dbg(g0->dev, "i2c bl cmd %d (%02x), error: %d\n", cmd, client->addr, ret);

		return ret < 0 ? ret : -EIO;
	}

	if (check_ack)
		return ucsi_stm32g0_bl_check_ack(ucsi);

	return 0;
}

/* Send a bootloader command and check its ack. */
static int ucsi_stm32g0_bl_cmd(struct ucsi *ucsi, unsigned int cmd)
{
	return ucsi_stm32g0_bl_cmd_check_ack(ucsi, cmd, true);
}
/* Receive @len bytes from the bootloader, optionally checking the ack. */
static int ucsi_stm32g0_bl_rcv_check_ack(struct ucsi *ucsi, void *data, size_t len, bool check_ack)
{
	struct ucsi_stm32g0 *g0 = ucsi_get_drvdata(ucsi);
	struct i2c_client *client = g0->i2c_bl;
	struct i2c_msg msg[] = {
		{
			.addr = client->addr,
			.flags = I2C_M_RD,
			.len = len,
			.buf = data,
		},
	};
	int ret;

	ret = i2c_transfer(client->adapter, msg, ARRAY_SIZE(msg));
	if (ret != ARRAY_SIZE(msg)) {
		dev_err(g0->dev, "i2c bl rcv %02x, error: %d\n", client->addr, ret);

		return ret < 0 ? ret : -EIO;
	}

	if (check_ack)
		return ucsi_stm32g0_bl_check_ack(ucsi);

	return 0;
}

/* Receive data followed by an ack byte. */
static int ucsi_stm32g0_bl_rcv(struct ucsi *ucsi, void *data, size_t len)
{
	return ucsi_stm32g0_bl_rcv_check_ack(ucsi, data, len, true);
}

/* Receive without ack ("woack"): used when no ack byte follows the data. */
static int ucsi_stm32g0_bl_rcv_woack(struct ucsi *ucsi, void *data, size_t len)
{
	return ucsi_stm32g0_bl_rcv_check_ack(ucsi, data, len, false);
}
/* Send @len bytes to the bootloader and check the resulting ack byte. */
static int ucsi_stm32g0_bl_send(struct ucsi *ucsi, void *data, size_t len)
{
	struct ucsi_stm32g0 *g0 = ucsi_get_drvdata(ucsi);
	struct i2c_client *client = g0->i2c_bl;
	struct i2c_msg msg[] = {
		{
			.addr = client->addr,
			.flags = 0,
			.len = len,
			.buf = data,
		},
	};
	int ret;

	ret = i2c_transfer(client->adapter, msg, ARRAY_SIZE(msg));
	if (ret != ARRAY_SIZE(msg)) {
		dev_err(g0->dev, "i2c bl send %02x, error: %d\n", client->addr, ret);

		return ret < 0 ? ret : -EIO;
	}

	return ucsi_stm32g0_bl_check_ack(ucsi);
}
/* Bootloader commands */

/* GVR: read the one-byte bootloader version. */
static int ucsi_stm32g0_bl_get_version(struct ucsi *ucsi, u8 *bl_version)
{
	int ret;

	ret = ucsi_stm32g0_bl_cmd(ucsi, STM32_CMD_GVR);
	if (ret)
		return ret;

	return ucsi_stm32g0_bl_rcv(ucsi, bl_version, STM32_CMD_GVR_LEN);
}

/* Send a 32-bit address: 4 bytes MSB first followed by their XOR checksum. */
static int ucsi_stm32g0_bl_send_addr(struct ucsi *ucsi, u32 addr)
{
	u8 data8[STM32_CMD_ADDR_LEN];

	/* Address format: 4 bytes addr (MSB first) + XOR'ed addr bytes */
	put_unaligned_be32(addr, data8);
	data8[4] = data8[0] ^ data8[1] ^ data8[2] ^ data8[3];

	return ucsi_stm32g0_bl_send(ucsi, data8, STM32_CMD_ADDR_LEN);
}

/*
 * Erase all flash banks: ERASE command followed by the special code 0xffff
 * plus its XOR checksum.  0xffff is byte-order symmetric, so storing it
 * through the u16 alias yields the same bytes on any endianness.
 */
static int ucsi_stm32g0_bl_global_mass_erase(struct ucsi *ucsi)
{
	u8 data8[4];
	u16 *data16 = (u16 *)&data8[0];
	int ret;

	data16[0] = STM32_CMD_GLOBAL_MASS_ERASE;
	data8[2] = data8[0] ^ data8[1];

	ret = ucsi_stm32g0_bl_cmd(ucsi, STM32_CMD_ERASE);
	if (ret)
		return ret;

	return ucsi_stm32g0_bl_send(ucsi, data8, STM32_CMD_ERASE_SPECIAL_LEN);
}
/*
 * Write up to STM32G0_I2C_BL_SZ bytes at @addr: WM command, then address,
 * then "len - 1", the data, and a XOR checksum over all of them.
 */
static int ucsi_stm32g0_bl_write(struct ucsi *ucsi, u32 addr, const void *data, size_t len)
{
	u8 *data8;
	int i, ret;

	if (!len || len > STM32G0_I2C_BL_SZ)
		return -EINVAL;

	/* Write memory: len bytes -1, data up to 256 bytes + XOR'ed bytes */
	data8 = kmalloc(STM32G0_I2C_BL_SZ + 2, GFP_KERNEL);
	if (!data8)
		return -ENOMEM;

	ret = ucsi_stm32g0_bl_cmd(ucsi, STM32_CMD_WM);
	if (ret)
		goto free;

	ret = ucsi_stm32g0_bl_send_addr(ucsi, addr);
	if (ret)
		goto free;

	data8[0] = len - 1;
	memcpy(data8 + 1, data, len);
	/* The trailing checksum covers the length byte and every data byte. */
	data8[len + 1] = data8[0];
	for (i = 1; i <= len; i++)
		data8[len + 1] ^= data8[i];

	ret = ucsi_stm32g0_bl_send(ucsi, data8, len + 2);
free:
	kfree(data8);

	return ret;
}
/*
 * Read up to STM32G0_I2C_BL_SZ bytes from @addr: RM command, then the
 * address, then the byte count minus one, finally the data (no ack).
 */
static int ucsi_stm32g0_bl_read(struct ucsi *ucsi, u32 addr, void *data, size_t len)
{
	int ret;

	if (!len || len > STM32G0_I2C_BL_SZ)
		return -EINVAL;

	ret = ucsi_stm32g0_bl_cmd(ucsi, STM32_CMD_RM);
	if (ret)
		return ret;

	ret = ucsi_stm32g0_bl_send_addr(ucsi, addr);
	if (ret)
		return ret;

	/* Number of bytes to read is sent as "len - 1", with its own ack. */
	ret = ucsi_stm32g0_bl_cmd(ucsi, len - 1);
	if (ret)
		return ret;

	return ucsi_stm32g0_bl_rcv_woack(ucsi, data, len);
}

/* Firmware commands (the same address as the bootloader) */
static int ucsi_stm32g0_fw_cmd(struct ucsi *ucsi, unsigned int cmd)
{
	return ucsi_stm32g0_bl_cmd_check_ack(ucsi, cmd, false);
}

static int ucsi_stm32g0_fw_rcv(struct ucsi *ucsi, void *data, size_t len)
{
	return ucsi_stm32g0_bl_rcv_woack(ucsi, data, len);
}
/* UCSI ops */

/* Read @len bytes at UCSI @offset: write the register, then read back. */
static int ucsi_stm32g0_read(struct ucsi *ucsi, unsigned int offset, void *val, size_t len)
{
	struct ucsi_stm32g0 *g0 = ucsi_get_drvdata(ucsi);
	struct i2c_client *client = g0->client;
	u8 reg = offset;
	struct i2c_msg msg[] = {
		{
			.addr = client->addr,
			.flags = 0,
			.len = 1,
			.buf = &reg,
		},
		{
			.addr = client->addr,
			.flags = I2C_M_RD,
			.len = len,
			.buf = val,
		},
	};
	int ret;

	ret = i2c_transfer(client->adapter, msg, ARRAY_SIZE(msg));
	if (ret != ARRAY_SIZE(msg)) {
		dev_err(g0->dev, "i2c read %02x, %02x error: %d\n", client->addr, reg, ret);

		return ret < 0 ? ret : -EIO;
	}

	return 0;
}
/*
 * Fire-and-forget write of @len bytes at UCSI @offset.  The payload is the
 * register offset byte followed by the data.
 */
static int ucsi_stm32g0_async_write(struct ucsi *ucsi, unsigned int offset, const void *val,
				    size_t len)
{
	struct ucsi_stm32g0 *g0 = ucsi_get_drvdata(ucsi);
	struct i2c_client *client = g0->client;
	struct i2c_msg msg[] = {
		{
			.addr = client->addr,
			.flags = 0,
		}
	};
	unsigned char *buf;
	int ret;

	buf = kmalloc(len + 1, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	buf[0] = offset;
	memcpy(&buf[1], val, len);
	msg[0].len = len + 1;
	msg[0].buf = buf;

	ret = i2c_transfer(client->adapter, msg, ARRAY_SIZE(msg));
	kfree(buf);
	if (ret != ARRAY_SIZE(msg)) {
		dev_err(g0->dev, "i2c write %02x, %02x error: %d\n", client->addr, offset, ret);

		return ret < 0 ? ret : -EIO;
	}

	return 0;
}
/*
 * Synchronous write: post the command, then wait up to 5s for the IRQ
 * handler to signal completion via g0->complete.
 */
static int ucsi_stm32g0_sync_write(struct ucsi *ucsi, unsigned int offset, const void *val,
				   size_t len)
{
	struct ucsi_stm32g0 *g0 = ucsi_get_drvdata(ucsi);
	int ret;

	/* Tell the IRQ handler that a completion is expected. */
	set_bit(COMMAND_PENDING, &g0->flags);

	ret = ucsi_stm32g0_async_write(ucsi, offset, val, len);
	if (ret)
		goto out_clear_bit;

	if (!wait_for_completion_timeout(&g0->complete, msecs_to_jiffies(5000)))
		ret = -ETIMEDOUT;

out_clear_bit:
	clear_bit(COMMAND_PENDING, &g0->flags);

	return ret;
}
/*
 * Threaded alert IRQ handler: read CCI, forward connector-change events to
 * the UCSI core and complete any pending synchronous command.
 */
static irqreturn_t ucsi_stm32g0_irq_handler(int irq, void *data)
{
	struct ucsi_stm32g0 *g0 = data;
	u32 cci;
	int ret;

	/* Remember the interrupt so resume can report a wakeup event. */
	if (g0->suspended)
		g0->wakeup_event = true;

	ret = ucsi_stm32g0_read(g0->ucsi, UCSI_CCI, &cci, sizeof(cci));
	if (ret)
		return IRQ_NONE;

	if (UCSI_CCI_CONNECTOR(cci))
		ucsi_connector_change(g0->ucsi, UCSI_CCI_CONNECTOR(cci));

	if (test_bit(COMMAND_PENDING, &g0->flags) &&
	    cci & (UCSI_CCI_ACK_COMPLETE | UCSI_CCI_COMMAND_COMPLETE))
		complete(&g0->complete);

	return IRQ_HANDLED;
}

static const struct ucsi_operations ucsi_stm32g0_ops = {
	.read = ucsi_stm32g0_read,
	.sync_write = ucsi_stm32g0_sync_write,
	.async_write = ucsi_stm32g0_async_write,
};
/* Request the alert IRQ and register with the UCSI core. */
static int ucsi_stm32g0_register(struct ucsi *ucsi)
{
	struct ucsi_stm32g0 *g0 = ucsi_get_drvdata(ucsi);
	struct i2c_client *client = g0->client;
	int ret;

	/* Request alert interrupt */
	ret = request_threaded_irq(client->irq, NULL, ucsi_stm32g0_irq_handler, IRQF_ONESHOT,
				   dev_name(g0->dev), g0);
	if (ret) {
		dev_err(g0->dev, "request IRQ failed: %d\n", ret);
		return ret;
	}

	ret = ucsi_register(ucsi);
	if (ret) {
		dev_err_probe(g0->dev, ret, "ucsi_register failed\n");
		free_irq(client->irq, g0);
		return ret;
	}

	return 0;
}

/* Reverse of ucsi_stm32g0_register(). */
static void ucsi_stm32g0_unregister(struct ucsi *ucsi)
{
	struct ucsi_stm32g0 *g0 = ucsi_get_drvdata(ucsi);
	struct i2c_client *client = g0->client;

	ucsi_unregister(ucsi);
	free_irq(client->irq, g0);
}
/*
 * request_firmware_nowait() callback: flash @fw to the STM32G0.
 *
 * If the UCSI firmware is currently running, compare its version with the
 * image first and reboot into the bootloader only when an update is needed.
 * Then mass-erase the flash, program the image in bootloader-sized chunks,
 * set the option bytes to boot from main flash (which also resets the G0),
 * and re-register the UCSI interface once the new firmware runs.
 */
static void ucsi_stm32g0_fw_cb(const struct firmware *fw, void *context)
{
	struct ucsi_stm32g0 *g0;
	const u8 *data, *end;
	const struct ucsi_stm32g0_fw_info *fw_info;
	u32 addr = STM32G0_MAIN_MEM_ADDR, ob, fw_version;
	int ret, size;

	if (!context)
		return;

	g0 = ucsi_get_drvdata(context);
	if (!fw)
		goto fw_release;

	/* Version + keyword live in the last bytes of the image. */
	fw_info = (struct ucsi_stm32g0_fw_info *)(fw->data + fw->size - sizeof(*fw_info));

	if (!g0->in_bootloader) {
		/* Read running firmware version */
		ret = ucsi_stm32g0_fw_cmd(g0->ucsi, STM32G0_FW_GETVER);
		if (ret) {
			dev_err(g0->dev, "Get version cmd failed %d\n", ret);
			goto fw_release;
		}
		ret = ucsi_stm32g0_fw_rcv(g0->ucsi, &fw_version,
					  STM32G0_FW_GETVER_LEN);
		if (ret) {
			dev_err(g0->dev, "Get version failed %d\n", ret);
			goto fw_release;
		}

		/* Sanity check on keyword and firmware version */
		if (fw_info->keyword != STM32G0_FW_KEYWORD || fw_info->version == fw_version)
			goto fw_release;

		dev_info(g0->dev, "Flashing FW: %08x (%08x cur)\n", fw_info->version, fw_version);

		/* Switch to bootloader mode */
		ucsi_stm32g0_unregister(g0->ucsi);
		ret = ucsi_stm32g0_fw_cmd(g0->ucsi, STM32G0_FW_RSTGOBL);
		if (ret) {
			dev_err(g0->dev, "bootloader cmd failed %d\n", ret);
			goto fw_release;
		}
		g0->in_bootloader = true;

		/* STM32G0 reboot delay */
		msleep(100);
	}

	ret = ucsi_stm32g0_bl_global_mass_erase(g0->ucsi);
	if (ret) {
		dev_err(g0->dev, "Erase failed %d\n", ret);
		goto fw_release;
	}

	/* Program the whole image in STM32G0_I2C_BL_SZ sized chunks. */
	data = fw->data;
	end = fw->data + fw->size;
	while (data < end) {
		if ((end - data) < STM32G0_I2C_BL_SZ)
			size = end - data;
		else
			size = STM32G0_I2C_BL_SZ;

		ret = ucsi_stm32g0_bl_write(g0->ucsi, addr, data, size);
		if (ret) {
			dev_err(g0->dev, "Write failed %d\n", ret);
			goto fw_release;
		}
		addr += size;
		data += size;
	}

	dev_dbg(g0->dev, "Configure to boot from main flash\n");
	ret = ucsi_stm32g0_bl_read(g0->ucsi, STM32G0_USER_OPTION_BYTES, &ob, sizeof(ob));
	if (ret) {
		dev_err(g0->dev, "read user option bytes failed %d\n", ret);
		goto fw_release;
	}
	dev_dbg(g0->dev, "STM32G0_USER_OPTION_BYTES 0x%08x\n", ob);

	/* Configure user option bytes to boot from main flash next time */
	ob |= STM32G0_USER_OB_BOOT_MAIN;

	/* Writing option bytes will also reset G0 for updates to be loaded */
	ret = ucsi_stm32g0_bl_write(g0->ucsi, STM32G0_USER_OPTION_BYTES, &ob, sizeof(ob));
	if (ret) {
		dev_err(g0->dev, "write user option bytes failed %d\n", ret);
		goto fw_release;
	}
	dev_info(g0->dev, "Starting, option bytes:0x%08x\n", ob);

	/* STM32G0 FW boot delay */
	msleep(500);

	/* Register UCSI interface */
	if (!ucsi_stm32g0_register(g0->ucsi))
		g0->in_bootloader = false;

fw_release:
	release_firmware(fw);
}
/*
 * Detect whether the G0 is running the UCSI firmware or its ROM bootloader.
 * When "firmware-name" is provided, a dummy I2C device is created at the
 * bootloader address so the device can be (re)flashed.
 */
static int ucsi_stm32g0_probe_bootloader(struct ucsi *ucsi)
{
	struct ucsi_stm32g0 *g0 = ucsi_get_drvdata(ucsi);
	int ret;
	u16 ucsi_version;

	/* firmware-name is optional */
	if (device_property_present(g0->dev, "firmware-name")) {
		ret = device_property_read_string(g0->dev, "firmware-name", &g0->fw_name);
		if (ret < 0)
			return dev_err_probe(g0->dev, ret, "Error reading firmware-name\n");
	}

	if (g0->fw_name) {
		/* STM32G0 in bootloader mode communicates at reserved address 0x51 */
		g0->i2c_bl = i2c_new_dummy_device(g0->client->adapter, STM32G0_I2C_BL_ADDR);
		if (IS_ERR(g0->i2c_bl)) {
			ret = dev_err_probe(g0->dev, PTR_ERR(g0->i2c_bl),
					    "Failed to register bootloader I2C address\n");
			return ret;
		}
	}

	/*
	 * Try to guess if the STM32G0 is running a UCSI firmware. First probe the UCSI FW at its
	 * i2c address. Fallback to bootloader i2c address only if firmware-name is specified.
	 */
	ret = ucsi_stm32g0_read(ucsi, UCSI_VERSION, &ucsi_version, sizeof(ucsi_version));
	if (!ret || !g0->fw_name)
		return ret;

	/* Speculatively read the bootloader version that has a known length. */
	ret = ucsi_stm32g0_bl_get_version(ucsi, &g0->bl_version);
	if (ret < 0) {
		i2c_unregister_device(g0->i2c_bl);
		return ret;
	}

	/* Device in bootloader mode */
	g0->in_bootloader = true;
	dev_info(g0->dev, "Bootloader Version 0x%02x\n", g0->bl_version);

	return 0;
}
/*
 * I2C probe: create the UCSI instance, detect firmware/bootloader mode,
 * register the UCSI interface (unless in bootloader mode) and kick off an
 * asynchronous firmware load/update when a firmware name was given.
 */
static int ucsi_stm32g0_probe(struct i2c_client *client)
{
	struct device *dev = &client->dev;
	struct ucsi_stm32g0 *g0;
	int ret;

	g0 = devm_kzalloc(dev, sizeof(*g0), GFP_KERNEL);
	if (!g0)
		return -ENOMEM;

	g0->dev = dev;
	g0->client = client;
	init_completion(&g0->complete);
	i2c_set_clientdata(client, g0);

	g0->ucsi = ucsi_create(dev, &ucsi_stm32g0_ops);
	if (IS_ERR(g0->ucsi))
		return PTR_ERR(g0->ucsi);

	ucsi_set_drvdata(g0->ucsi, g0);

	ret = ucsi_stm32g0_probe_bootloader(g0->ucsi);
	if (ret < 0)
		goto destroy;

	/*
	 * Don't register in bootloader mode: wait for the firmware to be loaded and started before
	 * registering UCSI device.
	 */
	if (!g0->in_bootloader) {
		ret = ucsi_stm32g0_register(g0->ucsi);
		if (ret < 0)
			goto freei2c;
	}

	if (g0->fw_name) {
		/*
		 * Asynchronously flash (e.g. bootloader mode) or update the running firmware,
		 * not to hang the boot process
		 */
		ret = request_firmware_nowait(THIS_MODULE, FW_ACTION_UEVENT, g0->fw_name, g0->dev,
					      GFP_KERNEL, g0->ucsi, ucsi_stm32g0_fw_cb);
		if (ret < 0) {
			dev_err_probe(dev, ret, "firmware request failed\n");
			goto unregister;
		}
	}

	return 0;

unregister:
	if (!g0->in_bootloader)
		ucsi_stm32g0_unregister(g0->ucsi);
freei2c:
	if (g0->fw_name)
		i2c_unregister_device(g0->i2c_bl);
destroy:
	ucsi_destroy(g0->ucsi);

	return ret;
}

/* Undo probe: unregister UCSI, drop the bootloader dummy device. */
static void ucsi_stm32g0_remove(struct i2c_client *client)
{
	struct ucsi_stm32g0 *g0 = i2c_get_clientdata(client);

	if (!g0->in_bootloader)
		ucsi_stm32g0_unregister(g0->ucsi);
	if (g0->fw_name)
		i2c_unregister_device(g0->i2c_bl);
	ucsi_destroy(g0->ucsi);
}
/* System suspend: mask the alert IRQ, optionally arm it for wakeup. */
static int ucsi_stm32g0_suspend(struct device *dev)
{
	struct ucsi_stm32g0 *g0 = dev_get_drvdata(dev);
	struct i2c_client *client = g0->client;

	if (g0->in_bootloader)
		return 0;

	/* Keep the interrupt disabled until the i2c bus has been resumed */
	disable_irq(client->irq);

	g0->suspended = true;
	g0->wakeup_event = false;

	if (device_may_wakeup(dev) || device_wakeup_path(dev))
		enable_irq_wake(client->irq);

	return 0;
}

/* System resume: unmask the IRQ and report any wakeup seen while asleep. */
static int ucsi_stm32g0_resume(struct device *dev)
{
	struct ucsi_stm32g0 *g0 = dev_get_drvdata(dev);
	struct i2c_client *client = g0->client;

	if (g0->in_bootloader)
		return 0;

	if (device_may_wakeup(dev) || device_wakeup_path(dev))
		disable_irq_wake(client->irq);

	enable_irq(client->irq);

	/* Enforce any pending handler gets called to signal a wakeup_event */
	synchronize_irq(client->irq);

	if (g0->wakeup_event)
		pm_wakeup_event(g0->dev, 0);

	g0->suspended = false;

	return 0;
}
/* PM ops, device match tables and driver registration. */
static DEFINE_SIMPLE_DEV_PM_OPS(ucsi_stm32g0_pm_ops, ucsi_stm32g0_suspend, ucsi_stm32g0_resume);

static const struct of_device_id __maybe_unused ucsi_stm32g0_typec_of_match[] = {
	{ .compatible = "st,stm32g0-typec" },
	{},
};
MODULE_DEVICE_TABLE(of, ucsi_stm32g0_typec_of_match);

static const struct i2c_device_id ucsi_stm32g0_typec_i2c_devid[] = {
	{"stm32g0-typec", 0},
	{},
};
MODULE_DEVICE_TABLE(i2c, ucsi_stm32g0_typec_i2c_devid);

static struct i2c_driver ucsi_stm32g0_i2c_driver = {
	.driver = {
		.name = "ucsi-stm32g0-i2c",
		.of_match_table = of_match_ptr(ucsi_stm32g0_typec_of_match),
		.pm = pm_sleep_ptr(&ucsi_stm32g0_pm_ops),
	},
	.probe = ucsi_stm32g0_probe,
	.remove = ucsi_stm32g0_remove,
	.id_table = ucsi_stm32g0_typec_i2c_devid
};
module_i2c_driver(ucsi_stm32g0_i2c_driver);

MODULE_AUTHOR("Fabrice Gasnier <[email protected]>");
MODULE_DESCRIPTION("STMicroelectronics STM32G0 Type-C controller");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_ALIAS("platform:ucsi-stm32g0");
| linux-master | drivers/usb/typec/ucsi/ucsi_stm32g0.c |
// SPDX-License-Identifier: GPL-2.0
#define CREATE_TRACE_POINTS
#include "ucsi.h"
#include "trace.h"
/* Human-readable names for UCSI commands, indexed by command code. */
static const char * const ucsi_cmd_strs[] = {
	[0] = "Unknown command",
	[UCSI_PPM_RESET] = "PPM_RESET",
	[UCSI_CANCEL] = "CANCEL",
	[UCSI_CONNECTOR_RESET] = "CONNECTOR_RESET",
	[UCSI_ACK_CC_CI] = "ACK_CC_CI",
	[UCSI_SET_NOTIFICATION_ENABLE] = "SET_NOTIFICATION_ENABLE",
	[UCSI_GET_CAPABILITY] = "GET_CAPABILITY",
	[UCSI_GET_CONNECTOR_CAPABILITY] = "GET_CONNECTOR_CAPABILITY",
	[UCSI_SET_UOM] = "SET_UOM",
	[UCSI_SET_UOR] = "SET_UOR",
	[UCSI_SET_PDM] = "SET_PDM",
	[UCSI_SET_PDR] = "SET_PDR",
	[UCSI_GET_ALTERNATE_MODES] = "GET_ALTERNATE_MODES",
	[UCSI_GET_CAM_SUPPORTED] = "GET_CAM_SUPPORTED",
	[UCSI_GET_CURRENT_CAM] = "GET_CURRENT_CAM",
	[UCSI_SET_NEW_CAM] = "SET_NEW_CAM",
	[UCSI_GET_PDOS] = "GET_PDOS",
	[UCSI_GET_CABLE_PROPERTY] = "GET_CABLE_PROPERTY",
	[UCSI_GET_CONNECTOR_STATUS] = "GET_CONNECTOR_STATUS",
	[UCSI_GET_ERROR_STATUS] = "GET_ERROR_STATUS",
};
/*
 * Return the name of the command encoded in the low byte of @raw_cmd.
 * Unknown or out-of-range codes map to entry 0 ("Unknown command").
 */
const char *ucsi_cmd_str(u64 raw_cmd)
{
	u8 cmd = raw_cmd & GENMASK(7, 0);

	if (cmd >= ARRAY_SIZE(ucsi_cmd_strs))
		cmd = 0;

	return ucsi_cmd_strs[cmd];
}
/* Describe a CCI value, distinguishing connector events from completions. */
const char *ucsi_cci_str(u32 cci)
{
	bool ack_done = cci & UCSI_CCI_ACK_COMPLETE;
	bool cmd_done = cci & UCSI_CCI_COMMAND_COMPLETE;

	if (UCSI_CCI_CONNECTOR(cci)) {
		if (ack_done)
			return "Event pending (ACK completed)";
		if (cmd_done)
			return "Event pending (command completed)";
		return "Connector Change";
	}

	if (ack_done)
		return "ACK completed";
	if (cmd_done)
		return "Command completed";

	return "";
}
/* Human-readable names for the UCSI_RECIPIENT_* values. */
static const char * const ucsi_recipient_strs[] = {
	[UCSI_RECIPIENT_CON] = "port",
	[UCSI_RECIPIENT_SOP] = "partner",
	[UCSI_RECIPIENT_SOP_P] = "plug (prime)",
	[UCSI_RECIPIENT_SOP_PP] = "plug (double prime)",
};

/*
 * Map @recipient to its name.  No bounds check: callers must pass a valid
 * UCSI_RECIPIENT_* value.
 */
const char *ucsi_recipient_str(u8 recipient)
{
	return ucsi_recipient_strs[recipient];
}
| linux-master | drivers/usb/typec/ucsi/trace.c |
// SPDX-License-Identifier: GPL-2.0
/*
* USB Type-C Connector System Software Interface driver
*
* Copyright (C) 2017, Intel Corporation
* Author: Heikki Krogerus <[email protected]>
*/
#include <linux/completion.h>
#include <linux/property.h>
#include <linux/device.h>
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/usb/typec_dp.h>
#include "ucsi.h"
#include "trace.h"
/*
* UCSI_TIMEOUT_MS - PPM communication timeout
*
* Ideally we could use MIN_TIME_TO_RESPOND_WITH_BUSY (which is defined in UCSI
* specification) here as reference, but unfortunately we can't. It is very
* difficult to estimate the time it takes for the system to process the command
* before it is actually passed to the PPM.
*/
#define UCSI_TIMEOUT_MS 5000
/*
* UCSI_SWAP_TIMEOUT_MS - Timeout for role swap requests
*
* 5 seconds is close to the time it takes for CapsCounter to reach 0, so even
* if the PPM does not generate Connector Change events before that with
* partners that do not support USB Power Delivery, this should still work.
*/
#define UCSI_SWAP_TIMEOUT_MS 5000
/* Tell the PPM the previous command's completion has been consumed. */
static int ucsi_acknowledge_command(struct ucsi *ucsi)
{
	u64 ctrl = UCSI_ACK_CC_CI | UCSI_ACK_COMMAND_COMPLETE;

	return ucsi->ops->sync_write(ucsi, UCSI_CONTROL, &ctrl, sizeof(ctrl));
}
/* Tell the PPM the pending connector change event has been consumed. */
static int ucsi_acknowledge_connector_change(struct ucsi *ucsi)
{
	u64 ctrl = UCSI_ACK_CC_CI | UCSI_ACK_CONNECTOR_CHANGE;

	return ucsi->ops->sync_write(ucsi, UCSI_CONTROL, &ctrl, sizeof(ctrl));
}
static int ucsi_exec_command(struct ucsi *ucsi, u64 command);
/*
 * ucsi_read_error - Fetch and decode the PPM error status
 * @ucsi: UCSI interface
 *
 * Acknowledges the command that failed, runs GET_ERROR_STATUS, reads the
 * 16-bit error code from the MESSAGE IN area, and maps it to a Linux errno.
 *
 * Always returns a negative errno (the command did fail), never 0.
 */
static int ucsi_read_error(struct ucsi *ucsi)
{
	u16 error;
	int ret;
	/* Acknowledge the command that failed */
	ret = ucsi_acknowledge_command(ucsi);
	if (ret)
		return ret;
	ret = ucsi_exec_command(ucsi, UCSI_GET_ERROR_STATUS);
	if (ret < 0)
		return ret;
	ret = ucsi->ops->read(ucsi, UCSI_MESSAGE_IN, &error, sizeof(error));
	if (ret)
		return ret;
	/* GET_ERROR_STATUS itself must also be acknowledged */
	ret = ucsi_acknowledge_command(ucsi);
	if (ret)
		return ret;
	switch (error) {
	case UCSI_ERROR_INCOMPATIBLE_PARTNER:
		return -EOPNOTSUPP;
	case UCSI_ERROR_CC_COMMUNICATION_ERR:
		return -ECOMM;
	case UCSI_ERROR_CONTRACT_NEGOTIATION_FAIL:
		return -EPROTO;
	case UCSI_ERROR_DEAD_BATTERY:
		dev_warn(ucsi->dev, "Dead battery condition!\n");
		return -EPERM;
	case UCSI_ERROR_INVALID_CON_NUM:
	case UCSI_ERROR_UNREGONIZED_CMD:
	case UCSI_ERROR_INVALID_CMD_ARGUMENT:
		/* The PPM rejected something the driver sent */
		dev_err(ucsi->dev, "possible UCSI driver bug %u\n", error);
		return -EINVAL;
	case UCSI_ERROR_OVERCURRENT:
		dev_warn(ucsi->dev, "Overcurrent condition\n");
		break;
	case UCSI_ERROR_PARTNER_REJECTED_SWAP:
		dev_warn(ucsi->dev, "Partner rejected swap\n");
		break;
	case UCSI_ERROR_HARD_RESET:
		dev_warn(ucsi->dev, "Hard reset occurred\n");
		break;
	case UCSI_ERROR_PPM_POLICY_CONFLICT:
		dev_warn(ucsi->dev, "PPM Policy conflict\n");
		break;
	case UCSI_ERROR_SWAP_REJECTED:
		dev_warn(ucsi->dev, "Swap rejected\n");
		break;
	case UCSI_ERROR_UNDEFINED:
	default:
		dev_err(ucsi->dev, "unknown error %u\n", error);
		break;
	}
	/* Conditions that were only logged above fall through to -EIO */
	return -EIO;
}
/*
 * ucsi_exec_command - Write a command to CONTROL and evaluate the CCI
 * @ucsi: UCSI interface
 * @cmd: complete UCSI command value
 *
 * Returns the length of data available in MESSAGE IN on success (taken
 * from the CCI), or a negative errno. Caller holds ucsi->ppm_lock.
 */
static int ucsi_exec_command(struct ucsi *ucsi, u64 cmd)
{
	u32 cci;
	int ret;
	ret = ucsi->ops->sync_write(ucsi, UCSI_CONTROL, &cmd, sizeof(cmd));
	if (ret)
		return ret;
	ret = ucsi->ops->read(ucsi, UCSI_CCI, &cci, sizeof(cci));
	if (ret)
		return ret;
	/*
	 * PPM still busy: recurse once to issue CANCEL (guarded so a busy
	 * CANCEL itself does not recurse again).
	 */
	if (cmd != UCSI_CANCEL && cci & UCSI_CCI_BUSY)
		return ucsi_exec_command(ucsi, UCSI_CANCEL);
	if (!(cci & UCSI_CCI_COMMAND_COMPLETE))
		return -EIO;
	if (cci & UCSI_CCI_NOT_SUPPORTED)
		return -EOPNOTSUPP;
	if (cci & UCSI_CCI_ERROR) {
		/* Avoid a GET_ERROR_STATUS loop if that command itself errors */
		if (cmd == UCSI_GET_ERROR_STATUS)
			return -EIO;
		return ucsi_read_error(ucsi);
	}
	/* Cancelled command: ack the cancellation and report busy to caller */
	if (cmd == UCSI_CANCEL && cci & UCSI_CCI_CANCEL_COMPLETE) {
		ret = ucsi_acknowledge_command(ucsi);
		return ret ? ret : -EBUSY;
	}
	return UCSI_CCI_LENGTH(cci);
}
/*
 * ucsi_send_command - Execute a command and read back its response data
 * @ucsi: UCSI interface
 * @command: complete UCSI command value
 * @data: optional buffer for the MESSAGE IN payload (may be NULL)
 * @size: size of @data in bytes
 *
 * Serializes against other PPM users with ucsi->ppm_lock, copies the
 * response into @data if requested, and acknowledges the command.
 *
 * Returns the response length reported by the PPM, or a negative errno.
 */
int ucsi_send_command(struct ucsi *ucsi, u64 command,
		      void *data, size_t size)
{
	u8 length;
	int ret;
	mutex_lock(&ucsi->ppm_lock);
	ret = ucsi_exec_command(ucsi, command);
	if (ret < 0)
		goto out;
	/* Remember the payload length before ret is reused below */
	length = ret;
	if (data) {
		ret = ucsi->ops->read(ucsi, UCSI_MESSAGE_IN, data, size);
		if (ret)
			goto out;
	}
	ret = ucsi_acknowledge_command(ucsi);
	if (ret)
		goto out;
	ret = length;
out:
	mutex_unlock(&ucsi->ppm_lock);
	return ret;
}
EXPORT_SYMBOL_GPL(ucsi_send_command);
/* -------------------------------------------------------------------------- */
/*
 * struct ucsi_work - Deferred task that operates on a connector's partner
 * @work: delayed work executing ucsi_poll_worker()
 * @node: entry in ucsi_connector->partner_tasks
 * @delay: requeue delay (jiffies) used when the callback asks for a retry
 * @count: remaining retry attempts
 * @con: connector the task belongs to
 * @cb: callback; returning -EBUSY or -ETIMEDOUT triggers a retry
 */
struct ucsi_work {
	struct delayed_work work;
	struct list_head node;
	unsigned long delay;
	unsigned int count;
	struct ucsi_connector *con;
	int (*cb)(struct ucsi_connector *);
};
/*
 * Work function for ucsi_partner_task() items: runs the callback under
 * con->lock, retrying a limited number of times on -EBUSY/-ETIMEDOUT,
 * and frees the work item once it is done or the partner went away.
 */
static void ucsi_poll_worker(struct work_struct *work)
{
	struct ucsi_work *uwork = container_of(work, struct ucsi_work, work.work);
	struct ucsi_connector *con = uwork->con;
	int ret;
	mutex_lock(&con->lock);
	/* Partner disconnected while we were queued: drop the task */
	if (!con->partner) {
		list_del(&uwork->node);
		mutex_unlock(&con->lock);
		kfree(uwork);
		return;
	}
	ret = uwork->cb(con);
	/* Retry while attempts remain and the callback reported busy/timeout */
	if (uwork->count-- && (ret == -EBUSY || ret == -ETIMEDOUT)) {
		queue_delayed_work(con->wq, &uwork->work, uwork->delay);
	} else {
		list_del(&uwork->node);
		kfree(uwork);
	}
	mutex_unlock(&con->lock);
}
/*
 * ucsi_partner_task - Queue a retryable task against the current partner
 * @con: connector the task operates on
 * @cb: callback to run; -EBUSY/-ETIMEDOUT results are retried
 * @retries: maximum number of retries
 * @delay: initial and per-retry delay in jiffies
 *
 * No-op (returns 0) when no partner is connected. The allocated work item
 * is freed by ucsi_poll_worker(). Caller holds con->lock.
 */
static int ucsi_partner_task(struct ucsi_connector *con,
			     int (*cb)(struct ucsi_connector *),
			     int retries, unsigned long delay)
{
	struct ucsi_work *uwork;
	if (!con->partner)
		return 0;
	uwork = kzalloc(sizeof(*uwork), GFP_KERNEL);
	if (!uwork)
		return -ENOMEM;
	INIT_DELAYED_WORK(&uwork->work, ucsi_poll_worker);
	uwork->count = retries;
	uwork->delay = delay;
	uwork->con = con;
	uwork->cb = cb;
	/* Tracked so pending tasks can be found per connector */
	list_add_tail(&uwork->node, &con->partner_tasks);
	queue_delayed_work(con->wq, &uwork->work, delay);
	return 0;
}
/* -------------------------------------------------------------------------- */
/*
 * ucsi_altmode_update_active - Sync the active flag of partner altmodes
 * @con: the connector
 *
 * Queries which alternate mode is currently entered (GET_CURRENT_CAM) and
 * marks exactly the matching registered partner altmode as active.
 */
void ucsi_altmode_update_active(struct ucsi_connector *con)
{
	const struct typec_altmode *altmode = NULL;
	u64 command;
	int ret;
	u8 cur;
	int i;
	command = UCSI_GET_CURRENT_CAM | UCSI_CONNECTOR_NUMBER(con->num);
	ret = ucsi_send_command(con->ucsi, command, &cur, sizeof(cur));
	if (ret < 0) {
		if (con->ucsi->version > 0x0100) {
			dev_err(con->ucsi->dev,
				"GET_CURRENT_CAM command failed\n");
			return;
		}
		/* UCSI 1.0 PPMs may not support the command: treat as "none" */
		cur = 0xff;
	}
	/* cur == 0xff (no active mode) fails this check and altmode stays NULL */
	if (cur < UCSI_MAX_ALTMODES)
		altmode = typec_altmode_get_partner(con->port_altmode[cur]);
	for (i = 0; con->partner_altmode[i]; i++)
		typec_altmode_update_active(con->partner_altmode[i],
					    con->partner_altmode[i] == altmode);
}
/*
 * Compute the mode index (1-based) the next altmode with @svid should get:
 * one more than the number of already registered altmodes with that SVID.
 * Returns -ERANGE if the table already exceeds MODE_DISCOVERY_MAX entries.
 */
static int ucsi_altmode_next_mode(struct typec_altmode **alt, u16 svid)
{
	u8 next_mode = 1;
	int idx = 0;

	while (alt[idx]) {
		if (idx > MODE_DISCOVERY_MAX)
			return -ERANGE;

		if (alt[idx]->svid == svid)
			next_mode++;

		idx++;
	}

	return next_mode;
}
/* Find the first free slot in an altmode table, or -ENOENT if full. */
static int ucsi_next_altmode(struct typec_altmode **alt)
{
	int idx = 0;

	while (idx < UCSI_MAX_ALTMODES) {
		if (!alt[idx])
			return idx;
		idx++;
	}

	return -ENOENT;
}
/* Count the registered (non-NULL) entries in an altmode table. */
static int ucsi_get_num_altmode(struct typec_altmode **alt)
{
	int count = 0;

	while (count < UCSI_MAX_ALTMODES && alt[count])
		count++;

	return count;
}
/*
 * ucsi_register_altmode - Register one alternate mode with the typec class
 * @con: the connector
 * @desc: altmode descriptor; desc->mode is filled in here
 * @recipient: UCSI_RECIPIENT_CON (port) or UCSI_RECIPIENT_SOP (partner)
 *
 * Picks the next free slot in the matching altmode table, assigns the next
 * mode index for the SVID, and registers either a port or partner altmode.
 * DisplayPort (and non-debug NVIDIA VirtualLink) port altmodes go through
 * the driver's DP support so they get altmode ops.
 *
 * Returns 0 on success or a negative errno.
 */
static int ucsi_register_altmode(struct ucsi_connector *con,
				 struct typec_altmode_desc *desc,
				 u8 recipient)
{
	struct typec_altmode *alt;
	bool override;
	int ret;
	int i;
	override = !!(con->ucsi->cap.features & UCSI_CAP_ALT_MODE_OVERRIDE);
	switch (recipient) {
	case UCSI_RECIPIENT_CON:
		i = ucsi_next_altmode(con->port_altmode);
		if (i < 0) {
			ret = i;
			goto err;
		}
		ret = ucsi_altmode_next_mode(con->port_altmode, desc->svid);
		if (ret < 0)
			return ret;
		desc->mode = ret;
		switch (desc->svid) {
		case USB_TYPEC_DP_SID:
			alt = ucsi_register_displayport(con, override, i, desc);
			break;
		case USB_TYPEC_NVIDIA_VLINK_SID:
			/* The NVIDIA debug VDO is not a DP altmode */
			if (desc->vdo == USB_TYPEC_NVIDIA_VLINK_DBG_VDO)
				alt = typec_port_register_altmode(con->port,
								  desc);
			else
				alt = ucsi_register_displayport(con, override,
								i, desc);
			break;
		default:
			alt = typec_port_register_altmode(con->port, desc);
			break;
		}
		if (IS_ERR(alt)) {
			ret = PTR_ERR(alt);
			goto err;
		}
		con->port_altmode[i] = alt;
		break;
	case UCSI_RECIPIENT_SOP:
		i = ucsi_next_altmode(con->partner_altmode);
		if (i < 0) {
			ret = i;
			goto err;
		}
		ret = ucsi_altmode_next_mode(con->partner_altmode, desc->svid);
		if (ret < 0)
			return ret;
		desc->mode = ret;
		alt = typec_partner_register_altmode(con->partner, desc);
		if (IS_ERR(alt)) {
			ret = PTR_ERR(alt);
			goto err;
		}
		con->partner_altmode[i] = alt;
		break;
	default:
		return -EINVAL;
	}
	trace_ucsi_register_altmode(recipient, alt);
	return 0;
err:
	dev_err(con->ucsi->dev, "failed to registers svid 0x%04x mode %d\n",
		desc->svid, desc->mode);
	return ret;
}
/*
 * ucsi_register_altmodes_nvidia - Altmode registration for NVIDIA PPMs
 * @con: the connector
 * @recipient: UCSI_RECIPIENT_CON or UCSI_RECIPIENT_SOP
 *
 * Collects all reported alternate modes first, lets the platform's
 * update_altmodes() op rewrite the table (some PPMs report multiple DP
 * altmodes for the port), then registers the resulting modes.
 *
 * Returns 0 on success or a negative errno.
 */
static int
ucsi_register_altmodes_nvidia(struct ucsi_connector *con, u8 recipient)
{
	int max_altmodes = UCSI_MAX_ALTMODES;
	struct typec_altmode_desc desc;
	struct ucsi_altmode alt;
	struct ucsi_altmode orig[UCSI_MAX_ALTMODES];
	struct ucsi_altmode updated[UCSI_MAX_ALTMODES];
	struct ucsi *ucsi = con->ucsi;
	bool multi_dp = false;
	u64 command;
	int ret;
	int len;
	int i;
	int k = 0;
	if (recipient == UCSI_RECIPIENT_CON)
		max_altmodes = con->ucsi->cap.num_alt_modes;
	memset(orig, 0, sizeof(orig));
	memset(updated, 0, sizeof(updated));
	/* First get all the alternate modes */
	for (i = 0; i < max_altmodes; i++) {
		memset(&alt, 0, sizeof(alt));
		command = UCSI_GET_ALTERNATE_MODES;
		command |= UCSI_GET_ALTMODE_RECIPIENT(recipient);
		command |= UCSI_GET_ALTMODE_CONNECTOR_NUMBER(con->num);
		command |= UCSI_GET_ALTMODE_OFFSET(i);
		len = ucsi_send_command(con->ucsi, command, &alt, sizeof(alt));
		/*
		 * We are collecting all altmodes first and then registering.
		 * Some type-C device will return zero length data beyond last
		 * alternate modes. We should not return if length is zero.
		 */
		if (len < 0)
			return len;
		/* We got all altmodes, now break out and register them */
		if (!len || !alt.svid)
			break;
		orig[k].mid = alt.mid;
		orig[k].svid = alt.svid;
		k++;
	}
	/*
	 * Update the original altmode table as some ppms may report
	 * multiple DP altmodes.
	 */
	if (recipient == UCSI_RECIPIENT_CON)
		multi_dp = ucsi->ops->update_altmodes(ucsi, orig, updated);
	/* now register altmodes */
	for (i = 0; i < max_altmodes; i++) {
		memset(&desc, 0, sizeof(desc));
		/* Use the rewritten table only when it actually changed */
		if (multi_dp && recipient == UCSI_RECIPIENT_CON) {
			desc.svid = updated[i].svid;
			desc.vdo = updated[i].mid;
		} else {
			desc.svid = orig[i].svid;
			desc.vdo = orig[i].mid;
		}
		desc.roles = TYPEC_PORT_DRD;
		/* A zero SVID terminates the table */
		if (!desc.svid)
			return 0;
		ret = ucsi_register_altmode(con, &desc, recipient);
		if (ret)
			return ret;
	}
	return 0;
}
/*
 * ucsi_register_altmodes - Discover and register alternate modes
 * @con: the connector
 * @recipient: UCSI_RECIPIENT_CON (port) or UCSI_RECIPIENT_SOP (partner)
 *
 * Iterates GET_ALTERNATE_MODES until the PPM reports no more modes and
 * registers each one. No-op when the PPM does not expose altmode details
 * or the partner altmodes are already registered.
 *
 * Returns 0 on success or a negative errno.
 */
static int ucsi_register_altmodes(struct ucsi_connector *con, u8 recipient)
{
	int max_altmodes = UCSI_MAX_ALTMODES;
	struct typec_altmode_desc desc;
	struct ucsi_altmode alt[2];
	u64 command;
	int num;
	int ret;
	int len;
	int j;
	int i;
	if (!(con->ucsi->cap.features & UCSI_CAP_ALT_MODE_DETAILS))
		return 0;
	if (recipient == UCSI_RECIPIENT_SOP && con->partner_altmode[0])
		return 0;
	/* Platforms with an update_altmodes op need special handling */
	if (con->ucsi->ops->update_altmodes)
		return ucsi_register_altmodes_nvidia(con, recipient);
	if (recipient == UCSI_RECIPIENT_CON)
		max_altmodes = con->ucsi->cap.num_alt_modes;
	for (i = 0; i < max_altmodes;) {
		memset(alt, 0, sizeof(alt));
		command = UCSI_GET_ALTERNATE_MODES;
		command |= UCSI_GET_ALTMODE_RECIPIENT(recipient);
		command |= UCSI_GET_ALTMODE_CONNECTOR_NUMBER(con->num);
		command |= UCSI_GET_ALTMODE_OFFSET(i);
		len = ucsi_send_command(con->ucsi, command, alt, sizeof(alt));
		/* NOTE(review): retries same offset forever on -EBUSY — confirm
		 * the PPM eventually stops reporting busy. */
		if (len == -EBUSY)
			continue;
		if (len <= 0)
			return len;
		/*
		 * This code is requesting one alt mode at a time, but some PPMs
		 * may still return two. If that happens both alt modes need be
		 * registered and the offset for the next alt mode has to be
		 * incremented.
		 */
		num = len / sizeof(alt[0]);
		i += num;
		for (j = 0; j < num; j++) {
			/* A zero SVID means there are no more modes */
			if (!alt[j].svid)
				return 0;
			memset(&desc, 0, sizeof(desc));
			desc.vdo = alt[j].mid;
			desc.svid = alt[j].svid;
			desc.roles = TYPEC_PORT_DRD;
			ret = ucsi_register_altmode(con, &desc, recipient);
			if (ret)
				return ret;
		}
	}
	return 0;
}
/*
 * ucsi_unregister_altmodes - Unregister all altmodes of the port or partner
 * @con: the connector
 * @recipient: UCSI_RECIPIENT_CON or UCSI_RECIPIENT_SOP
 *
 * Tears down DisplayPort partner state for DP-capable partner altmodes
 * before unregistering them, and clears the table slots.
 */
static void ucsi_unregister_altmodes(struct ucsi_connector *con, u8 recipient)
{
	const struct typec_altmode *pdev;
	struct typec_altmode **adev;
	int i = 0;
	switch (recipient) {
	case UCSI_RECIPIENT_CON:
		adev = con->port_altmode;
		break;
	case UCSI_RECIPIENT_SOP:
		adev = con->partner_altmode;
		break;
	default:
		return;
	}
	while (adev[i]) {
		/* DP and non-debug NVIDIA VirtualLink modes carry DP state */
		if (recipient == UCSI_RECIPIENT_SOP &&
		    (adev[i]->svid == USB_TYPEC_DP_SID ||
		     (adev[i]->svid == USB_TYPEC_NVIDIA_VLINK_SID &&
		      adev[i]->vdo != USB_TYPEC_NVIDIA_VLINK_DBG_VDO))) {
			pdev = typec_altmode_get_partner(adev[i]);
			ucsi_displayport_remove_partner((void *)pdev);
		}
		typec_unregister_altmode(adev[i]);
		adev[i++] = NULL;
	}
}
/*
 * ucsi_read_pdos - Read a window of PDOs with one GET_PDOS command
 * @con: the connector
 * @role: TYPEC_SOURCE for source caps, TYPEC_SINK for sink caps
 * @is_partner: 1 to query the partner, 0 to query the local port
 * @pdos: destination array; entries [offset, offset+num_pdos) are filled
 * @offset: index of the first PDO to request
 * @num_pdos: number of PDOs to request (the command encodes count - 1)
 *
 * Returns the number of bytes read (multiple of 4) or a negative errno.
 * -ETIMEDOUT is not logged since partners may legitimately not answer.
 */
static int ucsi_read_pdos(struct ucsi_connector *con,
			  enum typec_role role, int is_partner,
			  u32 *pdos, int offset, int num_pdos)
{
	struct ucsi *ucsi = con->ucsi;
	u64 command;
	int ret;
	command = UCSI_COMMAND(UCSI_GET_PDOS) | UCSI_CONNECTOR_NUMBER(con->num);
	command |= UCSI_GET_PDOS_PARTNER_PDO(is_partner);
	command |= UCSI_GET_PDOS_PDO_OFFSET(offset);
	command |= UCSI_GET_PDOS_NUM_PDOS(num_pdos - 1);
	command |= is_source(role) ? UCSI_GET_PDOS_SRC_PDOS : 0;
	ret = ucsi_send_command(ucsi, command, pdos + offset,
				num_pdos * sizeof(u32));
	if (ret < 0 && ret != -ETIMEDOUT)
		dev_err(ucsi->dev, "UCSI_GET_PDOS failed (%d)\n", ret);
	return ret;
}
/*
 * ucsi_get_pdos - Read the full PDO list in up to two GET_PDOS commands
 * @con: the connector
 * @role: TYPEC_SOURCE or TYPEC_SINK
 * @is_partner: 1 for the partner's PDOs, 0 for the local port's
 * @pdos: destination array, at least PDO_MAX_OBJECTS entries
 *
 * Returns the total number of PDOs read or a negative errno.
 */
static int ucsi_get_pdos(struct ucsi_connector *con, enum typec_role role,
			 int is_partner, u32 *pdos)
{
	u8 num_pdos;
	int ret;
	/* UCSI max payload means only getting at most 4 PDOs at a time */
	ret = ucsi_read_pdos(con, role, is_partner, pdos, 0, UCSI_MAX_PDOS);
	if (ret < 0)
		return ret;
	num_pdos = ret / sizeof(u32); /* number of bytes to 32-bit PDOs */
	/* A short first read means there is nothing more to fetch */
	if (num_pdos < UCSI_MAX_PDOS)
		return num_pdos;
	/* get the remaining PDOs, if any */
	ret = ucsi_read_pdos(con, role, is_partner, pdos, UCSI_MAX_PDOS,
			     PDO_MAX_OBJECTS - UCSI_MAX_PDOS);
	if (ret < 0)
		return ret;
	return ret / sizeof(u32) + num_pdos;
}
/*
 * Refresh the cached partner source PDOs and notify the power supply
 * class. Returns the number of PDOs read or a negative errno.
 */
static int ucsi_get_src_pdos(struct ucsi_connector *con)
{
	int num = ucsi_get_pdos(con, TYPEC_SOURCE, 1, con->src_pdos);

	if (num < 0)
		return num;

	con->num_pdos = num;
	ucsi_port_psy_changed(con);

	return num;
}
/*
 * ucsi_check_altmodes - Register partner altmodes and sync the active one
 * @con: the connector
 *
 * Used as a ucsi_partner_task() callback: returning -EBUSY/-ETIMEDOUT
 * makes the poll worker retry, so those registration errors are only
 * reported once something was registered.
 */
static int ucsi_check_altmodes(struct ucsi_connector *con)
{
	int ret, num_partner_am;
	ret = ucsi_register_altmodes(con, UCSI_RECIPIENT_SOP);
	if (ret && ret != -ETIMEDOUT)
		dev_err(con->ucsi->dev,
			"con%d: failed to register partner alt modes (%d)\n",
			con->num, ret);
	/* Ignoring the errors in this case. */
	if (con->partner_altmode[0]) {
		num_partner_am = ucsi_get_num_altmode(con->partner_altmode);
		if (num_partner_am > 0)
			typec_partner_set_num_altmodes(con->partner, num_partner_am);
		ucsi_altmode_update_active(con);
		return 0;
	}
	return ret;
}
/*
 * ucsi_register_partner_pdos - Publish the partner's PD capabilities
 * @con: the connector
 *
 * Registers a usb_power_delivery device for the partner and attaches its
 * source and sink capabilities read via GET_PDOS. No-op if already done.
 *
 * NOTE(review): on a failure after con->partner_pd was registered, the
 * partner_pd (and an earlier-registered capability set) are left in place
 * — they appear to be cleaned up later by ucsi_unregister_partner_pdos();
 * confirm all error paths reach it.
 */
static int ucsi_register_partner_pdos(struct ucsi_connector *con)
{
	struct usb_power_delivery_desc desc = { con->ucsi->cap.pd_version };
	struct usb_power_delivery_capabilities_desc caps;
	struct usb_power_delivery_capabilities *cap;
	int ret;
	if (con->partner_pd)
		return 0;
	con->partner_pd = usb_power_delivery_register(NULL, &desc);
	if (IS_ERR(con->partner_pd))
		return PTR_ERR(con->partner_pd);
	ret = ucsi_get_pdos(con, TYPEC_SOURCE, 1, caps.pdo);
	if (ret > 0) {
		/* Zero-terminate the PDO list when it is not full */
		if (ret < PDO_MAX_OBJECTS)
			caps.pdo[ret] = 0;
		caps.role = TYPEC_SOURCE;
		cap = usb_power_delivery_register_capabilities(con->partner_pd, &caps);
		if (IS_ERR(cap))
			return PTR_ERR(cap);
		con->partner_source_caps = cap;
		ret = typec_partner_set_usb_power_delivery(con->partner, con->partner_pd);
		if (ret) {
			usb_power_delivery_unregister_capabilities(con->partner_source_caps);
			return ret;
		}
	}
	ret = ucsi_get_pdos(con, TYPEC_SINK, 1, caps.pdo);
	if (ret > 0) {
		if (ret < PDO_MAX_OBJECTS)
			caps.pdo[ret] = 0;
		caps.role = TYPEC_SINK;
		cap = usb_power_delivery_register_capabilities(con->partner_pd, &caps);
		if (IS_ERR(cap))
			return PTR_ERR(cap);
		con->partner_sink_caps = cap;
		ret = typec_partner_set_usb_power_delivery(con->partner, con->partner_pd);
		if (ret) {
			usb_power_delivery_unregister_capabilities(con->partner_sink_caps);
			return ret;
		}
	}
	return 0;
}
/*
 * Tear down everything ucsi_register_partner_pdos() created: both
 * capability sets first, then the partner's power delivery device.
 * Pointers are cleared so the registration can run again on reconnect.
 */
static void ucsi_unregister_partner_pdos(struct ucsi_connector *con)
{
	usb_power_delivery_unregister_capabilities(con->partner_sink_caps);
	con->partner_sink_caps = NULL;
	usb_power_delivery_unregister_capabilities(con->partner_source_caps);
	con->partner_source_caps = NULL;
	usb_power_delivery_unregister(con->partner_pd);
	con->partner_pd = NULL;
}
/*
 * ucsi_pwr_opmode_change - React to a power operation mode change
 * @con: the connector
 *
 * Updates the typec port's power opmode from the cached connector status
 * and, when a PD contract exists, kicks off the partner discovery tasks
 * (source PDOs, altmodes, partner PD caps). con->rdo caches the request
 * data object only while in PD mode.
 */
static void ucsi_pwr_opmode_change(struct ucsi_connector *con)
{
	switch (UCSI_CONSTAT_PWR_OPMODE(con->status.flags)) {
	case UCSI_CONSTAT_PWR_OPMODE_PD:
		con->rdo = con->status.request_data_obj;
		typec_set_pwr_opmode(con->port, TYPEC_PWR_MODE_PD);
		ucsi_partner_task(con, ucsi_get_src_pdos, 30, 0);
		ucsi_partner_task(con, ucsi_check_altmodes, 30, 0);
		ucsi_partner_task(con, ucsi_register_partner_pdos, 1, HZ);
		break;
	case UCSI_CONSTAT_PWR_OPMODE_TYPEC1_5:
		con->rdo = 0;
		typec_set_pwr_opmode(con->port, TYPEC_PWR_MODE_1_5A);
		break;
	case UCSI_CONSTAT_PWR_OPMODE_TYPEC3_0:
		con->rdo = 0;
		typec_set_pwr_opmode(con->port, TYPEC_PWR_MODE_3_0A);
		break;
	default:
		con->rdo = 0;
		typec_set_pwr_opmode(con->port, TYPEC_PWR_MODE_USB);
		break;
	}
}
/*
 * ucsi_register_partner - Register the attached partner with typec class
 * @con: the connector
 *
 * Builds a partner descriptor from the cached connector status (accessory
 * type, PD contract) and registers it. No-op when already registered.
 *
 * Returns 0 on success or a negative errno.
 */
static int ucsi_register_partner(struct ucsi_connector *con)
{
	u8 pwr_opmode = UCSI_CONSTAT_PWR_OPMODE(con->status.flags);
	struct typec_partner_desc desc;
	struct typec_partner *partner;
	if (con->partner)
		return 0;
	memset(&desc, 0, sizeof(desc));
	switch (UCSI_CONSTAT_PARTNER_TYPE(con->status.flags)) {
	case UCSI_CONSTAT_PARTNER_TYPE_DEBUG:
		desc.accessory = TYPEC_ACCESSORY_DEBUG;
		break;
	case UCSI_CONSTAT_PARTNER_TYPE_AUDIO:
		desc.accessory = TYPEC_ACCESSORY_AUDIO;
		break;
	default:
		break;
	}
	desc.usb_pd = pwr_opmode == UCSI_CONSTAT_PWR_OPMODE_PD;
	partner = typec_register_partner(con->port, &desc);
	if (IS_ERR(partner)) {
		dev_err(con->ucsi->dev,
			"con%d: failed to register partner (%ld)\n", con->num,
			PTR_ERR(partner));
		return PTR_ERR(partner);
	}
	con->partner = partner;
	return 0;
}
/*
 * Undo ucsi_register_partner(): drop the mux back to the safe state,
 * remove the partner's PD caps and altmodes, then unregister the partner.
 * Safe to call when no partner is registered.
 */
static void ucsi_unregister_partner(struct ucsi_connector *con)
{
	if (!con->partner)
		return;
	typec_set_mode(con->port, TYPEC_STATE_SAFE);
	ucsi_unregister_partner_pdos(con);
	ucsi_unregister_altmodes(con, UCSI_RECIPIENT_SOP);
	typec_unregister_partner(con->partner);
	con->partner = NULL;
}
/*
 * ucsi_partner_change - Propagate partner type/mode to data role and mux
 * @con: the connector
 *
 * Derives the port data role and the USB role-switch setting from the
 * cached connector status, and selects the port mux mode for accessory
 * partners.
 */
static void ucsi_partner_change(struct ucsi_connector *con)
{
	enum usb_role u_role = USB_ROLE_NONE;
	int ret;
	switch (UCSI_CONSTAT_PARTNER_TYPE(con->status.flags)) {
	case UCSI_CONSTAT_PARTNER_TYPE_UFP:
	case UCSI_CONSTAT_PARTNER_TYPE_CABLE_AND_UFP:
		u_role = USB_ROLE_HOST;
		fallthrough;
	case UCSI_CONSTAT_PARTNER_TYPE_CABLE:
		typec_set_data_role(con->port, TYPEC_HOST);
		break;
	case UCSI_CONSTAT_PARTNER_TYPE_DFP:
		u_role = USB_ROLE_DEVICE;
		typec_set_data_role(con->port, TYPEC_DEVICE);
		break;
	default:
		break;
	}
	if (con->status.flags & UCSI_CONSTAT_CONNECTED) {
		switch (UCSI_CONSTAT_PARTNER_TYPE(con->status.flags)) {
		case UCSI_CONSTAT_PARTNER_TYPE_DEBUG:
			typec_set_mode(con->port, TYPEC_MODE_DEBUG);
			break;
		case UCSI_CONSTAT_PARTNER_TYPE_AUDIO:
			typec_set_mode(con->port, TYPEC_MODE_AUDIO);
			break;
		default:
			/* Plain USB partner: leave any altmode untouched */
			if (UCSI_CONSTAT_PARTNER_FLAGS(con->status.flags) ==
					UCSI_CONSTAT_PARTNER_FLAG_USB)
				typec_set_mode(con->port, TYPEC_STATE_USB);
		}
	}
	/* Only notify USB controller if partner supports USB data */
	if (!(UCSI_CONSTAT_PARTNER_FLAGS(con->status.flags) & UCSI_CONSTAT_PARTNER_FLAG_USB))
		u_role = USB_ROLE_NONE;
	ret = usb_role_switch_set_role(con->usb_role_sw, u_role);
	if (ret)
		dev_err(con->ucsi->dev, "con:%d: failed to set usb role:%d\n",
			con->num, u_role);
}
static int ucsi_check_connection(struct ucsi_connector *con)
{
u8 prev_flags = con->status.flags;
u64 command;
int ret;
command = UCSI_GET_CONNECTOR_STATUS | UCSI_CONNECTOR_NUMBER(con->num);
ret = ucsi_send_command(con->ucsi, command, &con->status, sizeof(con->status));
if (ret < 0) {
dev_err(con->ucsi->dev, "GET_CONNECTOR_STATUS failed (%d)\n", ret);
return ret;
}
if (con->status.flags == prev_flags)
return 0;
if (con->status.flags & UCSI_CONSTAT_CONNECTED) {
ucsi_register_partner(con);
ucsi_pwr_opmode_change(con);
ucsi_partner_change(con);
} else {
ucsi_partner_change(con);
ucsi_port_psy_changed(con);
ucsi_unregister_partner(con);
}
return 0;
}
/*
 * Work function for connector change events: fetches the new connector
 * status and dispatches on the status change bits (power direction,
 * connect, opmode/power level, partner, CAM). Runs with con->lock held.
 */
static void ucsi_handle_connector_change(struct work_struct *work)
{
	struct ucsi_connector *con = container_of(work, struct ucsi_connector,
						  work);
	struct ucsi *ucsi = con->ucsi;
	enum typec_role role;
	u64 command;
	int ret;
	mutex_lock(&con->lock);
	command = UCSI_GET_CONNECTOR_STATUS | UCSI_CONNECTOR_NUMBER(con->num);
	ret = ucsi_send_command(ucsi, command, &con->status, sizeof(con->status));
	if (ret < 0) {
		dev_err(ucsi->dev, "%s: GET_CONNECTOR_STATUS failed (%d)\n",
			__func__, ret);
		goto out_unlock;
	}
	trace_ucsi_connector_change(con->num, &con->status);
	role = !!(con->status.flags & UCSI_CONSTAT_PWR_DIR);
	if (con->status.change & UCSI_CONSTAT_POWER_DIR_CHANGE) {
		typec_set_pwr_role(con->port, role);
		/* Complete pending power role swap */
		if (!completion_done(&con->complete))
			complete(&con->complete);
	}
	if (con->status.change & UCSI_CONSTAT_CONNECT_CHANGE) {
		typec_set_pwr_role(con->port, role);
		ucsi_port_psy_changed(con);
		ucsi_partner_change(con);
		if (con->status.flags & UCSI_CONSTAT_CONNECTED) {
			ucsi_register_partner(con);
			/* Re-check later in case the PPM status was stale */
			ucsi_partner_task(con, ucsi_check_connection, 1, HZ);
			if (UCSI_CONSTAT_PWR_OPMODE(con->status.flags) ==
			    UCSI_CONSTAT_PWR_OPMODE_PD)
				ucsi_partner_task(con, ucsi_register_partner_pdos, 1, HZ);
		} else {
			ucsi_unregister_partner(con);
		}
	}
	if (con->status.change & UCSI_CONSTAT_POWER_OPMODE_CHANGE ||
	    con->status.change & UCSI_CONSTAT_POWER_LEVEL_CHANGE)
		ucsi_pwr_opmode_change(con);
	if (con->partner && con->status.change & UCSI_CONSTAT_PARTNER_CHANGE) {
		ucsi_partner_change(con);
		/* Complete pending data role swap */
		if (!completion_done(&con->complete))
			complete(&con->complete);
	}
	if (con->status.change & UCSI_CONSTAT_CAM_CHANGE)
		ucsi_partner_task(con, ucsi_check_altmodes, 1, 0);
	/* Allow a new event to be queued before re-enabling notifications */
	clear_bit(EVENT_PENDING, &con->ucsi->flags);
	ret = ucsi_acknowledge_connector_change(ucsi);
	if (ret)
		dev_err(ucsi->dev, "%s: ACK failed (%d)", __func__, ret);
out_unlock:
	mutex_unlock(&con->lock);
}
/**
 * ucsi_connector_change - Process Connector Change Event
 * @ucsi: UCSI Interface
 * @num: Connector number
 *
 * Called by the glue drivers from interrupt context. Schedules the
 * connector's work item unless an event is already being processed;
 * EVENT_PENDING is cleared again in ucsi_handle_connector_change().
 */
void ucsi_connector_change(struct ucsi *ucsi, u8 num)
{
	struct ucsi_connector *con = &ucsi->connector[num - 1];
	/* Events are not expected unless we asked for them */
	if (!(ucsi->ntfy & UCSI_ENABLE_NTFY_CONNECTOR_CHANGE)) {
		dev_dbg(ucsi->dev, "Bogus connector change event\n");
		return;
	}
	if (!test_and_set_bit(EVENT_PENDING, &ucsi->flags))
		schedule_work(&con->work);
}
EXPORT_SYMBOL_GPL(ucsi_connector_change);
/* -------------------------------------------------------------------------- */
/* Issue CONNECTOR_RESET for this connector, optionally as a hard reset. */
static int ucsi_reset_connector(struct ucsi_connector *con, bool hard)
{
	u64 cmd = UCSI_CONNECTOR_RESET | UCSI_CONNECTOR_NUMBER(con->num);

	if (hard)
		cmd |= UCSI_CONNECTOR_RESET_HARD;

	return ucsi_send_command(con->ucsi, cmd, NULL, 0);
}
/*
 * ucsi_reset_ppm - Reset the Platform Policy Manager
 * @ucsi: UCSI interface
 *
 * Writes PPM_RESET and polls the CCI until RESET_COMPLETE is set,
 * re-issuing the reset if the PPM reports any other activity meanwhile.
 * Gives up after UCSI_TIMEOUT_MS.
 *
 * Returns 0 on success or a negative errno.
 */
static int ucsi_reset_ppm(struct ucsi *ucsi)
{
	u64 command = UCSI_PPM_RESET;
	unsigned long tmo;
	u32 cci;
	int ret;
	mutex_lock(&ucsi->ppm_lock);
	ret = ucsi->ops->async_write(ucsi, UCSI_CONTROL, &command,
				     sizeof(command));
	if (ret < 0)
		goto out;
	tmo = jiffies + msecs_to_jiffies(UCSI_TIMEOUT_MS);
	do {
		if (time_is_before_jiffies(tmo)) {
			ret = -ETIMEDOUT;
			goto out;
		}
		ret = ucsi->ops->read(ucsi, UCSI_CCI, &cci, sizeof(cci));
		if (ret)
			goto out;
		/* If the PPM is still doing something else, reset it again. */
		if (cci & ~UCSI_CCI_RESET_COMPLETE) {
			ret = ucsi->ops->async_write(ucsi, UCSI_CONTROL,
						     &command,
						     sizeof(command));
			if (ret < 0)
				goto out;
		}
		msleep(20);
	} while (!(cci & UCSI_CCI_RESET_COMPLETE));
out:
	mutex_unlock(&ucsi->ppm_lock);
	return ret;
}
/*
 * ucsi_role_cmd - Send a role-swap command with PPM recovery on timeout
 * @con: the connector
 * @command: the SET_UOR/SET_PDR command to send
 *
 * On -ETIMEDOUT the PPM is assumed to be stuck: it is reset, the
 * notification mask is restored and the connector is hard-reset. The
 * original -ETIMEDOUT is still returned to the caller.
 */
static int ucsi_role_cmd(struct ucsi_connector *con, u64 command)
{
	int ret;
	ret = ucsi_send_command(con->ucsi, command, NULL, 0);
	if (ret == -ETIMEDOUT) {
		u64 c;
		/* PPM most likely stopped responding. Resetting everything. */
		ucsi_reset_ppm(con->ucsi);
		c = UCSI_SET_NOTIFICATION_ENABLE | con->ucsi->ntfy;
		ucsi_send_command(con->ucsi, c, NULL, 0);
		ucsi_reset_connector(con, true);
	}
	return ret;
}
/*
 * ucsi_dr_swap - typec_operations .dr_set: request a data role swap
 * @port: the typec port
 * @role: requested data role
 *
 * No-op when the partner already has the complementary role. Otherwise
 * sends SET_UOR and waits (without con->lock, so the event handler can
 * run) for the partner-change event to complete con->complete.
 *
 * Returns 0 on success, -ENOTCONN without a partner, -ETIMEDOUT if no
 * confirmation arrives, or a negative errno from the command.
 */
static int ucsi_dr_swap(struct typec_port *port, enum typec_data_role role)
{
	struct ucsi_connector *con = typec_get_drvdata(port);
	u8 partner_type;
	u64 command;
	int ret = 0;
	mutex_lock(&con->lock);
	if (!con->partner) {
		ret = -ENOTCONN;
		goto out_unlock;
	}
	partner_type = UCSI_CONSTAT_PARTNER_TYPE(con->status.flags);
	/* Requested role already in effect: nothing to do */
	if ((partner_type == UCSI_CONSTAT_PARTNER_TYPE_DFP &&
	     role == TYPEC_DEVICE) ||
	    (partner_type == UCSI_CONSTAT_PARTNER_TYPE_UFP &&
	     role == TYPEC_HOST))
		goto out_unlock;
	reinit_completion(&con->complete);
	command = UCSI_SET_UOR | UCSI_CONNECTOR_NUMBER(con->num);
	command |= UCSI_SET_UOR_ROLE(role);
	command |= UCSI_SET_UOR_ACCEPT_ROLE_SWAPS;
	ret = ucsi_role_cmd(con, command);
	if (ret < 0)
		goto out_unlock;
	/* Drop the lock so the connector change handler can signal us */
	mutex_unlock(&con->lock);
	if (!wait_for_completion_timeout(&con->complete,
					 msecs_to_jiffies(UCSI_SWAP_TIMEOUT_MS)))
		return -ETIMEDOUT;
	return 0;
out_unlock:
	mutex_unlock(&con->lock);
	return ret;
}
/*
 * ucsi_pr_swap - typec_operations .pr_set: request a power role swap
 * @port: the typec port
 * @role: requested power role
 *
 * Sends SET_PDR and waits (lock dropped) for the power-direction-change
 * event. Afterwards verifies a PD contract is still in place; if not,
 * the swap is considered failed and the connector is hard-reset.
 *
 * Returns 0 on success, -ENOTCONN without a partner, -ETIMEDOUT on no
 * confirmation, -EPROTO if the contract was lost, or a command errno.
 */
static int ucsi_pr_swap(struct typec_port *port, enum typec_role role)
{
	struct ucsi_connector *con = typec_get_drvdata(port);
	enum typec_role cur_role;
	u64 command;
	int ret = 0;
	mutex_lock(&con->lock);
	if (!con->partner) {
		ret = -ENOTCONN;
		goto out_unlock;
	}
	cur_role = !!(con->status.flags & UCSI_CONSTAT_PWR_DIR);
	if (cur_role == role)
		goto out_unlock;
	reinit_completion(&con->complete);
	command = UCSI_SET_PDR | UCSI_CONNECTOR_NUMBER(con->num);
	command |= UCSI_SET_PDR_ROLE(role);
	command |= UCSI_SET_PDR_ACCEPT_ROLE_SWAPS;
	ret = ucsi_role_cmd(con, command);
	if (ret < 0)
		goto out_unlock;
	/* Drop the lock so the connector change handler can signal us */
	mutex_unlock(&con->lock);
	if (!wait_for_completion_timeout(&con->complete,
					 msecs_to_jiffies(UCSI_SWAP_TIMEOUT_MS)))
		return -ETIMEDOUT;
	mutex_lock(&con->lock);
	/* Something has gone wrong while swapping the role */
	if (UCSI_CONSTAT_PWR_OPMODE(con->status.flags) !=
	    UCSI_CONSTAT_PWR_OPMODE_PD) {
		ucsi_reset_connector(con, true);
		ret = -EPROTO;
	}
out_unlock:
	mutex_unlock(&con->lock);
	return ret;
}
/* typec class callbacks for every UCSI-managed port. */
static const struct typec_operations ucsi_ops = {
	.dr_set = ucsi_dr_swap,
	.pr_set = ucsi_pr_swap
};
/* Caller must call fwnode_handle_put() after use */
/*
 * Return the firmware node matching this connector, assuming the child
 * nodes of the UCSI device are ordered by connector number (1-based).
 * Returns NULL when there is no matching child node.
 */
static struct fwnode_handle *ucsi_find_fwnode(struct ucsi_connector *con)
{
	struct fwnode_handle *fwnode;
	int i = 1;
	device_for_each_child_node(con->ucsi->dev, fwnode)
		if (i++ == con->num)
			return fwnode;
	return NULL;
}
/*
 * ucsi_register_port - Initialize and register one connector
 * @ucsi: UCSI interface
 * @con: pre-allocated connector (con->num already set)
 *
 * Sets up the connector's workqueue, locks and role switch, queries the
 * connector capability and status from the PPM, registers the typec port,
 * its PD capabilities and alternate modes, and syncs the initial partner
 * state. On failure the workqueue is destroyed; other resources are
 * released by the caller's unwind path.
 *
 * Returns 0 on success or a negative errno.
 */
static int ucsi_register_port(struct ucsi *ucsi, struct ucsi_connector *con)
{
	struct usb_power_delivery_desc desc = { ucsi->cap.pd_version};
	struct usb_power_delivery_capabilities_desc pd_caps;
	struct usb_power_delivery_capabilities *pd_cap;
	struct typec_capability *cap = &con->typec_cap;
	enum typec_accessory *accessory = cap->accessory;
	enum usb_role u_role = USB_ROLE_NONE;
	u64 command;
	char *name;
	int ret;
	name = kasprintf(GFP_KERNEL, "%s-con%d", dev_name(ucsi->dev), con->num);
	if (!name)
		return -ENOMEM;
	con->wq = create_singlethread_workqueue(name);
	kfree(name);
	if (!con->wq)
		return -ENOMEM;
	INIT_WORK(&con->work, ucsi_handle_connector_change);
	init_completion(&con->complete);
	mutex_init(&con->lock);
	INIT_LIST_HEAD(&con->partner_tasks);
	con->ucsi = ucsi;
	cap->fwnode = ucsi_find_fwnode(con);
	con->usb_role_sw = fwnode_usb_role_switch_get(cap->fwnode);
	if (IS_ERR(con->usb_role_sw))
		return dev_err_probe(ucsi->dev, PTR_ERR(con->usb_role_sw),
			"con%d: failed to get usb role switch\n", con->num);
	/* Delay other interactions with the con until registration is complete */
	mutex_lock(&con->lock);
	/* Get connector capability */
	command = UCSI_GET_CONNECTOR_CAPABILITY;
	command |= UCSI_CONNECTOR_NUMBER(con->num);
	ret = ucsi_send_command(ucsi, command, &con->cap, sizeof(con->cap));
	if (ret < 0)
		goto out_unlock;
	/* Derive the typec data/port capabilities from the UCSI ones */
	if (con->cap.op_mode & UCSI_CONCAP_OPMODE_DRP)
		cap->data = TYPEC_PORT_DRD;
	else if (con->cap.op_mode & UCSI_CONCAP_OPMODE_DFP)
		cap->data = TYPEC_PORT_DFP;
	else if (con->cap.op_mode & UCSI_CONCAP_OPMODE_UFP)
		cap->data = TYPEC_PORT_UFP;
	if ((con->cap.flags & UCSI_CONCAP_FLAG_PROVIDER) &&
	    (con->cap.flags & UCSI_CONCAP_FLAG_CONSUMER))
		cap->type = TYPEC_PORT_DRP;
	else if (con->cap.flags & UCSI_CONCAP_FLAG_PROVIDER)
		cap->type = TYPEC_PORT_SRC;
	else if (con->cap.flags & UCSI_CONCAP_FLAG_CONSUMER)
		cap->type = TYPEC_PORT_SNK;
	cap->revision = ucsi->cap.typec_version;
	cap->pd_revision = ucsi->cap.pd_version;
	cap->svdm_version = SVDM_VER_2_0;
	cap->prefer_role = TYPEC_NO_PREFERRED_ROLE;
	if (con->cap.op_mode & UCSI_CONCAP_OPMODE_AUDIO_ACCESSORY)
		*accessory++ = TYPEC_ACCESSORY_AUDIO;
	if (con->cap.op_mode & UCSI_CONCAP_OPMODE_DEBUG_ACCESSORY)
		*accessory = TYPEC_ACCESSORY_DEBUG;
	cap->driver_data = con;
	cap->ops = &ucsi_ops;
	ret = ucsi_register_port_psy(con);
	if (ret)
		goto out;
	/* Register the connector */
	con->port = typec_register_port(ucsi->dev, cap);
	if (IS_ERR(con->port)) {
		ret = PTR_ERR(con->port);
		goto out;
	}
	/* Publish the port's own PD source/sink capabilities */
	con->pd = usb_power_delivery_register(ucsi->dev, &desc);
	ret = ucsi_get_pdos(con, TYPEC_SOURCE, 0, pd_caps.pdo);
	if (ret > 0) {
		if (ret < PDO_MAX_OBJECTS)
			pd_caps.pdo[ret] = 0;
		pd_caps.role = TYPEC_SOURCE;
		pd_cap = usb_power_delivery_register_capabilities(con->pd, &pd_caps);
		if (IS_ERR(pd_cap)) {
			ret = PTR_ERR(pd_cap);
			goto out;
		}
		con->port_source_caps = pd_cap;
		typec_port_set_usb_power_delivery(con->port, con->pd);
	}
	memset(&pd_caps, 0, sizeof(pd_caps));
	ret = ucsi_get_pdos(con, TYPEC_SINK, 0, pd_caps.pdo);
	if (ret > 0) {
		if (ret < PDO_MAX_OBJECTS)
			pd_caps.pdo[ret] = 0;
		pd_caps.role = TYPEC_SINK;
		pd_cap = usb_power_delivery_register_capabilities(con->pd, &pd_caps);
		if (IS_ERR(pd_cap)) {
			ret = PTR_ERR(pd_cap);
			goto out;
		}
		con->port_sink_caps = pd_cap;
		typec_port_set_usb_power_delivery(con->port, con->pd);
	}
	/* Alternate modes */
	ret = ucsi_register_altmodes(con, UCSI_RECIPIENT_CON);
	if (ret) {
		dev_err(ucsi->dev, "con%d: failed to register alt modes\n",
			con->num);
		goto out;
	}
	/* Get the status */
	command = UCSI_GET_CONNECTOR_STATUS | UCSI_CONNECTOR_NUMBER(con->num);
	ret = ucsi_send_command(ucsi, command, &con->status, sizeof(con->status));
	if (ret < 0) {
		dev_err(ucsi->dev, "con%d: failed to get status\n", con->num);
		ret = 0;
		goto out;
	}
	ret = 0; /* ucsi_send_command() returns length on success */
	switch (UCSI_CONSTAT_PARTNER_TYPE(con->status.flags)) {
	case UCSI_CONSTAT_PARTNER_TYPE_UFP:
	case UCSI_CONSTAT_PARTNER_TYPE_CABLE_AND_UFP:
		u_role = USB_ROLE_HOST;
		fallthrough;
	case UCSI_CONSTAT_PARTNER_TYPE_CABLE:
		typec_set_data_role(con->port, TYPEC_HOST);
		break;
	case UCSI_CONSTAT_PARTNER_TYPE_DFP:
		u_role = USB_ROLE_DEVICE;
		typec_set_data_role(con->port, TYPEC_DEVICE);
		break;
	default:
		break;
	}
	/* Check if there is already something connected */
	if (con->status.flags & UCSI_CONSTAT_CONNECTED) {
		typec_set_pwr_role(con->port,
				  !!(con->status.flags & UCSI_CONSTAT_PWR_DIR));
		ucsi_register_partner(con);
		ucsi_pwr_opmode_change(con);
		ucsi_port_psy_changed(con);
	}
	/* Only notify USB controller if partner supports USB data */
	if (!(UCSI_CONSTAT_PARTNER_FLAGS(con->status.flags) & UCSI_CONSTAT_PARTNER_FLAG_USB))
		u_role = USB_ROLE_NONE;
	ret = usb_role_switch_set_role(con->usb_role_sw, u_role);
	if (ret) {
		dev_err(ucsi->dev, "con:%d: failed to set usb role:%d\n",
			con->num, u_role);
		/* Not fatal for port registration */
		ret = 0;
	}
	if (con->partner &&
	    UCSI_CONSTAT_PWR_OPMODE(con->status.flags) ==
	    UCSI_CONSTAT_PWR_OPMODE_PD) {
		ucsi_get_src_pdos(con);
		ucsi_check_altmodes(con);
	}
	trace_ucsi_register_port(con->num, &con->status);
out:
	fwnode_handle_put(cap->fwnode);
out_unlock:
	mutex_unlock(&con->lock);
	if (ret && con->wq) {
		destroy_workqueue(con->wq);
		con->wq = NULL;
	}
	return ret;
}
/**
 * ucsi_init - Initialize UCSI interface
 * @ucsi: UCSI to be initialized
 *
 * Registers all ports @ucsi has and enables all notification events.
 *
 * Resets the PPM, enables only the notifications needed to talk to it,
 * reads the PPM capabilities, registers one typec port per connector and
 * finally enables the full notification mask. On failure everything is
 * unwound and the PPM is reset again.
 */
static int ucsi_init(struct ucsi *ucsi)
{
	struct ucsi_connector *con, *connector;
	u64 command, ntfy;
	int ret;
	int i;
	/* Reset the PPM */
	ret = ucsi_reset_ppm(ucsi);
	if (ret) {
		dev_err(ucsi->dev, "failed to reset PPM!\n");
		goto err;
	}
	/* Enable basic notifications */
	ntfy = UCSI_ENABLE_NTFY_CMD_COMPLETE | UCSI_ENABLE_NTFY_ERROR;
	command = UCSI_SET_NOTIFICATION_ENABLE | ntfy;
	ret = ucsi_send_command(ucsi, command, NULL, 0);
	if (ret < 0)
		goto err_reset;
	/* Get PPM capabilities */
	command = UCSI_GET_CAPABILITY;
	ret = ucsi_send_command(ucsi, command, &ucsi->cap, sizeof(ucsi->cap));
	if (ret < 0)
		goto err_reset;
	if (!ucsi->cap.num_connectors) {
		ret = -ENODEV;
		goto err_reset;
	}
	/* Allocate the connectors. Released in ucsi_unregister() */
	connector = kcalloc(ucsi->cap.num_connectors + 1, sizeof(*connector), GFP_KERNEL);
	if (!connector) {
		ret = -ENOMEM;
		goto err_reset;
	}
	/* Register all connectors */
	for (i = 0; i < ucsi->cap.num_connectors; i++) {
		connector[i].num = i + 1;
		ret = ucsi_register_port(ucsi, &connector[i]);
		if (ret)
			goto err_unregister;
	}
	/* Enable all notifications */
	ntfy = UCSI_ENABLE_NTFY_ALL;
	command = UCSI_SET_NOTIFICATION_ENABLE | ntfy;
	ret = ucsi_send_command(ucsi, command, NULL, 0);
	if (ret < 0)
		goto err_unregister;
	ucsi->connector = connector;
	ucsi->ntfy = ntfy;
	return 0;
err_unregister:
	/* The extra zeroed entry terminates this loop (con->port == NULL) */
	for (con = connector; con->port; con++) {
		ucsi_unregister_partner(con);
		ucsi_unregister_altmodes(con, UCSI_RECIPIENT_CON);
		ucsi_unregister_port_psy(con);
		if (con->wq)
			destroy_workqueue(con->wq);
		usb_power_delivery_unregister_capabilities(con->port_sink_caps);
		con->port_sink_caps = NULL;
		usb_power_delivery_unregister_capabilities(con->port_source_caps);
		con->port_source_caps = NULL;
		usb_power_delivery_unregister(con->pd);
		con->pd = NULL;
		typec_unregister_port(con->port);
		con->port = NULL;
	}
	kfree(connector);
err_reset:
	memset(&ucsi->cap, 0, sizeof(ucsi->cap));
	ucsi_reset_ppm(ucsi);
err:
	return ret;
}
/*
 * Work function scheduled from ucsi_resume(): restores the notification
 * mask (lost across suspend on some platforms) and re-checks every
 * connector for connection changes that happened while suspended.
 */
static void ucsi_resume_work(struct work_struct *work)
{
	struct ucsi *ucsi = container_of(work, struct ucsi, resume_work);
	struct ucsi_connector *con;
	u64 command;
	int ret;
	/* Restore UCSI notification enable mask after system resume */
	command = UCSI_SET_NOTIFICATION_ENABLE | ucsi->ntfy;
	ret = ucsi_send_command(ucsi, command, NULL, 0);
	if (ret < 0) {
		dev_err(ucsi->dev, "failed to re-enable notifications (%d)\n", ret);
		return;
	}
	for (con = ucsi->connector; con->port; con++) {
		mutex_lock(&con->lock);
		ucsi_partner_task(con, ucsi_check_connection, 1, 0);
		mutex_unlock(&con->lock);
	}
}
/**
 * ucsi_resume - System resume hook for UCSI glue drivers
 * @ucsi: UCSI instance
 *
 * Schedules the heavy resume work on system_long_wq instead of doing it
 * inline. Always returns 0.
 */
int ucsi_resume(struct ucsi *ucsi)
{
	/* Only meaningful once init has populated the connector array */
	if (ucsi->connector)
		queue_work(system_long_wq, &ucsi->resume_work);
	return 0;
}
EXPORT_SYMBOL_GPL(ucsi_resume);
/*
 * Deferred PPM initialization. Retries a bounded number of times when
 * ucsi_init() asks to be deferred (-EPROBE_DEFER).
 */
static void ucsi_init_work(struct work_struct *work)
{
	struct ucsi *ucsi = container_of(work, struct ucsi, work.work);
	int status = ucsi_init(ucsi);

	if (status)
		dev_err_probe(ucsi->dev, status, "PPM init failed\n");

	if (status != -EPROBE_DEFER)
		return;

	/* Give up after a limited number of retries */
	if (ucsi->work_count++ > UCSI_ROLE_SWITCH_WAIT_COUNT) {
		dev_err(ucsi->dev, "PPM init failed, stop trying\n");
		return;
	}

	queue_delayed_work(system_long_wq, &ucsi->work,
			   UCSI_ROLE_SWITCH_INTERVAL);
}
/**
 * ucsi_get_drvdata - Return private driver data pointer
 * @ucsi: UCSI interface
 */
void *ucsi_get_drvdata(struct ucsi *ucsi)
{
	return ucsi->driver_data;
}
EXPORT_SYMBOL_GPL(ucsi_get_drvdata);
/**
 * ucsi_set_drvdata - Assign private driver data pointer
 * @ucsi: UCSI interface
 * @data: Private data pointer
 */
void ucsi_set_drvdata(struct ucsi *ucsi, void *data)
{
	ucsi->driver_data = data;
}
EXPORT_SYMBOL_GPL(ucsi_set_drvdata);
/**
 * ucsi_create - Allocate UCSI instance
 * @dev: Device interface to the PPM (Platform Policy Manager)
 * @ops: I/O routines
 *
 * Returns the new instance or an ERR_PTR(). The caller releases it with
 * ucsi_destroy().
 */
struct ucsi *ucsi_create(struct device *dev, const struct ucsi_operations *ops)
{
	struct ucsi *ucsi;

	/* All three I/O routines are mandatory */
	if (!ops || !ops->read || !ops->sync_write || !ops->async_write)
		return ERR_PTR(-EINVAL);

	ucsi = kzalloc(sizeof(*ucsi), GFP_KERNEL);
	if (!ucsi)
		return ERR_PTR(-ENOMEM);

	ucsi->dev = dev;
	ucsi->ops = ops;
	INIT_WORK(&ucsi->resume_work, ucsi_resume_work);
	INIT_DELAYED_WORK(&ucsi->work, ucsi_init_work);
	mutex_init(&ucsi->ppm_lock);

	return ucsi;
}
EXPORT_SYMBOL_GPL(ucsi_create);
/**
 * ucsi_destroy - Free UCSI instance
 * @ucsi: UCSI instance to be freed
 */
void ucsi_destroy(struct ucsi *ucsi)
{
	/* Tear down debugfs entries before freeing the instance they reference */
	ucsi_debugfs_unregister(ucsi);
	kfree(ucsi);
}
EXPORT_SYMBOL_GPL(ucsi_destroy);
/**
 * ucsi_register - Register UCSI interface
 * @ucsi: UCSI instance
 *
 * Reads the UCSI version from the PPM and, when one is present, kicks off
 * the deferred initialization work and registers the debugfs entries.
 */
int ucsi_register(struct ucsi *ucsi)
{
	int err;

	err = ucsi->ops->read(ucsi, UCSI_VERSION, &ucsi->version,
			      sizeof(ucsi->version));
	if (err)
		return err;

	/* A zero version register means no PPM behind this interface */
	if (!ucsi->version)
		return -ENODEV;

	queue_delayed_work(system_long_wq, &ucsi->work, 0);

	ucsi_debugfs_register(ucsi);

	return 0;
}
EXPORT_SYMBOL_GPL(ucsi_register);
/**
 * ucsi_unregister - Unregister UCSI interface
 * @ucsi: UCSI interface to be unregistered
 *
 * Unregister UCSI interface that was created with ucsi_register().
 */
void ucsi_unregister(struct ucsi *ucsi)
{
	u64 cmd = UCSI_SET_NOTIFICATION_ENABLE;
	int i;
	/* Make sure that we are not in the middle of driver initialization */
	cancel_delayed_work_sync(&ucsi->work);
	cancel_work_sync(&ucsi->resume_work);
	/* Disable notifications (mask bits all zero) before teardown */
	ucsi->ops->async_write(ucsi, UCSI_CONTROL, &cmd, sizeof(cmd));
	/* Init never completed: there are no connectors to tear down */
	if (!ucsi->connector)
		return;
	for (i = 0; i < ucsi->cap.num_connectors; i++) {
		cancel_work_sync(&ucsi->connector[i].work);
		ucsi_unregister_partner(&ucsi->connector[i]);
		ucsi_unregister_altmodes(&ucsi->connector[i],
					 UCSI_RECIPIENT_CON);
		ucsi_unregister_port_psy(&ucsi->connector[i]);
		if (ucsi->connector[i].wq) {
			struct ucsi_work *uwork;
			mutex_lock(&ucsi->connector[i].lock);
			/*
			 * queue delayed items immediately so they can execute
			 * and free themselves before the wq is destroyed
			 */
			list_for_each_entry(uwork, &ucsi->connector[i].partner_tasks, node)
				mod_delayed_work(ucsi->connector[i].wq, &uwork->work, 0);
			mutex_unlock(&ucsi->connector[i].lock);
			destroy_workqueue(ucsi->connector[i].wq);
		}
		usb_power_delivery_unregister_capabilities(ucsi->connector[i].port_sink_caps);
		ucsi->connector[i].port_sink_caps = NULL;
		usb_power_delivery_unregister_capabilities(ucsi->connector[i].port_source_caps);
		ucsi->connector[i].port_source_caps = NULL;
		usb_power_delivery_unregister(ucsi->connector[i].pd);
		ucsi->connector[i].pd = NULL;
		typec_unregister_port(ucsi->connector[i].port);
	}
	kfree(ucsi->connector);
}
EXPORT_SYMBOL_GPL(ucsi_unregister);
/* Module init: only sets up the shared debugfs root directory. */
static int __init ucsi_module_init(void)
{
	ucsi_debugfs_init();
	return 0;
}
module_init(ucsi_module_init);
/* Module exit: remove the shared debugfs root directory. */
static void __exit ucsi_module_exit(void)
{
	ucsi_debugfs_exit();
}
module_exit(ucsi_module_exit);
MODULE_AUTHOR("Heikki Krogerus <[email protected]>");
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("USB Type-C Connector System Software Interface driver");
| linux-master | drivers/usb/typec/ucsi/ucsi.c |
// SPDX-License-Identifier: GPL-2.0
/*
* UCSI debugfs interface
*
* Copyright (C) 2023 Intel Corporation
*
* Authors: Rajaram Regupathy <[email protected]>
* Gopal Saranya <[email protected]>
*/
#include <linux/debugfs.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/usb.h>
#include <asm/errno.h>
#include "ucsi.h"
static struct dentry *ucsi_debugfs_root;
/*
 * debugfs "command" write handler: forward a raw 64-bit UCSI command to the
 * PPM. GET-style commands store their data for the "response" file; a
 * failure is latched in debugfs->status so "response" can report it.
 */
static int ucsi_cmd(void *data, u64 val)
{
	struct ucsi *ucsi = data;
	int err;

	ucsi->debugfs->status = 0;
	memset(&ucsi->debugfs->response, 0, sizeof(ucsi->debugfs->response));

	switch (UCSI_COMMAND(val)) {
	case UCSI_SET_UOM:
	case UCSI_SET_UOR:
	case UCSI_SET_PDR:
	case UCSI_CONNECTOR_RESET:
		/* Control commands: no response payload expected */
		err = ucsi_send_command(ucsi, val, NULL, 0);
		break;
	case UCSI_GET_CAPABILITY:
	case UCSI_GET_CONNECTOR_CAPABILITY:
	case UCSI_GET_ALTERNATE_MODES:
	case UCSI_GET_CURRENT_CAM:
	case UCSI_GET_PDOS:
	case UCSI_GET_CABLE_PROPERTY:
	case UCSI_GET_CONNECTOR_STATUS:
		/* GET commands: capture the data for the "response" file */
		err = ucsi_send_command(ucsi, val,
					&ucsi->debugfs->response,
					sizeof(ucsi->debugfs->response));
		break;
	default:
		err = -EOPNOTSUPP;
	}

	if (err < 0) {
		ucsi->debugfs->status = err;
		return err;
	}

	return 0;
}
DEFINE_DEBUGFS_ATTRIBUTE(ucsi_cmd_fops, NULL, ucsi_cmd, "0x%llx\n");
/*
 * debugfs "response" read handler: dump the 16-byte response of the last
 * command issued through the "command" file, or propagate its error.
 */
static int ucsi_resp_show(struct seq_file *s, void *not_used)
{
	struct ucsi *ucsi = s->private;
	/* Reading the response of a failed command reports that failure */
	if (ucsi->debugfs->status)
		return ucsi->debugfs->status;
	seq_printf(s, "0x%016llx%016llx\n", ucsi->debugfs->response.high,
		   ucsi->debugfs->response.low);
	return 0;
}
DEFINE_SHOW_ATTRIBUTE(ucsi_resp);
/*
 * Create the per-device debugfs directory with "command" (write-only) and
 * "response" (read-only) files. Allocation failure is silently tolerated:
 * debugfs is best-effort and never blocks driver operation.
 */
void ucsi_debugfs_register(struct ucsi *ucsi)
{
	ucsi->debugfs = kzalloc(sizeof(*ucsi->debugfs), GFP_KERNEL);
	if (!ucsi->debugfs)
		return;
	ucsi->debugfs->dentry = debugfs_create_dir(dev_name(ucsi->dev), ucsi_debugfs_root);
	debugfs_create_file("command", 0200, ucsi->debugfs->dentry, ucsi, &ucsi_cmd_fops);
	debugfs_create_file("response", 0400, ucsi->debugfs->dentry, ucsi, &ucsi_resp_fops);
}
/* Remove the per-device debugfs directory; safe on NULL/error instances. */
void ucsi_debugfs_unregister(struct ucsi *ucsi)
{
	if (IS_ERR_OR_NULL(ucsi) || !ucsi->debugfs)
		return;
	debugfs_remove_recursive(ucsi->debugfs->dentry);
	kfree(ucsi->debugfs);
}
/* Create the top-level "ucsi" directory under the USB debugfs root. */
void ucsi_debugfs_init(void)
{
	ucsi_debugfs_root = debugfs_create_dir("ucsi", usb_debug_root);
}
/* Remove the top-level "ucsi" debugfs directory. */
void ucsi_debugfs_exit(void)
{
	debugfs_remove(ucsi_debugfs_root);
}
| linux-master | drivers/usb/typec/ucsi/debugfs.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2019-2020, The Linux Foundation. All rights reserved.
* Copyright (c) 2023, Linaro Ltd
*/
#include <linux/auxiliary_bus.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/property.h>
#include <linux/soc/qcom/pdr.h>
#include <linux/soc/qcom/pmic_glink.h>
#include "ucsi.h"
/* Size of the UCSI data window exchanged over PMIC GLINK */
#define UCSI_BUF_SIZE			48

#define MSG_TYPE_REQ_RESP		1

/* GLINK USBC opcodes */
#define UC_NOTIFY_RECEIVER_UCSI		0x0
#define UC_UCSI_READ_BUF_REQ		0x11
#define UC_UCSI_WRITE_BUF_REQ		0x12
#define UC_UCSI_USBC_NOTIFY_IND		0x13
/* Request to read the whole UCSI data window; header only. */
struct ucsi_read_buf_req_msg {
	struct pmic_glink_hdr hdr;
};
/* Response carrying the UCSI data window and a firmware status code. */
struct ucsi_read_buf_resp_msg {
	struct pmic_glink_hdr hdr;
	u8 buf[UCSI_BUF_SIZE];
	u32 ret_code;
};
/* Request to write the whole UCSI data window. */
struct ucsi_write_buf_req_msg {
	struct pmic_glink_hdr hdr;
	u8 buf[UCSI_BUF_SIZE];
	u32 reserved;
};
/* Response to a write request: firmware status code only. */
struct ucsi_write_buf_resp_msg {
	struct pmic_glink_hdr hdr;
	u32 ret_code;
};
/* Unsolicited notification indication from the firmware. */
struct ucsi_notify_ind_msg {
	struct pmic_glink_hdr hdr;
	u32 notification;
	u32 receiver;
	u32 reserved;
};
/* Per-device driver state for the GLINK-backed UCSI transport. */
struct pmic_glink_ucsi {
	struct device *dev;
	struct pmic_glink_client *client;
	struct ucsi *ucsi;
	struct completion read_ack;   /* read response arrived */
	struct completion write_ack;  /* write response arrived */
	struct completion sync_ack;   /* sync command completed (via CCI) */
	bool sync_pending;            /* a sync_write is waiting on sync_ack */
	struct mutex lock; /* protects concurrent access to PMIC Glink interface */
	int sync_val;                 /* result reported to the sync writer */
	struct work_struct notify_work;
	struct work_struct register_work;
	u8 read_buf[UCSI_BUF_SIZE];   /* last data window read from firmware */
};
/*
 * Read @val_len bytes at @offset of the UCSI data window. Sends a read
 * request over GLINK and waits (up to 5 s) for pmic_glink_ucsi_read_ack()
 * to fill read_buf and signal read_ack.
 */
static int pmic_glink_ucsi_read(struct ucsi *__ucsi, unsigned int offset,
				void *val, size_t val_len)
{
	struct pmic_glink_ucsi *ucsi = ucsi_get_drvdata(__ucsi);
	struct ucsi_read_buf_req_msg req = {};
	int err;

	req.hdr.owner = PMIC_GLINK_OWNER_USBC;
	req.hdr.type = MSG_TYPE_REQ_RESP;
	req.hdr.opcode = UC_UCSI_READ_BUF_REQ;

	mutex_lock(&ucsi->lock);
	memset(ucsi->read_buf, 0, sizeof(ucsi->read_buf));
	reinit_completion(&ucsi->read_ack);

	err = pmic_glink_send(ucsi->client, &req, sizeof(req));
	if (err < 0) {
		dev_err(ucsi->dev, "failed to send UCSI read request: %d\n", err);
	} else if (!wait_for_completion_timeout(&ucsi->read_ack, 5 * HZ)) {
		dev_err(ucsi->dev, "timeout waiting for UCSI read response\n");
		err = -ETIMEDOUT;
	} else {
		memcpy(val, &ucsi->read_buf[offset], val_len);
		err = 0;
	}

	mutex_unlock(&ucsi->lock);
	return err;
}
/*
 * Write @val_len bytes at @offset of the UCSI data window and wait (up to
 * 5 s) for the firmware's write acknowledgment. Caller holds ucsi->lock.
 */
static int pmic_glink_ucsi_locked_write(struct pmic_glink_ucsi *ucsi, unsigned int offset,
					const void *val, size_t val_len)
{
	struct ucsi_write_buf_req_msg req = {};
	int err;

	req.hdr.owner = PMIC_GLINK_OWNER_USBC;
	req.hdr.type = MSG_TYPE_REQ_RESP;
	req.hdr.opcode = UC_UCSI_WRITE_BUF_REQ;
	memcpy(&req.buf[offset], val, val_len);

	reinit_completion(&ucsi->write_ack);

	err = pmic_glink_send(ucsi->client, &req, sizeof(req));
	if (err < 0) {
		dev_err(ucsi->dev, "failed to send UCSI write request: %d\n", err);
		return err;
	}

	if (!wait_for_completion_timeout(&ucsi->write_ack, 5 * HZ)) {
		dev_err(ucsi->dev, "timeout waiting for UCSI write response\n");
		return -ETIMEDOUT;
	}

	return 0;
}
/* UCSI async_write op: serialize the locked write with the transport mutex. */
static int pmic_glink_ucsi_async_write(struct ucsi *__ucsi, unsigned int offset,
				       const void *val, size_t val_len)
{
	struct pmic_glink_ucsi *ucsi = ucsi_get_drvdata(__ucsi);
	int err;

	mutex_lock(&ucsi->lock);
	err = pmic_glink_ucsi_locked_write(ucsi, offset, val, val_len);
	mutex_unlock(&ucsi->lock);

	return err;
}
/*
 * UCSI sync_write op: perform the write, then wait for the notification
 * path (pmic_glink_ucsi_notify) to observe command completion in CCI and
 * signal sync_ack.
 */
static int pmic_glink_ucsi_sync_write(struct ucsi *__ucsi, unsigned int offset,
				      const void *val, size_t val_len)
{
	struct pmic_glink_ucsi *ucsi = ucsi_get_drvdata(__ucsi);
	unsigned long left;
	int ret;
	/* TOFIX: Downstream forces recipient to CON when UCSI_GET_ALTERNATE_MODES command */
	mutex_lock(&ucsi->lock);
	ucsi->sync_val = 0;
	reinit_completion(&ucsi->sync_ack);
	ucsi->sync_pending = true;
	ret = pmic_glink_ucsi_locked_write(ucsi, offset, val, val_len);
	mutex_unlock(&ucsi->lock);
	/* The completion is signalled from the notify worker, not the write ack */
	left = wait_for_completion_timeout(&ucsi->sync_ack, 5 * HZ);
	if (!left) {
		dev_err(ucsi->dev, "timeout waiting for UCSI sync write response\n");
		ret = -ETIMEDOUT;
	} else if (ucsi->sync_val) {
		/* NOTE(review): a non-zero sync_val is only logged, not returned — confirm intended */
		dev_err(ucsi->dev, "sync write returned: %d\n", ucsi->sync_val);
	}
	ucsi->sync_pending = false;
	return ret;
}
/* I/O routines handed to the UCSI core (see ucsi_create()). */
static const struct ucsi_operations pmic_glink_ucsi_ops = {
	.read = pmic_glink_ucsi_read,
	.sync_write = pmic_glink_ucsi_sync_write,
	.async_write = pmic_glink_ucsi_async_write
};
/*
 * GLINK response to UC_UCSI_READ_BUF_REQ: stash the data window and wake
 * the reader. A firmware error is reported by NOT completing, letting the
 * waiter in pmic_glink_ucsi_read() time out.
 */
static void pmic_glink_ucsi_read_ack(struct pmic_glink_ucsi *ucsi, const void *data, int len)
{
	const struct ucsi_read_buf_resp_msg *resp = data;
	if (resp->ret_code)
		return;
	memcpy(ucsi->read_buf, resp->buf, UCSI_BUF_SIZE);
	complete(&ucsi->read_ack);
}
/*
 * GLINK response to UC_UCSI_WRITE_BUF_REQ: wake the writer. Errors make
 * the waiter time out instead of completing.
 */
static void pmic_glink_ucsi_write_ack(struct pmic_glink_ucsi *ucsi, const void *data, int len)
{
	const struct ucsi_write_buf_resp_msg *resp = data;
	if (resp->ret_code)
		return;
	/* NOTE(review): ret_code is 0 here after the guard above, so sync_val is only ever cleared — confirm intended */
	ucsi->sync_val = resp->ret_code;
	complete(&ucsi->write_ack);
}
/*
 * Notification worker: read CCI, forward connector-change events to the
 * UCSI core, and release any pending sync write on busy/completion.
 */
static void pmic_glink_ucsi_notify(struct work_struct *work)
{
	struct pmic_glink_ucsi *ucsi = container_of(work, struct pmic_glink_ucsi, notify_work);
	unsigned int conn;
	u32 cci;

	if (pmic_glink_ucsi_read(ucsi->ucsi, UCSI_CCI, &cci, sizeof(cci))) {
		dev_err(ucsi->dev, "failed to read CCI on notification\n");
		return;
	}

	conn = UCSI_CCI_CONNECTOR(cci);
	if (conn)
		ucsi_connector_change(ucsi->ucsi, conn);

	if (ucsi->sync_pending) {
		if (cci & UCSI_CCI_BUSY) {
			ucsi->sync_val = -EBUSY;
			complete(&ucsi->sync_ack);
		} else if (cci & (UCSI_CCI_ACK_COMPLETE | UCSI_CCI_COMMAND_COMPLETE)) {
			complete(&ucsi->sync_ack);
		}
	}
}
/* Worker: register the UCSI interface once the firmware service is up. */
static void pmic_glink_ucsi_register(struct work_struct *work)
{
	struct pmic_glink_ucsi *ucsi = container_of(work, struct pmic_glink_ucsi, register_work);
	ucsi_register(ucsi->ucsi);
}
/*
 * GLINK message handler: dispatch read/write acknowledgments and defer
 * notification indications to process context.
 */
static void pmic_glink_ucsi_callback(const void *data, size_t len, void *priv)
{
	struct pmic_glink_ucsi *ucsi = priv;
	const struct pmic_glink_hdr *hdr = data;

	switch (le32_to_cpu(hdr->opcode)) {
	case UC_UCSI_READ_BUF_REQ:
		pmic_glink_ucsi_read_ack(ucsi, data, len);
		break;
	case UC_UCSI_WRITE_BUF_REQ:
		pmic_glink_ucsi_write_ack(ucsi, data, len);
		break;
	case UC_UCSI_USBC_NOTIFY_IND:
		/* CCI must be read from sleepable context */
		schedule_work(&ucsi->notify_work);
		break;
	}
}
/*
 * Protection-domain state callback: (re)register the UCSI interface when
 * the firmware service comes up, tear it down when the service goes away.
 */
static void pmic_glink_ucsi_pdr_notify(void *priv, int state)
{
	struct pmic_glink_ucsi *ucsi = priv;

	switch (state) {
	case SERVREG_SERVICE_STATE_UP:
		schedule_work(&ucsi->register_work);
		break;
	case SERVREG_SERVICE_STATE_DOWN:
		ucsi_unregister(ucsi->ucsi);
		break;
	}
}
/* devm action: free the UCSI instance after GLINK callbacks are quiesced. */
static void pmic_glink_ucsi_destroy(void *data)
{
	struct pmic_glink_ucsi *ucsi = data;
	/* Protect to make sure we're not in a middle of a transaction from a glink callback */
	mutex_lock(&ucsi->lock);
	ucsi_destroy(ucsi->ucsi);
	mutex_unlock(&ucsi->lock);
}
/*
 * Auxiliary-device probe: allocate driver state, create the UCSI instance
 * and register with the PMIC GLINK transport. Actual UCSI registration is
 * deferred to pmic_glink_ucsi_register() once the service reports up.
 */
static int pmic_glink_ucsi_probe(struct auxiliary_device *adev,
				 const struct auxiliary_device_id *id)
{
	struct pmic_glink_ucsi *ucsi;
	struct device *dev = &adev->dev;
	int ret;
	ucsi = devm_kzalloc(dev, sizeof(*ucsi), GFP_KERNEL);
	if (!ucsi)
		return -ENOMEM;
	ucsi->dev = dev;
	dev_set_drvdata(dev, ucsi);
	INIT_WORK(&ucsi->notify_work, pmic_glink_ucsi_notify);
	INIT_WORK(&ucsi->register_work, pmic_glink_ucsi_register);
	init_completion(&ucsi->read_ack);
	init_completion(&ucsi->write_ack);
	init_completion(&ucsi->sync_ack);
	mutex_init(&ucsi->lock);
	ucsi->ucsi = ucsi_create(dev, &pmic_glink_ucsi_ops);
	if (IS_ERR(ucsi->ucsi))
		return PTR_ERR(ucsi->ucsi);
	/* Make sure we destroy *after* pmic_glink unregister */
	ret = devm_add_action_or_reset(dev, pmic_glink_ucsi_destroy, ucsi);
	if (ret)
		return ret;
	ucsi_set_drvdata(ucsi->ucsi, ucsi);
	ucsi->client = devm_pmic_glink_register_client(dev,
						       PMIC_GLINK_OWNER_USBC,
						       pmic_glink_ucsi_callback,
						       pmic_glink_ucsi_pdr_notify,
						       ucsi);
	return PTR_ERR_OR_ZERO(ucsi->client);
}
/* Auxiliary-device remove; remaining cleanup runs via devm actions. */
static void pmic_glink_ucsi_remove(struct auxiliary_device *adev)
{
	struct pmic_glink_ucsi *ucsi = dev_get_drvdata(&adev->dev);
	/* Unregister first to stop having read & writes */
	ucsi_unregister(ucsi->ucsi);
}
/* Matched against the "ucsi" auxiliary device created by pmic_glink. */
static const struct auxiliary_device_id pmic_glink_ucsi_id_table[] = {
	{ .name = "pmic_glink.ucsi", },
	{},
};
MODULE_DEVICE_TABLE(auxiliary, pmic_glink_ucsi_id_table);
static struct auxiliary_driver pmic_glink_ucsi_driver = {
	.name = "pmic_glink_ucsi",
	.probe = pmic_glink_ucsi_probe,
	.remove = pmic_glink_ucsi_remove,
	.id_table = pmic_glink_ucsi_id_table,
};
module_auxiliary_driver(pmic_glink_ucsi_driver);
MODULE_DESCRIPTION("Qualcomm PMIC GLINK UCSI driver");
MODULE_LICENSE("GPL");
| linux-master | drivers/usb/typec/ucsi/ucsi_glink.c |
// SPDX-License-Identifier: GPL-2.0
/*
* UCSI ACPI driver
*
* Copyright (C) 2017, Intel Corporation
* Author: Heikki Krogerus <[email protected]>
*/
#include <linux/platform_device.h>
#include <linux/module.h>
#include <linux/acpi.h>
#include <linux/dmi.h>
#include "ucsi.h"
#define UCSI_DSM_UUID "6f8398c2-7ca4-11e4-ad36-631042b5008f"
#define UCSI_DSM_FUNC_WRITE 1
#define UCSI_DSM_FUNC_READ 2
/* Per-device state for the ACPI-backed UCSI transport. */
struct ucsi_acpi {
	struct device *dev;
	struct ucsi *ucsi;
	void *base;                 /* memremapped shared UCSI mailbox */
	struct completion complete; /* signalled by the ACPI notify handler */
	unsigned long flags;        /* COMMAND_PENDING bit */
	guid_t guid;                /* UCSI _DSM UUID */
	u64 cmd;                    /* last command written (zenbook quirk) */
};
/*
 * Evaluate the UCSI _DSM with the given function index (read or write).
 * The returned object carries no data we use; it only confirms execution.
 */
static int ucsi_acpi_dsm(struct ucsi_acpi *ua, int func)
{
	union acpi_object *obj;
	obj = acpi_evaluate_dsm(ACPI_HANDLE(ua->dev), &ua->guid, 1, func,
				NULL);
	if (!obj) {
		dev_err(ua->dev, "%s: failed to evaluate _DSM %d\n",
			__func__, func);
		return -EIO;
	}
	ACPI_FREE(obj);
	return 0;
}
/*
 * UCSI read op: ask the firmware to refresh the shared mailbox via _DSM,
 * then copy the requested region out of it.
 */
static int ucsi_acpi_read(struct ucsi *ucsi, unsigned int offset,
			  void *val, size_t val_len)
{
	struct ucsi_acpi *ua = ucsi_get_drvdata(ucsi);
	int err = ucsi_acpi_dsm(ua, UCSI_DSM_FUNC_READ);

	if (err)
		return err;

	memcpy(val, ua->base + offset, val_len);

	return 0;
}
/*
 * UCSI async_write op: place the data in the shared mailbox, remember the
 * command (for the zenbook read quirk) and trigger the firmware via _DSM.
 */
static int ucsi_acpi_async_write(struct ucsi *ucsi, unsigned int offset,
				 const void *val, size_t val_len)
{
	struct ucsi_acpi *ua = ucsi_get_drvdata(ucsi);

	memcpy(ua->base + offset, val, val_len);
	ua->cmd = *(u64 *)val;

	return ucsi_acpi_dsm(ua, UCSI_DSM_FUNC_WRITE);
}
/*
 * UCSI sync_write op: issue the write and wait (up to 5 s) for the ACPI
 * notify handler to report command completion.
 */
static int ucsi_acpi_sync_write(struct ucsi *ucsi, unsigned int offset,
				const void *val, size_t val_len)
{
	struct ucsi_acpi *ua = ucsi_get_drvdata(ucsi);
	int err;

	set_bit(COMMAND_PENDING, &ua->flags);

	err = ucsi_acpi_async_write(ucsi, offset, val, val_len);
	if (!err && !wait_for_completion_timeout(&ua->complete, 5 * HZ))
		err = -ETIMEDOUT;

	clear_bit(COMMAND_PENDING, &ua->flags);
	return err;
}
/* Default I/O routines handed to the UCSI core. */
static const struct ucsi_operations ucsi_acpi_ops = {
	.read = ucsi_acpi_read,
	.sync_write = ucsi_acpi_sync_write,
	.async_write = ucsi_acpi_async_write
};
/*
 * Quirked read for the ASUS ZenBook: only evaluate the _DSM for VERSION
 * reads or right after a PPM_RESET; otherwise read the mailbox directly.
 */
static int
ucsi_zenbook_read(struct ucsi *ucsi, unsigned int offset, void *val, size_t val_len)
{
	struct ucsi_acpi *ua = ucsi_get_drvdata(ucsi);

	if (offset == UCSI_VERSION || UCSI_COMMAND(ua->cmd) == UCSI_PPM_RESET) {
		int err = ucsi_acpi_dsm(ua, UCSI_DSM_FUNC_READ);

		if (err)
			return err;
	}

	memcpy(val, ua->base + offset, val_len);

	return 0;
}
/* Quirked ops: same writes, special read handling (see ucsi_zenbook_read). */
static const struct ucsi_operations ucsi_zenbook_ops = {
	.read = ucsi_zenbook_read,
	.sync_write = ucsi_acpi_sync_write,
	.async_write = ucsi_acpi_async_write
};
/* Machines that need ucsi_zenbook_ops. */
static const struct dmi_system_id zenbook_dmi_id[] = {
	{
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
			DMI_MATCH(DMI_PRODUCT_NAME, "ZenBook UX325UA_UM325UA"),
		},
	},
	{ }
};
/*
 * ACPI notification handler: read CCI, forward connector changes to the
 * UCSI core and release a pending sync write on command/ack completion.
 */
static void ucsi_acpi_notify(acpi_handle handle, u32 event, void *data)
{
	struct ucsi_acpi *ua = data;
	u32 cci;

	if (ua->ucsi->ops->read(ua->ucsi, UCSI_CCI, &cci, sizeof(cci)))
		return;

	if (UCSI_CCI_CONNECTOR(cci))
		ucsi_connector_change(ua->ucsi, UCSI_CCI_CONNECTOR(cci));

	if ((cci & (UCSI_CCI_ACK_COMPLETE | UCSI_CCI_COMMAND_COMPLETE)) &&
	    test_bit(COMMAND_PENDING, &ua->flags))
		complete(&ua->complete);
}
/*
 * Platform probe: map the shared UCSI mailbox, pick the (possibly quirked)
 * I/O ops, install the ACPI notify handler and register the UCSI core.
 */
static int ucsi_acpi_probe(struct platform_device *pdev)
{
	struct acpi_device *adev = ACPI_COMPANION(&pdev->dev);
	const struct ucsi_operations *ops = &ucsi_acpi_ops;
	struct ucsi_acpi *ua;
	struct resource *res;
	acpi_status status;
	int ret;
	/* Wait for the devices this one depends on to be ready first */
	if (adev->dep_unmet)
		return -EPROBE_DEFER;
	ua = devm_kzalloc(&pdev->dev, sizeof(*ua), GFP_KERNEL);
	if (!ua)
		return -ENOMEM;
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res) {
		dev_err(&pdev->dev, "missing memory resource\n");
		return -ENODEV;
	}
	ua->base = devm_memremap(&pdev->dev, res->start, resource_size(res), MEMREMAP_WB);
	if (IS_ERR(ua->base))
		return PTR_ERR(ua->base);
	ret = guid_parse(UCSI_DSM_UUID, &ua->guid);
	if (ret)
		return ret;
	init_completion(&ua->complete);
	ua->dev = &pdev->dev;
	/* Some machines need quirked reads */
	if (dmi_check_system(zenbook_dmi_id))
		ops = &ucsi_zenbook_ops;
	ua->ucsi = ucsi_create(&pdev->dev, ops);
	if (IS_ERR(ua->ucsi))
		return PTR_ERR(ua->ucsi);
	ucsi_set_drvdata(ua->ucsi, ua);
	status = acpi_install_notify_handler(ACPI_HANDLE(&pdev->dev),
					     ACPI_DEVICE_NOTIFY,
					     ucsi_acpi_notify, ua);
	if (ACPI_FAILURE(status)) {
		dev_err(&pdev->dev, "failed to install notify handler\n");
		ucsi_destroy(ua->ucsi);
		return -ENODEV;
	}
	ret = ucsi_register(ua->ucsi);
	if (ret) {
		/* Unwind in reverse order: notify handler, then the instance */
		acpi_remove_notify_handler(ACPI_HANDLE(&pdev->dev),
					   ACPI_DEVICE_NOTIFY,
					   ucsi_acpi_notify);
		ucsi_destroy(ua->ucsi);
		return ret;
	}
	platform_set_drvdata(pdev, ua);
	return 0;
}
/* Platform remove: tear down in reverse order of ucsi_acpi_probe(). */
static void ucsi_acpi_remove(struct platform_device *pdev)
{
	struct ucsi_acpi *ua = platform_get_drvdata(pdev);
	ucsi_unregister(ua->ucsi);
	ucsi_destroy(ua->ucsi);
	acpi_remove_notify_handler(ACPI_HANDLE(&pdev->dev), ACPI_DEVICE_NOTIFY,
				   ucsi_acpi_notify);
}
/* PM resume hook: delegate to the UCSI core's deferred resume work. */
static int ucsi_acpi_resume(struct device *dev)
{
	struct ucsi_acpi *ua = dev_get_drvdata(dev);
	return ucsi_resume(ua->ucsi);
}
static DEFINE_SIMPLE_DEV_PM_OPS(ucsi_acpi_pm_ops, NULL, ucsi_acpi_resume);
/* ACPI ID of the UCSI mailbox device. */
static const struct acpi_device_id ucsi_acpi_match[] = {
	{ "PNP0CA0", 0 },
	{ },
};
MODULE_DEVICE_TABLE(acpi, ucsi_acpi_match);
static struct platform_driver ucsi_acpi_platform_driver = {
	.driver = {
		.name = "ucsi_acpi",
		.pm = pm_ptr(&ucsi_acpi_pm_ops),
		.acpi_match_table = ACPI_PTR(ucsi_acpi_match),
	},
	.probe = ucsi_acpi_probe,
	.remove_new = ucsi_acpi_remove,
};
module_platform_driver(ucsi_acpi_platform_driver);
MODULE_AUTHOR("Heikki Krogerus <[email protected]>");
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("UCSI ACPI driver");
| linux-master | drivers/usb/typec/ucsi/ucsi_acpi.c |
// SPDX-License-Identifier: GPL-2.0
/*
* UCSI DisplayPort Alternate Mode Support
*
* Copyright (C) 2018, Intel Corporation
* Author: Heikki Krogerus <[email protected]>
*/
#include <linux/usb/typec_dp.h>
#include <linux/usb/pd_vdo.h>
#include "ucsi.h"
/* Build a SET_NEW_CAM command for a connector/enter-flag/CAM-offset/AM value. */
#define UCSI_CMD_SET_NEW_CAM(_con_num_, _enter_, _cam_, _am_)		\
	(UCSI_SET_NEW_CAM | ((_con_num_) << 16) | ((_enter_) << 23) |	\
	 ((_cam_) << 24) | ((u64)(_am_) << 32))
/* Per-altmode state for DisplayPort emulation over UCSI. */
struct ucsi_dp {
	struct typec_displayport_data data;
	struct ucsi_connector *con;
	struct typec_altmode *alt;
	struct work_struct work;   /* defers typec_altmode_vdm() responses */
	int offset;                /* CAM offset for SET_NEW_CAM */
	bool override;             /* PPM supports alt mode overriding */
	bool initialized;          /* a Configure command has been seen */
	u32 header;                /* pending VDM header */
	u32 *vdo_data;             /* pending VDM payload (may be NULL) */
	u8 vdo_size;               /* pending VDM object count incl. header */
};
/*
* Note. Alternate mode control is optional feature in UCSI. It means that even
* if the system supports alternate modes, the OS may not be aware of them.
*
* In most cases however, the OS will be able to see the supported alternate
* modes, but it may still not be able to configure them, not even enter or exit
* them. That is because UCSI defines alt mode details and alt mode "overriding"
* as separate options.
*
* In case alt mode details are supported, but overriding is not, the driver
* will still display the supported pin assignments and configuration, but any
* changes the user attempts to do will lead into failure with return value of
* -EOPNOTSUPP.
*/
/*
 * Altmode enter op. The actual SET_NEW_CAM is postponed until the
 * Configure VDM arrives (see ucsi_displayport_configure()); here only an
 * Enter Mode ACK is fabricated and scheduled.
 */
static int ucsi_displayport_enter(struct typec_altmode *alt, u32 *vdo)
{
	struct ucsi_dp *dp = typec_altmode_get_drvdata(alt);
	struct ucsi *ucsi = dp->con->ucsi;
	int svdm_version;
	u64 command;
	u8 cur = 0;
	int ret;
	mutex_lock(&dp->con->lock);
	if (!dp->override && dp->initialized) {
		const struct typec_altmode *p = typec_altmode_get_partner(alt);
		dev_warn(&p->dev,
			 "firmware doesn't support alternate mode overriding\n");
		ret = -EOPNOTSUPP;
		goto err_unlock;
	}
	command = UCSI_GET_CURRENT_CAM | UCSI_CONNECTOR_NUMBER(dp->con->num);
	ret = ucsi_send_command(ucsi, command, &cur, sizeof(cur));
	if (ret < 0) {
		/* Pre-1.1 PPMs may not implement GET_CURRENT_CAM; assume none */
		if (ucsi->version > 0x0100)
			goto err_unlock;
		cur = 0xff;
	}
	/* 0xff means no CAM active; anything else must match this altmode */
	if (cur != 0xff) {
		ret = dp->con->port_altmode[cur] == alt ? 0 : -EBUSY;
		goto err_unlock;
	}
	/*
	 * We can't send the New CAM command yet to the PPM as it needs the
	 * configuration value as well. Pretending that we have now entered the
	 * mode, and letting the alt mode driver continue.
	 */
	svdm_version = typec_altmode_get_svdm_version(alt);
	if (svdm_version < 0) {
		ret = svdm_version;
		goto err_unlock;
	}
	dp->header = VDO(USB_TYPEC_DP_SID, 1, svdm_version, CMD_ENTER_MODE);
	dp->header |= VDO_OPOS(USB_TYPEC_DP_MODE);
	dp->header |= VDO_CMDT(CMDT_RSP_ACK);
	dp->vdo_data = NULL;
	dp->vdo_size = 1;
	schedule_work(&dp->work);
	ret = 0;
err_unlock:
	mutex_unlock(&dp->con->lock);
	return ret;
}
/*
 * Altmode exit op: tell the PPM to leave the CAM, then schedule an
 * Exit Mode ACK VDM toward the altmode driver.
 */
static int ucsi_displayport_exit(struct typec_altmode *alt)
{
	struct ucsi_dp *dp = typec_altmode_get_drvdata(alt);
	int svdm_version;
	u64 command;
	int ret = 0;
	mutex_lock(&dp->con->lock);
	if (!dp->override) {
		const struct typec_altmode *p = typec_altmode_get_partner(alt);
		dev_warn(&p->dev,
			 "firmware doesn't support alternate mode overriding\n");
		ret = -EOPNOTSUPP;
		goto out_unlock;
	}
	/* enter == 0 requests exiting the CAM */
	command = UCSI_CMD_SET_NEW_CAM(dp->con->num, 0, dp->offset, 0);
	ret = ucsi_send_command(dp->con->ucsi, command, NULL, 0);
	if (ret < 0)
		goto out_unlock;
	svdm_version = typec_altmode_get_svdm_version(alt);
	if (svdm_version < 0) {
		ret = svdm_version;
		goto out_unlock;
	}
	dp->header = VDO(USB_TYPEC_DP_SID, 1, svdm_version, CMD_EXIT_MODE);
	dp->header |= VDO_OPOS(USB_TYPEC_DP_MODE);
	dp->header |= VDO_CMDT(CMDT_RSP_ACK);
	dp->vdo_data = NULL;
	dp->vdo_size = 1;
	schedule_work(&dp->work);
out_unlock:
	mutex_unlock(&dp->con->lock);
	return ret;
}
/*
 * We do not actually have access to the Status Update VDO, so we have to guess
 * things.
 */
static int ucsi_displayport_status_update(struct ucsi_dp *dp)
{
	u32 cap = dp->alt->vdo;
	u32 status = DP_STATUS_ENABLED;

	/*
	 * If pin assignment D is supported, claiming always
	 * that Multi-function is preferred.
	 */
	if (DP_CAP_CAPABILITY(cap) & DP_CAP_UFP_D) {
		status |= DP_STATUS_CON_UFP_D;
		if (DP_CAP_UFP_D_PIN_ASSIGN(cap) & BIT(DP_PIN_ASSIGN_D))
			status |= DP_STATUS_PREFER_MULTI_FUNC;
	} else {
		status |= DP_STATUS_CON_DFP_D;
		if (DP_CAP_DFP_D_PIN_ASSIGN(cap) & BIT(DP_PIN_ASSIGN_D))
			status |= DP_STATUS_PREFER_MULTI_FUNC;
	}

	dp->data.status = status;
	dp->vdo_data = &dp->data.status;
	dp->vdo_size = 2;

	return 0;
}
/*
 * Send the deferred SET_NEW_CAM with the pin assignment taken from the
 * partner's Configure VDM. A no-op when overriding is unsupported.
 */
static int ucsi_displayport_configure(struct ucsi_dp *dp)
{
	u64 command;

	if (!dp->override)
		return 0;

	command = UCSI_CMD_SET_NEW_CAM(dp->con->num, 1, dp->offset,
				       DP_CONF_GET_PIN_ASSIGN(dp->data.conf));

	return ucsi_send_command(dp->con->ucsi, command, NULL, 0);
}
/*
 * Altmode vdm op: emulate partner responses to Status Update and Configure
 * since the PPM hides the real VDM traffic. The fabricated reply is sent
 * asynchronously from ucsi_displayport_work().
 */
static int ucsi_displayport_vdm(struct typec_altmode *alt,
				u32 header, const u32 *data, int count)
{
	struct ucsi_dp *dp = typec_altmode_get_drvdata(alt);
	int cmd_type = PD_VDO_CMDT(header);
	int cmd = PD_VDO_CMD(header);
	int svdm_version;
	mutex_lock(&dp->con->lock);
	if (!dp->override && dp->initialized) {
		const struct typec_altmode *p = typec_altmode_get_partner(alt);
		dev_warn(&p->dev,
			 "firmware doesn't support alternate mode overriding\n");
		mutex_unlock(&dp->con->lock);
		return -EOPNOTSUPP;
	}
	svdm_version = typec_altmode_get_svdm_version(alt);
	if (svdm_version < 0) {
		mutex_unlock(&dp->con->lock);
		return svdm_version;
	}
	switch (cmd_type) {
	case CMDT_INIT:
		/* Negotiate down to the initiator's SVDM version if lower */
		if (PD_VDO_SVDM_VER(header) < svdm_version) {
			typec_partner_set_svdm_version(dp->con->partner, PD_VDO_SVDM_VER(header));
			svdm_version = PD_VDO_SVDM_VER(header);
		}
		dp->header = VDO(USB_TYPEC_DP_SID, 1, svdm_version, cmd);
		dp->header |= VDO_OPOS(USB_TYPEC_DP_MODE);
		switch (cmd) {
		case DP_CMD_STATUS_UPDATE:
			if (ucsi_displayport_status_update(dp))
				dp->header |= VDO_CMDT(CMDT_RSP_NAK);
			else
				dp->header |= VDO_CMDT(CMDT_RSP_ACK);
			break;
		case DP_CMD_CONFIGURE:
			dp->data.conf = *data;
			if (ucsi_displayport_configure(dp)) {
				dp->header |= VDO_CMDT(CMDT_RSP_NAK);
			} else {
				dp->header |= VDO_CMDT(CMDT_RSP_ACK);
				/* First Configure completes initialization */
				if (dp->initialized)
					ucsi_altmode_update_active(dp->con);
				else
					dp->initialized = true;
			}
			break;
		default:
			dp->header |= VDO_CMDT(CMDT_RSP_ACK);
			break;
		}
		schedule_work(&dp->work);
		break;
	default:
		break;
	}
	mutex_unlock(&dp->con->lock);
	return 0;
}
/* Altmode operations handed to the Type-C class for the DP port altmode. */
static const struct typec_altmode_ops ucsi_displayport_ops = {
	.enter = ucsi_displayport_enter,
	.exit = ucsi_displayport_exit,
	.vdm = ucsi_displayport_vdm,
};
/*
 * Deferred VDM delivery: forward the fabricated response built by the
 * enter/exit/vdm ops to the altmode driver, then clear the pending state.
 */
static void ucsi_displayport_work(struct work_struct *work)
{
	struct ucsi_dp *dp = container_of(work, struct ucsi_dp, work);

	mutex_lock(&dp->con->lock);

	if (typec_altmode_vdm(dp->alt, dp->header, dp->vdo_data, dp->vdo_size))
		dev_err(&dp->alt->dev, "VDM 0x%x failed\n", dp->header);

	dp->vdo_data = NULL;
	dp->vdo_size = 0;
	dp->header = 0;

	mutex_unlock(&dp->con->lock);
}
void ucsi_displayport_remove_partner(struct typec_altmode *alt)
{
struct ucsi_dp *dp;
if (!alt)
return;
dp = typec_altmode_get_drvdata(alt);
if (!dp)
return;
dp->data.conf = 0;
dp->data.status = 0;
dp->initialized = false;
}
/*
 * Register the DisplayPort port altmode for a connector and attach the
 * emulation state. Returns the altmode or an ERR_PTR.
 */
struct typec_altmode *ucsi_register_displayport(struct ucsi_connector *con,
						bool override, int offset,
						struct typec_altmode_desc *desc)
{
	u8 all_assignments = BIT(DP_PIN_ASSIGN_C) | BIT(DP_PIN_ASSIGN_D) |
			     BIT(DP_PIN_ASSIGN_E);
	struct typec_altmode *alt;
	struct ucsi_dp *dp;
	/* We can't rely on the firmware with the capabilities. */
	desc->vdo |= DP_CAP_DP_SIGNALING | DP_CAP_RECEPTACLE;
	/* Claiming that we support all pin assignments */
	desc->vdo |= all_assignments << 8;
	desc->vdo |= all_assignments << 16;
	alt = typec_port_register_altmode(con->port, desc);
	if (IS_ERR(alt))
		return alt;
	dp = devm_kzalloc(&alt->dev, sizeof(*dp), GFP_KERNEL);
	if (!dp) {
		typec_unregister_altmode(alt);
		return ERR_PTR(-ENOMEM);
	}
	INIT_WORK(&dp->work, ucsi_displayport_work);
	dp->override = override;
	dp->offset = offset;
	dp->con = con;
	dp->alt = alt;
	alt->ops = &ucsi_displayport_ops;
	typec_altmode_set_drvdata(alt, dp);
	return alt;
}
| linux-master | drivers/usb/typec/ucsi/displayport.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Power Supply for UCSI
*
* Copyright (C) 2020, Intel Corporation
* Author: K V, Abhilash <[email protected]>
* Author: Heikki Krogerus <[email protected]>
*/
#include <linux/property.h>
#include <linux/usb/pd.h>
#include "ucsi.h"
/* Power Supply access to expose source power information */
/* Values reported through POWER_SUPPLY_PROP_ONLINE. */
enum ucsi_psy_online_states {
	UCSI_PSY_OFFLINE = 0,
	UCSI_PSY_FIXED_ONLINE,
	UCSI_PSY_PROG_ONLINE,
};
/* Properties exposed by the per-connector power supply. */
static enum power_supply_property ucsi_psy_props[] = {
	POWER_SUPPLY_PROP_USB_TYPE,
	POWER_SUPPLY_PROP_ONLINE,
	POWER_SUPPLY_PROP_VOLTAGE_MIN,
	POWER_SUPPLY_PROP_VOLTAGE_MAX,
	POWER_SUPPLY_PROP_VOLTAGE_NOW,
	POWER_SUPPLY_PROP_CURRENT_MAX,
	POWER_SUPPLY_PROP_CURRENT_NOW,
	POWER_SUPPLY_PROP_SCOPE,
};
/* Report the firmware-provided "scope" property, UNKNOWN when absent. */
static int ucsi_psy_get_scope(struct ucsi_connector *con,
			      union power_supply_propval *val)
{
	struct device *dev = con->ucsi->dev;
	u8 scope = POWER_SUPPLY_SCOPE_UNKNOWN;

	/* Optional property; scope keeps its default when the read fails */
	device_property_read_u8(dev, "scope", &scope);
	val->intval = scope;

	return 0;
}
static int ucsi_psy_get_online(struct ucsi_connector *con,
union power_supply_propval *val)
{
val->intval = UCSI_PSY_OFFLINE;
if (con->status.flags & UCSI_CONSTAT_CONNECTED &&
(con->status.flags & UCSI_CONSTAT_PWR_DIR) == TYPEC_SINK)
val->intval = UCSI_PSY_FIXED_ONLINE;
return 0;
}
/*
 * Report the minimum supply voltage in µV: the first (lowest) source PDO
 * in PD mode, vSafe5V in the plain Type-C / BC modes.
 */
static int ucsi_psy_get_voltage_min(struct ucsi_connector *con,
				    union power_supply_propval *val)
{
	u32 pdo;

	switch (UCSI_CONSTAT_PWR_OPMODE(con->status.flags)) {
	case UCSI_CONSTAT_PWR_OPMODE_PD:
		/*
		 * Guard against an empty PDO list before reading
		 * src_pdos[0], mirroring ucsi_psy_get_voltage_max().
		 */
		if (con->num_pdos > 0) {
			pdo = con->src_pdos[0];
			val->intval = pdo_fixed_voltage(pdo) * 1000;
		} else {
			val->intval = 0;
		}
		break;
	case UCSI_CONSTAT_PWR_OPMODE_TYPEC3_0:
	case UCSI_CONSTAT_PWR_OPMODE_TYPEC1_5:
	case UCSI_CONSTAT_PWR_OPMODE_BC:
	case UCSI_CONSTAT_PWR_OPMODE_DEFAULT:
		val->intval = UCSI_TYPEC_VSAFE5V * 1000;
		break;
	default:
		val->intval = 0;
		break;
	}
	return 0;
}
/*
 * Report the maximum supply voltage in µV: the last (highest) source PDO
 * in PD mode, vSafe5V in the plain Type-C / BC modes.
 */
static int ucsi_psy_get_voltage_max(struct ucsi_connector *con,
				    union power_supply_propval *val)
{
	switch (UCSI_CONSTAT_PWR_OPMODE(con->status.flags)) {
	case UCSI_CONSTAT_PWR_OPMODE_PD:
		val->intval = con->num_pdos > 0 ?
			pdo_fixed_voltage(con->src_pdos[con->num_pdos - 1]) * 1000 :
			0;
		break;
	case UCSI_CONSTAT_PWR_OPMODE_TYPEC3_0:
	case UCSI_CONSTAT_PWR_OPMODE_TYPEC1_5:
	case UCSI_CONSTAT_PWR_OPMODE_BC:
	case UCSI_CONSTAT_PWR_OPMODE_DEFAULT:
		val->intval = UCSI_TYPEC_VSAFE5V * 1000;
		break;
	default:
		val->intval = 0;
		break;
	}
	return 0;
}
/*
 * Report the negotiated supply voltage in µV: the PDO selected by the
 * active RDO in PD mode, vSafe5V in the plain Type-C / BC modes.
 */
static int ucsi_psy_get_voltage_now(struct ucsi_connector *con,
				    union power_supply_propval *val)
{
	switch (UCSI_CONSTAT_PWR_OPMODE(con->status.flags)) {
	case UCSI_CONSTAT_PWR_OPMODE_PD: {
		int idx = rdo_index(con->rdo);

		/* RDO object positions are 1-based; 0 means no contract */
		val->intval = idx > 0 ?
			pdo_fixed_voltage(con->src_pdos[idx - 1]) * 1000 : 0;
		break;
	}
	case UCSI_CONSTAT_PWR_OPMODE_TYPEC3_0:
	case UCSI_CONSTAT_PWR_OPMODE_TYPEC1_5:
	case UCSI_CONSTAT_PWR_OPMODE_BC:
	case UCSI_CONSTAT_PWR_OPMODE_DEFAULT:
		val->intval = UCSI_TYPEC_VSAFE5V * 1000;
		break;
	default:
		val->intval = 0;
		break;
	}
	return 0;
}
/*
 * Report the maximum available current in µA, derived from the highest
 * source PDO in PD mode or the advertised Type-C current level.
 */
static int ucsi_psy_get_current_max(struct ucsi_connector *con,
				    union power_supply_propval *val)
{
	switch (UCSI_CONSTAT_PWR_OPMODE(con->status.flags)) {
	case UCSI_CONSTAT_PWR_OPMODE_PD:
		val->intval = con->num_pdos > 0 ?
			pdo_max_current(con->src_pdos[con->num_pdos - 1]) * 1000 :
			0;
		break;
	case UCSI_CONSTAT_PWR_OPMODE_TYPEC1_5:
		val->intval = UCSI_TYPEC_1_5_CURRENT * 1000;
		break;
	case UCSI_CONSTAT_PWR_OPMODE_TYPEC3_0:
		val->intval = UCSI_TYPEC_3_0_CURRENT * 1000;
		break;
	case UCSI_CONSTAT_PWR_OPMODE_BC:
	case UCSI_CONSTAT_PWR_OPMODE_DEFAULT:
		/* UCSI can't tell b/w DCP/CDP or USB2/3x1/3x2 SDP chargers */
	default:
		val->intval = 0;
		break;
	}
	return 0;
}
/*
 * Report the operating current of the active contract, in microamps.
 * Only a PD contract exposes this (via the RDO); otherwise report 0.
 */
static int ucsi_psy_get_current_now(struct ucsi_connector *con,
				    union power_supply_propval *val)
{
	u16 flags = con->status.flags;

	if (UCSI_CONSTAT_PWR_OPMODE(flags) != UCSI_CONSTAT_PWR_OPMODE_PD) {
		val->intval = 0;
		return 0;
	}

	val->intval = rdo_op_current(con->rdo) * 1000;
	return 0;
}
static int ucsi_psy_get_usb_type(struct ucsi_connector *con,
union power_supply_propval *val)
{
u16 flags = con->status.flags;
val->intval = POWER_SUPPLY_USB_TYPE_C;
if (flags & UCSI_CONSTAT_CONNECTED &&
UCSI_CONSTAT_PWR_OPMODE(flags) == UCSI_CONSTAT_PWR_OPMODE_PD)
val->intval = POWER_SUPPLY_USB_TYPE_PD;
return 0;
}
/*
 * power_supply get_property callback: dispatch each supported property
 * to its dedicated helper.  Unsupported properties yield -EINVAL.
 */
static int ucsi_psy_get_prop(struct power_supply *psy,
			     enum power_supply_property psp,
			     union power_supply_propval *val)
{
	struct ucsi_connector *con = power_supply_get_drvdata(psy);

	if (psp == POWER_SUPPLY_PROP_USB_TYPE)
		return ucsi_psy_get_usb_type(con, val);
	if (psp == POWER_SUPPLY_PROP_ONLINE)
		return ucsi_psy_get_online(con, val);
	if (psp == POWER_SUPPLY_PROP_VOLTAGE_MIN)
		return ucsi_psy_get_voltage_min(con, val);
	if (psp == POWER_SUPPLY_PROP_VOLTAGE_MAX)
		return ucsi_psy_get_voltage_max(con, val);
	if (psp == POWER_SUPPLY_PROP_VOLTAGE_NOW)
		return ucsi_psy_get_voltage_now(con, val);
	if (psp == POWER_SUPPLY_PROP_CURRENT_MAX)
		return ucsi_psy_get_current_max(con, val);
	if (psp == POWER_SUPPLY_PROP_CURRENT_NOW)
		return ucsi_psy_get_current_now(con, val);
	if (psp == POWER_SUPPLY_PROP_SCOPE)
		return ucsi_psy_get_scope(con, val);
	return -EINVAL;
}
/* USB charger types this supply may report via POWER_SUPPLY_PROP_USB_TYPE. */
static enum power_supply_usb_type ucsi_psy_usb_types[] = {
	POWER_SUPPLY_USB_TYPE_C,
	POWER_SUPPLY_USB_TYPE_PD,
	POWER_SUPPLY_USB_TYPE_PD_PPS,
};
int ucsi_register_port_psy(struct ucsi_connector *con)
{
struct power_supply_config psy_cfg = {};
struct device *dev = con->ucsi->dev;
char *psy_name;
psy_cfg.drv_data = con;
psy_cfg.fwnode = dev_fwnode(dev);
psy_name = devm_kasprintf(dev, GFP_KERNEL, "ucsi-source-psy-%s%d",
dev_name(dev), con->num);
if (!psy_name)
return -ENOMEM;
con->psy_desc.name = psy_name;
con->psy_desc.type = POWER_SUPPLY_TYPE_USB;
con->psy_desc.usb_types = ucsi_psy_usb_types;
con->psy_desc.num_usb_types = ARRAY_SIZE(ucsi_psy_usb_types);
con->psy_desc.properties = ucsi_psy_props;
con->psy_desc.num_properties = ARRAY_SIZE(ucsi_psy_props);
con->psy_desc.get_property = ucsi_psy_get_prop;
con->psy = power_supply_register(dev, &con->psy_desc, &psy_cfg);
return PTR_ERR_OR_ZERO(con->psy);
}
/* Tear down the connector's power_supply device, if one was registered. */
void ucsi_unregister_port_psy(struct ucsi_connector *con)
{
	if (!IS_ERR_OR_NULL(con->psy)) {
		power_supply_unregister(con->psy);
		con->psy = NULL;
	}
}
/* Notify userspace that the connector's supply properties changed. */
void ucsi_port_psy_changed(struct ucsi_connector *con)
{
	if (!IS_ERR_OR_NULL(con->psy))
		power_supply_changed(con->psy);
}
| linux-master | drivers/usb/typec/ucsi/psy.c |
// SPDX-License-Identifier: GPL-2.0
/*
* UCSI driver for Cypress CCGx Type-C controller
*
* Copyright (C) 2017-2018 NVIDIA Corporation. All rights reserved.
* Author: Ajay Gupta <[email protected]>
*
* Some code borrowed from drivers/usb/typec/ucsi/ucsi_acpi.c
*/
#include <linux/acpi.h>
#include <linux/delay.h>
#include <linux/firmware.h>
#include <linux/i2c.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
#include <linux/pm.h>
#include <linux/pm_runtime.h>
#include <linux/usb/typec_dp.h>
#include <asm/unaligned.h>
#include "ucsi.h"
/* Which firmware image the CCGx is currently executing. */
enum enum_fw_mode {
	BOOT, /* bootloader */
	FW1, /* FW partition-1 (contains secondary fw) */
	FW2, /* FW partition-2 (contains primary fw) */
	FW_INVALID,
};
#define CCGX_RAB_DEVICE_MODE 0x0000
#define CCGX_RAB_INTR_REG 0x0006
#define DEV_INT BIT(0)
#define PORT0_INT BIT(1)
#define PORT1_INT BIT(2)
#define UCSI_READ_INT BIT(7)
#define CCGX_RAB_JUMP_TO_BOOT 0x0007
#define TO_BOOT 'J'
#define TO_ALT_FW 'A'
#define CCGX_RAB_RESET_REQ 0x0008
#define RESET_SIG 'R'
#define CMD_RESET_I2C 0x0
#define CMD_RESET_DEV 0x1
#define CCGX_RAB_ENTER_FLASHING 0x000A
#define FLASH_ENTER_SIG 'P'
#define CCGX_RAB_VALIDATE_FW 0x000B
#define CCGX_RAB_FLASH_ROW_RW 0x000C
#define FLASH_SIG 'F'
#define FLASH_RD_CMD 0x0
#define FLASH_WR_CMD 0x1
#define FLASH_FWCT1_WR_CMD 0x2
#define FLASH_FWCT2_WR_CMD 0x3
#define FLASH_FWCT_SIG_WR_CMD 0x4
#define CCGX_RAB_READ_ALL_VER 0x0010
#define CCGX_RAB_READ_FW2_VER 0x0020
#define CCGX_RAB_UCSI_CONTROL 0x0039
#define CCGX_RAB_UCSI_CONTROL_START BIT(0)
#define CCGX_RAB_UCSI_CONTROL_STOP BIT(1)
#define CCGX_RAB_UCSI_DATA_BLOCK(offset) (0xf000 | ((offset) & 0xff))
#define REG_FLASH_RW_MEM 0x0200
#define DEV_REG_IDX CCGX_RAB_DEVICE_MODE
#define CCGX_RAB_PDPORT_ENABLE 0x002C
#define PDPORT_1 BIT(0)
#define PDPORT_2 BIT(1)
#define CCGX_RAB_RESPONSE 0x007E
#define ASYNC_EVENT BIT(7)
/* CCGx events & async msg codes */
#define RESET_COMPLETE 0x80
#define EVENT_INDEX RESET_COMPLETE
#define PORT_CONNECT_DET 0x84
#define PORT_DISCONNECT_DET 0x85
#define ROLE_SWAP_COMPELETE 0x87
/* ccg firmware */
#define CYACD_LINE_SIZE 527
#define CCG4_ROW_SIZE 256
#define FW1_METADATA_ROW 0x1FF
#define FW2_METADATA_ROW 0x1FE
#define FW_CFG_TABLE_SIG_SIZE 256
static int secondary_fw_min_ver = 41;
/* Which flash operation ccg_fw_update_needed() decided is required. */
enum enum_flash_mode {
	SECONDARY_BL, /* update secondary using bootloader */
	PRIMARY, /* update primary using secondary */
	SECONDARY, /* update secondary using primary */
	FLASH_NOT_NEEDED, /* update not required */
	FLASH_INVALID,
};

/* Firmware file names, indexed by enum enum_flash_mode (BL/primary/secondary). */
static const char * const ccg_fw_names[] = {
	"ccg_boot.cyacd",
	"ccg_primary.cyacd",
	"ccg_secondary.cyacd"
};
/* Layout of the CCGX_RAB_DEVICE_MODE register block. */
struct ccg_dev_info {
#define CCG_DEVINFO_FWMODE_SHIFT (0)
#define CCG_DEVINFO_FWMODE_MASK (0x3 << CCG_DEVINFO_FWMODE_SHIFT)
#define CCG_DEVINFO_PDPORTS_SHIFT (2)
#define CCG_DEVINFO_PDPORTS_MASK (0x3 << CCG_DEVINFO_PDPORTS_SHIFT)
	u8 mode;	/* running fw image and PD port count bitfields */
	u8 bl_mode;
	__le16 silicon_id;
	__le16 bl_last_row;
} __packed;

/* One firmware version stamp as reported by CCGX_RAB_READ_ALL_VER. */
struct version_format {
	__le16 build;	/* vendor-specific build tag, e.g. 'nv' */
	u8 patch;
	u8 ver;		/* packed major/minor nibbles, see masks below */
#define CCG_VERSION_PATCH(x) ((x) << 16)
#define CCG_VERSION(x) ((x) << 24)
#define CCG_VERSION_MIN_SHIFT (0)
#define CCG_VERSION_MIN_MASK (0xf << CCG_VERSION_MIN_SHIFT)
#define CCG_VERSION_MAJ_SHIFT (4)
#define CCG_VERSION_MAJ_MASK (0xf << CCG_VERSION_MAJ_SHIFT)
} __packed;

/*
 * Firmware version 3.1.10 or earlier, built for NVIDIA has known issue
 * of missing interrupt when a device is connected for runtime resume
 */
#define CCG_FW_BUILD_NVIDIA (('n' << 8) | 'v')
#define CCG_OLD_FW_VERSION (CCG_VERSION(0x31) | CCG_VERSION_PATCH(10))

/* Firmware for Tegra doesn't support UCSI ALT command, built
 * for NVIDIA has known issue of reporting wrong capability info
 */
#define CCG_FW_BUILD_NVIDIA_TEGRA (('g' << 8) | 'n')

/* Altmode offset for NVIDIA Function Test Board (FTB) */
#define NVIDIA_FTB_DP_OFFSET (2)
#define NVIDIA_FTB_DBG_OFFSET (3)

/* Base (bootloader) and application version pair for one image. */
struct version_info {
	struct version_format base;
	struct version_format app;
};

/* Trailing firmware configuration table ("FWCT") of a signed image. */
struct fw_config_table {
	u32 identity;	/* must be the ASCII bytes "FWCT" */
	u16 table_size;
	u8 fwct_version;
	u8 is_key_change;
	u8 guid[16];
	struct version_format base;
	struct version_format app;
	u8 primary_fw_digest[32];
	u32 key_exp_length;
	u8 key_modulus[256];
	u8 key_exp[4];
};

/* CCGx response codes */
enum ccg_resp_code {
	CMD_NO_RESP = 0x00,
	CMD_SUCCESS = 0x02,
	FLASH_DATA_AVAILABLE = 0x03,
	CMD_INVALID = 0x05,
	FLASH_UPDATE_FAIL = 0x07,
	INVALID_FW = 0x08,
	INVALID_ARG = 0x09,
	CMD_NOT_SUPPORT = 0x0A,
	TRANSACTION_FAIL = 0x0C,
	PD_CMD_FAIL = 0x0D,
	UNDEF_ERROR = 0x0F,
	INVALID_RESP = 0x10,
};

/* One past the highest valid async event code. */
#define CCG_EVENT_MAX (EVENT_INDEX + 43)

/* A command written to a device register, with its response timeout. */
struct ccg_cmd {
	u16 reg;
	u32 data;
	int len;
	u32 delay; /* ms delay for cmd timeout */
};

/* Header of an entry in the CCGX_RAB_RESPONSE queue. */
struct ccg_resp {
	u8 code;
	u8 length;
};

/* Driver-side bookkeeping for one (possibly merged) alternate mode. */
struct ucsi_ccg_altmode {
	u16 svid;
	u32 mid;
	u8 linked_idx;	/* index in the peer table this entry maps to */
	u8 active_idx;	/* currently selected original DP mode */
#define UCSI_MULTI_DP_INDEX (0xff)
	bool checked;
} __packed;
/* Per-device driver state for one CCGx controller. */
struct ucsi_ccg {
	struct device *dev;
	struct ucsi *ucsi;
	struct i2c_client *client;
	struct ccg_dev_info info;
	/* version info for boot, primary and secondary */
	struct version_info version[FW2 + 1];
	u32 fw_version;		/* cached FW2 application version */
	/* CCG HPI communication flags */
	unsigned long flags;
#define RESET_PENDING 0
#define DEV_CMD_PENDING 1
	struct ccg_resp dev_resp;	/* last response popped from the queue */
	u8 cmd_resp;
	int port_num;
	int irq;
	struct work_struct work;	/* firmware-flash worker */
	struct mutex lock; /* to sync between user and driver thread */
	/* fw build with vendor information */
	u16 fw_build;
	struct work_struct pm_work;	/* missed-interrupt poll on resume */
	struct completion complete;	/* signalled by IRQ for sync writes */
	u64 last_cmd_sent;
	bool has_multiple_dp;
	/* original vs. deduplicated altmode tables, see update_altmodes */
	struct ucsi_ccg_altmode orig[UCSI_MAX_ALTMODES];
	struct ucsi_ccg_altmode updated[UCSI_MAX_ALTMODES];
};
/*
 * Read @len bytes starting at register address block offset @rab into
 * @data.  Each chunk is a write of the 16-bit little-endian register
 * address followed by a read; chunking honours the i2c adapter's
 * max_read_len quirk.  The device is runtime-resumed for the duration.
 * Returns 0 on success, negative errno on i2c failure.
 */
static int ccg_read(struct ucsi_ccg *uc, u16 rab, u8 *data, u32 len)
{
	struct i2c_client *client = uc->client;
	const struct i2c_adapter_quirks *quirks = client->adapter->quirks;
	unsigned char buf[2];
	struct i2c_msg msgs[] = {
		{
			.addr = client->addr,
			.flags = 0x0,
			.len = sizeof(buf),
			.buf = buf,
		},
		{
			.addr = client->addr,
			.flags = I2C_M_RD,
			.buf = data,
		},
	};
	u32 rlen, rem_len = len, max_read_len = len;
	int status;

	/* check any max_read_len limitation on i2c adapter */
	if (quirks && quirks->max_read_len)
		max_read_len = quirks->max_read_len;

	pm_runtime_get_sync(uc->dev);
	while (rem_len > 0) {
		msgs[1].buf = &data[len - rem_len];
		/*
		 * NOTE(review): min_t(u16, ...) truncates both operands to
		 * 16 bits, so chunk sizing would misbehave for transfers of
		 * 64 KiB or more - confirm all callers stay far below that.
		 */
		rlen = min_t(u16, rem_len, max_read_len);
		msgs[1].len = rlen;
		put_unaligned_le16(rab, buf);
		status = i2c_transfer(client->adapter, msgs, ARRAY_SIZE(msgs));
		if (status < 0) {
			dev_err(uc->dev, "i2c_transfer failed %d\n", status);
			pm_runtime_put_sync(uc->dev);
			return status;
		}
		/* advance the register address past the chunk just read */
		rab += rlen;
		rem_len -= rlen;
	}

	pm_runtime_put_sync(uc->dev);
	return 0;
}
/*
 * Write @len bytes from @data to register address block offset @rab.
 * The transfer is one i2c write whose payload is the 16-bit little-
 * endian register address followed by the data.  Returns 0 on success,
 * negative errno otherwise.
 */
static int ccg_write(struct ucsi_ccg *uc, u16 rab, const u8 *data, u32 len)
{
	struct i2c_client *client = uc->client;
	struct i2c_msg msg = {
		.addr = client->addr,
		.flags = 0x0,
	};
	unsigned char *xfer;
	int status;

	xfer = kzalloc(len + sizeof(rab), GFP_KERNEL);
	if (!xfer)
		return -ENOMEM;

	/* Prefix the payload with the little-endian register address. */
	put_unaligned_le16(rab, xfer);
	memcpy(xfer + sizeof(rab), data, len);
	msg.len = len + sizeof(rab);
	msg.buf = xfer;

	pm_runtime_get_sync(uc->dev);
	status = i2c_transfer(client->adapter, &msg, 1);
	if (status < 0)
		dev_err(uc->dev, "i2c_transfer failed %d\n", status);
	pm_runtime_put_sync(uc->dev);

	kfree(xfer);
	return status < 0 ? status : 0;
}
/*
 * Restart the UCSI interface on the CCGx (stop then start) and drain
 * the response queue the control writes generate.  Returns 0 on
 * success, negative errno on i2c failure or -ETIMEDOUT if DEV_INT
 * never deasserts.
 */
static int ucsi_ccg_init(struct ucsi_ccg *uc)
{
	unsigned int attempt;
	u8 data;
	int status;

	data = CCGX_RAB_UCSI_CONTROL_STOP;
	status = ccg_write(uc, CCGX_RAB_UCSI_CONTROL, &data, sizeof(data));
	if (status < 0)
		return status;

	data = CCGX_RAB_UCSI_CONTROL_START;
	status = ccg_write(uc, CCGX_RAB_UCSI_CONTROL, &data, sizeof(data));
	if (status < 0)
		return status;

	/*
	 * Flush CCGx RESPONSE queue by acking interrupts. Above ucsi control
	 * register write will push response which must be cleared.
	 */
	for (attempt = 0; attempt < 10; attempt++) {
		status = ccg_read(uc, CCGX_RAB_INTR_REG, &data, sizeof(data));
		if (status < 0)
			return status;

		if (!(data & DEV_INT))
			return 0;

		status = ccg_write(uc, CCGX_RAB_INTR_REG, &data, sizeof(data));
		if (status < 0)
			return status;

		usleep_range(10000, 11000);
	}
	return -ETIMEDOUT;
}
/*
 * Rewrite a GET_CURRENT_CAM reply: translate the firmware's CAM index
 * into the driver's merged altmode index, remembering which original
 * mode is active for that merged entry.
 */
static void ucsi_ccg_update_get_current_cam_cmd(struct ucsi_ccg *uc, u8 *data)
{
	u8 merged_cam;

	merged_cam = uc->orig[data[0]].linked_idx;
	uc->updated[merged_cam].active_idx = data[0];
	data[0] = merged_cam;
}
/*
 * Merge duplicate DP alternate modes reported by the firmware into one
 * entry per SVID, keeping a bidirectional index mapping (linked_idx)
 * between the original and merged tables.  Returns true when at least
 * one duplicate DP mode was merged.
 */
static bool ucsi_ccg_update_altmodes(struct ucsi *ucsi,
				     struct ucsi_altmode *orig,
				     struct ucsi_altmode *updated)
{
	struct ucsi_ccg *uc = ucsi_get_drvdata(ucsi);
	struct ucsi_ccg_altmode *alt, *new_alt;
	int i, j, k = 0;
	bool found = false;

	alt = uc->orig;
	new_alt = uc->updated;
	memset(uc->updated, 0, sizeof(uc->updated));

	/*
	 * Copy original connector altmodes to new structure.
	 * We need this before second loop since second loop
	 * checks for duplicate altmodes.
	 */
	for (i = 0; i < UCSI_MAX_ALTMODES; i++) {
		alt[i].svid = orig[i].svid;
		alt[i].mid = orig[i].mid;
		if (!alt[i].svid)
			break;
	}

	for (i = 0; i < UCSI_MAX_ALTMODES; i++) {
		if (!alt[i].svid)
			break;

		/* already checked and considered */
		if (alt[i].checked)
			continue;

		if (!DP_CONF_GET_PIN_ASSIGN(alt[i].mid)) {
			/* Found Non DP altmode */
			new_alt[k].svid = alt[i].svid;
			new_alt[k].mid |= alt[i].mid;
			new_alt[k].linked_idx = i;
			alt[i].linked_idx = k;
			updated[k].svid = new_alt[k].svid;
			updated[k].mid = new_alt[k].mid;
			k++;
			continue;
		}

		for (j = i + 1; j < UCSI_MAX_ALTMODES; j++) {
			if (alt[i].svid != alt[j].svid ||
			    !DP_CONF_GET_PIN_ASSIGN(alt[j].mid)) {
				continue;
			} else {
				/* Found duplicate DP mode */
				new_alt[k].svid = alt[i].svid;
				/* Union of pin assignments of all duplicates */
				new_alt[k].mid |= alt[i].mid | alt[j].mid;
				new_alt[k].linked_idx = UCSI_MULTI_DP_INDEX;
				alt[i].linked_idx = k;
				alt[j].linked_idx = k;
				alt[j].checked = true;
				found = true;
			}
		}
		if (found) {
			uc->has_multiple_dp = true;
		} else {
			/* Didn't find any duplicate DP altmode */
			new_alt[k].svid = alt[i].svid;
			new_alt[k].mid |= alt[i].mid;
			new_alt[k].linked_idx = i;
			alt[i].linked_idx = k;
		}
		/*
		 * NOTE(review): 'found' is never reset per iteration, so once
		 * a duplicate is merged, a later non-duplicate DP mode would
		 * skip the else branch above and leave new_alt[k] zeroed -
		 * confirm whether that combination can occur in practice.
		 */
		updated[k].svid = new_alt[k].svid;
		updated[k].mid = new_alt[k].mid;
		k++;
	}
	return found;
}
/*
 * Rewrite a SET_NEW_CAM command in place: translate the driver's merged
 * altmode index back into the firmware's CAM index.  For a merged
 * multi-DP entry, pick the best pin assignment the partner supports.
 */
static void ucsi_ccg_update_set_new_cam_cmd(struct ucsi_ccg *uc,
					    struct ucsi_connector *con,
					    u64 *cmd)
{
	struct ucsi_ccg_altmode *new_port, *port;
	struct typec_altmode *alt = NULL;
	u8 new_cam, cam, pin;
	bool enter_new_mode;
	int i, j, k = 0xff;

	port = uc->orig;
	new_cam = UCSI_SET_NEW_CAM_GET_AM(*cmd);
	new_port = &uc->updated[new_cam];
	cam = new_port->linked_idx;
	enter_new_mode = UCSI_SET_NEW_CAM_ENTER(*cmd);

	/*
	 * If CAM is UCSI_MULTI_DP_INDEX then this is DP altmode
	 * with multiple DP mode. Find out CAM for best pin assignment
	 * among all DP mode. Prioritize pin E->D->C after making sure
	 * the partner supports that pin.
	 */
	if (cam == UCSI_MULTI_DP_INDEX) {
		if (enter_new_mode) {
			for (i = 0; con->partner_altmode[i]; i++) {
				alt = con->partner_altmode[i];
				if (alt->svid == new_port->svid)
					break;
			}

			/*
			 * alt will always be non NULL since this is
			 * UCSI_SET_NEW_CAM command and so there will be
			 * at least one con->partner_altmode[i] with svid
			 * matching with new_port->svid.
			 */
			for (j = 0; port[j].svid; j++) {
				pin = DP_CONF_GET_PIN_ASSIGN(port[j].mid);
				if (alt && port[j].svid == alt->svid &&
				    (pin & DP_CONF_GET_PIN_ASSIGN(alt->vdo))) {
					/* prioritize pin E->D->C */
					if (k == 0xff || (k != 0xff && pin >
					    DP_CONF_GET_PIN_ASSIGN(port[k].mid))
					   ) {
						k = j;
					}
				}
			}
			cam = k;
			new_port->active_idx = cam;
		} else {
			/* Exiting: reuse whichever mode was entered before. */
			cam = new_port->active_idx;
		}
	}
	*cmd &= ~UCSI_SET_NEW_CAM_AM_MASK;
	*cmd |= UCSI_SET_NEW_CAM_SET_AM(cam);
}
/*
 * Change the order of vdo values of the NVIDIA test device FTB
 * (Function Test Board), which reports its altmode list with vdo=0x3
 * first and then vdo=0x1. The current logic assigns mode values based
 * on the order in the altmode list, which causes a mismatch between CON
 * and SOP altmodes, since the NVIDIA GPU connector reports vdo=0x1
 * first and then vdo=0x3.
 */
static void ucsi_ccg_nvidia_altmode(struct ucsi_ccg *uc,
struct ucsi_altmode *alt)
{
switch (UCSI_ALTMODE_OFFSET(uc->last_cmd_sent)) {
case NVIDIA_FTB_DP_OFFSET:
if (alt[0].mid == USB_TYPEC_NVIDIA_VLINK_DBG_VDO)
alt[0].mid = USB_TYPEC_NVIDIA_VLINK_DP_VDO |
DP_CAP_DP_SIGNALING | DP_CAP_USB |
DP_CONF_SET_PIN_ASSIGN(BIT(DP_PIN_ASSIGN_E));
break;
case NVIDIA_FTB_DBG_OFFSET:
if (alt[0].mid == USB_TYPEC_NVIDIA_VLINK_DP_VDO)
alt[0].mid = USB_TYPEC_NVIDIA_VLINK_DBG_VDO;
break;
default:
break;
}
}
/*
 * UCSI .read operation: fetch a UCSI data structure from the device and
 * post-process MESSAGE_IN replies of the command recorded in
 * last_cmd_sent (altmode remapping and vendor quirks).
 */
static int ucsi_ccg_read(struct ucsi *ucsi, unsigned int offset,
			 void *val, size_t val_len)
{
	struct ucsi_ccg *uc = ucsi_get_drvdata(ucsi);
	u16 reg = CCGX_RAB_UCSI_DATA_BLOCK(offset);
	struct ucsi_capability *cap;
	struct ucsi_altmode *alt;
	int ret;

	ret = ccg_read(uc, reg, val, val_len);
	if (ret)
		return ret;

	/* Only MESSAGE_IN payloads need fixups. */
	if (offset != UCSI_MESSAGE_IN)
		return ret;

	switch (UCSI_COMMAND(uc->last_cmd_sent)) {
	case UCSI_GET_CURRENT_CAM:
		if (uc->has_multiple_dp)
			ucsi_ccg_update_get_current_cam_cmd(uc, (u8 *)val);
		break;
	case UCSI_GET_ALTERNATE_MODES:
		if (UCSI_ALTMODE_RECIPIENT(uc->last_cmd_sent) ==
		    UCSI_RECIPIENT_SOP) {
			alt = val;
			if (alt[0].svid == USB_TYPEC_NVIDIA_VLINK_SID)
				ucsi_ccg_nvidia_altmode(uc, alt);
		}
		break;
	case UCSI_GET_CAPABILITY:
		/* Tegra firmware reports altmode details it can't deliver. */
		if (uc->fw_build == CCG_FW_BUILD_NVIDIA_TEGRA) {
			cap = val;
			cap->features &= ~UCSI_CAP_ALT_MODE_DETAILS;
		}
		break;
	default:
		break;
	}
	/* The reply has been consumed; forget the command. */
	uc->last_cmd_sent = 0;

	return ret;
}
/*
 * UCSI .async_write operation: store a UCSI data structure into the
 * device's dedicated UCSI register window without waiting for a reply.
 */
static int ucsi_ccg_async_write(struct ucsi *ucsi, unsigned int offset,
				const void *val, size_t val_len)
{
	struct ucsi_ccg *uc = ucsi_get_drvdata(ucsi);

	return ccg_write(uc, CCGX_RAB_UCSI_DATA_BLOCK(offset), val, val_len);
}
/*
 * UCSI .sync_write operation: record CONTROL commands for reply fixups,
 * rewrite SET_NEW_CAM for merged DP altmodes, issue the write, then
 * wait up to 5s for the IRQ handler to signal completion.
 */
static int ucsi_ccg_sync_write(struct ucsi *ucsi, unsigned int offset,
			       const void *val, size_t val_len)
{
	struct ucsi_ccg *uc = ucsi_get_drvdata(ucsi);
	struct ucsi_connector *con;
	int con_index;
	int ret;

	mutex_lock(&uc->lock);
	pm_runtime_get_sync(uc->dev);
	set_bit(DEV_CMD_PENDING, &uc->flags);

	if (offset == UCSI_CONTROL && val_len == sizeof(uc->last_cmd_sent)) {
		uc->last_cmd_sent = *(u64 *)val;

		if (UCSI_COMMAND(uc->last_cmd_sent) == UCSI_SET_NEW_CAM &&
		    uc->has_multiple_dp) {
			con_index = (uc->last_cmd_sent >> 16) &
				UCSI_CMD_CONNECTOR_MASK;
			con = &uc->ucsi->connector[con_index - 1];
			/* Patch the CAM index before it reaches the fw. */
			ucsi_ccg_update_set_new_cam_cmd(uc, con, (u64 *)val);
		}
	}

	ret = ucsi_ccg_async_write(ucsi, offset, val, val_len);
	if (ret)
		goto err_clear_bit;

	if (!wait_for_completion_timeout(&uc->complete, msecs_to_jiffies(5000)))
		ret = -ETIMEDOUT;

err_clear_bit:
	clear_bit(DEV_CMD_PENDING, &uc->flags);
	pm_runtime_put_sync(uc->dev);
	mutex_unlock(&uc->lock);

	return ret;
}
/* Transport operations handed to the UCSI core. */
static const struct ucsi_operations ucsi_ccg_ops = {
	.read = ucsi_ccg_read,
	.sync_write = ucsi_ccg_sync_write,
	.async_write = ucsi_ccg_async_write,
	.update_altmodes = ucsi_ccg_update_altmodes
};
static irqreturn_t ccg_irq_handler(int irq, void *data)
{
u16 reg = CCGX_RAB_UCSI_DATA_BLOCK(UCSI_CCI);
struct ucsi_ccg *uc = data;
u8 intr_reg;
u32 cci;
int ret;
ret = ccg_read(uc, CCGX_RAB_INTR_REG, &intr_reg, sizeof(intr_reg));
if (ret)
return ret;
ret = ccg_read(uc, reg, (void *)&cci, sizeof(cci));
if (ret)
goto err_clear_irq;
if (UCSI_CCI_CONNECTOR(cci))
ucsi_connector_change(uc->ucsi, UCSI_CCI_CONNECTOR(cci));
if (test_bit(DEV_CMD_PENDING, &uc->flags) &&
cci & (UCSI_CCI_ACK_COMPLETE | UCSI_CCI_COMMAND_COMPLETE))
complete(&uc->complete);
err_clear_irq:
ccg_write(uc, CCGX_RAB_INTR_REG, &intr_reg, sizeof(intr_reg));
return IRQ_HANDLED;
}
/*
 * Request the threaded device interrupt.  When no fwnode describes the
 * trigger type, default to level-high.
 */
static int ccg_request_irq(struct ucsi_ccg *uc)
{
	unsigned long irqflags = IRQF_ONESHOT;

	if (!dev_fwnode(uc->dev))
		irqflags |= IRQF_TRIGGER_HIGH;

	return request_threaded_irq(uc->irq, NULL, ccg_irq_handler, irqflags,
				    dev_name(uc->dev), uc);
}
/*
 * Runtime-resume workaround: poll the chip exactly as the interrupt
 * handler would, for firmware that misses the connect interrupt.
 */
static void ccg_pm_workaround_work(struct work_struct *pm_work)
{
	struct ucsi_ccg *uc = container_of(pm_work, struct ucsi_ccg, pm_work);

	ccg_irq_handler(0, uc);
}
/*
 * Refresh the cached version table and device-mode info from the chip.
 * Returns 0 on success, negative errno on read failure.
 */
static int get_fw_info(struct ucsi_ccg *uc)
{
	int err;

	err = ccg_read(uc, CCGX_RAB_READ_ALL_VER, (u8 *)(&uc->version),
		       sizeof(uc->version));
	if (err < 0)
		return err;

	/* Cache the primary (FW2) application version for quick checks. */
	uc->fw_version = CCG_VERSION(uc->version[FW2].app.ver) |
			CCG_VERSION_PATCH(uc->version[FW2].app.patch);

	return ccg_read(uc, CCGX_RAB_DEVICE_MODE, (u8 *)(&uc->info),
			sizeof(uc->info));
}
/* Valid async event codes fall in [EVENT_INDEX, CCG_EVENT_MAX). */
static inline bool invalid_async_evt(int code)
{
	return code < EVENT_INDEX || code >= CCG_EVENT_MAX;
}
static void ccg_process_response(struct ucsi_ccg *uc)
{
struct device *dev = uc->dev;
if (uc->dev_resp.code & ASYNC_EVENT) {
if (uc->dev_resp.code == RESET_COMPLETE) {
if (test_bit(RESET_PENDING, &uc->flags))
uc->cmd_resp = uc->dev_resp.code;
get_fw_info(uc);
}
if (invalid_async_evt(uc->dev_resp.code))
dev_err(dev, "invalid async evt %d\n",
uc->dev_resp.code);
} else {
if (test_bit(DEV_CMD_PENDING, &uc->flags)) {
uc->cmd_resp = uc->dev_resp.code;
clear_bit(DEV_CMD_PENDING, &uc->flags);
} else {
dev_err(dev, "dev resp 0x%04x but no cmd pending\n",
uc->dev_resp.code);
}
}
}
/*
 * Poll for DEV_INT (up to 1s), then pop one entry from the RESPONSE
 * queue into uc->dev_resp and ack the interrupt.  Returns 0 on success,
 * -ETIME on timeout, or a negative errno on i2c failure.
 */
static int ccg_read_response(struct ucsi_ccg *uc)
{
	unsigned long target = jiffies + msecs_to_jiffies(1000);
	struct device *dev = uc->dev;
	u8 intval;
	int status;

	/* wait for interrupt status to get updated */
	do {
		status = ccg_read(uc, CCGX_RAB_INTR_REG, &intval,
				  sizeof(intval));
		if (status < 0)
			return status;

		if (intval & DEV_INT)
			break;
		usleep_range(500, 600);
	} while (time_is_after_jiffies(target));

	/*
	 * Decide timeout from the observed interrupt status rather than
	 * from the clock: re-checking jiffies could report a timeout even
	 * though DEV_INT was seen on the final (late) iteration.
	 */
	if (!(intval & DEV_INT)) {
		dev_err(dev, "response timeout error\n");
		return -ETIME;
	}

	status = ccg_read(uc, CCGX_RAB_RESPONSE, (u8 *)&uc->dev_resp,
			  sizeof(uc->dev_resp));
	if (status < 0)
		return status;

	/* Ack the interrupt so the next response can be signalled. */
	return ccg_write(uc, CCGX_RAB_INTR_REG, &intval, sizeof(intval));
}
/* Caller must hold uc->lock */
/*
 * Issue one HPI command: mark it pending, write the command register,
 * wait the command-specific delay, read and process the response.
 * Returns the device response code (e.g. CMD_SUCCESS) or -EIO when no
 * response could be read.
 */
static int ccg_send_command(struct ucsi_ccg *uc, struct ccg_cmd *cmd)
{
	struct device *dev = uc->dev;
	int ret;

	/* Only the device register block (index 0x0000) is supported. */
	switch (cmd->reg & 0xF000) {
	case DEV_REG_IDX:
		set_bit(DEV_CMD_PENDING, &uc->flags);
		break;
	default:
		dev_err(dev, "invalid cmd register\n");
		break;
	}

	ret = ccg_write(uc, cmd->reg, (u8 *)&cmd->data, cmd->len);
	if (ret < 0)
		return ret;

	msleep(cmd->delay);

	ret = ccg_read_response(uc);
	if (ret < 0) {
		dev_err(dev, "response read error\n");
		/* Roll back the pending flag set above. */
		switch (cmd->reg & 0xF000) {
		case DEV_REG_IDX:
			clear_bit(DEV_CMD_PENDING, &uc->flags);
			break;
		default:
			dev_err(dev, "invalid cmd register\n");
			break;
		}
		return -EIO;
	}
	ccg_process_response(uc);

	return uc->cmd_resp;
}
static int ccg_cmd_enter_flashing(struct ucsi_ccg *uc)
{
struct ccg_cmd cmd;
int ret;
cmd.reg = CCGX_RAB_ENTER_FLASHING;
cmd.data = FLASH_ENTER_SIG;
cmd.len = 1;
cmd.delay = 50;
mutex_lock(&uc->lock);
ret = ccg_send_command(uc, &cmd);
mutex_unlock(&uc->lock);
if (ret != CMD_SUCCESS) {
dev_err(uc->dev, "enter flashing failed ret=%d\n", ret);
return ret;
}
return 0;
}
/*
 * Request a full device reset and wait for the RESET_COMPLETE async
 * event (signalled via cmd_resp thanks to RESET_PENDING).  Returns 0 on
 * success, otherwise the unexpected response code.
 */
static int ccg_cmd_reset(struct ucsi_ccg *uc)
{
	struct ccg_cmd cmd;
	u8 *payload = (u8 *)&cmd.data;
	int ret;

	cmd.reg = CCGX_RAB_RESET_REQ;
	payload[0] = RESET_SIG;
	payload[1] = CMD_RESET_DEV;
	cmd.len = 2;
	cmd.delay = 5000;

	mutex_lock(&uc->lock);
	set_bit(RESET_PENDING, &uc->flags);

	ret = ccg_send_command(uc, &cmd);
	if (ret == RESET_COMPLETE)
		ret = 0;

	clear_bit(RESET_PENDING, &uc->flags);
	mutex_unlock(&uc->lock);

	return ret;
}
/*
 * Enable or disable the device's PD port(s).  The enable mask covers
 * one or both ports depending on port_num.  Returns 0 or the failure
 * code.
 */
static int ccg_cmd_port_control(struct ucsi_ccg *uc, bool enable)
{
	struct ccg_cmd cmd = {
		.reg = CCGX_RAB_PDPORT_ENABLE,
		.data = 0x0,
		.len = 1,
		.delay = 10,
	};
	int ret;

	if (enable)
		cmd.data = uc->port_num == 1 ? PDPORT_1 : (PDPORT_1 | PDPORT_2);

	mutex_lock(&uc->lock);
	ret = ccg_send_command(uc, &cmd);
	mutex_unlock(&uc->lock);

	if (ret == CMD_SUCCESS)
		return 0;

	dev_err(uc->dev, "port control failed ret=%d\n", ret);
	return ret;
}
static int ccg_cmd_jump_boot_mode(struct ucsi_ccg *uc, int bl_mode)
{
struct ccg_cmd cmd;
int ret;
cmd.reg = CCGX_RAB_JUMP_TO_BOOT;
if (bl_mode)
cmd.data = TO_BOOT;
else
cmd.data = TO_ALT_FW;
cmd.len = 1;
cmd.delay = 100;
mutex_lock(&uc->lock);
set_bit(RESET_PENDING, &uc->flags);
ret = ccg_send_command(uc, &cmd);
if (ret != RESET_COMPLETE)
goto err_clear_flag;
ret = 0;
err_clear_flag:
clear_bit(RESET_PENDING, &uc->flags);
mutex_unlock(&uc->lock);
return ret;
}
/*
 * Write one flash row: stage the 256-byte row into the device's flash
 * read/write memory window, then trigger the row write with the given
 * flash sub-command (@fcmd).  Returns 0 or a negative/positive failure
 * code.
 */
static int
ccg_cmd_write_flash_row(struct ucsi_ccg *uc, u16 row,
			const void *data, u8 fcmd)
{
	struct i2c_client *client = uc->client;
	struct ccg_cmd cmd;
	u8 buf[CCG4_ROW_SIZE + 2];
	u8 *p;
	int ret;

	/* Copy the data into the flash read/write memory. */
	put_unaligned_le16(REG_FLASH_RW_MEM, buf);

	memcpy(buf + 2, data, CCG4_ROW_SIZE);

	mutex_lock(&uc->lock);

	ret = i2c_master_send(client, buf, CCG4_ROW_SIZE + 2);
	if (ret != CCG4_ROW_SIZE + 2) {
		dev_err(uc->dev, "REG_FLASH_RW_MEM write fail %d\n", ret);
		mutex_unlock(&uc->lock);
		return ret < 0 ? ret : -EIO;
	}

	/* Use the FLASH_ROW_READ_WRITE register to trigger */
	/* writing of data to the desired flash row */
	p = (u8 *)&cmd.data;
	cmd.reg = CCGX_RAB_FLASH_ROW_RW;
	p[0] = FLASH_SIG;
	p[1] = fcmd;
	put_unaligned_le16(row, &p[2]);
	cmd.len = 4;
	cmd.delay = 50;
	/* FWCT signature writes and the metadata row take longer. */
	if (fcmd == FLASH_FWCT_SIG_WR_CMD)
		cmd.delay += 400;
	if (row == 510)
		cmd.delay += 220;
	ret = ccg_send_command(uc, &cmd);

	mutex_unlock(&uc->lock);

	if (ret != CMD_SUCCESS) {
		dev_err(uc->dev, "write flash row failed ret=%d\n", ret);
		return ret;
	}

	return 0;
}
static int ccg_cmd_validate_fw(struct ucsi_ccg *uc, unsigned int fwid)
{
struct ccg_cmd cmd;
int ret;
cmd.reg = CCGX_RAB_VALIDATE_FW;
cmd.data = fwid;
cmd.len = 1;
cmd.delay = 500;
mutex_lock(&uc->lock);
ret = ccg_send_command(uc, &cmd);
mutex_unlock(&uc->lock);
if (ret != CMD_SUCCESS)
return ret;
return 0;
}
static bool ccg_check_vendor_version(struct ucsi_ccg *uc,
struct version_format *app,
struct fw_config_table *fw_cfg)
{
struct device *dev = uc->dev;
/* Check if the fw build is for supported vendors */
if (le16_to_cpu(app->build) != uc->fw_build) {
dev_info(dev, "current fw is not from supported vendor\n");
return false;
}
/* Check if the new fw build is for supported vendors */
if (le16_to_cpu(fw_cfg->app.build) != uc->fw_build) {
dev_info(dev, "new fw is not from supported vendor\n");
return false;
}
return true;
}
/*
 * Compare the running application version (@app) against the version
 * embedded in the signed firmware file @fw_name.  Returns true only
 * when the file is a signed image from the expected vendor AND carries
 * a strictly newer version; unsigned images always return false.
 */
static bool ccg_check_fw_version(struct ucsi_ccg *uc, const char *fw_name,
				 struct version_format *app)
{
	const struct firmware *fw = NULL;
	struct device *dev = uc->dev;
	struct fw_config_table fw_cfg;
	u32 cur_version, new_version;
	bool is_later = false;

	if (request_firmware(&fw, fw_name, dev) != 0) {
		dev_err(dev, "error: Failed to open cyacd file %s\n", fw_name);
		return false;
	}

	/*
	 * check if signed fw
	 * last part of fw image is fw cfg table and signature
	 */
	if (fw->size < sizeof(fw_cfg) + FW_CFG_TABLE_SIG_SIZE)
		goto out_release_firmware;

	memcpy((uint8_t *)&fw_cfg, fw->data + fw->size -
	       sizeof(fw_cfg) - FW_CFG_TABLE_SIG_SIZE, sizeof(fw_cfg));

	/* The FWCT magic is the ASCII bytes "FWCT" (little-endian). */
	if (fw_cfg.identity != ('F' | 'W' << 8 | 'C' << 16 | 'T' << 24)) {
		dev_info(dev, "not a signed image\n");
		goto out_release_firmware;
	}

	/* compare input version with FWCT version */
	cur_version = le16_to_cpu(app->build) | CCG_VERSION_PATCH(app->patch) |
			CCG_VERSION(app->ver);

	new_version = le16_to_cpu(fw_cfg.app.build) |
			CCG_VERSION_PATCH(fw_cfg.app.patch) |
			CCG_VERSION(fw_cfg.app.ver);

	if (!ccg_check_vendor_version(uc, app, &fw_cfg))
		goto out_release_firmware;

	if (new_version > cur_version)
		is_later = true;

out_release_firmware:
	release_firmware(fw);
	return is_later;
}
/*
 * Decide whether (and which) firmware image must be flashed, based on
 * the images currently on the device.  On success *mode is set (to
 * FLASH_NOT_NEEDED when everything is current) and 0 is returned;
 * read failures return a negative errno.
 */
static int ccg_fw_update_needed(struct ucsi_ccg *uc,
				enum enum_flash_mode *mode)
{
	struct device *dev = uc->dev;
	int err;
	struct version_info version[3];

	err = ccg_read(uc, CCGX_RAB_DEVICE_MODE, (u8 *)(&uc->info),
		       sizeof(uc->info));
	if (err) {
		dev_err(dev, "read device mode failed\n");
		return err;
	}

	err = ccg_read(uc, CCGX_RAB_READ_ALL_VER, (u8 *)version,
		       sizeof(version));
	if (err) {
		/* Fixed copy/paste: this read is the version table. */
		dev_err(dev, "read firmware version failed\n");
		return err;
	}

	if (memcmp(&version[FW1], "\0\0\0\0\0\0\0\0",
		   sizeof(struct version_info)) == 0) {
		dev_info(dev, "secondary fw is not flashed\n");
		*mode = SECONDARY_BL;
	} else if (le16_to_cpu(version[FW1].base.build) <
		secondary_fw_min_ver) {
		dev_info(dev, "secondary fw version is too low (< %d)\n",
			 secondary_fw_min_ver);
		*mode = SECONDARY;
	} else if (memcmp(&version[FW2], "\0\0\0\0\0\0\0\0",
		   sizeof(struct version_info)) == 0) {
		dev_info(dev, "primary fw is not flashed\n");
		*mode = PRIMARY;
	} else if (ccg_check_fw_version(uc, ccg_fw_names[PRIMARY],
		   &version[FW2].app)) {
		dev_info(dev, "found primary fw with later version\n");
		*mode = PRIMARY;
	} else {
		dev_info(dev, "secondary and primary fw are the latest\n");
		*mode = FLASH_NOT_NEEDED;
	}
	return 0;
}
/*
 * Flash one firmware image (.cyacd file) onto the device, row by row.
 *
 * If the device is currently running FW2, it is first switched to the
 * alternate image so FW2 can be overwritten.  Signed images carry a
 * trailing firmware config table (FWCT) plus signature, which are
 * written first through the dedicated flash sub-commands.  Afterwards
 * the new image is validated and the device reset into it.
 * Returns 0 on success or a negative errno.
 */
static int do_flash(struct ucsi_ccg *uc, enum enum_flash_mode mode)
{
	struct device *dev = uc->dev;
	const struct firmware *fw = NULL;
	const char *p, *s;
	const char *eof;
	int err, row, len, line_sz, line_cnt = 0;
	unsigned long start_time = jiffies;
	struct fw_config_table fw_cfg;
	u8 fw_cfg_sig[FW_CFG_TABLE_SIG_SIZE];
	u8 *wr_buf;

	err = request_firmware(&fw, ccg_fw_names[mode], dev);
	if (err) {
		dev_err(dev, "request %s failed err=%d\n",
			ccg_fw_names[mode], err);
		return err;
	}

	/* Running FW2? Drop to the alternate image before overwriting it. */
	if (((uc->info.mode & CCG_DEVINFO_FWMODE_MASK) >>
			CCG_DEVINFO_FWMODE_SHIFT) == FW2) {
		err = ccg_cmd_port_control(uc, false);
		if (err < 0)
			goto release_fw;
		err = ccg_cmd_jump_boot_mode(uc, 0);
		if (err < 0)
			goto release_fw;
	}

	eof = fw->data + fw->size;

	/*
	 * check if signed fw
	 * last part of fw image is fw cfg table and signature
	 */
	if (fw->size < sizeof(fw_cfg) + sizeof(fw_cfg_sig))
		goto not_signed_fw;

	memcpy((uint8_t *)&fw_cfg, fw->data + fw->size -
	       sizeof(fw_cfg) - sizeof(fw_cfg_sig), sizeof(fw_cfg));

	if (fw_cfg.identity != ('F' | ('W' << 8) | ('C' << 16) | ('T' << 24))) {
		dev_info(dev, "not a signed image\n");
		goto not_signed_fw;
	}

	/* The FWCT and signature are not part of the row records. */
	eof = fw->data + fw->size - sizeof(fw_cfg) - sizeof(fw_cfg_sig);

	memcpy((uint8_t *)&fw_cfg_sig,
	       fw->data + fw->size - sizeof(fw_cfg_sig), sizeof(fw_cfg_sig));

	/* flash fw config table and signature first */
	err = ccg_cmd_write_flash_row(uc, 0, (u8 *)&fw_cfg,
				      FLASH_FWCT1_WR_CMD);
	if (err)
		goto release_fw;

	err = ccg_cmd_write_flash_row(uc, 0, (u8 *)&fw_cfg + CCG4_ROW_SIZE,
				      FLASH_FWCT2_WR_CMD);
	if (err)
		goto release_fw;

	err = ccg_cmd_write_flash_row(uc, 0, &fw_cfg_sig,
				      FLASH_FWCT_SIG_WR_CMD);
	if (err)
		goto release_fw;

not_signed_fw:
	wr_buf = kzalloc(CCG4_ROW_SIZE + 4, GFP_KERNEL);
	if (!wr_buf) {
		err = -ENOMEM;
		goto release_fw;
	}

	err = ccg_cmd_enter_flashing(uc);
	if (err)
		goto release_mem;

	/*****************************************************************
	 * CCG firmware image (.cyacd) file line format
	 *
	 * :00rrrrllll[dd....]cc/r/n
	 *
	 * :00 header
	 * rrrr is row number to flash				(4 char)
	 * llll is data len to flash				(4 char)
	 * dd is a data field represents one byte of data	(512 char)
	 * cc is checksum					(2 char)
	 * \r\n newline
	 *
	 * Total length: 3 + 4 + 4 + 512 + 2 + 2 = 527
	 *
	 *****************************************************************/
	p = strnchr(fw->data, fw->size, ':');
	if (!p) {
		/*
		 * No row records at all: bail out instead of walking the
		 * loop below from a NULL pointer.
		 */
		dev_err(dev, "Bad FW format, no row records found\n");
		err = -EINVAL;
		goto release_mem;
	}
	while (p < eof) {
		s = strnchr(p + 1, eof - p - 1, ':');
		if (!s)
			s = eof;
		line_sz = s - p;

		if (line_sz != CYACD_LINE_SIZE) {
			dev_err(dev, "Bad FW format line_sz=%d\n", line_sz);
			err = -EINVAL;
			goto release_mem;
		}

		if (hex2bin(wr_buf, p + 3, CCG4_ROW_SIZE + 4)) {
			err = -EINVAL;
			goto release_mem;
		}

		row = get_unaligned_be16(wr_buf);
		len = get_unaligned_be16(&wr_buf[2]);

		if (len != CCG4_ROW_SIZE) {
			err = -EINVAL;
			goto release_mem;
		}

		err = ccg_cmd_write_flash_row(uc, row, wr_buf + 4,
					      FLASH_WR_CMD);
		if (err)
			goto release_mem;
		line_cnt++;
		p = s;
	}

	dev_info(dev, "total %d row flashed. time: %dms\n",
		 line_cnt, jiffies_to_msecs(jiffies - start_time));

	/* Ask the device to validate the image that was just written. */
	err = ccg_cmd_validate_fw(uc, (mode == PRIMARY) ? FW2 : FW1);
	if (err)
		dev_err(dev, "%s validation failed err=%d\n",
			(mode == PRIMARY) ? "FW2" : "FW1", err);
	else
		dev_info(dev, "%s validated\n",
			 (mode == PRIMARY) ? "FW2" : "FW1");

	/* Reset into the new firmware and re-enable the PD port(s). */
	err = ccg_cmd_port_control(uc, false);
	if (err < 0)
		goto release_mem;

	err = ccg_cmd_reset(uc);
	if (err < 0)
		goto release_mem;

	err = ccg_cmd_port_control(uc, true);
	if (err < 0)
		goto release_mem;

release_mem:
	kfree(wr_buf);

release_fw:
	release_firmware(fw);
	return err;
}
/*******************************************************************************
* CCG4 has two copies of the firmware in addition to the bootloader.
* If the device is running FW1, FW2 can be updated with the new version.
* Dual firmware mode allows the CCG device to stay in a PD contract and support
* USB PD and Type-C functionality while a firmware update is in progress.
******************************************************************************/
/*
 * Flash images repeatedly until ccg_fw_update_needed() reports that
 * both firmware copies are current.  Returns 0 on success or the first
 * negative error encountered.
 */
static int ccg_fw_update(struct ucsi_ccg *uc, enum enum_flash_mode flash_mode)
{
	int err = 0;

	for (;;) {
		if (flash_mode == FLASH_NOT_NEEDED)
			break;
		err = do_flash(uc, flash_mode);
		if (err < 0)
			return err;
		err = ccg_fw_update_needed(uc, &flash_mode);
		if (err < 0)
			return err;
	}
	dev_info(uc->dev, "CCG FW update successful\n");

	return err;
}
/*
 * Bring the driver back up after a firmware flash: reinitialize the
 * UCSI interface, re-request the IRQ, re-register with the UCSI core
 * and re-enable runtime PM.  Returns 0 or a negative errno.
 */
static int ccg_restart(struct ucsi_ccg *uc)
{
	struct device *dev = uc->dev;
	int err;

	err = ucsi_ccg_init(uc);
	if (err < 0) {
		dev_err(dev, "ucsi_ccg_start fail, err=%d\n", err);
		return err;
	}

	err = ccg_request_irq(uc);
	if (err < 0) {
		dev_err(dev, "request_threaded_irq failed - %d\n", err);
		return err;
	}

	err = ucsi_register(uc->ucsi);
	if (err) {
		dev_err(uc->dev, "failed to register the interface\n");
		return err;
	}

	pm_runtime_enable(uc->dev);
	return 0;
}
/*
 * Worker that performs a firmware update when one is needed, tearing
 * down the UCSI interface and IRQ for the duration of the flash.
 */
static void ccg_update_firmware(struct work_struct *work)
{
	struct ucsi_ccg *uc = container_of(work, struct ucsi_ccg, work);
	enum enum_flash_mode flash_mode;

	if (ccg_fw_update_needed(uc, &flash_mode) < 0)
		return;

	if (flash_mode == FLASH_NOT_NEEDED)
		return;

	ucsi_unregister(uc->ucsi);
	pm_runtime_disable(uc->dev);
	free_irq(uc->irq, uc);

	ccg_fw_update(uc, flash_mode);
	ccg_restart(uc);
}
/*
 * sysfs "do_flash" store: writing a true value schedules the firmware
 * update worker.  Rejected with -EINVAL if no vendor build info is
 * known (there would be no way to pick a firmware).
 */
static ssize_t do_flash_store(struct device *dev,
			      struct device_attribute *attr,
			      const char *buf, size_t n)
{
	struct ucsi_ccg *uc = i2c_get_clientdata(to_i2c_client(dev));
	bool flash;

	if (kstrtobool(buf, &flash))
		return -EINVAL;

	if (flash) {
		if (uc->fw_build == 0x0) {
			dev_err(dev, "fail to flash FW due to missing FW build info\n");
			return -EINVAL;
		}

		schedule_work(&uc->work);
	}
	return n;
}
static DEVICE_ATTR_WO(do_flash);
static struct attribute *ucsi_ccg_attrs[] = {
&dev_attr_do_flash.attr,
NULL,
};
ATTRIBUTE_GROUPS(ucsi_ccg);
/*
 * I2C probe: reset and initialize the CCG controller, read its firmware
 * information, register the UCSI interface and set up runtime PM.
 */
static int ucsi_ccg_probe(struct i2c_client *client)
{
	struct device *dev = &client->dev;
	struct ucsi_ccg *uc;
	const char *fw_name;
	int status;

	uc = devm_kzalloc(dev, sizeof(*uc), GFP_KERNEL);
	if (!uc)
		return -ENOMEM;

	uc->dev = dev;
	uc->client = client;
	uc->irq = client->irq;
	mutex_init(&uc->lock);
	init_completion(&uc->complete);
	INIT_WORK(&uc->work, ccg_update_firmware);
	INIT_WORK(&uc->pm_work, ccg_pm_workaround_work);

	/* Only fail FW flashing when FW build information is not provided */
	status = device_property_read_string(dev, "firmware-name", &fw_name);
	if (!status) {
		if (!strcmp(fw_name, "nvidia,jetson-agx-xavier"))
			uc->fw_build = CCG_FW_BUILD_NVIDIA_TEGRA;
		else if (!strcmp(fw_name, "nvidia,gpu"))
			uc->fw_build = CCG_FW_BUILD_NVIDIA;
	}

	/* not fatal: probing continues, but do_flash_store() will refuse */
	if (!uc->fw_build)
		dev_err(uc->dev, "failed to get FW build information\n");

	/* reset ccg device and initialize ucsi */
	status = ucsi_ccg_init(uc);
	if (status < 0) {
		dev_err(uc->dev, "ucsi_ccg_init failed - %d\n", status);
		return status;
	}

	status = get_fw_info(uc);
	if (status < 0) {
		dev_err(uc->dev, "get_fw_info failed - %d\n", status);
		return status;
	}

	/* one port always present, a second if the mode field reports it */
	uc->port_num = 1;

	if (uc->info.mode & CCG_DEVINFO_PDPORTS_MASK)
		uc->port_num++;

	uc->ucsi = ucsi_create(dev, &ucsi_ccg_ops);
	if (IS_ERR(uc->ucsi))
		return PTR_ERR(uc->ucsi);

	ucsi_set_drvdata(uc->ucsi, uc);

	status = ccg_request_irq(uc);
	if (status < 0) {
		dev_err(uc->dev, "request_threaded_irq failed - %d\n", status);
		goto out_ucsi_destroy;
	}

	status = ucsi_register(uc->ucsi);
	if (status)
		goto out_free_irq;

	i2c_set_clientdata(client, uc);

	/* runtime PM with a 5 s autosuspend delay */
	pm_runtime_set_active(uc->dev);
	pm_runtime_enable(uc->dev);
	pm_runtime_use_autosuspend(uc->dev);
	pm_runtime_set_autosuspend_delay(uc->dev, 5000);
	pm_runtime_idle(uc->dev);

	return 0;

out_free_irq:
	free_irq(uc->irq, uc);
out_ucsi_destroy:
	ucsi_destroy(uc->ucsi);

	return status;
}
/*
 * I2C remove: flush any pending firmware/PM work first, then tear down
 * in reverse order of probe.
 */
static void ucsi_ccg_remove(struct i2c_client *client)
{
	struct ucsi_ccg *uc = i2c_get_clientdata(client);

	cancel_work_sync(&uc->pm_work);
	cancel_work_sync(&uc->work);
	pm_runtime_disable(uc->dev);
	ucsi_unregister(uc->ucsi);
	ucsi_destroy(uc->ucsi);
	free_irq(uc->irq, uc);
}
/* OF, I2C and ACPI device ID tables for driver matching */
static const struct of_device_id ucsi_ccg_of_match_table[] = {
	{ .compatible = "cypress,cypd4226", },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, ucsi_ccg_of_match_table);

static const struct i2c_device_id ucsi_ccg_device_id[] = {
	{"ccgx-ucsi", 0},
	{}
};
MODULE_DEVICE_TABLE(i2c, ucsi_ccg_device_id);

static const struct acpi_device_id amd_i2c_ucsi_match[] = {
	{"AMDI0042"},
	{}
};
MODULE_DEVICE_TABLE(acpi, amd_i2c_ucsi_match);
/* System resume: let the UCSI core re-sync state with the controller. */
static int ucsi_ccg_resume(struct device *dev)
{
	struct i2c_client *client = to_i2c_client(dev);
	struct ucsi_ccg *uc = i2c_get_clientdata(client);

	return ucsi_resume(uc->ucsi);
}

/* No device-specific action needed on runtime suspend. */
static int ucsi_ccg_runtime_suspend(struct device *dev)
{
	return 0;
}

static int ucsi_ccg_runtime_resume(struct device *dev)
{
	struct i2c_client *client = to_i2c_client(dev);
	struct ucsi_ccg *uc = i2c_get_clientdata(client);

	/*
	 * Firmware version 3.1.10 or earlier, built for NVIDIA has known issue
	 * of missing interrupt when a device is connected for runtime resume.
	 * Schedule a work to call ISR as a workaround.
	 */
	if (uc->fw_build == CCG_FW_BUILD_NVIDIA &&
	    uc->fw_version <= CCG_OLD_FW_VERSION)
		schedule_work(&uc->pm_work);

	return 0;
}
static const struct dev_pm_ops ucsi_ccg_pm = {
	.resume = ucsi_ccg_resume,
	.runtime_suspend = ucsi_ccg_runtime_suspend,
	.runtime_resume = ucsi_ccg_runtime_resume,
};

static struct i2c_driver ucsi_ccg_driver = {
	.driver = {
		.name = "ucsi_ccg",
		.pm = &ucsi_ccg_pm,
		.dev_groups = ucsi_ccg_groups,	/* exposes 'do_flash' */
		.acpi_match_table = amd_i2c_ucsi_match,
		.of_match_table = ucsi_ccg_of_match_table,
	},
	.probe = ucsi_ccg_probe,
	.remove = ucsi_ccg_remove,
	.id_table = ucsi_ccg_device_id,
};

module_i2c_driver(ucsi_ccg_driver);

MODULE_AUTHOR("Ajay Gupta <[email protected]>");
MODULE_DESCRIPTION("UCSI driver for Cypress CCGx Type-C controller");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/usb/typec/ucsi/ucsi_ccg.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* cdc-acm.c
*
* Copyright (c) 1999 Armin Fuerst <[email protected]>
* Copyright (c) 1999 Pavel Machek <[email protected]>
* Copyright (c) 1999 Johannes Erdfelt <[email protected]>
* Copyright (c) 2000 Vojtech Pavlik <[email protected]>
* Copyright (c) 2004 Oliver Neukum <[email protected]>
* Copyright (c) 2005 David Kubicek <[email protected]>
* Copyright (c) 2011 Johan Hovold <[email protected]>
*
* USB Abstract Control Model driver for USB modems and ISDN adapters
*
* Sponsored by SuSE
*/
#undef DEBUG
#undef VERBOSE_DEBUG
#include <linux/kernel.h>
#include <linux/sched/signal.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/log2.h>
#include <linux/tty.h>
#include <linux/serial.h>
#include <linux/tty_driver.h>
#include <linux/tty_flip.h>
#include <linux/tty_ldisc.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/uaccess.h>
#include <linux/usb.h>
#include <linux/usb/cdc.h>
#include <asm/byteorder.h>
#include <asm/unaligned.h>
#include <linux/idr.h>
#include <linux/list.h>
#include "cdc-acm.h"
#define DRIVER_AUTHOR "Armin Fuerst, Pavel Machek, Johannes Erdfelt, Vojtech Pavlik, David Kubicek, Johan Hovold"
#define DRIVER_DESC "USB Abstract Control Model driver for USB modems and ISDN adapters"

static struct usb_driver acm_driver;
static struct tty_driver *acm_tty_driver;

/* minor-number -> struct acm map, protected by acm_minors_lock */
static DEFINE_IDR(acm_minors);
static DEFINE_MUTEX(acm_minors_lock);

/* forward declaration, used by acm_port_activate() */
static void acm_tty_set_termios(struct tty_struct *tty,
				const struct ktermios *termios_old);
/*
* acm_minors accessors
*/
/*
* Look up an ACM structure by minor. If found and not disconnected, increment
* its refcount and return it with its mutex held.
*/
static struct acm *acm_get_by_minor(unsigned int minor)
{
	struct acm *acm;

	mutex_lock(&acm_minors_lock);
	acm = idr_find(&acm_minors, minor);
	if (acm) {
		/* acm->mutex guards the disconnected flag */
		mutex_lock(&acm->mutex);
		if (acm->disconnected) {
			mutex_unlock(&acm->mutex);
			acm = NULL;
		} else {
			/* take a tty_port reference before dropping the lock */
			tty_port_get(&acm->port);
			mutex_unlock(&acm->mutex);
		}
	}
	mutex_unlock(&acm_minors_lock);
	return acm;
}
/*
 * Reserve a free minor number and associate it with 'acm'.
 * Returns the minor on success or a negative errno from idr_alloc().
 */
static int acm_alloc_minor(struct acm *acm)
{
	int ret;

	mutex_lock(&acm_minors_lock);
	ret = idr_alloc(&acm_minors, acm, 0, ACM_TTY_MINORS, GFP_KERNEL);
	mutex_unlock(&acm_minors_lock);

	return ret;
}
/* Return acm->minor to the pool of free minor numbers. */
static void acm_release_minor(struct acm *acm)
{
	mutex_lock(&acm_minors_lock);
	idr_remove(&acm_minors, acm->minor);
	mutex_unlock(&acm_minors_lock);
}
/*
 * Functions for ACM control messages.
 */

/*
 * Send a class-specific request to the control interface, holding an
 * autopm reference for the duration.  Returns 0 or a negative errno.
 */
static int acm_ctrl_msg(struct acm *acm, int request, int value,
			void *buf, int len)
{
	int retval;

	retval = usb_autopm_get_interface(acm->control);
	if (retval)
		return retval;

	retval = usb_control_msg(acm->dev, usb_sndctrlpipe(acm->dev, 0),
		request, USB_RT_ACM, value,
		acm->control->altsetting[0].desc.bInterfaceNumber,
		buf, len, USB_CTRL_SET_TIMEOUT);

	dev_dbg(&acm->control->dev,
		"%s - rq 0x%02x, val %#x, len %#x, result %d\n",
		__func__, request, value, len, retval);

	usb_autopm_put_interface(acm->control);

	/* usb_control_msg() returns the transferred length on success */
	return retval < 0 ? retval : 0;
}
/* devices aren't required to support these requests.
 * the cdc acm descriptor tells whether they do...
 */
static inline int acm_set_control(struct acm *acm, int control)
{
	/* quirky devices: skip the request entirely */
	if (acm->quirks & QUIRK_CONTROL_LINE_STATE)
		return -EOPNOTSUPP;

	return acm_ctrl_msg(acm, USB_CDC_REQ_SET_CONTROL_LINE_STATE,
			control, NULL, 0);
}

/* send a struct usb_cdc_line_coding (rate/format/parity/bits) */
#define acm_set_line(acm, line) \
	acm_ctrl_msg(acm, USB_CDC_REQ_SET_LINE_CODING, 0, line, sizeof *(line))
/* assert break for 'ms' milliseconds (see acm_tty_break_ctl()) */
#define acm_send_break(acm, ms) \
	acm_ctrl_msg(acm, USB_CDC_REQ_SEND_BREAK, ms, NULL, 0)
/* Poison all URBs so completion handlers cannot resubmit them. */
static void acm_poison_urbs(struct acm *acm)
{
	int i;

	usb_poison_urb(acm->ctrlurb);
	for (i = 0; i < ACM_NW; i++)
		usb_poison_urb(acm->wb[i].urb);
	for (i = 0; i < acm->rx_buflimit; i++)
		usb_poison_urb(acm->read_urbs[i]);
}

/* Undo acm_poison_urbs(); unpoisons in reverse order. */
static void acm_unpoison_urbs(struct acm *acm)
{
	int i;

	for (i = 0; i < acm->rx_buflimit; i++)
		usb_unpoison_urb(acm->read_urbs[i]);
	for (i = 0; i < ACM_NW; i++)
		usb_unpoison_urb(acm->wb[i].urb);
	usb_unpoison_urb(acm->ctrlurb);
}
/*
 * Write buffer management.
 * All of these assume proper locks taken by the caller.
 */

/*
 * Claim a free write buffer and return its index, or -1 when all ACM_NW
 * buffers are currently in flight.
 */
static int acm_wb_alloc(struct acm *acm)
{
	struct acm_wb *wb;
	int n;

	for (n = 0; n < ACM_NW; n++) {
		wb = &acm->wb[n];
		if (!wb->use) {
			wb->use = true;
			wb->len = 0;
			return n;
		}
	}
	return -1;
}
/* Number of write buffers not currently in use. */
static int acm_wb_is_avail(struct acm *acm)
{
	unsigned long flags;
	int free_bufs = 0;
	int i;

	spin_lock_irqsave(&acm->write_lock, flags);
	for (i = 0; i < ACM_NW; i++) {
		if (!acm->wb[i].use)
			free_bufs++;
	}
	spin_unlock_irqrestore(&acm->write_lock, flags);

	return free_bufs;
}
/*
 * Finish write. Caller must hold acm->write_lock
 */
static void acm_write_done(struct acm *acm, struct acm_wb *wb)
{
	wb->use = false;
	acm->transmitting--;
	/* drop the autopm reference taken when the write was queued */
	usb_autopm_put_interface_async(acm->control);
}
/*
 * Poke write.
 *
 * the caller is responsible for locking
 */
static int acm_start_wb(struct acm *acm, struct acm_wb *wb)
{
	int rc;

	acm->transmitting++;

	wb->urb->transfer_buffer = wb->buf;
	wb->urb->transfer_dma = wb->dmah;
	wb->urb->transfer_buffer_length = wb->len;
	wb->urb->dev = acm->dev;

	rc = usb_submit_urb(wb->urb, GFP_ATOMIC);
	if (rc < 0) {
		if (rc != -EPERM)
			dev_err(&acm->data->dev,
				"%s - usb_submit_urb(write bulk) failed: %d\n",
				__func__, rc);
		/* release the buffer and the autopm reference on failure */
		acm_write_done(acm, wb);
	}
	return rc;
}
/*
 * attributes exported through sysfs
 */
static ssize_t bmCapabilities_show
(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct usb_interface *intf = to_usb_interface(dev);
	struct acm *acm = usb_get_intfdata(intf);

	return sprintf(buf, "%d", acm->ctrl_caps);
}
static DEVICE_ATTR_RO(bmCapabilities);

/* raw country-code blob, presumably from the CDC country descriptor */
static ssize_t wCountryCodes_show
(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct usb_interface *intf = to_usb_interface(dev);
	struct acm *acm = usb_get_intfdata(intf);

	memcpy(buf, acm->country_codes, acm->country_code_size);
	return acm->country_code_size;
}
static DEVICE_ATTR_RO(wCountryCodes);

static ssize_t iCountryCodeRelDate_show
(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct usb_interface *intf = to_usb_interface(dev);
	struct acm *acm = usb_get_intfdata(intf);

	return sprintf(buf, "%d", acm->country_rel_date);
}
static DEVICE_ATTR_RO(iCountryCodeRelDate);
/*
 * Interrupt handlers for various ACM device responses
 */

/* Handle a single, fully reassembled notification from the device. */
static void acm_process_notification(struct acm *acm, unsigned char *buf)
{
	int newctrl;
	int difference;
	unsigned long flags;
	struct usb_cdc_notification *dr = (struct usb_cdc_notification *)buf;
	unsigned char *data = buf + sizeof(struct usb_cdc_notification);

	switch (dr->bNotificationType) {
	case USB_CDC_NOTIFY_NETWORK_CONNECTION:
		dev_dbg(&acm->control->dev,
			"%s - network connection: %d\n", __func__, dr->wValue);
		break;

	case USB_CDC_NOTIFY_SERIAL_STATE:
		if (le16_to_cpu(dr->wLength) != 2) {
			dev_dbg(&acm->control->dev,
				"%s - malformed serial state\n", __func__);
			break;
		}

		newctrl = get_unaligned_le16(data);
		dev_dbg(&acm->control->dev,
			"%s - serial state: 0x%x\n", __func__, newctrl);

		/* carrier dropped while CLOCAL is clear: hang the tty up */
		if (!acm->clocal && (acm->ctrlin & ~newctrl & USB_CDC_SERIAL_STATE_DCD)) {
			dev_dbg(&acm->control->dev,
				"%s - calling hangup\n", __func__);
			tty_port_tty_hangup(&acm->port, false);
		}

		difference = acm->ctrlin ^ newctrl;

		/* tell the line discipline about DCD transitions */
		if ((difference & USB_CDC_SERIAL_STATE_DCD) && acm->port.tty) {
			struct tty_ldisc *ld = tty_ldisc_ref(acm->port.tty);
			if (ld) {
				if (ld->ops->dcd_change)
					ld->ops->dcd_change(acm->port.tty, newctrl & USB_CDC_SERIAL_STATE_DCD);
				tty_ldisc_deref(ld);
			}
		}

		/* update line state and event counters under read_lock */
		spin_lock_irqsave(&acm->read_lock, flags);
		acm->ctrlin = newctrl;
		acm->oldcount = acm->iocount;

		if (difference & USB_CDC_SERIAL_STATE_DSR)
			acm->iocount.dsr++;
		if (difference & USB_CDC_SERIAL_STATE_DCD)
			acm->iocount.dcd++;
		if (newctrl & USB_CDC_SERIAL_STATE_BREAK) {
			acm->iocount.brk++;
			tty_insert_flip_char(&acm->port, 0, TTY_BREAK);
		}
		if (newctrl & USB_CDC_SERIAL_STATE_RING_SIGNAL)
			acm->iocount.rng++;
		if (newctrl & USB_CDC_SERIAL_STATE_FRAMING)
			acm->iocount.frame++;
		if (newctrl & USB_CDC_SERIAL_STATE_PARITY)
			acm->iocount.parity++;
		if (newctrl & USB_CDC_SERIAL_STATE_OVERRUN)
			acm->iocount.overrun++;
		spin_unlock_irqrestore(&acm->read_lock, flags);

		if (newctrl & USB_CDC_SERIAL_STATE_BREAK)
			tty_flip_buffer_push(&acm->port);

		/* wake TIOCMIWAIT waiters, see wait_serial_change() */
		if (difference)
			wake_up_all(&acm->wioctl);

		break;

	default:
		dev_dbg(&acm->control->dev,
			"%s - unknown notification %d received: index %d len %d\n",
			__func__,
			dr->bNotificationType, dr->wIndex, dr->wLength);
	}
}
/* control interface reports status changes with "interrupt" transfers */
static void acm_ctrl_irq(struct urb *urb)
{
	struct acm *acm = urb->context;
	struct usb_cdc_notification *dr = urb->transfer_buffer;
	unsigned int current_size = urb->actual_length;
	unsigned int expected_size, copy_size, alloc_size;
	int retval;
	int status = urb->status;

	switch (status) {
	case 0:
		/* success */
		break;
	case -ECONNRESET:
	case -ENOENT:
	case -ESHUTDOWN:
		/* this urb is terminated, clean up */
		dev_dbg(&acm->control->dev,
			"%s - urb shutting down with status: %d\n",
			__func__, status);
		return;
	default:
		dev_dbg(&acm->control->dev,
			"%s - nonzero urb status received: %d\n",
			__func__, status);
		goto exit;
	}

	usb_mark_last_busy(acm->dev);

	/* a reassembly is in progress: the header is in our own buffer */
	if (acm->nb_index)
		dr = (struct usb_cdc_notification *)acm->notification_buffer;

	/* size = notification-header + (optional) data */
	expected_size = sizeof(struct usb_cdc_notification) +
					le16_to_cpu(dr->wLength);

	if (current_size < expected_size) {
		/* notification is transmitted fragmented, reassemble */
		if (acm->nb_size < expected_size) {
			u8 *new_buffer;

			alloc_size = roundup_pow_of_two(expected_size);
			/* Final freeing is done on disconnect. */
			new_buffer = krealloc(acm->notification_buffer,
					      alloc_size, GFP_ATOMIC);
			if (!new_buffer) {
				/* drop the partial notification on OOM */
				acm->nb_index = 0;
				goto exit;
			}
			acm->notification_buffer = new_buffer;
			acm->nb_size = alloc_size;
			dr = (struct usb_cdc_notification *)acm->notification_buffer;
		}

		copy_size = min(current_size,
				expected_size - acm->nb_index);

		memcpy(&acm->notification_buffer[acm->nb_index],
		       urb->transfer_buffer, copy_size);
		acm->nb_index += copy_size;
		current_size = acm->nb_index;
	}

	if (current_size >= expected_size) {
		/* notification complete */
		acm_process_notification(acm, (unsigned char *)dr);
		acm->nb_index = 0;
	}

exit:
	retval = usb_submit_urb(urb, GFP_ATOMIC);
	if (retval && retval != -EPERM && retval != -ENODEV)
		dev_err(&acm->control->dev,
			"%s - usb_submit_urb failed: %d\n", __func__, retval);
	else
		dev_vdbg(&acm->control->dev,
			"control resubmission terminated %d\n", retval);
}
/*
 * Submit read URB 'index' unless it is already in flight.
 * Returns 0 on success (or if the URB was busy), negative errno otherwise.
 */
static int acm_submit_read_urb(struct acm *acm, int index, gfp_t mem_flags)
{
	int res;

	/* the free-bit also serves as the submission lock for this URB */
	if (!test_and_clear_bit(index, &acm->read_urbs_free))
		return 0;

	res = usb_submit_urb(acm->read_urbs[index], mem_flags);
	if (res) {
		if (res != -EPERM && res != -ENODEV) {
			dev_err(&acm->data->dev,
				"urb %d failed submission with %d\n",
				index, res);
		} else {
			dev_vdbg(&acm->data->dev, "intended failure %d\n", res);
		}
		/* mark free again so a later resubmission can claim it */
		set_bit(index, &acm->read_urbs_free);
		return res;
	} else {
		dev_vdbg(&acm->data->dev, "submitted urb %d\n", index);
	}

	return 0;
}
/* (Re)submit all read URBs; stop and report on the first failure. */
static int acm_submit_read_urbs(struct acm *acm, gfp_t mem_flags)
{
	int idx, ret;

	for (idx = 0; idx < acm->rx_buflimit; ++idx) {
		ret = acm_submit_read_urb(acm, idx, mem_flags);
		if (ret)
			return ret;
	}

	return 0;
}
/* Push received bulk-in data to the tty layer. */
static void acm_process_read_urb(struct acm *acm, struct urb *urb)
{
	unsigned long flags;

	if (urb->actual_length == 0)
		return;

	spin_lock_irqsave(&acm->read_lock, flags);
	tty_insert_flip_string(&acm->port, urb->transfer_buffer,
			       urb->actual_length);
	spin_unlock_irqrestore(&acm->read_lock, flags);

	tty_flip_buffer_push(&acm->port);
}
/* Bulk-in completion: hand data to the tty or schedule error recovery. */
static void acm_read_bulk_callback(struct urb *urb)
{
	struct acm_rb *rb = urb->context;
	struct acm *acm = rb->instance;
	int status = urb->status;
	bool stopped = false;
	bool stalled = false;
	bool cooldown = false;

	dev_vdbg(&acm->data->dev, "got urb %d, len %d, status %d\n",
		rb->index, urb->actual_length, status);

	switch (status) {
	case 0:
		usb_mark_last_busy(acm->dev);
		acm_process_read_urb(acm, urb);
		break;
	case -EPIPE:
		/* endpoint stalled: acm_softint() will clear the halt */
		set_bit(EVENT_RX_STALL, &acm->flags);
		stalled = true;
		break;
	case -ENOENT:
	case -ECONNRESET:
	case -ESHUTDOWN:
		dev_dbg(&acm->data->dev,
			"%s - urb shutting down with status: %d\n",
			__func__, status);
		stopped = true;
		break;
	case -EOVERFLOW:
	case -EPROTO:
		/* delay resubmission to rate-limit a babbling device */
		dev_dbg(&acm->data->dev,
			"%s - cooling babbling device\n", __func__);
		usb_mark_last_busy(acm->dev);
		set_bit(rb->index, &acm->urbs_in_error_delay);
		set_bit(ACM_ERROR_DELAY, &acm->flags);
		cooldown = true;
		break;
	default:
		dev_dbg(&acm->data->dev,
			"%s - nonzero urb status received: %d\n",
			__func__, status);
		break;
	}

	/*
	 * Make sure URB processing is done before marking as free to avoid
	 * racing with unthrottle() on another CPU. Matches the barriers
	 * implied by the test_and_clear_bit() in acm_submit_read_urb().
	 */
	smp_mb__before_atomic();
	set_bit(rb->index, &acm->read_urbs_free);
	/*
	 * Make sure URB is marked as free before checking the throttled flag
	 * to avoid racing with unthrottle() on another CPU. Matches the
	 * smp_mb() in unthrottle().
	 */
	smp_mb__after_atomic();

	if (stopped || stalled || cooldown) {
		if (stalled)
			schedule_delayed_work(&acm->dwork, 0);
		else if (cooldown)
			schedule_delayed_work(&acm->dwork, HZ / 2);
		return;
	}

	if (test_bit(ACM_THROTTLED, &acm->flags))
		return;

	acm_submit_read_urb(acm, rb->index, GFP_ATOMIC);
}
/* data interface wrote those outgoing bytes */
static void acm_write_bulk(struct urb *urb)
{
	struct acm_wb *wb = urb->context;
	struct acm *acm = wb->instance;
	unsigned long flags;
	int status = urb->status;

	if (status || (urb->actual_length != urb->transfer_buffer_length))
		dev_vdbg(&acm->data->dev, "wrote len %d/%d, status %d\n",
			urb->actual_length,
			urb->transfer_buffer_length,
			status);

	spin_lock_irqsave(&acm->write_lock, flags);
	acm_write_done(acm, wb);
	spin_unlock_irqrestore(&acm->write_lock, flags);
	/* defer the tty wakeup to process context via acm_softint() */
	set_bit(EVENT_TTY_WAKEUP, &acm->flags);
	schedule_delayed_work(&acm->dwork, 0);
}
/* Deferred-work handler for events flagged from URB completion context. */
static void acm_softint(struct work_struct *work)
{
	int i;
	struct acm *acm = container_of(work, struct acm, dwork.work);

	/* bulk-in endpoint stalled: clear the halt and restart reads */
	if (test_bit(EVENT_RX_STALL, &acm->flags)) {
		smp_mb(); /* against acm_suspend() */
		if (!acm->susp_count) {
			for (i = 0; i < acm->rx_buflimit; i++)
				usb_kill_urb(acm->read_urbs[i]);
			usb_clear_halt(acm->dev, acm->in);
			acm_submit_read_urbs(acm, GFP_KERNEL);
			clear_bit(EVENT_RX_STALL, &acm->flags);
		}
	}

	/* resubmit reads that were delayed after -EPROTO/-EOVERFLOW */
	if (test_and_clear_bit(ACM_ERROR_DELAY, &acm->flags)) {
		for (i = 0; i < acm->rx_buflimit; i++)
			if (test_and_clear_bit(i, &acm->urbs_in_error_delay))
				acm_submit_read_urb(acm, i, GFP_KERNEL);
	}

	if (test_and_clear_bit(EVENT_TTY_WAKEUP, &acm->flags))
		tty_port_tty_wakeup(&acm->port);
}
/*
 * TTY handlers
 */

/* First open of a tty: look the device up by minor and bind it. */
static int acm_tty_install(struct tty_driver *driver, struct tty_struct *tty)
{
	struct acm *acm;
	int retval;

	acm = acm_get_by_minor(tty->index);
	if (!acm)
		return -ENODEV;

	retval = tty_standard_install(driver, tty);
	if (retval)
		goto error_init_termios;

	/*
	 * Suppress initial echoing for some devices which might send data
	 * immediately after acm driver has been installed.
	 */
	if (acm->quirks & DISABLE_ECHO)
		tty->termios.c_lflag &= ~ECHO;

	tty->driver_data = acm;

	return 0;

error_init_termios:
	/* drop the reference taken by acm_get_by_minor() */
	tty_port_put(&acm->port);
	return retval;
}
/* tty open: delegate all the heavy lifting to the tty_port helper. */
static int acm_tty_open(struct tty_struct *tty, struct file *filp)
{
	struct acm *acm = tty->driver_data;

	return tty_port_open(&acm->port, tty, filp);
}
/* tty_port dtr_rts callback: raise or drop DTR and RTS together. */
static void acm_port_dtr_rts(struct tty_port *port, bool active)
{
	struct acm *acm = container_of(port, struct acm, port);
	int val;
	int res;

	if (active)
		val = USB_CDC_CTRL_DTR | USB_CDC_CTRL_RTS;
	else
		val = 0;

	/* FIXME: add missing ctrlout locking throughout driver */
	acm->ctrlout = val;

	res = acm_set_control(acm, val);
	if (res && (acm->ctrl_caps & USB_CDC_CAP_LINE))
		/* This is broken in too many devices to spam the logs */
		dev_dbg(&acm->control->dev, "failed to set dtr/rts\n");
}
/* tty_port activate: bring the device up on first open of the port. */
static int acm_port_activate(struct tty_port *port, struct tty_struct *tty)
{
	struct acm *acm = container_of(port, struct acm, port);
	int retval = -ENODEV;
	int i;

	mutex_lock(&acm->mutex);
	if (acm->disconnected)
		goto disconnected;

	retval = usb_autopm_get_interface(acm->control);
	if (retval)
		goto error_get_interface;

	set_bit(TTY_NO_WRITE_SPLIT, &tty->flags);
	acm->control->needs_remote_wakeup = 1;

	acm->ctrlurb->dev = acm->dev;
	retval = usb_submit_urb(acm->ctrlurb, GFP_KERNEL);
	if (retval) {
		dev_err(&acm->control->dev,
			"%s - usb_submit_urb(ctrl irq) failed\n", __func__);
		goto error_submit_urb;
	}

	acm_tty_set_termios(tty, NULL);

	/*
	 * Unthrottle device in case the TTY was closed while throttled.
	 */
	clear_bit(ACM_THROTTLED, &acm->flags);

	retval = acm_submit_read_urbs(acm, GFP_KERNEL);
	if (retval)
		goto error_submit_read_urbs;

	usb_autopm_put_interface(acm->control);

	mutex_unlock(&acm->mutex);

	return 0;

error_submit_read_urbs:
	for (i = 0; i < acm->rx_buflimit; i++)
		usb_kill_urb(acm->read_urbs[i]);
	usb_kill_urb(acm->ctrlurb);
error_submit_urb:
	usb_autopm_put_interface(acm->control);
error_get_interface:
disconnected:
	mutex_unlock(&acm->mutex);

	/* map USB errnos to ones the tty layer understands */
	return usb_translate_errors(retval);
}
/* Final teardown, called when the last tty_port reference is dropped. */
static void acm_port_destruct(struct tty_port *port)
{
	struct acm *acm = container_of(port, struct acm, port);

	/* skip release when no minor was ever allocated */
	if (acm->minor != ACM_MINOR_INVALID)
		acm_release_minor(acm);
	usb_put_intf(acm->control);
	kfree(acm->country_codes);
	kfree(acm);
}
/* tty_port shutdown: stop all I/O once the port is finally closed. */
static void acm_port_shutdown(struct tty_port *port)
{
	struct acm *acm = container_of(port, struct acm, port);
	struct urb *urb;
	struct acm_wb *wb;

	/*
	 * Need to grab write_lock to prevent race with resume, but no need to
	 * hold it due to the tty-port initialised flag.
	 */
	acm_poison_urbs(acm);
	spin_lock_irq(&acm->write_lock);
	spin_unlock_irq(&acm->write_lock);

	usb_autopm_get_interface_no_resume(acm->control);
	acm->control->needs_remote_wakeup = 0;
	usb_autopm_put_interface(acm->control);

	/* release writes that were parked while the device was suspended */
	for (;;) {
		urb = usb_get_from_anchor(&acm->delayed);
		if (!urb)
			break;
		wb = urb->context;
		wb->use = false;
		usb_autopm_put_interface_async(acm->control);
	}

	acm_unpoison_urbs(acm);
}
/* Last tty reference gone: release our tty_port reference. */
static void acm_tty_cleanup(struct tty_struct *tty)
{
	struct acm *acm = tty->driver_data;

	tty_port_put(&acm->port);
}

static void acm_tty_hangup(struct tty_struct *tty)
{
	struct acm *acm = tty->driver_data;

	tty_port_hangup(&acm->port);
}

static void acm_tty_close(struct tty_struct *tty, struct file *filp)
{
	struct acm *acm = tty->driver_data;

	tty_port_close(&acm->port, tty, filp);
}
/* Queue at most one write buffer (acm->writesize bytes) for transmission. */
static ssize_t acm_tty_write(struct tty_struct *tty, const u8 *buf,
			     size_t count)
{
	struct acm *acm = tty->driver_data;
	int stat;
	unsigned long flags;
	int wbn;
	struct acm_wb *wb;

	if (!count)
		return 0;

	dev_vdbg(&acm->data->dev, "%zu bytes from tty layer\n", count);

	spin_lock_irqsave(&acm->write_lock, flags);
	wbn = acm_wb_alloc(acm);
	if (wbn < 0) {
		/* no free buffer: tell the tty layer nothing was written */
		spin_unlock_irqrestore(&acm->write_lock, flags);
		return 0;
	}
	wb = &acm->wb[wbn];

	if (!acm->dev) {
		wb->use = false;
		spin_unlock_irqrestore(&acm->write_lock, flags);
		return -ENODEV;
	}

	/* write at most one buffer's worth per call */
	count = (count > acm->writesize) ? acm->writesize : count;

	dev_vdbg(&acm->data->dev, "writing %zu bytes\n", count);
	memcpy(wb->buf, buf, count);
	wb->len = count;

	stat = usb_autopm_get_interface_async(acm->control);
	if (stat) {
		wb->use = false;
		spin_unlock_irqrestore(&acm->write_lock, flags);
		return stat;
	}

	if (acm->susp_count) {
		/* device suspended: park the URB until resume */
		usb_anchor_urb(wb->urb, &acm->delayed);
		spin_unlock_irqrestore(&acm->write_lock, flags);
		return count;
	}

	stat = acm_start_wb(acm, wb);
	spin_unlock_irqrestore(&acm->write_lock, flags);

	if (stat < 0)
		return stat;
	return count;
}
static unsigned int acm_tty_write_room(struct tty_struct *tty)
{
	struct acm *acm = tty->driver_data;

	/*
	 * Report at most a single write buffer's worth of space so the
	 * line discipline does not learn about our full reserve and get
	 * too enthusiastic.
	 */
	if (acm_wb_is_avail(acm))
		return acm->writesize;

	return 0;
}
/* Cancel all writes currently in flight. */
static void acm_tty_flush_buffer(struct tty_struct *tty)
{
	struct acm *acm = tty->driver_data;
	unsigned long flags;
	int n;

	spin_lock_irqsave(&acm->write_lock, flags);
	for (n = 0; n < ACM_NW; n++) {
		if (acm->wb[n].use)
			usb_unlink_urb(acm->wb[n].urb);
	}
	spin_unlock_irqrestore(&acm->write_lock, flags);
}
static unsigned int acm_tty_chars_in_buffer(struct tty_struct *tty)
{
	struct acm *acm = tty->driver_data;
	unsigned int queued;

	/*
	 * if the device was unplugged then any remaining characters fell out
	 * of the connector ;)
	 */
	if (acm->disconnected)
		return 0;

	/* overcounts (whole buffers), but that is good enough here */
	queued = (ACM_NW - acm_wb_is_avail(acm)) * acm->writesize;

	return queued;
}
/* Stop resubmitting read URBs until the tty is unthrottled. */
static void acm_tty_throttle(struct tty_struct *tty)
{
	struct acm *acm = tty->driver_data;

	set_bit(ACM_THROTTLED, &acm->flags);
}

static void acm_tty_unthrottle(struct tty_struct *tty)
{
	struct acm *acm = tty->driver_data;

	clear_bit(ACM_THROTTLED, &acm->flags);

	/* Matches the smp_mb__after_atomic() in acm_read_bulk_callback(). */
	smp_mb();

	acm_submit_read_urbs(acm, GFP_KERNEL);
}
/* Assert (state != 0) or deassert a break condition on the line. */
static int acm_tty_break_ctl(struct tty_struct *tty, int state)
{
	struct acm *acm = tty->driver_data;
	int ret;

	ret = acm_send_break(acm, state ? 0xffff : 0);
	if (ret < 0)
		dev_dbg(&acm->control->dev,
			"%s - send break failed\n", __func__);

	return ret;
}
/* Report modem line state; CTS is always reported as asserted. */
static int acm_tty_tiocmget(struct tty_struct *tty)
{
	struct acm *acm = tty->driver_data;
	int result = TIOCM_CTS;

	if (acm->ctrlout & USB_CDC_CTRL_DTR)
		result |= TIOCM_DTR;
	if (acm->ctrlout & USB_CDC_CTRL_RTS)
		result |= TIOCM_RTS;
	if (acm->ctrlin & USB_CDC_SERIAL_STATE_DSR)
		result |= TIOCM_DSR;
	if (acm->ctrlin & USB_CDC_SERIAL_STATE_RING_SIGNAL)
		result |= TIOCM_RI;
	if (acm->ctrlin & USB_CDC_SERIAL_STATE_DCD)
		result |= TIOCM_CD;

	return result;
}
static int acm_tty_tiocmset(struct tty_struct *tty,
			    unsigned int set, unsigned int clear)
{
	struct acm *acm = tty->driver_data;
	unsigned int want_set, want_clear, newctrl;

	/* translate TIOCM_* bits into CDC control-line bits */
	want_set = (set & TIOCM_DTR ? USB_CDC_CTRL_DTR : 0) |
		   (set & TIOCM_RTS ? USB_CDC_CTRL_RTS : 0);
	want_clear = (clear & TIOCM_DTR ? USB_CDC_CTRL_DTR : 0) |
		     (clear & TIOCM_RTS ? USB_CDC_CTRL_RTS : 0);

	newctrl = (acm->ctrlout & ~want_clear) | want_set;

	if (acm->ctrlout == newctrl)
		return 0;

	acm->ctrlout = newctrl;
	return acm_set_control(acm, newctrl);
}
/* TIOCGSERIAL: report minor and close timing (userspace uses 10 ms units). */
static int get_serial_info(struct tty_struct *tty, struct serial_struct *ss)
{
	struct acm *acm = tty->driver_data;

	ss->line = acm->minor;
	ss->close_delay = jiffies_to_msecs(acm->port.close_delay) / 10;
	ss->closing_wait = acm->port.closing_wait == ASYNC_CLOSING_WAIT_NONE ?
				ASYNC_CLOSING_WAIT_NONE :
				jiffies_to_msecs(acm->port.closing_wait) / 10;
	return 0;
}
/* TIOCSSERIAL: update close timing; non-admin users may not change values. */
static int set_serial_info(struct tty_struct *tty, struct serial_struct *ss)
{
	struct acm *acm = tty->driver_data;
	unsigned int closing_wait, close_delay;
	int retval = 0;

	/* userspace passes 10 ms units, the port stores jiffies */
	close_delay = msecs_to_jiffies(ss->close_delay * 10);
	closing_wait = ss->closing_wait == ASYNC_CLOSING_WAIT_NONE ?
			ASYNC_CLOSING_WAIT_NONE :
			msecs_to_jiffies(ss->closing_wait * 10);

	mutex_lock(&acm->port.mutex);

	if (!capable(CAP_SYS_ADMIN)) {
		if ((close_delay != acm->port.close_delay) ||
		    (closing_wait != acm->port.closing_wait))
			retval = -EPERM;
	} else {
		acm->port.close_delay = close_delay;
		acm->port.closing_wait = closing_wait;
	}

	mutex_unlock(&acm->port.mutex);
	return retval;
}
/*
 * Sleep until one of the lines selected in 'arg' (TIOCM_DSR/CD/RI) changes
 * state, a signal arrives, or the device disconnects.  Backs TIOCMIWAIT.
 */
static int wait_serial_change(struct acm *acm, unsigned long arg)
{
	int rv = 0;
	DECLARE_WAITQUEUE(wait, current);
	struct async_icount old, new;

	do {
		/* snapshot and consume the counters under read_lock */
		spin_lock_irq(&acm->read_lock);
		old = acm->oldcount;
		new = acm->iocount;
		acm->oldcount = new;
		spin_unlock_irq(&acm->read_lock);

		if ((arg & TIOCM_DSR) &&
			old.dsr != new.dsr)
			break;
		if ((arg & TIOCM_CD) &&
			old.dcd != new.dcd)
			break;
		if ((arg & TIOCM_RI) &&
			old.rng != new.rng)
			break;

		/* woken by acm_process_notification() on any line change */
		add_wait_queue(&acm->wioctl, &wait);
		set_current_state(TASK_INTERRUPTIBLE);
		schedule();
		remove_wait_queue(&acm->wioctl, &wait);
		if (acm->disconnected) {
			/* disconnect counts as a carrier change */
			if (arg & TIOCM_CD)
				break;
			else
				rv = -ENODEV;
		} else {
			if (signal_pending(current))
				rv = -ERESTARTSYS;
		}
	} while (!rv);

	return rv;
}
static int acm_tty_get_icount(struct tty_struct *tty,
struct serial_icounter_struct *icount)
{
struct acm *acm = tty->driver_data;
icount->dsr = acm->iocount.dsr;
icount->rng = acm->iocount.rng;
icount->dcd = acm->iocount.dcd;
icount->frame = acm->iocount.frame;
icount->overrun = acm->iocount.overrun;
icount->parity = acm->iocount.parity;
icount->brk = acm->iocount.brk;
return 0;
}
static int acm_tty_ioctl(struct tty_struct *tty,
			 unsigned int cmd, unsigned long arg)
{
	struct acm *acm = tty->driver_data;
	int ret;

	/* only TIOCMIWAIT is handled here */
	if (cmd != TIOCMIWAIT)
		return -ENOIOCTLCMD;

	/* keep the device awake while waiting for a line-state change */
	ret = usb_autopm_get_interface(acm->control);
	if (ret < 0)
		return -EIO;

	ret = wait_serial_change(acm, arg);
	usb_autopm_put_interface(acm->control);

	return ret;
}
/* Translate termios settings into a CDC SET_LINE_CODING request. */
static void acm_tty_set_termios(struct tty_struct *tty,
				const struct ktermios *termios_old)
{
	struct acm *acm = tty->driver_data;
	struct ktermios *termios = &tty->termios;
	struct usb_cdc_line_coding newline;
	int newctrl = acm->ctrlout;

	newline.dwDTERate = cpu_to_le32(tty_get_baud_rate(tty));
	newline.bCharFormat = termios->c_cflag & CSTOPB ? 2 : 0;
	/* yields 1 (odd) or 2 (even); CMSPAR adds 2 (mark/space per CDC) */
	newline.bParityType = termios->c_cflag & PARENB ?
				(termios->c_cflag & PARODD ? 1 : 2) +
				(termios->c_cflag & CMSPAR ? 2 : 0) : 0;
	newline.bDataBits = tty_get_char_size(termios->c_cflag);

	/* FIXME: Needs to clear unsupported bits in the termios */
	acm->clocal = ((termios->c_cflag & CLOCAL) != 0);

	if (C_BAUD(tty) == B0) {
		/* B0 means hang up: keep the old rate but drop DTR */
		newline.dwDTERate = acm->line.dwDTERate;
		newctrl &= ~USB_CDC_CTRL_DTR;
	} else if (termios_old && (termios_old->c_cflag & CBAUD) == B0) {
		/* leaving B0: raise DTR again */
		newctrl |= USB_CDC_CTRL_DTR;
	}

	if (newctrl != acm->ctrlout)
		acm_set_control(acm, acm->ctrlout = newctrl);

	/* only send a line-coding request when something changed */
	if (memcmp(&acm->line, &newline, sizeof newline)) {
		memcpy(&acm->line, &newline, sizeof newline);
		dev_dbg(&acm->control->dev, "%s - set line: %d %d %d %d\n",
			__func__,
			le32_to_cpu(newline.dwDTERate),
			newline.bCharFormat, newline.bParityType,
			newline.bDataBits);
		acm_set_line(acm, &acm->line);
	}
}
/* tty_port callbacks: the port layer drives DTR/RTS and open/close. */
static const struct tty_port_operations acm_port_ops = {
	.dtr_rts = acm_port_dtr_rts,
	.shutdown = acm_port_shutdown,
	.activate = acm_port_activate,
	.destruct = acm_port_destruct,
};
/*
 * USB probe and disconnect routines.
 */

/* Little helpers: write/read buffers free */
static void acm_write_buffers_free(struct acm *acm)
{
	int i;
	struct acm_wb *wb;

	for (wb = &acm->wb[0], i = 0; i < ACM_NW; i++, wb++)
		usb_free_coherent(acm->dev, acm->writesize, wb->buf, wb->dmah);
}

static void acm_read_buffers_free(struct acm *acm)
{
	int i;

	for (i = 0; i < acm->rx_buflimit; i++)
		usb_free_coherent(acm->dev, acm->readsize,
			acm->read_buffers[i].base, acm->read_buffers[i].dma);
}
/*
 * Allocate the DMA-coherent write buffers; on failure every buffer
 * allocated so far is freed again.  Returns 0 or -ENOMEM.
 */
static int acm_write_buffers_alloc(struct acm *acm)
{
	int i, j;

	for (i = 0; i < ACM_NW; i++) {
		struct acm_wb *wb = &acm->wb[i];

		wb->buf = usb_alloc_coherent(acm->dev, acm->writesize,
					     GFP_KERNEL, &wb->dmah);
		if (!wb->buf)
			goto err_unwind;
	}
	return 0;

err_unwind:
	for (j = 0; j < i; j++)
		usb_free_coherent(acm->dev, acm->writesize,
				  acm->wb[j].buf, acm->wb[j].dmah);
	return -ENOMEM;
}
/*
 * Probe a CDC ACM device.
 *
 * Identifies the control and data interfaces and their endpoints —
 * working around a long list of quirky devices (missing/broken union
 * descriptors, combined interfaces, swapped interfaces/endpoints) —
 * then allocates the struct acm with all URBs and DMA buffers and
 * registers the tty device.  Returns 0 on success or a negative errno;
 * on any failure everything allocated so far is unwound via the labels
 * at the bottom.
 */
static int acm_probe(struct usb_interface *intf,
		     const struct usb_device_id *id)
{
	struct usb_cdc_union_desc *union_header = NULL;
	struct usb_cdc_call_mgmt_descriptor *cmgmd = NULL;
	unsigned char *buffer = intf->altsetting->extra;
	int buflen = intf->altsetting->extralen;
	struct usb_interface *control_interface;
	struct usb_interface *data_interface;
	struct usb_endpoint_descriptor *epctrl = NULL;
	struct usb_endpoint_descriptor *epread = NULL;
	struct usb_endpoint_descriptor *epwrite = NULL;
	struct usb_device *usb_dev = interface_to_usbdev(intf);
	struct usb_cdc_parsed_header h;
	struct acm *acm;
	int minor;
	int ctrlsize, readsize;
	u8 *buf;
	int call_intf_num = -1;
	int data_intf_num = -1;
	unsigned long quirks;
	int num_rx_buf;
	int i;
	int combined_interfaces = 0;
	struct device *tty_dev;
	int rv = -ENOMEM;
	int res;

	/* normal quirks */
	quirks = (unsigned long)id->driver_info;

	if (quirks == IGNORE_DEVICE)
		return -ENODEV;

	memset(&h, 0x00, sizeof(struct usb_cdc_parsed_header));

	/* SINGLE_RX_URB devices get one read buffer instead of ACM_NR */
	num_rx_buf = (quirks == SINGLE_RX_URB) ? 1 : ACM_NR;

	/* handle quirks deadly to normal probing*/
	if (quirks == NO_UNION_NORMAL) {
		/* hard-wired interface layout: 0 = control, 1 = data */
		data_interface = usb_ifnum_to_if(usb_dev, 1);
		control_interface = usb_ifnum_to_if(usb_dev, 0);
		/* we would crash */
		if (!data_interface || !control_interface)
			return -ENODEV;
		goto skip_normal_probe;
	}

	/* normal probing*/
	if (!buffer) {
		dev_err(&intf->dev, "Weird descriptor references\n");
		return -EINVAL;
	}

	if (!buflen) {
		/* some devices attach the class descriptors to the endpoint */
		if (intf->cur_altsetting->endpoint &&
				intf->cur_altsetting->endpoint->extralen &&
				intf->cur_altsetting->endpoint->extra) {
			dev_dbg(&intf->dev,
				"Seeking extra descriptors on endpoint\n");
			buflen = intf->cur_altsetting->endpoint->extralen;
			buffer = intf->cur_altsetting->endpoint->extra;
		} else {
			dev_err(&intf->dev,
				"Zero length descriptor references\n");
			return -EINVAL;
		}
	}

	cdc_parse_cdc_header(&h, intf, buffer, buflen);
	union_header = h.usb_cdc_union_desc;
	cmgmd = h.usb_cdc_call_mgmt_descriptor;
	if (cmgmd)
		call_intf_num = cmgmd->bDataInterface;

	if (!union_header) {
		/* fall back: 3 endpoints => everything on one interface */
		if (intf->cur_altsetting->desc.bNumEndpoints == 3) {
			dev_dbg(&intf->dev, "No union descriptor, assuming single interface\n");
			combined_interfaces = 1;
			control_interface = data_interface = intf;
			goto look_for_collapsed_interface;
		} else if (call_intf_num > 0) {
			dev_dbg(&intf->dev, "No union descriptor, using call management descriptor\n");
			data_intf_num = call_intf_num;
			data_interface = usb_ifnum_to_if(usb_dev, data_intf_num);
			control_interface = intf;
		} else {
			dev_dbg(&intf->dev, "No union descriptor, giving up\n");
			return -ENODEV;
		}
	} else {
		int class = -1;

		data_intf_num = union_header->bSlaveInterface0;
		control_interface = usb_ifnum_to_if(usb_dev, union_header->bMasterInterface0);
		data_interface = usb_ifnum_to_if(usb_dev, data_intf_num);

		if (control_interface)
			class = control_interface->cur_altsetting->desc.bInterfaceClass;

		/* union descriptor points at nonsense: treat as combined */
		if (class != USB_CLASS_COMM && class != USB_CLASS_CDC_DATA) {
			dev_dbg(&intf->dev, "Broken union descriptor, assuming single interface\n");
			combined_interfaces = 1;
			control_interface = data_interface = intf;
			goto look_for_collapsed_interface;
		}
	}

	if (!control_interface || !data_interface) {
		dev_dbg(&intf->dev, "no interfaces\n");
		return -ENODEV;
	}

	if (data_intf_num != call_intf_num)
		dev_dbg(&intf->dev, "Separate call control interface. That is not fully supported.\n");

	if (control_interface == data_interface) {
		/* some broken devices designed for windows work this way */
		dev_warn(&intf->dev,"Control and data interfaces are not separated!\n");
		combined_interfaces = 1;
		/* a popular other OS doesn't use it */
		quirks |= NO_CAP_LINE;
		if (data_interface->cur_altsetting->desc.bNumEndpoints != 3) {
			dev_err(&intf->dev, "This needs exactly 3 endpoints\n");
			return -EINVAL;
		}
look_for_collapsed_interface:
		res = usb_find_common_endpoints(data_interface->cur_altsetting,
				&epread, &epwrite, &epctrl, NULL);
		if (res)
			return res;

		goto made_compressed_probe;
	}

skip_normal_probe:

	/*workaround for switched interfaces */
	if (data_interface->cur_altsetting->desc.bInterfaceClass != USB_CLASS_CDC_DATA) {
		if (control_interface->cur_altsetting->desc.bInterfaceClass == USB_CLASS_CDC_DATA) {
			dev_dbg(&intf->dev,
				"Your device has switched interfaces.\n");
			swap(control_interface, data_interface);
		} else {
			return -EINVAL;
		}
	}

	/* Accept probe requests only for the control interface */
	if (!combined_interfaces && intf != control_interface)
		return -ENODEV;

	if (data_interface->cur_altsetting->desc.bNumEndpoints < 2 ||
	    control_interface->cur_altsetting->desc.bNumEndpoints == 0)
		return -EINVAL;

	epctrl = &control_interface->cur_altsetting->endpoint[0].desc;
	epread = &data_interface->cur_altsetting->endpoint[0].desc;
	epwrite = &data_interface->cur_altsetting->endpoint[1].desc;

	/* workaround for switched endpoints */
	if (!usb_endpoint_dir_in(epread)) {
		/* descriptors are swapped */
		dev_dbg(&intf->dev,
			"The data interface has switched endpoints\n");
		swap(epread, epwrite);
	}
made_compressed_probe:
	dev_dbg(&intf->dev, "interfaces are valid\n");

	acm = kzalloc(sizeof(struct acm), GFP_KERNEL);
	if (!acm)
		return -ENOMEM;

	tty_port_init(&acm->port);
	acm->port.ops = &acm_port_ops;

	ctrlsize = usb_endpoint_maxp(epctrl);
	readsize = usb_endpoint_maxp(epread) *
				(quirks == SINGLE_RX_URB ? 1 : 2);
	acm->combined_interfaces = combined_interfaces;
	acm->writesize = usb_endpoint_maxp(epwrite) * 20;
	acm->control = control_interface;
	acm->data = data_interface;

	usb_get_intf(acm->control); /* undone in destruct() */

	minor = acm_alloc_minor(acm);
	if (minor < 0) {
		acm->minor = ACM_MINOR_INVALID;
		goto err_put_port;
	}

	acm->minor = minor;
	acm->dev = usb_dev;
	if (h.usb_cdc_acm_descriptor)
		acm->ctrl_caps = h.usb_cdc_acm_descriptor->bmCapabilities;
	if (quirks & NO_CAP_LINE)
		acm->ctrl_caps &= ~USB_CDC_CAP_LINE;
	acm->ctrlsize = ctrlsize;
	acm->readsize = readsize;
	acm->rx_buflimit = num_rx_buf;
	INIT_DELAYED_WORK(&acm->dwork, acm_softint);
	init_waitqueue_head(&acm->wioctl);
	spin_lock_init(&acm->write_lock);
	spin_lock_init(&acm->read_lock);
	mutex_init(&acm->mutex);
	if (usb_endpoint_xfer_int(epread)) {
		acm->bInterval = epread->bInterval;
		acm->in = usb_rcvintpipe(usb_dev, epread->bEndpointAddress);
	} else {
		acm->in = usb_rcvbulkpipe(usb_dev, epread->bEndpointAddress);
	}
	if (usb_endpoint_xfer_int(epwrite))
		acm->out = usb_sndintpipe(usb_dev, epwrite->bEndpointAddress);
	else
		acm->out = usb_sndbulkpipe(usb_dev, epwrite->bEndpointAddress);
	init_usb_anchor(&acm->delayed);
	acm->quirks = quirks;

	buf = usb_alloc_coherent(usb_dev, ctrlsize, GFP_KERNEL, &acm->ctrl_dma);
	if (!buf)
		goto err_put_port;
	acm->ctrl_buffer = buf;

	if (acm_write_buffers_alloc(acm) < 0)
		goto err_free_ctrl_buffer;

	acm->ctrlurb = usb_alloc_urb(0, GFP_KERNEL);
	if (!acm->ctrlurb)
		goto err_free_write_buffers;

	/* set up the read URBs; interrupt-IN endpoints need bInterval */
	for (i = 0; i < num_rx_buf; i++) {
		struct acm_rb *rb = &(acm->read_buffers[i]);
		struct urb *urb;

		rb->base = usb_alloc_coherent(acm->dev, readsize, GFP_KERNEL,
								&rb->dma);
		if (!rb->base)
			goto err_free_read_urbs;
		rb->index = i;
		rb->instance = acm;

		urb = usb_alloc_urb(0, GFP_KERNEL);
		if (!urb)
			goto err_free_read_urbs;

		urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
		urb->transfer_dma = rb->dma;
		if (usb_endpoint_xfer_int(epread))
			usb_fill_int_urb(urb, acm->dev, acm->in, rb->base,
					 acm->readsize,
					 acm_read_bulk_callback, rb,
					 acm->bInterval);
		else
			usb_fill_bulk_urb(urb, acm->dev, acm->in, rb->base,
					  acm->readsize,
					  acm_read_bulk_callback, rb);

		acm->read_urbs[i] = urb;
		__set_bit(i, &acm->read_urbs_free);
	}
	/* set up the write URBs */
	for (i = 0; i < ACM_NW; i++) {
		struct acm_wb *snd = &(acm->wb[i]);

		snd->urb = usb_alloc_urb(0, GFP_KERNEL);
		if (!snd->urb)
			goto err_free_write_urbs;

		if (usb_endpoint_xfer_int(epwrite))
			usb_fill_int_urb(snd->urb, usb_dev, acm->out,
				NULL, acm->writesize, acm_write_bulk, snd, epwrite->bInterval);
		else
			usb_fill_bulk_urb(snd->urb, usb_dev, acm->out,
				NULL, acm->writesize, acm_write_bulk, snd);
		snd->urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
		if (quirks & SEND_ZERO_PACKET)
			snd->urb->transfer_flags |= URB_ZERO_PACKET;
		snd->instance = acm;
	}

	usb_set_intfdata(intf, acm);

	i = device_create_file(&intf->dev, &dev_attr_bmCapabilities);
	if (i < 0)
		goto err_free_write_urbs;

	if (h.usb_cdc_country_functional_desc) { /* export the country data */
		struct usb_cdc_country_functional_desc * cfd =
					h.usb_cdc_country_functional_desc;

		acm->country_codes = kmalloc(cfd->bLength - 4, GFP_KERNEL);
		if (!acm->country_codes)
			goto skip_countries;
		acm->country_code_size = cfd->bLength - 4;
		memcpy(acm->country_codes, (u8 *)&cfd->wCountyCode0,
							cfd->bLength - 4);
		acm->country_rel_date = cfd->iCountryCodeRelDate;

		i = device_create_file(&intf->dev, &dev_attr_wCountryCodes);
		if (i < 0) {
			/* country data is optional; carry on without it */
			kfree(acm->country_codes);
			acm->country_codes = NULL;
			acm->country_code_size = 0;
			goto skip_countries;
		}

		i = device_create_file(&intf->dev,
						&dev_attr_iCountryCodeRelDate);
		if (i < 0) {
			device_remove_file(&intf->dev, &dev_attr_wCountryCodes);
			kfree(acm->country_codes);
			acm->country_codes = NULL;
			acm->country_code_size = 0;
			goto skip_countries;
		}
	}

skip_countries:
	usb_fill_int_urb(acm->ctrlurb, usb_dev,
			 usb_rcvintpipe(usb_dev, epctrl->bEndpointAddress),
			 acm->ctrl_buffer, ctrlsize, acm_ctrl_irq, acm,
			 /* works around buggy devices */
			 epctrl->bInterval ? epctrl->bInterval : 16);
	acm->ctrlurb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
	acm->ctrlurb->transfer_dma = acm->ctrl_dma;
	acm->notification_buffer = NULL;
	acm->nb_index = 0;
	acm->nb_size = 0;

	/* default line coding: 9600 8N1 */
	acm->line.dwDTERate = cpu_to_le32(9600);
	acm->line.bDataBits = 8;
	acm_set_line(acm, &acm->line);

	if (!acm->combined_interfaces) {
		rv = usb_driver_claim_interface(&acm_driver, data_interface, acm);
		if (rv)
			goto err_remove_files;
	}

	tty_dev = tty_port_register_device(&acm->port, acm_tty_driver, minor,
			&control_interface->dev);
	if (IS_ERR(tty_dev)) {
		rv = PTR_ERR(tty_dev);
		goto err_release_data_interface;
	}

	if (quirks & CLEAR_HALT_CONDITIONS) {
		usb_clear_halt(usb_dev, acm->in);
		usb_clear_halt(usb_dev, acm->out);
	}

	dev_info(&intf->dev, "ttyACM%d: USB ACM device\n", minor);

	return 0;

	/* error unwind: each label frees what was set up before its goto */
err_release_data_interface:
	if (!acm->combined_interfaces) {
		/* Clear driver data so that disconnect() returns early. */
		usb_set_intfdata(data_interface, NULL);
		usb_driver_release_interface(&acm_driver, data_interface);
	}
err_remove_files:
	if (acm->country_codes) {
		device_remove_file(&acm->control->dev,
				&dev_attr_wCountryCodes);
		device_remove_file(&acm->control->dev,
				&dev_attr_iCountryCodeRelDate);
	}
	device_remove_file(&acm->control->dev, &dev_attr_bmCapabilities);
err_free_write_urbs:
	for (i = 0; i < ACM_NW; i++)
		usb_free_urb(acm->wb[i].urb);
err_free_read_urbs:
	for (i = 0; i < num_rx_buf; i++)
		usb_free_urb(acm->read_urbs[i]);
	acm_read_buffers_free(acm);
	usb_free_urb(acm->ctrlurb);
err_free_write_buffers:
	acm_write_buffers_free(acm);
err_free_ctrl_buffer:
	usb_free_coherent(usb_dev, ctrlsize, acm->ctrl_buffer, acm->ctrl_dma);
err_put_port:
	tty_port_put(&acm->port);

	return rv;
}
/*
 * Disconnect handler: called for both the control and the data
 * interface; only the first call does the teardown (intfdata is
 * cleared for the sibling).  URBs are poisoned first so nothing can be
 * resubmitted, then the tty is hung up, and finally all resources are
 * freed.  The final tty_port_put() may free the struct acm.
 */
static void acm_disconnect(struct usb_interface *intf)
{
	struct acm *acm = usb_get_intfdata(intf);
	struct tty_struct *tty;
	int i;

	/* sibling interface is already cleaning up */
	if (!acm)
		return;

	acm->disconnected = true;
	/*
	 * there is a circular dependency. acm_softint() can resubmit
	 * the URBs in error handling so we need to block any
	 * submission right away
	 */
	acm_poison_urbs(acm);
	mutex_lock(&acm->mutex);
	if (acm->country_codes) {
		device_remove_file(&acm->control->dev,
				&dev_attr_wCountryCodes);
		device_remove_file(&acm->control->dev,
				&dev_attr_iCountryCodeRelDate);
	}
	wake_up_all(&acm->wioctl);
	device_remove_file(&acm->control->dev, &dev_attr_bmCapabilities);
	/* make the sibling interface's disconnect return early */
	usb_set_intfdata(acm->control, NULL);
	usb_set_intfdata(acm->data, NULL);
	mutex_unlock(&acm->mutex);

	tty = tty_port_tty_get(&acm->port);
	if (tty) {
		tty_vhangup(tty);
		tty_kref_put(tty);
	}

	cancel_delayed_work_sync(&acm->dwork);

	tty_unregister_device(acm_tty_driver, acm->minor);

	usb_free_urb(acm->ctrlurb);
	for (i = 0; i < ACM_NW; i++)
		usb_free_urb(acm->wb[i].urb);
	for (i = 0; i < acm->rx_buflimit; i++)
		usb_free_urb(acm->read_urbs[i]);
	acm_write_buffers_free(acm);
	usb_free_coherent(acm->dev, acm->ctrlsize, acm->ctrl_buffer, acm->ctrl_dma);
	acm_read_buffers_free(acm);

	kfree(acm->notification_buffer);

	if (!acm->combined_interfaces)
		usb_driver_release_interface(&acm_driver, intf == acm->control ?
					acm->data : acm->control);

	tty_port_put(&acm->port);
}
#ifdef CONFIG_PM
/*
 * Suspend handler.  Called once per interface, so susp_count tracks
 * how many of the (up to two) interfaces are suspended; the real work
 * happens only on the first call.  Autosuspend is refused with -EBUSY
 * while a write is in flight.
 */
static int acm_suspend(struct usb_interface *intf, pm_message_t message)
{
	struct acm *acm = usb_get_intfdata(intf);
	int cnt;

	spin_lock_irq(&acm->write_lock);
	if (PMSG_IS_AUTO(message)) {
		if (acm->transmitting) {
			spin_unlock_irq(&acm->write_lock);
			return -EBUSY;
		}
	}
	cnt = acm->susp_count++;
	spin_unlock_irq(&acm->write_lock);
	/* only the first suspending interface quiesces the device */
	if (cnt)
		return 0;

	acm_poison_urbs(acm);
	cancel_delayed_work_sync(&acm->dwork);
	acm->urbs_in_error_delay = 0;

	return 0;
}
/*
 * Resume handler; mirror image of acm_suspend().  Only the last
 * resuming interface (susp_count reaching zero) restarts I/O: it
 * unpoisons the URBs, resubmits the control URB, flushes writes that
 * were anchored in acm->delayed while suspended, and restarts reads.
 */
static int acm_resume(struct usb_interface *intf)
{
	struct acm *acm = usb_get_intfdata(intf);
	struct urb *urb;
	int rv = 0;

	spin_lock_irq(&acm->write_lock);

	if (--acm->susp_count)
		goto out;

	acm_unpoison_urbs(acm);

	if (tty_port_initialized(&acm->port)) {
		rv = usb_submit_urb(acm->ctrlurb, GFP_ATOMIC);

		/* submit the writes queued up while we were suspended */
		for (;;) {
			urb = usb_get_from_anchor(&acm->delayed);
			if (!urb)
				break;

			acm_start_wb(acm, urb->context);
		}

		/*
		 * delayed error checking because we must
		 * do the write path at all cost
		 */
		if (rv < 0)
			goto out;

		rv = acm_submit_read_urbs(acm, GFP_ATOMIC);
	}
out:
	spin_unlock_irq(&acm->write_lock);

	return rv;
}
/*
 * Resume after a device reset: line state was lost, so hang up any
 * open tty before doing a normal resume.
 */
static int acm_reset_resume(struct usb_interface *intf)
{
	struct acm *acm = usb_get_intfdata(intf);

	if (tty_port_initialized(&acm->port))
		tty_port_tty_hangup(&acm->port, false);

	return acm_resume(intf);
}
#endif /* CONFIG_PM */
/*
 * Called before the device is reset: drop any pending RX-stall
 * recovery and discard partially reassembled notifications, since
 * in-flight control transfers are lost across the reset.
 */
static int acm_pre_reset(struct usb_interface *intf)
{
	struct acm *acm = usb_get_intfdata(intf);

	clear_bit(EVENT_RX_STALL, &acm->flags);
	acm->nb_index = 0; /* pending control transfers are lost */

	return 0;
}
/* Match Nokia (VID 0x0421) PC-Suite vendor-protocol ACM interfaces */
#define NOKIA_PCSUITE_ACM_INFO(x) \
		USB_DEVICE_AND_INTERFACE_INFO(0x0421, x, \
		USB_CLASS_COMM, USB_CDC_SUBCLASS_ACM, \
		USB_CDC_ACM_PROTO_VENDOR)

/* Match Samsung (VID 0x04e7) PC-Suite vendor-protocol ACM interfaces */
#define SAMSUNG_PCSUITE_ACM_INFO(x) \
		USB_DEVICE_AND_INTERFACE_INFO(0x04e7, x, \
		USB_CLASS_COMM, USB_CDC_SUBCLASS_ACM, \
		USB_CDC_ACM_PROTO_VENDOR)
/*
* USB driver structure.
*/
static const struct usb_device_id acm_ids[] = {
/* quirky and broken devices */
{ USB_DEVICE(0x0424, 0x274e), /* Microchip Technology, Inc. (formerly SMSC) */
.driver_info = DISABLE_ECHO, }, /* DISABLE ECHO in termios flag */
{ USB_DEVICE(0x076d, 0x0006), /* Denso Cradle CU-321 */
.driver_info = NO_UNION_NORMAL, },/* has no union descriptor */
{ USB_DEVICE(0x17ef, 0x7000), /* Lenovo USB modem */
.driver_info = NO_UNION_NORMAL, },/* has no union descriptor */
{ USB_DEVICE(0x0870, 0x0001), /* Metricom GS Modem */
.driver_info = NO_UNION_NORMAL, /* has no union descriptor */
},
{ USB_DEVICE(0x045b, 0x023c), /* Renesas USB Download mode */
.driver_info = DISABLE_ECHO, /* Don't echo banner */
},
{ USB_DEVICE(0x045b, 0x0248), /* Renesas USB Download mode */
.driver_info = DISABLE_ECHO, /* Don't echo banner */
},
{ USB_DEVICE(0x045b, 0x024D), /* Renesas USB Download mode */
.driver_info = DISABLE_ECHO, /* Don't echo banner */
},
{ USB_DEVICE(0x0e8d, 0x0003), /* FIREFLY, MediaTek Inc; [email protected] */
.driver_info = NO_UNION_NORMAL, /* has no union descriptor */
},
{ USB_DEVICE(0x0e8d, 0x2000), /* MediaTek Inc Preloader */
.driver_info = DISABLE_ECHO, /* DISABLE ECHO in termios flag */
},
{ USB_DEVICE(0x0e8d, 0x3329), /* MediaTek Inc GPS */
.driver_info = NO_UNION_NORMAL, /* has no union descriptor */
},
{ USB_DEVICE(0x0482, 0x0203), /* KYOCERA AH-K3001V */
.driver_info = NO_UNION_NORMAL, /* has no union descriptor */
},
{ USB_DEVICE(0x079b, 0x000f), /* BT On-Air USB MODEM */
.driver_info = NO_UNION_NORMAL, /* has no union descriptor */
},
{ USB_DEVICE(0x0ace, 0x1602), /* ZyDAS 56K USB MODEM */
.driver_info = SINGLE_RX_URB,
},
{ USB_DEVICE(0x0ace, 0x1608), /* ZyDAS 56K USB MODEM */
.driver_info = SINGLE_RX_URB, /* firmware bug */
},
{ USB_DEVICE(0x0ace, 0x1611), /* ZyDAS 56K USB MODEM - new version */
.driver_info = SINGLE_RX_URB, /* firmware bug */
},
{ USB_DEVICE(0x11ca, 0x0201), /* VeriFone Mx870 Gadget Serial */
.driver_info = SINGLE_RX_URB,
},
{ USB_DEVICE(0x1965, 0x0018), /* Uniden UBC125XLT */
.driver_info = NO_UNION_NORMAL, /* has no union descriptor */
},
{ USB_DEVICE(0x22b8, 0x7000), /* Motorola Q Phone */
.driver_info = NO_UNION_NORMAL, /* has no union descriptor */
},
{ USB_DEVICE(0x0803, 0x3095), /* Zoom Telephonics Model 3095F USB MODEM */
.driver_info = NO_UNION_NORMAL, /* has no union descriptor */
},
{ USB_DEVICE(0x0572, 0x1321), /* Conexant USB MODEM CX93010 */
.driver_info = NO_UNION_NORMAL, /* has no union descriptor */
},
{ USB_DEVICE(0x0572, 0x1324), /* Conexant USB MODEM RD02-D400 */
.driver_info = NO_UNION_NORMAL, /* has no union descriptor */
},
{ USB_DEVICE(0x0572, 0x1328), /* Shiro / Aztech USB MODEM UM-3100 */
.driver_info = NO_UNION_NORMAL, /* has no union descriptor */
},
{ USB_DEVICE(0x0572, 0x1349), /* Hiro (Conexant) USB MODEM H50228 */
.driver_info = NO_UNION_NORMAL, /* has no union descriptor */
},
{ USB_DEVICE(0x20df, 0x0001), /* Simtec Electronics Entropy Key */
.driver_info = QUIRK_CONTROL_LINE_STATE, },
{ USB_DEVICE(0x2184, 0x001c) }, /* GW Instek AFG-2225 */
{ USB_DEVICE(0x2184, 0x0036) }, /* GW Instek AFG-125 */
{ USB_DEVICE(0x22b8, 0x6425), /* Motorola MOTOMAGX phones */
},
/* Motorola H24 HSPA module: */
{ USB_DEVICE(0x22b8, 0x2d91) }, /* modem */
{ USB_DEVICE(0x22b8, 0x2d92), /* modem + diagnostics */
.driver_info = NO_UNION_NORMAL, /* handle only modem interface */
},
{ USB_DEVICE(0x22b8, 0x2d93), /* modem + AT port */
.driver_info = NO_UNION_NORMAL, /* handle only modem interface */
},
{ USB_DEVICE(0x22b8, 0x2d95), /* modem + AT port + diagnostics */
.driver_info = NO_UNION_NORMAL, /* handle only modem interface */
},
{ USB_DEVICE(0x22b8, 0x2d96), /* modem + NMEA */
.driver_info = NO_UNION_NORMAL, /* handle only modem interface */
},
{ USB_DEVICE(0x22b8, 0x2d97), /* modem + diagnostics + NMEA */
.driver_info = NO_UNION_NORMAL, /* handle only modem interface */
},
{ USB_DEVICE(0x22b8, 0x2d99), /* modem + AT port + NMEA */
.driver_info = NO_UNION_NORMAL, /* handle only modem interface */
},
{ USB_DEVICE(0x22b8, 0x2d9a), /* modem + AT port + diagnostics + NMEA */
.driver_info = NO_UNION_NORMAL, /* handle only modem interface */
},
{ USB_DEVICE(0x0572, 0x1329), /* Hummingbird huc56s (Conexant) */
.driver_info = NO_UNION_NORMAL, /* union descriptor misplaced on
data interface instead of
communications interface.
Maybe we should define a new
quirk for this. */
},
{ USB_DEVICE(0x0572, 0x1340), /* Conexant CX93010-2x UCMxx */
.driver_info = NO_UNION_NORMAL,
},
{ USB_DEVICE(0x05f9, 0x4002), /* PSC Scanning, Magellan 800i */
.driver_info = NO_UNION_NORMAL,
},
{ USB_DEVICE(0x1bbb, 0x0003), /* Alcatel OT-I650 */
.driver_info = NO_UNION_NORMAL, /* reports zero length descriptor */
},
{ USB_DEVICE(0x1576, 0x03b1), /* Maretron USB100 */
.driver_info = NO_UNION_NORMAL, /* reports zero length descriptor */
},
{ USB_DEVICE(0xfff0, 0x0100), /* DATECS FP-2000 */
.driver_info = NO_UNION_NORMAL, /* reports zero length descriptor */
},
{ USB_DEVICE(0x09d8, 0x0320), /* Elatec GmbH TWN3 */
.driver_info = NO_UNION_NORMAL, /* has misplaced union descriptor */
},
{ USB_DEVICE(0x0c26, 0x0020), /* Icom ICF3400 Serie */
.driver_info = NO_UNION_NORMAL, /* reports zero length descriptor */
},
{ USB_DEVICE(0x0ca6, 0xa050), /* Castles VEGA3000 */
.driver_info = NO_UNION_NORMAL, /* reports zero length descriptor */
},
{ USB_DEVICE(0x2912, 0x0001), /* ATOL FPrint */
.driver_info = CLEAR_HALT_CONDITIONS,
},
/* Nokia S60 phones expose two ACM channels. The first is
* a modem and is picked up by the standard AT-command
* information below. The second is 'vendor-specific' but
* is treated as a serial device at the S60 end, so we want
* to expose it on Linux too. */
{ NOKIA_PCSUITE_ACM_INFO(0x042D), }, /* Nokia 3250 */
{ NOKIA_PCSUITE_ACM_INFO(0x04D8), }, /* Nokia 5500 Sport */
{ NOKIA_PCSUITE_ACM_INFO(0x04C9), }, /* Nokia E50 */
{ NOKIA_PCSUITE_ACM_INFO(0x0419), }, /* Nokia E60 */
{ NOKIA_PCSUITE_ACM_INFO(0x044D), }, /* Nokia E61 */
{ NOKIA_PCSUITE_ACM_INFO(0x0001), }, /* Nokia E61i */
{ NOKIA_PCSUITE_ACM_INFO(0x0475), }, /* Nokia E62 */
{ NOKIA_PCSUITE_ACM_INFO(0x0508), }, /* Nokia E65 */
{ NOKIA_PCSUITE_ACM_INFO(0x0418), }, /* Nokia E70 */
{ NOKIA_PCSUITE_ACM_INFO(0x0425), }, /* Nokia N71 */
{ NOKIA_PCSUITE_ACM_INFO(0x0486), }, /* Nokia N73 */
{ NOKIA_PCSUITE_ACM_INFO(0x04DF), }, /* Nokia N75 */
{ NOKIA_PCSUITE_ACM_INFO(0x000e), }, /* Nokia N77 */
{ NOKIA_PCSUITE_ACM_INFO(0x0445), }, /* Nokia N80 */
{ NOKIA_PCSUITE_ACM_INFO(0x042F), }, /* Nokia N91 & N91 8GB */
{ NOKIA_PCSUITE_ACM_INFO(0x048E), }, /* Nokia N92 */
{ NOKIA_PCSUITE_ACM_INFO(0x0420), }, /* Nokia N93 */
{ NOKIA_PCSUITE_ACM_INFO(0x04E6), }, /* Nokia N93i */
{ NOKIA_PCSUITE_ACM_INFO(0x04B2), }, /* Nokia 5700 XpressMusic */
{ NOKIA_PCSUITE_ACM_INFO(0x0134), }, /* Nokia 6110 Navigator (China) */
{ NOKIA_PCSUITE_ACM_INFO(0x046E), }, /* Nokia 6110 Navigator */
{ NOKIA_PCSUITE_ACM_INFO(0x002f), }, /* Nokia 6120 classic & */
{ NOKIA_PCSUITE_ACM_INFO(0x0088), }, /* Nokia 6121 classic */
{ NOKIA_PCSUITE_ACM_INFO(0x00fc), }, /* Nokia 6124 classic */
{ NOKIA_PCSUITE_ACM_INFO(0x0042), }, /* Nokia E51 */
{ NOKIA_PCSUITE_ACM_INFO(0x00b0), }, /* Nokia E66 */
{ NOKIA_PCSUITE_ACM_INFO(0x00ab), }, /* Nokia E71 */
{ NOKIA_PCSUITE_ACM_INFO(0x0481), }, /* Nokia N76 */
{ NOKIA_PCSUITE_ACM_INFO(0x0007), }, /* Nokia N81 & N81 8GB */
{ NOKIA_PCSUITE_ACM_INFO(0x0071), }, /* Nokia N82 */
{ NOKIA_PCSUITE_ACM_INFO(0x04F0), }, /* Nokia N95 & N95-3 NAM */
{ NOKIA_PCSUITE_ACM_INFO(0x0070), }, /* Nokia N95 8GB */
{ NOKIA_PCSUITE_ACM_INFO(0x0099), }, /* Nokia 6210 Navigator, RM-367 */
{ NOKIA_PCSUITE_ACM_INFO(0x0128), }, /* Nokia 6210 Navigator, RM-419 */
{ NOKIA_PCSUITE_ACM_INFO(0x008f), }, /* Nokia 6220 Classic */
{ NOKIA_PCSUITE_ACM_INFO(0x00a0), }, /* Nokia 6650 */
{ NOKIA_PCSUITE_ACM_INFO(0x007b), }, /* Nokia N78 */
{ NOKIA_PCSUITE_ACM_INFO(0x0094), }, /* Nokia N85 */
{ NOKIA_PCSUITE_ACM_INFO(0x003a), }, /* Nokia N96 & N96-3 */
{ NOKIA_PCSUITE_ACM_INFO(0x00e9), }, /* Nokia 5320 XpressMusic */
{ NOKIA_PCSUITE_ACM_INFO(0x0108), }, /* Nokia 5320 XpressMusic 2G */
{ NOKIA_PCSUITE_ACM_INFO(0x01f5), }, /* Nokia N97, RM-505 */
{ NOKIA_PCSUITE_ACM_INFO(0x02e3), }, /* Nokia 5230, RM-588 */
{ NOKIA_PCSUITE_ACM_INFO(0x0178), }, /* Nokia E63 */
{ NOKIA_PCSUITE_ACM_INFO(0x010e), }, /* Nokia E75 */
{ NOKIA_PCSUITE_ACM_INFO(0x02d9), }, /* Nokia 6760 Slide */
{ NOKIA_PCSUITE_ACM_INFO(0x01d0), }, /* Nokia E52 */
{ NOKIA_PCSUITE_ACM_INFO(0x0223), }, /* Nokia E72 */
{ NOKIA_PCSUITE_ACM_INFO(0x0275), }, /* Nokia X6 */
{ NOKIA_PCSUITE_ACM_INFO(0x026c), }, /* Nokia N97 Mini */
{ NOKIA_PCSUITE_ACM_INFO(0x0154), }, /* Nokia 5800 XpressMusic */
{ NOKIA_PCSUITE_ACM_INFO(0x04ce), }, /* Nokia E90 */
{ NOKIA_PCSUITE_ACM_INFO(0x01d4), }, /* Nokia E55 */
{ NOKIA_PCSUITE_ACM_INFO(0x0302), }, /* Nokia N8 */
{ NOKIA_PCSUITE_ACM_INFO(0x0335), }, /* Nokia E7 */
{ NOKIA_PCSUITE_ACM_INFO(0x03cd), }, /* Nokia C7 */
{ SAMSUNG_PCSUITE_ACM_INFO(0x6651), }, /* Samsung GTi8510 (INNOV8) */
/* Support for Owen devices */
{ USB_DEVICE(0x03eb, 0x0030), }, /* Owen SI30 */
/* NOTE: non-Nokia COMM/ACM/0xff is likely MSFT RNDIS... NOT a modem! */
#if IS_ENABLED(CONFIG_INPUT_IMS_PCU)
{ USB_DEVICE(0x04d8, 0x0082), /* Application mode */
.driver_info = IGNORE_DEVICE,
},
{ USB_DEVICE(0x04d8, 0x0083), /* Bootloader mode */
.driver_info = IGNORE_DEVICE,
},
#endif
#if IS_ENABLED(CONFIG_IR_TOY)
{ USB_DEVICE(0x04d8, 0xfd08),
.driver_info = IGNORE_DEVICE,
},
{ USB_DEVICE(0x04d8, 0xf58b),
.driver_info = IGNORE_DEVICE,
},
#endif
#if IS_ENABLED(CONFIG_USB_SERIAL_XR)
{ USB_DEVICE(0x04e2, 0x1400), .driver_info = IGNORE_DEVICE },
{ USB_DEVICE(0x04e2, 0x1401), .driver_info = IGNORE_DEVICE },
{ USB_DEVICE(0x04e2, 0x1402), .driver_info = IGNORE_DEVICE },
{ USB_DEVICE(0x04e2, 0x1403), .driver_info = IGNORE_DEVICE },
{ USB_DEVICE(0x04e2, 0x1410), .driver_info = IGNORE_DEVICE },
{ USB_DEVICE(0x04e2, 0x1411), .driver_info = IGNORE_DEVICE },
{ USB_DEVICE(0x04e2, 0x1412), .driver_info = IGNORE_DEVICE },
{ USB_DEVICE(0x04e2, 0x1414), .driver_info = IGNORE_DEVICE },
{ USB_DEVICE(0x04e2, 0x1420), .driver_info = IGNORE_DEVICE },
{ USB_DEVICE(0x04e2, 0x1422), .driver_info = IGNORE_DEVICE },
{ USB_DEVICE(0x04e2, 0x1424), .driver_info = IGNORE_DEVICE },
#endif
/*Samsung phone in firmware update mode */
{ USB_DEVICE(0x04e8, 0x685d),
.driver_info = IGNORE_DEVICE,
},
/* Exclude Infineon Flash Loader utility */
{ USB_DEVICE(0x058b, 0x0041),
.driver_info = IGNORE_DEVICE,
},
/* Exclude ETAS ES58x */
{ USB_DEVICE(0x108c, 0x0159), /* ES581.4 */
.driver_info = IGNORE_DEVICE,
},
{ USB_DEVICE(0x108c, 0x0168), /* ES582.1 */
.driver_info = IGNORE_DEVICE,
},
{ USB_DEVICE(0x108c, 0x0169), /* ES584.1 */
.driver_info = IGNORE_DEVICE,
},
{ USB_DEVICE(0x1bc7, 0x0021), /* Telit 3G ACM only composition */
.driver_info = SEND_ZERO_PACKET,
},
{ USB_DEVICE(0x1bc7, 0x0023), /* Telit 3G ACM + ECM composition */
.driver_info = SEND_ZERO_PACKET,
},
/* Exclude Goodix Fingerprint Reader */
{ USB_DEVICE(0x27c6, 0x5395),
.driver_info = IGNORE_DEVICE,
},
/* Exclude Heimann Sensor GmbH USB appset demo */
{ USB_DEVICE(0x32a7, 0x0000),
.driver_info = IGNORE_DEVICE,
},
/* control interfaces without any protocol set */
{ USB_INTERFACE_INFO(USB_CLASS_COMM, USB_CDC_SUBCLASS_ACM,
USB_CDC_PROTO_NONE) },
/* control interfaces with various AT-command sets */
{ USB_INTERFACE_INFO(USB_CLASS_COMM, USB_CDC_SUBCLASS_ACM,
USB_CDC_ACM_PROTO_AT_V25TER) },
{ USB_INTERFACE_INFO(USB_CLASS_COMM, USB_CDC_SUBCLASS_ACM,
USB_CDC_ACM_PROTO_AT_PCCA101) },
{ USB_INTERFACE_INFO(USB_CLASS_COMM, USB_CDC_SUBCLASS_ACM,
USB_CDC_ACM_PROTO_AT_PCCA101_WAKE) },
{ USB_INTERFACE_INFO(USB_CLASS_COMM, USB_CDC_SUBCLASS_ACM,
USB_CDC_ACM_PROTO_AT_GSM) },
{ USB_INTERFACE_INFO(USB_CLASS_COMM, USB_CDC_SUBCLASS_ACM,
USB_CDC_ACM_PROTO_AT_3G) },
{ USB_INTERFACE_INFO(USB_CLASS_COMM, USB_CDC_SUBCLASS_ACM,
USB_CDC_ACM_PROTO_AT_CDMA) },
{ USB_DEVICE(0x1519, 0x0452), /* Intel 7260 modem */
.driver_info = SEND_ZERO_PACKET,
},
{ }
};
MODULE_DEVICE_TABLE(usb, acm_ids);
/* USB driver glue: entry points for probe/disconnect, PM and reset. */
static struct usb_driver acm_driver = {
	.name =		"cdc_acm",
	.probe =	acm_probe,
	.disconnect =	acm_disconnect,
#ifdef CONFIG_PM
	.suspend =	acm_suspend,
	.resume =	acm_resume,
	.reset_resume =	acm_reset_resume,
#endif
	.pre_reset =	acm_pre_reset,
	.id_table =	acm_ids,
#ifdef CONFIG_PM
	.supports_autosuspend = 1,
#endif
	.disable_hub_initiated_lpm = 1,
};
/*
* TTY driver structures.
*/
/* tty layer entry points, installed via tty_set_operations() in acm_init() */
static const struct tty_operations acm_ops = {
	.install =		acm_tty_install,
	.open =			acm_tty_open,
	.close =		acm_tty_close,
	.cleanup =		acm_tty_cleanup,
	.hangup =		acm_tty_hangup,
	.write =		acm_tty_write,
	.write_room =		acm_tty_write_room,
	.flush_buffer =		acm_tty_flush_buffer,
	.ioctl =		acm_tty_ioctl,
	.throttle =		acm_tty_throttle,
	.unthrottle =		acm_tty_unthrottle,
	.chars_in_buffer =	acm_tty_chars_in_buffer,
	.break_ctl =		acm_tty_break_ctl,
	.set_termios =		acm_tty_set_termios,
	.tiocmget =		acm_tty_tiocmget,
	.tiocmset =		acm_tty_tiocmset,
	.get_serial =		get_serial_info,
	.set_serial =		set_serial_info,
	.get_icount =		acm_tty_get_icount,
};
/*
* Init / exit.
*/
/*
 * Module init: allocate and register the tty driver, then register the
 * USB driver.  A failure in either registration unwinds what was done
 * before it and returns the error.
 *
 * Fix: the driver-field assignments below were terminated with the
 * comma operator (`... = "acm",`) instead of semicolons.  That chains
 * them into one expression statement, which happens to evaluate
 * identically here but is fragile and misleading; use one statement
 * per assignment.
 */
static int __init acm_init(void)
{
	int retval;

	acm_tty_driver = tty_alloc_driver(ACM_TTY_MINORS, TTY_DRIVER_REAL_RAW |
			TTY_DRIVER_DYNAMIC_DEV);
	if (IS_ERR(acm_tty_driver))
		return PTR_ERR(acm_tty_driver);
	acm_tty_driver->driver_name = "acm";
	acm_tty_driver->name = "ttyACM";
	acm_tty_driver->major = ACM_TTY_MAJOR;
	acm_tty_driver->minor_start = 0;
	acm_tty_driver->type = TTY_DRIVER_TYPE_SERIAL;
	acm_tty_driver->subtype = SERIAL_TYPE_NORMAL;
	acm_tty_driver->init_termios = tty_std_termios;
	/* default line settings: 9600 8N1, receiver on, modem-control aware */
	acm_tty_driver->init_termios.c_cflag = B9600 | CS8 | CREAD |
								HUPCL | CLOCAL;
	tty_set_operations(acm_tty_driver, &acm_ops);

	retval = tty_register_driver(acm_tty_driver);
	if (retval) {
		tty_driver_kref_put(acm_tty_driver);
		return retval;
	}

	retval = usb_register(&acm_driver);
	if (retval) {
		tty_unregister_driver(acm_tty_driver);
		tty_driver_kref_put(acm_tty_driver);
		return retval;
	}

	printk(KERN_INFO KBUILD_MODNAME ": " DRIVER_DESC "\n");

	return 0;
}
/* Module exit: reverse of acm_init(), plus teardown of the minor IDR. */
static void __exit acm_exit(void)
{
	usb_deregister(&acm_driver);
	tty_unregister_driver(acm_tty_driver);
	tty_driver_kref_put(acm_tty_driver);
	idr_destroy(&acm_minors);
}

module_init(acm_init);
module_exit(acm_exit);

MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL");
MODULE_ALIAS_CHARDEV_MAJOR(ACM_TTY_MAJOR);
| linux-master | drivers/usb/class/cdc-acm.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* usblp.c
*
* Copyright (c) 1999 Michael Gee <[email protected]>
* Copyright (c) 1999 Pavel Machek <[email protected]>
* Copyright (c) 2000 Randy Dunlap <[email protected]>
* Copyright (c) 2000 Vojtech Pavlik <[email protected]>
 * Copyright (c) 2001 Pete Zaitcev <[email protected]>
 * Copyright (c) 2001 David Paschal <[email protected]>
* Copyright (c) 2006 Oliver Neukum <[email protected]>
*
* USB Printer Device Class driver for USB printers and printer cables
*
* Sponsored by SuSE
*
* ChangeLog:
* v0.1 - thorough cleaning, URBification, almost a rewrite
* v0.2 - some more cleanups
* v0.3 - cleaner again, waitqueue fixes
* v0.4 - fixes in unidirectional mode
* v0.5 - add DEVICE_ID string support
* v0.6 - never time out
* v0.7 - fixed bulk-IN read and poll (David Paschal)
* v0.8 - add devfs support
* v0.9 - fix unplug-while-open paths
* v0.10- remove sleep_on, fix error on oom ([email protected])
* v0.11 - add proto_bias option (Pete Zaitcev)
* v0.12 - add hpoj.sourceforge.net ioctls (David Paschal)
* v0.13 - alloc space for statusbuf (<status> not on stack);
* use usb_alloc_coherent() for read buf & write buf;
* none - Maintained in Linux kernel after v0.13
*/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/sched/signal.h>
#include <linux/signal.h>
#include <linux/poll.h>
#include <linux/slab.h>
#include <linux/lp.h>
#include <linux/mutex.h>
#undef DEBUG
#include <linux/usb.h>
#include <linux/usb/ch9.h>
#include <linux/ratelimit.h>
/*
* Version Information
*/
#define DRIVER_AUTHOR "Michael Gee, Pavel Machek, Vojtech Pavlik, Randy Dunlap, Pete Zaitcev, David Paschal"
#define DRIVER_DESC "USB Printer Device Class driver"
#define USBLP_BUF_SIZE 8192
#define USBLP_BUF_SIZE_IN 1024
#define USBLP_DEVICE_ID_SIZE 1024
/* ioctls: */
#define IOCNR_GET_DEVICE_ID		1
#define IOCNR_GET_PROTOCOLS		2
#define IOCNR_SET_PROTOCOL		3
#define IOCNR_HP_SET_CHANNEL		4
#define IOCNR_GET_BUS_ADDRESS		5
#define IOCNR_GET_VID_PID		6
#define IOCNR_SOFT_RESET		7
/* Get device_id string: */
#define LPIOC_GET_DEVICE_ID(len) _IOC(_IOC_READ, 'P', IOCNR_GET_DEVICE_ID, len)
/* The following ioctls were added for http://hpoj.sourceforge.net:
 * Get two-int array:
 * [0]=current protocol
 *     (1=USB_CLASS_PRINTER/1/1, 2=USB_CLASS_PRINTER/1/2,
 *         3=USB_CLASS_PRINTER/1/3),
 * [1]=supported protocol mask (mask&(1<<n)!=0 means
 *     USB_CLASS_PRINTER/1/n supported):
 */
#define LPIOC_GET_PROTOCOLS(len) _IOC(_IOC_READ, 'P', IOCNR_GET_PROTOCOLS, len)
/*
 * Set protocol
 *     (arg: 1=USB_CLASS_PRINTER/1/1, 2=USB_CLASS_PRINTER/1/2,
 *         3=USB_CLASS_PRINTER/1/3):
 */
#define LPIOC_SET_PROTOCOL _IOC(_IOC_WRITE, 'P', IOCNR_SET_PROTOCOL, 0)
/* Set channel number (HP Vendor-specific command): */
#define LPIOC_HP_SET_CHANNEL _IOC(_IOC_WRITE, 'P', IOCNR_HP_SET_CHANNEL, 0)
/* Get two-int array: [0]=bus number, [1]=device address: */
#define LPIOC_GET_BUS_ADDRESS(len) _IOC(_IOC_READ, 'P', IOCNR_GET_BUS_ADDRESS, len)
/* Get two-int array: [0]=vendor ID, [1]=product ID: */
#define LPIOC_GET_VID_PID(len) _IOC(_IOC_READ, 'P', IOCNR_GET_VID_PID, len)
/*
 * Perform class specific soft reset.
 * Fix: dropped the stray trailing ';' from this macro's definition —
 * with it, any expression use such as ioctl(fd, LPIOC_SOFT_RESET)
 * fails to compile.
 */
#define LPIOC_SOFT_RESET _IOC(_IOC_NONE, 'P', IOCNR_SOFT_RESET, 0)
/*
* A DEVICE_ID string may include the printer's serial number.
* It should end with a semi-colon (';').
* An example from an HP 970C DeskJet printer is (this is one long string,
* with the serial number changed):
MFG:HEWLETT-PACKARD;MDL:DESKJET 970C;CMD:MLC,PCL,PML;CLASS:PRINTER;DESCRIPTION:Hewlett-Packard DeskJet 970C;SERN:US970CSEPROF;VSTATUS:$HB0$NC0,ff,DN,IDLE,CUT,K1,C0,DP,NR,KP000,CP027;VP:0800,FL,B0;VJ: ;
*/
/*
* USB Printer Requests
*/
#define USBLP_REQ_GET_ID 0x00
#define USBLP_REQ_GET_STATUS 0x01
#define USBLP_REQ_RESET 0x02
#define USBLP_REQ_HP_CHANNEL_CHANGE_REQUEST 0x00 /* HP Vendor-specific */
#define USBLP_MINORS 16
#define USBLP_MINOR_BASE 0
#define USBLP_CTL_TIMEOUT 5000 /* 5 seconds */
#define USBLP_FIRST_PROTOCOL 1
#define USBLP_LAST_PROTOCOL 3
#define USBLP_MAX_PROTOCOLS (USBLP_LAST_PROTOCOL+1)
/*
* some arbitrary status buffer size;
* need a status buffer that is allocated via kmalloc(), not on stack
*/
#define STATUS_BUF_SIZE 8
/*
* Locks down the locking order:
* ->wmut locks wstatus.
* ->mut locks the whole usblp, except [rw]complete, and thus, by indirection,
* [rw]status. We only touch status when we know the side idle.
* ->lock locks what interrupt accesses.
*/
/*
 * Per-device state for one attached USB printer.
 * Locking follows the order documented above: wmut -> mut -> lock.
 */
struct usblp {
	struct usb_device	*dev;			/* USB device */
	struct mutex		wmut;			/* serializes writers; guards wstatus */
	struct mutex		mut;			/* guards everything but [rw]complete */
	spinlock_t		lock;			/* locks rcomplete, wcomplete */
	char			*readbuf;		/* read transfer_buffer */
	char			*statusbuf;		/* status transfer_buffer */
	struct usb_anchor	urbs;			/* in-flight URBs, for poison/kill */
	wait_queue_head_t	rwait, wwait;		/* readers/writers sleep here */
	int			readcount;		/* Counter for reads */
	int			ifnum;			/* Interface number */
	struct usb_interface	*intf;			/* The interface */
	/*
	 * Alternate-setting numbers and endpoints for each protocol
	 * (USB_CLASS_PRINTER/1/{index=1,2,3}) that the device supports:
	 */
	struct {
		int				alt_setting;
		struct usb_endpoint_descriptor	*epwrite;
		struct usb_endpoint_descriptor	*epread;
	}			protocol[USBLP_MAX_PROTOCOLS];
	int			current_protocol;	/* index into protocol[] */
	int			minor;			/* minor number of device */
	int			wcomplete, rcomplete;	/* completion flags, under ->lock */
	int			wstatus;	/* bytes written or error */
	int			rstatus;	/* bytes ready or error */
	unsigned int		quirks;			/* quirks flags */
	unsigned int		flags;			/* mode flags */
	unsigned char		used;			/* True if open */
	unsigned char		present;		/* True if not disconnected */
	unsigned char		bidir;			/* interface is bidirectional */
	unsigned char		no_paper;		/* Paper Out happened */
	unsigned char		*device_id_string;	/* IEEE 1284 DEVICE ID string (ptr) */
							/* first 2 bytes are (big-endian) length */
};
#ifdef DEBUG
/* Dump the driver's per-device state to the kernel log (DEBUG builds only). */
static void usblp_dump(struct usblp *usblp)
{
	struct device *dev = &usblp->intf->dev;
	int proto;

	dev_dbg(dev, "usblp=0x%p\n", usblp);
	dev_dbg(dev, "dev=0x%p\n", usblp->dev);
	dev_dbg(dev, "present=%d\n", usblp->present);
	dev_dbg(dev, "readbuf=0x%p\n", usblp->readbuf);
	dev_dbg(dev, "readcount=%d\n", usblp->readcount);
	dev_dbg(dev, "ifnum=%d\n", usblp->ifnum);

	/* Per-protocol alternate settings and endpoints (1..3). */
	for (proto = USBLP_FIRST_PROTOCOL; proto <= USBLP_LAST_PROTOCOL; proto++) {
		dev_dbg(dev, "protocol[%d].alt_setting=%d\n", proto,
			usblp->protocol[proto].alt_setting);
		dev_dbg(dev, "protocol[%d].epwrite=%p\n", proto,
			usblp->protocol[proto].epwrite);
		dev_dbg(dev, "protocol[%d].epread=%p\n", proto,
			usblp->protocol[proto].epread);
	}

	dev_dbg(dev, "current_protocol=%d\n", usblp->current_protocol);
	dev_dbg(dev, "minor=%d\n", usblp->minor);
	dev_dbg(dev, "wstatus=%d\n", usblp->wstatus);
	dev_dbg(dev, "rstatus=%d\n", usblp->rstatus);
	dev_dbg(dev, "quirks=%d\n", usblp->quirks);
	dev_dbg(dev, "used=%d\n", usblp->used);
	dev_dbg(dev, "bidir=%d\n", usblp->bidir);

	/* Skip the 2-byte big-endian length prefix when printing the ID. */
	if (usblp->device_id_string)
		dev_dbg(dev, "device_id_string=\"%s\"\n",
			usblp->device_id_string + 2);
	else
		dev_dbg(dev, "device_id_string=\"%s\"\n",
			(unsigned char *)"(null)");
}
#endif
/* Quirks: various printer quirks are handled by this table & its flags. */
struct quirk_printer_struct {
	__u16 vendorId;  /* USB idVendor, host byte order */
	__u16 productId; /* USB idProduct, host byte order */
	unsigned int quirks; /* OR of USBLP_QUIRK_* flags below */
};
#define USBLP_QUIRK_BIDIR 0x1 /* reports bidir but requires unidirectional mode (no INs/reads) */
#define USBLP_QUIRK_USB_INIT 0x2 /* needs vendor USB init string */
#define USBLP_QUIRK_BAD_CLASS 0x4 /* descriptor uses vendor-specific Class or SubClass */
/* Zero-terminated table of known-quirky printers; matched in usblp_quirks(). */
static const struct quirk_printer_struct quirk_printers[] = {
	{ 0x03f0, 0x0004, USBLP_QUIRK_BIDIR }, /* HP DeskJet 895C */
	{ 0x03f0, 0x0104, USBLP_QUIRK_BIDIR }, /* HP DeskJet 880C */
	{ 0x03f0, 0x0204, USBLP_QUIRK_BIDIR }, /* HP DeskJet 815C */
	{ 0x03f0, 0x0304, USBLP_QUIRK_BIDIR }, /* HP DeskJet 810C/812C */
	{ 0x03f0, 0x0404, USBLP_QUIRK_BIDIR }, /* HP DeskJet 830C */
	{ 0x03f0, 0x0504, USBLP_QUIRK_BIDIR }, /* HP DeskJet 885C */
	{ 0x03f0, 0x0604, USBLP_QUIRK_BIDIR }, /* HP DeskJet 840C */
	{ 0x03f0, 0x0804, USBLP_QUIRK_BIDIR }, /* HP DeskJet 816C */
	{ 0x03f0, 0x1104, USBLP_QUIRK_BIDIR }, /* HP Deskjet 959C */
	{ 0x0409, 0xefbe, USBLP_QUIRK_BIDIR }, /* NEC Picty900 (HP OEM) */
	{ 0x0409, 0xbef4, USBLP_QUIRK_BIDIR }, /* NEC Picty760 (HP OEM) */
	{ 0x0409, 0xf0be, USBLP_QUIRK_BIDIR }, /* NEC Picty920 (HP OEM) */
	{ 0x0409, 0xf1be, USBLP_QUIRK_BIDIR }, /* NEC Picty800 (HP OEM) */
	{ 0x0482, 0x0010, USBLP_QUIRK_BIDIR }, /* Kyocera Mita FS 820, by zut <[email protected]> */
	{ 0x04f9, 0x000d, USBLP_QUIRK_BIDIR }, /* Brother Industries, Ltd HL-1440 Laser Printer */
	{ 0x04b8, 0x0202, USBLP_QUIRK_BAD_CLASS }, /* Seiko Epson Receipt Printer M129C */
	{ 0, 0 }
};
/* Forward declarations: the fops and probe paths call these before their bodies. */
static int usblp_wwait(struct usblp *usblp, int nonblock);
static int usblp_wtest(struct usblp *usblp, int nonblock);
static int usblp_rwait_and_lock(struct usblp *usblp, int nonblock);
static int usblp_rtest(struct usblp *usblp, int nonblock);
static int usblp_submit_read(struct usblp *usblp);
static int usblp_select_alts(struct usblp *usblp);
static int usblp_set_protocol(struct usblp *usblp, int protocol);
static int usblp_cache_device_id_string(struct usblp *usblp);
/* forward reference to make our lives easier */
static struct usb_driver usblp_driver;
static DEFINE_MUTEX(usblp_mutex);	/* locks the existence of usblp's */
/*
* Functions for usblp control messages.
*/
/*
 * Synchronous control transfer helper for the printer-class requests.
 * Returns 0 on success or a negative errno from usb_control_msg().
 */
static int usblp_ctrl_msg(struct usblp *usblp, int request, int type, int dir, int recip, int value, void *buf, int len)
{
	unsigned int pipe;
	int rc;
	int index = usblp->ifnum;

	/* High byte has the interface index.
	   Low byte has the alternate setting.
	 */
	if ((request == USBLP_REQ_GET_ID) && (type == USB_TYPE_CLASS))
		index = (usblp->ifnum<<8)|usblp->protocol[usblp->current_protocol].alt_setting;

	pipe = dir ? usb_rcvctrlpipe(usblp->dev, 0)
		   : usb_sndctrlpipe(usblp->dev, 0);
	rc = usb_control_msg(usblp->dev, pipe, request, type | dir | recip,
			value, index, buf, len, USBLP_CTL_TIMEOUT);
	dev_dbg(&usblp->intf->dev,
		"usblp_control_msg: rq: 0x%02x dir: %d recip: %d value: %d idx: %d len: %#x result: %d\n",
		request, !!dir, recip, value, index, len, rc);
	return rc < 0 ? rc : 0;
}
/* Convenience wrappers around usblp_ctrl_msg() for the three class requests. */
#define usblp_read_status(usblp, status)\
	usblp_ctrl_msg(usblp, USBLP_REQ_GET_STATUS, USB_TYPE_CLASS, USB_DIR_IN, USB_RECIP_INTERFACE, 0, status, 1)
#define usblp_get_id(usblp, config, id, maxlen)\
	usblp_ctrl_msg(usblp, USBLP_REQ_GET_ID, USB_TYPE_CLASS, USB_DIR_IN, USB_RECIP_INTERFACE, config, id, maxlen)
#define usblp_reset(usblp)\
	usblp_ctrl_msg(usblp, USBLP_REQ_RESET, USB_TYPE_CLASS, USB_DIR_OUT, USB_RECIP_OTHER, 0, NULL, 0)
/*
 * Issue the HP vendor-specific channel-change request; on success the
 * channel the device actually selected is stored in *new_channel.
 */
static int usblp_hp_channel_change_request(struct usblp *usblp, int channel, u8 *new_channel)
{
	int err;
	u8 *result;

	/* Control data must be DMA-able, hence kmalloc'ed rather than on stack. */
	result = kzalloc(1, GFP_KERNEL);
	if (!result)
		return -ENOMEM;

	err = usblp_ctrl_msg(usblp, USBLP_REQ_HP_CHANNEL_CHANGE_REQUEST,
		USB_TYPE_VENDOR, USB_DIR_IN, USB_RECIP_INTERFACE,
		channel, result, 1);
	if (err == 0)
		*new_channel = result[0];

	kfree(result);
	return err;
}
/*
 * See the description for usblp_select_alts() below for the usage
 * explanation. Look into your /sys/kernel/debug/usb/devices and dmesg in
 * case of any trouble.
 */
/* Module parameter: -1 = no preference, 1..3 = prefer that protocol. */
static int proto_bias = -1;
/*
* URB callback.
*/
/*
 * Read-URB completion handler (atomic context). Records the result under
 * ->lock, marks the read side complete and wakes any sleeping reader.
 */
static void usblp_bulk_read(struct urb *urb)
{
	struct usblp *usblp = urb->context;
	int status = urb->status;
	unsigned long flags;

	if (usblp->present && usblp->used) {
		if (status)
			printk(KERN_WARNING "usblp%d: "
			    "nonzero read bulk status received: %d\n",
			    usblp->minor, status);
	}
	spin_lock_irqsave(&usblp->lock, flags);
	/* rstatus carries either the byte count or a negative error */
	if (status < 0)
		usblp->rstatus = status;
	else
		usblp->rstatus = urb->actual_length;
	usblp->rcomplete = 1;
	wake_up(&usblp->rwait);
	spin_unlock_irqrestore(&usblp->lock, flags);

	usb_free_urb(urb);
}
/*
 * Write-URB completion handler (atomic context). Records the result under
 * ->lock, clears the paper-out indication and wakes any sleeping writer.
 */
static void usblp_bulk_write(struct urb *urb)
{
	struct usblp *usblp = urb->context;
	int status = urb->status;
	unsigned long flags;

	if (usblp->present && usblp->used) {
		if (status)
			printk(KERN_WARNING "usblp%d: "
			    "nonzero write bulk status received: %d\n",
			    usblp->minor, status);
	}
	spin_lock_irqsave(&usblp->lock, flags);
	/* wstatus carries either the byte count or a negative error */
	if (status < 0)
		usblp->wstatus = status;
	else
		usblp->wstatus = urb->actual_length;
	usblp->no_paper = 0;
	usblp->wcomplete = 1;
	wake_up(&usblp->wwait);
	spin_unlock_irqrestore(&usblp->lock, flags);

	usb_free_urb(urb);
}
/*
 * Get and print printer errors.
 */
/* Indexed by the error code usblp_check_status() computes (0..3). */
static const char *usblp_messages[] = { "ok", "out of paper", "off-line", "on fire" };
static int usblp_check_status(struct usblp *usblp, int err)
{
unsigned char status, newerr = 0;
int error;
mutex_lock(&usblp->mut);
if ((error = usblp_read_status(usblp, usblp->statusbuf)) < 0) {
mutex_unlock(&usblp->mut);
printk_ratelimited(KERN_ERR
"usblp%d: error %d reading printer status\n",
usblp->minor, error);
return 0;
}
status = *usblp->statusbuf;
mutex_unlock(&usblp->mut);
if (~status & LP_PERRORP)
newerr = 3;
if (status & LP_POUTPA)
newerr = 1;
if (~status & LP_PSELECD)
newerr = 2;
if (newerr != err) {
printk(KERN_INFO "usblp%d: %s\n",
usblp->minor, usblp_messages[newerr]);
}
return newerr;
}
/* Kick off the initial read URB on open/resume for bidirectional devices. */
static int handle_bidir(struct usblp *usblp)
{
	/* Nothing to do unless the device is bidirectional and open. */
	if (!usblp->bidir || !usblp->used)
		return 0;

	if (usblp_submit_read(usblp) < 0)
		return -EIO;

	return 0;
}
/*
* File op functions.
*/
/*
 * open(2): look up the usblp by minor, enforce single-open, take an
 * autopm reference and (for bidirectional devices) start the first read.
 * All under usblp_mutex so the device cannot vanish mid-open.
 */
static int usblp_open(struct inode *inode, struct file *file)
{
	int minor = iminor(inode);
	struct usblp *usblp;
	struct usb_interface *intf;
	int retval;

	if (minor < 0)
		return -ENODEV;

	mutex_lock(&usblp_mutex);

	retval = -ENODEV;
	intf = usb_find_interface(&usblp_driver, minor);
	if (!intf)
		goto out;
	usblp = usb_get_intfdata(intf);
	if (!usblp || !usblp->dev || !usblp->present)
		goto out;

	/* Only one opener at a time. */
	retval = -EBUSY;
	if (usblp->used)
		goto out;

	/*
	 * We do not implement LP_ABORTOPEN/LPABORTOPEN for two reasons:
	 *  - We do not want persistent state which close(2) does not clear
	 *  - It is not used anyway, according to CUPS people
	 */

	retval = usb_autopm_get_interface(intf);
	if (retval < 0)
		goto out;
	usblp->used = 1;
	file->private_data = usblp;

	usblp->wcomplete = 1; /* we begin writeable */
	usblp->wstatus = 0;
	usblp->rcomplete = 0;

	/* Prime the read side; undo everything if that fails. */
	if (handle_bidir(usblp) < 0) {
		usb_autopm_put_interface(intf);
		usblp->used = 0;
		file->private_data = NULL;
		retval = -EIO;
	}
out:
	mutex_unlock(&usblp_mutex);
	return retval;
}
/*
 * Free everything belonging to a usblp. Called once both conditions hold:
 * the device is disconnected and the device node is closed.
 */
static void usblp_cleanup(struct usblp *usblp)
{
	printk(KERN_INFO "usblp%d: removed\n", usblp->minor);
	kfree(usblp->readbuf);
	kfree(usblp->device_id_string);
	kfree(usblp->statusbuf);
	usb_put_intf(usblp->intf);
	kfree(usblp);
}
/* Cancel all in-flight read/write URBs tracked by the anchor. */
static void usblp_unlink_urbs(struct usblp *usblp)
{
	usb_kill_anchored_urbs(&usblp->urbs);
}
/*
 * close(2): drop the abort flag and the single-open claim; if the device
 * was disconnected while open, finish the deferred cleanup here.
 */
static int usblp_release(struct inode *inode, struct file *file)
{
	struct usblp *usblp = file->private_data;

	usblp->flags &= ~LP_ABORT;

	mutex_lock(&usblp_mutex);
	usblp->used = 0;
	if (usblp->present)
		usblp_unlink_urbs(usblp);

	usb_autopm_put_interface(usblp->intf);

	if (!usblp->present)		/* finish cleanup from disconnect */
		usblp_cleanup(usblp);	/* any URBs must be dead */

	mutex_unlock(&usblp_mutex);
	return 0;
}
/* No kernel lock - fine */
static __poll_t usblp_poll(struct file *file, struct poll_table_struct *wait)
{
struct usblp *usblp = file->private_data;
__poll_t ret = 0;
unsigned long flags;
/* Should we check file->f_mode & FMODE_WRITE before poll_wait()? */
poll_wait(file, &usblp->rwait, wait);
poll_wait(file, &usblp->wwait, wait);
mutex_lock(&usblp->mut);
if (!usblp->present)
ret |= EPOLLHUP;
mutex_unlock(&usblp->mut);
spin_lock_irqsave(&usblp->lock, flags);
if (usblp->bidir && usblp->rcomplete)
ret |= EPOLLIN | EPOLLRDNORM;
if (usblp->no_paper || usblp->wcomplete)
ret |= EPOLLOUT | EPOLLWRNORM;
spin_unlock_irqrestore(&usblp->lock, flags);
return ret;
}
/*
 * ioctl(2): handles both the new 'P'-type ioctls (IEEE 1284 device ID,
 * protocol selection, HP channel, bus address, VID/PID, soft reset) and
 * the legacy lp ioctls (LPGETSTATUS, LPABORT). Serialized by ->mut.
 */
static long usblp_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	struct usblp *usblp = file->private_data;
	int length, err, i;
	unsigned char newChannel;
	int status;
	int twoints[2];
	int retval = 0;

	mutex_lock(&usblp->mut);
	if (!usblp->present) {
		retval = -ENODEV;
		goto done;
	}

	dev_dbg(&usblp->intf->dev,
		"usblp_ioctl: cmd=0x%x (%c nr=%d len=%d dir=%d)\n", cmd,
		_IOC_TYPE(cmd), _IOC_NR(cmd), _IOC_SIZE(cmd), _IOC_DIR(cmd));

	if (_IOC_TYPE(cmd) == 'P')	/* new-style ioctl number */

		switch (_IOC_NR(cmd)) {

		case IOCNR_GET_DEVICE_ID: /* get the DEVICE_ID string */
			if (_IOC_DIR(cmd) != _IOC_READ) {
				retval = -EINVAL;
				goto done;
			}

			/* Re-query: a dynamic ID string may have changed. */
			length = usblp_cache_device_id_string(usblp);
			if (length < 0) {
				retval = length;
				goto done;
			}
			if (length > _IOC_SIZE(cmd))
				length = _IOC_SIZE(cmd); /* truncate */

			if (copy_to_user((void __user *) arg,
					usblp->device_id_string,
					(unsigned long) length)) {
				retval = -EFAULT;
				goto done;
			}

			break;

		case IOCNR_GET_PROTOCOLS:
			if (_IOC_DIR(cmd) != _IOC_READ ||
			    _IOC_SIZE(cmd) < sizeof(twoints)) {
				retval = -EINVAL;
				goto done;
			}

			/* [0] = current protocol, [1] = supported bitmask */
			twoints[0] = usblp->current_protocol;
			twoints[1] = 0;
			for (i = USBLP_FIRST_PROTOCOL;
			     i <= USBLP_LAST_PROTOCOL; i++) {
				if (usblp->protocol[i].alt_setting >= 0)
					twoints[1] |= (1<<i);
			}

			if (copy_to_user((void __user *)arg,
					(unsigned char *)twoints,
					sizeof(twoints))) {
				retval = -EFAULT;
				goto done;
			}

			break;

		case IOCNR_SET_PROTOCOL:
			if (_IOC_DIR(cmd) != _IOC_WRITE) {
				retval = -EINVAL;
				goto done;
			}

#ifdef DEBUG
			if (arg == -10) {
				usblp_dump(usblp);
				break;
			}
#endif

			/* On failure, fall back to the previous protocol. */
			usblp_unlink_urbs(usblp);
			retval = usblp_set_protocol(usblp, arg);
			if (retval < 0) {
				usblp_set_protocol(usblp,
					usblp->current_protocol);
			}
			break;

		case IOCNR_HP_SET_CHANNEL:
			/* HP-only (idVendor 0x03F0), and not on quirky bidir units. */
			if (_IOC_DIR(cmd) != _IOC_WRITE ||
			    le16_to_cpu(usblp->dev->descriptor.idVendor) != 0x03F0 ||
			    usblp->quirks & USBLP_QUIRK_BIDIR) {
				retval = -EINVAL;
				goto done;
			}

			err = usblp_hp_channel_change_request(usblp,
				arg, &newChannel);
			if (err < 0) {
				dev_err(&usblp->dev->dev,
					"usblp%d: error = %d setting "
					"HP channel\n",
					usblp->minor, err);
				retval = -EIO;
				goto done;
			}

			dev_dbg(&usblp->intf->dev,
				"usblp%d requested/got HP channel %ld/%d\n",
				usblp->minor, arg, newChannel);
			break;

		case IOCNR_GET_BUS_ADDRESS:
			if (_IOC_DIR(cmd) != _IOC_READ ||
			    _IOC_SIZE(cmd) < sizeof(twoints)) {
				retval = -EINVAL;
				goto done;
			}

			twoints[0] = usblp->dev->bus->busnum;
			twoints[1] = usblp->dev->devnum;
			if (copy_to_user((void __user *)arg,
					(unsigned char *)twoints,
					sizeof(twoints))) {
				retval = -EFAULT;
				goto done;
			}

			dev_dbg(&usblp->intf->dev,
				"usblp%d is bus=%d, device=%d\n",
				usblp->minor, twoints[0], twoints[1]);
			break;

		case IOCNR_GET_VID_PID:
			if (_IOC_DIR(cmd) != _IOC_READ ||
			    _IOC_SIZE(cmd) < sizeof(twoints)) {
				retval = -EINVAL;
				goto done;
			}

			twoints[0] = le16_to_cpu(usblp->dev->descriptor.idVendor);
			twoints[1] = le16_to_cpu(usblp->dev->descriptor.idProduct);
			if (copy_to_user((void __user *)arg,
					(unsigned char *)twoints,
					sizeof(twoints))) {
				retval = -EFAULT;
				goto done;
			}

			dev_dbg(&usblp->intf->dev,
				"usblp%d is VID=0x%4.4X, PID=0x%4.4X\n",
				usblp->minor, twoints[0], twoints[1]);
			break;

		case IOCNR_SOFT_RESET:
			if (_IOC_DIR(cmd) != _IOC_NONE) {
				retval = -EINVAL;
				goto done;
			}
			retval = usblp_reset(usblp);
			break;
		default:
			retval = -ENOTTY;
		}
	else	/* old-style ioctl value */
		switch (cmd) {

		case LPGETSTATUS:
			retval = usblp_read_status(usblp, usblp->statusbuf);
			if (retval) {
				printk_ratelimited(KERN_ERR "usblp%d:"
					    "failed reading printer status (%d)\n",
					    usblp->minor, retval);
				retval = -EIO;
				goto done;
			}
			status = *usblp->statusbuf;
			if (copy_to_user((void __user *)arg, &status, sizeof(int)))
				retval = -EFAULT;
			break;

		case LPABORT:
			if (arg)
				usblp->flags |= LP_ABORT;
			else
				usblp->flags &= ~LP_ABORT;
			break;

		default:
			retval = -ENOTTY;
		}

done:
	mutex_unlock(&usblp->mut);
	return retval;
}
/*
 * Allocate a bulk-OUT URB plus its transfer buffer of the given length.
 * Returns NULL on allocation failure. URB_FREE_BUFFER makes the USB core
 * free the buffer together with the URB, so completion paths need no kfree.
 */
static struct urb *usblp_new_writeurb(struct usblp *usblp, int transfer_length)
{
	char *buf;
	struct urb *urb;

	buf = kmalloc(transfer_length, GFP_KERNEL);
	if (!buf)
		return NULL;

	urb = usb_alloc_urb(0, GFP_KERNEL);
	if (!urb) {
		kfree(buf);
		return NULL;
	}

	usb_fill_bulk_urb(urb, usblp->dev,
		usb_sndbulkpipe(usblp->dev,
		  usblp->protocol[usblp->current_protocol].epwrite->bEndpointAddress),
		buf, transfer_length, usblp_bulk_write, usblp);
	urb->transfer_flags |= URB_FREE_BUFFER;

	return urb;
}
/*
 * write(2): chop the user buffer into USBLP_BUF_SIZE URBs, submitting and
 * waiting for each in turn under ->wmut. Error unwinding uses the ordered
 * raise_* labels; 'collect_error' leaves an in-flight URB to be reaped on
 * close. Returns bytes written so far, or the error if nothing was written.
 */
static ssize_t usblp_write(struct file *file, const char __user *buffer, size_t count, loff_t *ppos)
{
	struct usblp *usblp = file->private_data;
	struct urb *writeurb;
	int rv;
	int transfer_length;
	ssize_t writecount = 0;

	if (mutex_lock_interruptible(&usblp->wmut)) {
		rv = -EINTR;
		goto raise_biglock;
	}
	if ((rv = usblp_wwait(usblp, !!(file->f_flags & O_NONBLOCK))) < 0)
		goto raise_wait;

	while (writecount < count) {
		/*
		 * Step 1: Submit next block.
		 */
		if ((transfer_length = count - writecount) > USBLP_BUF_SIZE)
			transfer_length = USBLP_BUF_SIZE;

		rv = -ENOMEM;
		writeurb = usblp_new_writeurb(usblp, transfer_length);
		if (writeurb == NULL)
			goto raise_urb;
		usb_anchor_urb(writeurb, &usblp->urbs);

		if (copy_from_user(writeurb->transfer_buffer,
				   buffer + writecount, transfer_length)) {
			rv = -EFAULT;
			goto raise_badaddr;
		}

		spin_lock_irq(&usblp->lock);
		usblp->wcomplete = 0;
		spin_unlock_irq(&usblp->lock);
		if ((rv = usb_submit_urb(writeurb, GFP_KERNEL)) < 0) {
			/* Submission failed: restore the idle-writer state. */
			usblp->wstatus = 0;
			spin_lock_irq(&usblp->lock);
			usblp->no_paper = 0;
			usblp->wcomplete = 1;
			wake_up(&usblp->wwait);
			spin_unlock_irq(&usblp->lock);
			if (rv != -ENOMEM)
				rv = -EIO;
			goto raise_submit;
		}

		/*
		 * Step 2: Wait for transfer to end, collect results.
		 */
		rv = usblp_wwait(usblp, !!(file->f_flags&O_NONBLOCK));
		if (rv < 0) {
			if (rv == -EAGAIN) {
				/* Presume that it's going to complete well. */
				writecount += transfer_length;
			}
			if (rv == -ENOSPC) {
				spin_lock_irq(&usblp->lock);
				usblp->no_paper = 1;	/* Mark for poll(2) */
				spin_unlock_irq(&usblp->lock);
				writecount += transfer_length;
			}
			/* Leave URB dangling, to be cleaned on close. */
			goto collect_error;
		}

		if (usblp->wstatus < 0) {
			rv = -EIO;
			goto collect_error;
		}
		/*
		 * This is critical: it must be our URB, not other writer's.
		 * The wmut exists mainly to cover us here.
		 */
		writecount += usblp->wstatus;
	}

	mutex_unlock(&usblp->wmut);
	return writecount;

raise_submit:
raise_badaddr:
	usb_unanchor_urb(writeurb);
	usb_free_urb(writeurb);
raise_urb:
raise_wait:
collect_error:		/* Out of raise sequence */
	mutex_unlock(&usblp->wmut);
raise_biglock:
	return writecount ? writecount : rv;
}
/*
* Notice that we fail to restart in a few cases: on EFAULT, on restart
* error, etc. This is the historical behaviour. In all such cases we return
* EIO, and applications loop in order to get the new read going.
*/
/*
 * read(2): returns buffered data from the last completed read URB; once
 * the buffer is drained, resubmits a read. Enters with nothing held;
 * usblp_rwait_and_lock() returns with ->mut held on success.
 */
static ssize_t usblp_read(struct file *file, char __user *buffer, size_t len, loff_t *ppos)
{
	struct usblp *usblp = file->private_data;
	ssize_t count;
	ssize_t avail;
	int rv;

	if (!usblp->bidir)
		return -EINVAL;

	rv = usblp_rwait_and_lock(usblp, !!(file->f_flags & O_NONBLOCK));
	if (rv < 0)
		return rv;

	if (!usblp->present) {
		count = -ENODEV;
		goto done;
	}

	if ((avail = usblp->rstatus) < 0) {
		printk(KERN_ERR "usblp%d: error %d reading from printer\n",
		    usblp->minor, (int)avail);
		/* restart the read so a subsequent read(2) may succeed */
		usblp_submit_read(usblp);
		count = -EIO;
		goto done;
	}

	/* hand out at most what remains of the buffered URB data */
	count = len < avail - usblp->readcount ? len : avail - usblp->readcount;
	if (count != 0 &&
	    copy_to_user(buffer, usblp->readbuf + usblp->readcount, count)) {
		count = -EFAULT;
		goto done;
	}

	if ((usblp->readcount += count) == avail) {
		if (usblp_submit_read(usblp) < 0) {
			/* We don't want to leak USB return codes into errno. */
			if (count == 0)
				count = -EIO;
			goto done;
		}
	}

done:
	mutex_unlock(&usblp->mut);
	return count;
}
/*
* Wait for the write path to come idle.
* This is called under the ->wmut, so the idle path stays idle.
*
* Our write path has a peculiar property: it does not buffer like a tty,
* but waits for the write to succeed. This allows our ->release to bug out
* without waiting for writes to drain. But it obviously does not work
* when O_NONBLOCK is set. So, applications setting O_NONBLOCK must use
* select(2) or poll(2) to wait for the buffer to drain before closing.
* Alternatively, set blocking mode with fcntl and issue a zero-size write.
*/
/*
 * Sleep until the write side is idle (wcomplete), the device disappears,
 * or a signal arrives. Returns 0 when idle, negative errno otherwise.
 * The 1.5 s timeout either polls the printer status (LP_ABORT clear,
 * which prods stalled printers) or aborts on paper-out (LP_ABORT set).
 */
static int usblp_wwait(struct usblp *usblp, int nonblock)
{
	DECLARE_WAITQUEUE(waita, current);
	int rc;
	int err = 0;

	add_wait_queue(&usblp->wwait, &waita);
	for (;;) {
		if (mutex_lock_interruptible(&usblp->mut)) {
			rc = -EINTR;
			break;
		}
		/* must set state before testing, to not miss a wakeup */
		set_current_state(TASK_INTERRUPTIBLE);
		rc = usblp_wtest(usblp, nonblock);
		mutex_unlock(&usblp->mut);
		if (rc <= 0)
			break;

		if (schedule_timeout(msecs_to_jiffies(1500)) == 0) {
			if (usblp->flags & LP_ABORT) {
				err = usblp_check_status(usblp, err);
				if (err == 1) {	/* Paper out */
					rc = -ENOSPC;
					break;
				}
			} else {
				/* Prod the printer, Gentoo#251237. */
				mutex_lock(&usblp->mut);
				usblp_read_status(usblp, usblp->statusbuf);
				mutex_unlock(&usblp->mut);
			}
		}
	}
	set_current_state(TASK_RUNNING);
	remove_wait_queue(&usblp->wwait, &waita);
	return rc;
}
/*
 * Non-sleeping probe of the write side: 0 = idle, 1 = keep waiting,
 * negative errno on disconnect, pending signal, or O_NONBLOCK.
 */
static int usblp_wtest(struct usblp *usblp, int nonblock)
{
	unsigned long flags;
	int idle;

	if (!usblp->present)
		return -ENODEV;
	if (signal_pending(current))
		return -EINTR;

	spin_lock_irqsave(&usblp->lock, flags);
	idle = usblp->wcomplete;
	spin_unlock_irqrestore(&usblp->lock, flags);

	if (idle)
		return 0;
	return nonblock ? -EAGAIN : 1;
}
/*
* Wait for read bytes to become available. This probably should have been
* called usblp_r_lock_and_wait(), because we lock first. But it's a traditional
* name for functions which lock and return.
*
* We do not use wait_event_interruptible because it makes locking iffy.
*/
/*
 * Sleep until read data is available. On success (return 0) ->mut is
 * HELD and the caller must unlock it; on any error it is released here.
 */
static int usblp_rwait_and_lock(struct usblp *usblp, int nonblock)
{
	DECLARE_WAITQUEUE(waita, current);
	int rc;

	add_wait_queue(&usblp->rwait, &waita);
	for (;;) {
		if (mutex_lock_interruptible(&usblp->mut)) {
			rc = -EINTR;
			break;
		}
		/* must set state before testing, to not miss a wakeup */
		set_current_state(TASK_INTERRUPTIBLE);
		if ((rc = usblp_rtest(usblp, nonblock)) < 0) {
			mutex_unlock(&usblp->mut);
			break;
		}
		if (rc == 0)	/* Keep it locked */
			break;
		mutex_unlock(&usblp->mut);
		schedule();
	}
	set_current_state(TASK_RUNNING);
	remove_wait_queue(&usblp->rwait, &waita);
	return rc;
}
/*
 * Non-sleeping probe of the read side: 0 = data ready, 1 = keep waiting,
 * negative errno on disconnect, pending signal, or O_NONBLOCK.
 */
static int usblp_rtest(struct usblp *usblp, int nonblock)
{
	unsigned long flags;
	int ready;

	if (!usblp->present)
		return -ENODEV;
	if (signal_pending(current))
		return -EINTR;

	spin_lock_irqsave(&usblp->lock, flags);
	ready = usblp->rcomplete;
	spin_unlock_irqrestore(&usblp->lock, flags);

	if (ready)
		return 0;
	return nonblock ? -EAGAIN : 1;
}
/*
* Please check ->bidir and other such things outside for now.
*/
/*
 * Allocate and submit a bulk-IN URB into ->readbuf. On submit failure the
 * error is latched into rstatus/rcomplete so readers see it. Callers hold
 * ->mut; check ->bidir and other such things outside for now.
 */
static int usblp_submit_read(struct usblp *usblp)
{
	struct urb *urb;
	unsigned long flags;
	int rc;

	rc = -ENOMEM;
	urb = usb_alloc_urb(0, GFP_KERNEL);
	if (urb == NULL)
		goto raise_urb;

	usb_fill_bulk_urb(urb, usblp->dev,
		usb_rcvbulkpipe(usblp->dev,
		  usblp->protocol[usblp->current_protocol].epread->bEndpointAddress),
		usblp->readbuf, USBLP_BUF_SIZE_IN,
		usblp_bulk_read, usblp);
	usb_anchor_urb(urb, &usblp->urbs);

	spin_lock_irqsave(&usblp->lock, flags);
	usblp->readcount = 0; /* XXX Why here? */
	usblp->rcomplete = 0;
	spin_unlock_irqrestore(&usblp->lock, flags);
	if ((rc = usb_submit_urb(urb, GFP_KERNEL)) < 0) {
		dev_dbg(&usblp->intf->dev, "error submitting urb (%d)\n", rc);
		/* report the failure through the normal completion state */
		spin_lock_irqsave(&usblp->lock, flags);
		usblp->rstatus = rc;
		usblp->rcomplete = 1;
		spin_unlock_irqrestore(&usblp->lock, flags);
		goto raise_submit;
	}

	return 0;

raise_submit:
	usb_unanchor_urb(urb);
	usb_free_urb(urb);
raise_urb:
	return rc;
}
/*
* Checks for printers that have quirks, such as requiring unidirectional
* communication but reporting bidirectional; currently some HP printers
* have this flaw (HP 810, 880, 895, etc.), or needing an init string
* sent at each open (like some Epsons).
* Returns 1 if found, 0 if not found.
*
* HP recommended that we use the bidirectional interface but
* don't attempt any bulk IN transfers from the IN endpoint.
* Here's some more detail on the problem:
* The problem is not that it isn't bidirectional though. The problem
* is that if you request a device ID, or status information, while
* the buffers are full, the return data will end up in the print data
* buffer. For example if you make sure you never request the device ID
* while you are sending print data, and you don't try to query the
* printer status every couple of milliseconds, you will probably be OK.
*/
/* Look up quirk flags for a VID/PID pair; 0 if the printer has none. */
static unsigned int usblp_quirks(__u16 vendor, __u16 product)
{
	const struct quirk_printer_struct *q;

	/* Table is terminated by a zero vendorId entry. */
	for (q = quirk_printers; q->vendorId; q++) {
		if (q->vendorId == vendor && q->productId == product)
			return q->quirks;
	}
	return 0;
}
/* Character-device entry points for /dev/usb/lp%d. */
static const struct file_operations usblp_fops = {
	.owner =	THIS_MODULE,
	.read =		usblp_read,
	.write =	usblp_write,
	.poll =		usblp_poll,
	.unlocked_ioctl =	usblp_ioctl,
	.compat_ioctl =		usblp_ioctl,
	.open =		usblp_open,
	.release =	usblp_release,
	.llseek =	noop_llseek,
};
/* Place the device node under the usb/ subdirectory of /dev. */
static char *usblp_devnode(const struct device *dev, umode_t *mode)
{
	return kasprintf(GFP_KERNEL, "usb/%s", dev_name(dev));
}
/* USB class-driver registration: minor allocation and /dev node naming. */
static struct usb_class_driver usblp_class = {
	.name =		"lp%d",
	.devnode =	usblp_devnode,
	.fops =		&usblp_fops,
	.minor_base =	USBLP_MINOR_BASE,
};
/* sysfs 'ieee1284_id' attribute: the cached DEVICE ID without its length prefix. */
static ssize_t ieee1284_id_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct usblp *usblp = usb_get_intfdata(to_usb_interface(dev));

	/* A zero big-endian length prefix means no ID string was cached. */
	if (usblp->device_id_string[0] == 0 &&
	    usblp->device_id_string[1] == 0)
		return 0;

	return sprintf(buf, "%s", usblp->device_id_string+2);
}
static DEVICE_ATTR_RO(ieee1284_id);

/* Attribute group exported via usblp_driver.dev_groups. */
static struct attribute *usblp_attrs[] = {
	&dev_attr_ieee1284_id.attr,
	NULL,
};
ATTRIBUTE_GROUPS(usblp);
/*
 * Bind to a printer interface: allocate the usblp and its buffers, pick
 * and set the protocol/alternate setting, cache the device ID, then
 * register the character device. Errors unwind through abort labels.
 */
static int usblp_probe(struct usb_interface *intf,
		       const struct usb_device_id *id)
{
	struct usb_device *dev = interface_to_usbdev(intf);
	struct usblp *usblp;
	int protocol;
	int retval;

	/* Malloc and start initializing usblp structure so we can use it
	 * directly. */
	usblp = kzalloc(sizeof(struct usblp), GFP_KERNEL);
	if (!usblp) {
		retval = -ENOMEM;
		goto abort_ret;
	}
	usblp->dev = dev;
	mutex_init(&usblp->wmut);
	mutex_init(&usblp->mut);
	spin_lock_init(&usblp->lock);
	init_waitqueue_head(&usblp->rwait);
	init_waitqueue_head(&usblp->wwait);
	init_usb_anchor(&usblp->urbs);
	usblp->ifnum = intf->cur_altsetting->desc.bInterfaceNumber;
	usblp->intf = usb_get_intf(intf);

	/* Malloc device ID string buffer to the largest expected length,
	 * since we can re-query it on an ioctl and a dynamic string
	 * could change in length. */
	if (!(usblp->device_id_string = kmalloc(USBLP_DEVICE_ID_SIZE, GFP_KERNEL))) {
		retval = -ENOMEM;
		goto abort;
	}

	/*
	 * Allocate read buffer. We somewhat wastefully
	 * malloc both regardless of bidirectionality, because the
	 * alternate setting can be changed later via an ioctl.
	 */
	if (!(usblp->readbuf = kmalloc(USBLP_BUF_SIZE_IN, GFP_KERNEL))) {
		retval = -ENOMEM;
		goto abort;
	}

	/* Allocate buffer for printer status */
	usblp->statusbuf = kmalloc(STATUS_BUF_SIZE, GFP_KERNEL);
	if (!usblp->statusbuf) {
		retval = -ENOMEM;
		goto abort;
	}

	/* Lookup quirks for this printer. */
	usblp->quirks = usblp_quirks(
		le16_to_cpu(dev->descriptor.idVendor),
		le16_to_cpu(dev->descriptor.idProduct));

	/* Analyze and pick initial alternate settings and endpoints. */
	protocol = usblp_select_alts(usblp);
	if (protocol < 0) {
		dev_dbg(&intf->dev,
			"incompatible printer-class device 0x%4.4X/0x%4.4X\n",
			le16_to_cpu(dev->descriptor.idVendor),
			le16_to_cpu(dev->descriptor.idProduct));
		retval = -ENODEV;
		goto abort;
	}

	/* Setup the selected alternate setting and endpoints. */
	if (usblp_set_protocol(usblp, protocol) < 0) {
		retval = -ENODEV;	/* ->probe isn't ->ioctl */
		goto abort;
	}

	/* Retrieve and store the device ID string. */
	usblp_cache_device_id_string(usblp);

#ifdef DEBUG
	usblp_check_status(usblp, 0);
#endif

	usb_set_intfdata(intf, usblp);

	usblp->present = 1;

	retval = usb_register_dev(intf, &usblp_class);
	if (retval) {
		dev_err(&intf->dev,
			"usblp: Not able to get a minor (base %u, slice default): %d\n",
			USBLP_MINOR_BASE, retval);
		goto abort_intfdata;
	}
	usblp->minor = intf->minor;
	dev_info(&intf->dev,
		"usblp%d: USB %sdirectional printer dev %d if %d alt %d proto %d vid 0x%4.4X pid 0x%4.4X\n",
		usblp->minor, usblp->bidir ? "Bi" : "Uni", dev->devnum,
		usblp->ifnum,
		usblp->protocol[usblp->current_protocol].alt_setting,
		usblp->current_protocol,
		le16_to_cpu(usblp->dev->descriptor.idVendor),
		le16_to_cpu(usblp->dev->descriptor.idProduct));

	return 0;

abort_intfdata:
	usb_set_intfdata(intf, NULL);
abort:
	kfree(usblp->readbuf);
	kfree(usblp->statusbuf);
	kfree(usblp->device_id_string);
	usb_put_intf(usblp->intf);
	kfree(usblp);
abort_ret:
	return retval;
}
/*
* We are a "new" style driver with usb_device_id table,
* but our requirements are too intricate for simple match to handle.
*
* The "proto_bias" option may be used to specify the preferred protocol
* for all USB printers (1=USB_CLASS_PRINTER/1/1, 2=USB_CLASS_PRINTER/1/2,
* 3=USB_CLASS_PRINTER/1/3). If the device supports the preferred protocol,
* then we bind to it.
*
* The best interface for us is USB_CLASS_PRINTER/1/2, because it
* is compatible with a stream of characters. If we find it, we bind to it.
*
* Note that the people from hpoj.sourceforge.net need to be able to
* bind to USB_CLASS_PRINTER/1/3 (MLC/1284.4), so we provide them ioctls
* for this purpose.
*
* Failing USB_CLASS_PRINTER/1/2, we look for USB_CLASS_PRINTER/1/3,
* even though it's probably not stream-compatible, because this matches
* the behaviour of the old code.
*
* If nothing else, we bind to USB_CLASS_PRINTER/1/1
* - the unidirectional interface.
*/
/*
 * Walk all alternate settings, record per-protocol alt-setting numbers and
 * endpoints in usblp->protocol[], then choose which protocol to use:
 * proto_bias if supported, else 2, then 1, then 3 (see comment above).
 * Returns the chosen protocol number or -1 if none is usable.
 */
static int usblp_select_alts(struct usblp *usblp)
{
	struct usb_interface *if_alt;
	struct usb_host_interface *ifd;
	struct usb_endpoint_descriptor *epwrite, *epread;
	int p, i;
	int res;

	if_alt = usblp->intf;

	for (p = 0; p < USBLP_MAX_PROTOCOLS; p++)
		usblp->protocol[p].alt_setting = -1;

	/* Find out what we have. */
	for (i = 0; i < if_alt->num_altsetting; i++) {
		ifd = &if_alt->altsetting[i];

		/*
		 * Non-printer-class alternates are skipped unless the
		 * BAD_CLASS quirk says the descriptor lies about its class.
		 */
		if (ifd->desc.bInterfaceClass != USB_CLASS_PRINTER ||
		    ifd->desc.bInterfaceSubClass != 1)
			if (!(usblp->quirks & USBLP_QUIRK_BAD_CLASS))
				continue;

		if (ifd->desc.bInterfaceProtocol < USBLP_FIRST_PROTOCOL ||
		    ifd->desc.bInterfaceProtocol > USBLP_LAST_PROTOCOL)
			continue;

		/* Look for the expected bulk endpoints. */
		if (ifd->desc.bInterfaceProtocol > 1) {
			/* protocols 2 and 3 are bidirectional: need IN + OUT */
			res = usb_find_common_endpoints(ifd,
					&epread, &epwrite, NULL, NULL);
		} else {
			epread = NULL;
			res = usb_find_bulk_out_endpoint(ifd, &epwrite);
		}

		/* Ignore buggy hardware without the right endpoints. */
		if (res)
			continue;

		/* Turn off reads for buggy bidirectional printers. */
		if (usblp->quirks & USBLP_QUIRK_BIDIR) {
			printk(KERN_INFO "usblp%d: Disabling reads from "
			    "problematic bidirectional printer\n",
			    usblp->minor);
			epread = NULL;
		}

		usblp->protocol[ifd->desc.bInterfaceProtocol].alt_setting =
				ifd->desc.bAlternateSetting;
		usblp->protocol[ifd->desc.bInterfaceProtocol].epwrite = epwrite;
		usblp->protocol[ifd->desc.bInterfaceProtocol].epread = epread;
	}

	/* If our requested protocol is supported, then use it. */
	if (proto_bias >= USBLP_FIRST_PROTOCOL &&
	    proto_bias <= USBLP_LAST_PROTOCOL &&
	    usblp->protocol[proto_bias].alt_setting != -1)
		return proto_bias;

	/* Ordering is important here. */
	if (usblp->protocol[2].alt_setting != -1)
		return 2;
	if (usblp->protocol[1].alt_setting != -1)
		return 1;
	if (usblp->protocol[3].alt_setting != -1)
		return 3;

	/* If nothing is available, then don't bind to this device. */
	return -1;
}
/*
 * Activate the alternate setting recorded for 'protocol' and update
 * current_protocol/bidir accordingly. Returns 0 or a negative errno.
 */
static int usblp_set_protocol(struct usblp *usblp, int protocol)
{
	int r, alts;

	if (protocol < USBLP_FIRST_PROTOCOL || protocol > USBLP_LAST_PROTOCOL)
		return -EINVAL;

	/* Don't unnecessarily set the interface if there's a single alt. */
	if (usblp->intf->num_altsetting > 1) {
		alts = usblp->protocol[protocol].alt_setting;
		if (alts < 0)
			return -EINVAL;
		r = usb_set_interface(usblp->dev, usblp->ifnum, alts);
		if (r < 0) {
			printk(KERN_ERR "usblp: can't set desired altsetting %d on interface %d\n",
				alts, usblp->ifnum);
			return r;
		}
	}

	/* bidirectional iff the chosen protocol recorded a read endpoint */
	usblp->bidir = (usblp->protocol[protocol].epread != NULL);
	usblp->current_protocol = protocol;
	dev_dbg(&usblp->intf->dev, "usblp%d set protocol %d\n",
		usblp->minor, protocol);
	return 0;
}
/* Retrieves and caches device ID string.
 * Returns length, including length bytes but not null terminator.
 * On error, returns a negative errno value. */
static int usblp_cache_device_id_string(struct usblp *usblp)
{
	int length, err;

	err = usblp_get_id(usblp, 0, usblp->device_id_string, USBLP_DEVICE_ID_SIZE - 1);
	if (err < 0) {
		dev_dbg(&usblp->intf->dev,
			"usblp%d: error = %d reading IEEE-1284 Device ID string\n",
			usblp->minor, err);
		/* zero length prefix marks the cache as empty */
		usblp->device_id_string[0] = usblp->device_id_string[1] = '\0';
		return -EIO;
	}

	/*
	 * The first two bytes hold the big-endian total length, counting
	 * themselves; clamp it to the buffer and NUL-terminate the string.
	 */
	length = be16_to_cpu(*((__be16 *)usblp->device_id_string));
	if (length < 2)
		length = 2;
	else if (length >= USBLP_DEVICE_ID_SIZE)
		length = USBLP_DEVICE_ID_SIZE - 1;
	usblp->device_id_string[length] = '\0';

	dev_dbg(&usblp->intf->dev, "usblp%d Device ID string [len=%d]=\"%s\"\n",
		usblp->minor, length, &usblp->device_id_string[2]);

	return length;
}
/*
 * Device removal: mark not-present, wake sleepers, kill URBs; defer the
 * final cleanup to release() if the node is still open. Lock order is
 * usblp_mutex then ->mut, matching open/release.
 */
static void usblp_disconnect(struct usb_interface *intf)
{
	struct usblp *usblp = usb_get_intfdata(intf);

	usb_deregister_dev(intf, &usblp_class);

	if (!usblp || !usblp->dev) {
		dev_err(&intf->dev, "bogus disconnect\n");
		BUG();
	}

	mutex_lock(&usblp_mutex);
	mutex_lock(&usblp->mut);
	usblp->present = 0;
	wake_up(&usblp->wwait);
	wake_up(&usblp->rwait);
	usb_set_intfdata(intf, NULL);

	usblp_unlink_urbs(usblp);
	mutex_unlock(&usblp->mut);
	/* poison so late writers cannot resubmit after unlink */
	usb_poison_anchored_urbs(&usblp->urbs);

	if (!usblp->used)
		usblp_cleanup(usblp);

	mutex_unlock(&usblp_mutex);
}
/* Suspend: just unlink in-flight URBs; sleepers are left blocked. */
static int usblp_suspend(struct usb_interface *intf, pm_message_t message)
{
	struct usblp *usblp = usb_get_intfdata(intf);

	usblp_unlink_urbs(usblp);
#if 0 /* XXX Do we want this? What if someone is reading, should we fail? */
	/* not strictly necessary, but just in case */
	wake_up(&usblp->wwait);
	wake_up(&usblp->rwait);
#endif

	return 0;
}
/* Resume: re-run the bidirectional handshake and report its result. */
static int usblp_resume(struct usb_interface *intf)
{
	struct usblp *usblp = usb_get_intfdata(intf);

	return handle_bidir(usblp);
}
/* Match printer class/subclass with the three standard protocols. */
static const struct usb_device_id usblp_ids[] = {
	{ USB_DEVICE_INFO(USB_CLASS_PRINTER, 1, 1) },	/* unidirectional */
	{ USB_DEVICE_INFO(USB_CLASS_PRINTER, 1, 2) },	/* bidirectional */
	{ USB_DEVICE_INFO(USB_CLASS_PRINTER, 1, 3) },	/* IEEE-1284.4 */
	{ USB_INTERFACE_INFO(USB_CLASS_PRINTER, 1, 1) },
	{ USB_INTERFACE_INFO(USB_CLASS_PRINTER, 1, 2) },
	{ USB_INTERFACE_INFO(USB_CLASS_PRINTER, 1, 3) },
	{ USB_DEVICE(0x04b8, 0x0202) },	/* Seiko Epson Receipt Printer M129C */
	{ }				/* Terminating entry */
};

MODULE_DEVICE_TABLE(usb, usblp_ids);
static struct usb_driver usblp_driver = {
	.name =		"usblp",
	.probe =	usblp_probe,
	.disconnect =	usblp_disconnect,
	.suspend =	usblp_suspend,
	.resume =	usblp_resume,
	.id_table =	usblp_ids,
	.dev_groups =	usblp_groups,
	.supports_autosuspend =	1,
};

module_usb_driver(usblp_driver);

MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);
/* proto_bias: preferred IEEE-1284 protocol number when several are offered */
module_param(proto_bias, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(proto_bias, "Favourite protocol number");
MODULE_LICENSE("GPL");
/* end of drivers/usb/class/usblp.c (linux-master) */
// SPDX-License-Identifier: GPL-2.0
/*
* cdc-wdm.c
*
* This driver supports USB CDC WCM Device Management.
*
* Copyright (c) 2007-2009 Oliver Neukum
*
* Some code taken from cdc-acm.c
*
* Released under the GPLv2.
*
* Many thanks to Carl Nordbeck
*/
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/ioctl.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/uaccess.h>
#include <linux/bitops.h>
#include <linux/poll.h>
#include <linux/skbuff.h>
#include <linux/usb.h>
#include <linux/usb/cdc.h>
#include <linux/wwan.h>
#include <asm/byteorder.h>
#include <asm/unaligned.h>
#include <linux/usb/cdc-wdm.h>
#define DRIVER_AUTHOR "Oliver Neukum"
#define DRIVER_DESC "USB Abstract Control Model driver for USB WCM Device Management"
/* Match any interface of CDC class with the Device Management Model subclass. */
static const struct usb_device_id wdm_ids[] = {
	{
		.match_flags = USB_DEVICE_ID_MATCH_INT_CLASS |
				 USB_DEVICE_ID_MATCH_INT_SUBCLASS,
		.bInterfaceClass = USB_CLASS_COMM,
		.bInterfaceSubClass = USB_CDC_SUBCLASS_DMM
	},
	{ }
};

/* no space before '(' — fixes a checkpatch.pl style warning */
MODULE_DEVICE_TABLE(usb, wdm_ids);
#define WDM_MINOR_BASE	176

/* bit numbers in wdm_device->flags */
#define WDM_IN_USE		1	/* command URB in flight (write busy) */
#define WDM_DISCONNECTING	2
#define WDM_RESULT		3
#define WDM_READ		4	/* response data available for read() */
#define WDM_INT_STALL		5	/* interrupt endpoint stalled */
#define WDM_POLL_RUNNING	6
#define WDM_RESPONDING		7	/* response URB submitted */
#define WDM_SUSPENDING		8
#define WDM_RESETTING		9
#define WDM_OVERFLOW		10	/* response would exceed wMaxCommand */
#define WDM_WWAN_IN_USE		11	/* interface claimed via WWAN framework */

#define WDM_MAX			16

/* we cannot wait forever at flush() */
#define WDM_FLUSH_TIMEOUT	(30 * HZ)

/* CDC-WMC r1.1 requires wMaxCommand to be "at least 256 decimal (0x100)" */
#define WDM_DEFAULT_BUFSIZE	256

static DEFINE_MUTEX(wdm_mutex);
static DEFINE_SPINLOCK(wdm_device_list_lock);
static LIST_HEAD(wdm_device_list);
/* --- method tables --- */

/* Per-interface state; linked on wdm_device_list and found via ->intf. */
struct wdm_device {
	u8			*inbuf; /* buffer for response */
	u8			*outbuf; /* buffer for command */
	u8			*sbuf; /* buffer for status */
	u8			*ubuf; /* buffer for copy to user space */

	struct urb		*command;
	struct urb		*response;
	struct urb		*validity;	/* interrupt (notification) URB */
	struct usb_interface	*intf;
	struct usb_ctrlrequest	*orq;	/* setup packet for command URB */
	struct usb_ctrlrequest	*irq;	/* setup packet for response URB */
	spinlock_t		iuspin;	/* guards flags/length/rerr/resp_count */

	unsigned long		flags;	/* WDM_* bits above */
	u16			bufsize;
	u16			wMaxCommand;	/* size of ubuf/inbuf */
	u16			wMaxPacketSize;	/* size of sbuf */
	__le16			inum;	/* interface number, wire endianness */
	int			reslength;	/* length of last response chunk */
	int			length;	/* valid bytes buffered in ubuf */
	int			read;
	int			count;	/* open file references */
	dma_addr_t		shandle;
	dma_addr_t		ihandle;
	struct mutex		wlock;	/* serializes writers and ->count */
	struct mutex		rlock;	/* serializes readers */
	wait_queue_head_t	wait;
	struct work_struct	rxwork;
	struct work_struct	service_outs_intr;
	int			werr;	/* deferred write (Tx) error */
	int			rerr;	/* deferred read (Rx) error */
	int			resp_count;	/* outstanding device notifications */

	struct list_head	device_list;
	int			(*manage_power)(struct usb_interface *, int);

	enum wwan_port_type	wwanp_type;
	struct wwan_port	*wwanp;
};
static struct usb_driver wdm_driver;
/* return intfdata if we own the interface, else look up intf in the list */
static struct wdm_device *wdm_find_device(struct usb_interface *intf)
{
struct wdm_device *desc;
spin_lock(&wdm_device_list_lock);
list_for_each_entry(desc, &wdm_device_list, device_list)
if (desc->intf == intf)
goto found;
desc = NULL;
found:
spin_unlock(&wdm_device_list_lock);
return desc;
}
static struct wdm_device *wdm_find_device_by_minor(int minor)
{
struct wdm_device *desc;
spin_lock(&wdm_device_list_lock);
list_for_each_entry(desc, &wdm_device_list, device_list)
if (desc->intf->minor == minor)
goto found;
desc = NULL;
found:
spin_unlock(&wdm_device_list_lock);
return desc;
}
/* --- callbacks --- */

/* Command URB completion: record status, free the buffer, unblock writers. */
static void wdm_out_callback(struct urb *urb)
{
	struct wdm_device *desc;
	unsigned long flags;

	desc = urb->context;
	spin_lock_irqsave(&desc->iuspin, flags);
	desc->werr = urb->status;	/* reported to the next write()/fsync() */
	spin_unlock_irqrestore(&desc->iuspin, flags);
	kfree(desc->outbuf);
	desc->outbuf = NULL;
	clear_bit(WDM_IN_USE, &desc->flags);
	wake_up_all(&desc->wait);
}
static void wdm_wwan_rx(struct wdm_device *desc, int length);
/*
 * Response URB completion: either hand the data to the WWAN framework or
 * append it to ubuf for read(), tracking errors and overflow in desc->flags.
 */
static void wdm_in_callback(struct urb *urb)
{
	unsigned long flags;
	struct wdm_device *desc = urb->context;
	int status = urb->status;
	int length = urb->actual_length;

	spin_lock_irqsave(&desc->iuspin, flags);
	clear_bit(WDM_RESPONDING, &desc->flags);

	if (status) {
		switch (status) {
		case -ENOENT:
			dev_dbg(&desc->intf->dev,
				"nonzero urb status received: -ENOENT\n");
			goto skip_error;
		case -ECONNRESET:
			dev_dbg(&desc->intf->dev,
				"nonzero urb status received: -ECONNRESET\n");
			goto skip_error;
		case -ESHUTDOWN:
			dev_dbg(&desc->intf->dev,
				"nonzero urb status received: -ESHUTDOWN\n");
			goto skip_error;
		case -EPIPE:
			dev_err(&desc->intf->dev,
				"nonzero urb status received: -EPIPE\n");
			break;
		default:
			dev_err(&desc->intf->dev,
				"Unexpected error %d\n", status);
			break;
		}
	}

	/* in WWAN mode the data bypasses the chardev buffering entirely */
	if (test_bit(WDM_WWAN_IN_USE, &desc->flags)) {
		wdm_wwan_rx(desc, length);
		goto out;
	}

	/*
	 * only set a new error if there is no previous error.
	 * Errors are only cleared during read/open
	 * Avoid propagating -EPIPE (stall) to userspace since it is
	 * better handled as an empty read
	 */
	if (desc->rerr == 0 && status != -EPIPE)
		desc->rerr = status;

	if (length + desc->length > desc->wMaxCommand) {
		/* The buffer would overflow */
		set_bit(WDM_OVERFLOW, &desc->flags);
	} else {
		/* we may already be in overflow */
		if (!test_bit(WDM_OVERFLOW, &desc->flags)) {
			memmove(desc->ubuf + desc->length, desc->inbuf, length);
			desc->length += length;
			desc->reslength = length;
		}
	}
skip_error:

	if (desc->rerr) {
		/*
		 * Since there was an error, userspace may decide to not read
		 * any data after poll'ing.
		 * We should respond to further attempts from the device to send
		 * data, so that we can get unstuck.
		 */
		schedule_work(&desc->service_outs_intr);
	} else {
		set_bit(WDM_READ, &desc->flags);
		wake_up(&desc->wait);
	}
out:
	spin_unlock_irqrestore(&desc->iuspin, flags);
}
/*
 * Interrupt URB completion: parse the CDC notification and, on
 * RESPONSE_AVAILABLE, submit the response URB (or defer to rxwork on
 * stall/ENOMEM).  The URB is resubmitted at the end unless unplugged.
 */
static void wdm_int_callback(struct urb *urb)
{
	unsigned long flags;
	int rv = 0;
	int responding;
	int status = urb->status;
	struct wdm_device *desc;
	struct usb_cdc_notification *dr;

	desc = urb->context;
	dr = (struct usb_cdc_notification *)desc->sbuf;

	if (status) {
		switch (status) {
		case -ESHUTDOWN:
		case -ENOENT:
		case -ECONNRESET:
			return; /* unplug */
		case -EPIPE:
			set_bit(WDM_INT_STALL, &desc->flags);
			dev_err(&desc->intf->dev, "Stall on int endpoint\n");
			goto sw; /* halt is cleared in work */
		default:
			dev_err(&desc->intf->dev,
				"nonzero urb status received: %d\n", status);
			break;
		}
	}

	/* too short to be a valid notification header */
	if (urb->actual_length < sizeof(struct usb_cdc_notification)) {
		dev_err(&desc->intf->dev, "wdm_int_callback - %d bytes\n",
			urb->actual_length);
		goto exit;
	}

	switch (dr->bNotificationType) {
	case USB_CDC_NOTIFY_RESPONSE_AVAILABLE:
		dev_dbg(&desc->intf->dev,
			"NOTIFY_RESPONSE_AVAILABLE received: index %d len %d\n",
			le16_to_cpu(dr->wIndex), le16_to_cpu(dr->wLength));
		break;	/* fall through to response submission below */

	case USB_CDC_NOTIFY_NETWORK_CONNECTION:
		dev_dbg(&desc->intf->dev,
			"NOTIFY_NETWORK_CONNECTION %s network\n",
			dr->wValue ? "connected to" : "disconnected from");
		goto exit;
	case USB_CDC_NOTIFY_SPEED_CHANGE:
		dev_dbg(&desc->intf->dev, "SPEED_CHANGE received (len %u)\n",
			urb->actual_length);
		goto exit;
	default:
		clear_bit(WDM_POLL_RUNNING, &desc->flags);
		dev_err(&desc->intf->dev,
			"unknown notification %d received: index %d len %d\n",
			dr->bNotificationType,
			le16_to_cpu(dr->wIndex),
			le16_to_cpu(dr->wLength));
		goto exit;
	}

	spin_lock_irqsave(&desc->iuspin, flags);
	responding = test_and_set_bit(WDM_RESPONDING, &desc->flags);
	/* submit only for the first pending notification, and only if safe */
	if (!desc->resp_count++ && !responding
		&& !test_bit(WDM_DISCONNECTING, &desc->flags)
		&& !test_bit(WDM_SUSPENDING, &desc->flags)) {
		rv = usb_submit_urb(desc->response, GFP_ATOMIC);
		dev_dbg(&desc->intf->dev, "submit response URB %d\n", rv);
	}
	spin_unlock_irqrestore(&desc->iuspin, flags);
	if (rv < 0) {
		clear_bit(WDM_RESPONDING, &desc->flags);
		if (rv == -EPERM)
			return;
		if (rv == -ENOMEM) {
sw:
			rv = schedule_work(&desc->rxwork);
			if (rv)
				dev_err(&desc->intf->dev,
					"Cannot schedule work\n");
		}
	}
exit:
	rv = usb_submit_urb(urb, GFP_ATOMIC);
	if (rv)
		dev_err(&desc->intf->dev,
			"%s - usb_submit_urb failed with result %d\n",
			__func__, rv);
}
/* Cancel all URBs and prevent resubmission. */
static void poison_urbs(struct wdm_device *desc)
{
	/* the order here is essential */
	usb_poison_urb(desc->command);
	usb_poison_urb(desc->validity);
	usb_poison_urb(desc->response);
}
/* Allow the URBs to be submitted again after poison_urbs(). */
static void unpoison_urbs(struct wdm_device *desc)
{
	/*
	 *  the order here is not essential
	 *  it is symmetrical just to be nice
	 */
	usb_unpoison_urb(desc->response);
	usb_unpoison_urb(desc->validity);
	usb_unpoison_urb(desc->command);
}
/* Drop our references on the three URBs (usb_free_urb tolerates NULL). */
static void free_urbs(struct wdm_device *desc)
{
	usb_free_urb(desc->validity);
	usb_free_urb(desc->response);
	usb_free_urb(desc->command);
}
/* Free all buffers, URBs and the device structure itself. */
static void cleanup(struct wdm_device *desc)
{
	kfree(desc->sbuf);
	kfree(desc->inbuf);
	kfree(desc->orq);
	kfree(desc->irq);
	kfree(desc->ubuf);
	free_urbs(desc);
	kfree(desc);
}
/*
 * write(): copy the user's command, wait for any previous command to finish
 * (unless O_NONBLOCK) and submit it as SEND_ENCAPSULATED_COMMAND on ep0.
 * Returns @count on successful submission or a negative errno.
 */
static ssize_t wdm_write
(struct file *file, const char __user *buffer, size_t count, loff_t *ppos)
{
	u8 *buf;
	int rv = -EMSGSIZE, r, we;
	struct wdm_device *desc = file->private_data;
	struct usb_ctrlrequest *req;

	if (count > desc->wMaxCommand)
		count = desc->wMaxCommand;

	/* report a deferred error from the previous Tx completion first */
	spin_lock_irq(&desc->iuspin);
	we = desc->werr;
	desc->werr = 0;
	spin_unlock_irq(&desc->iuspin);
	if (we < 0)
		return usb_translate_errors(we);

	buf = memdup_user(buffer, count);
	if (IS_ERR(buf))
		return PTR_ERR(buf);

	/* concurrent writes and disconnect */
	r = mutex_lock_interruptible(&desc->wlock);
	rv = -ERESTARTSYS;
	if (r)
		goto out_free_mem;

	if (test_bit(WDM_DISCONNECTING, &desc->flags)) {
		rv = -ENODEV;
		goto out_free_mem_lock;
	}

	r = usb_autopm_get_interface(desc->intf);
	if (r < 0) {
		rv = usb_translate_errors(r);
		goto out_free_mem_lock;
	}

	/* wait until the previous command URB has completed */
	if (!(file->f_flags & O_NONBLOCK))
		r = wait_event_interruptible(desc->wait, !test_bit(WDM_IN_USE,
								&desc->flags));
	else
		if (test_bit(WDM_IN_USE, &desc->flags))
			r = -EAGAIN;

	if (test_bit(WDM_RESETTING, &desc->flags))
		r = -EIO;

	if (test_bit(WDM_DISCONNECTING, &desc->flags))
		r = -ENODEV;

	if (r < 0) {
		rv = r;
		goto out_free_mem_pm;
	}

	req = desc->orq;
	usb_fill_control_urb(
		desc->command,
		interface_to_usbdev(desc->intf),
		/* using common endpoint 0 */
		usb_sndctrlpipe(interface_to_usbdev(desc->intf), 0),
		(unsigned char *)req,
		buf,
		count,
		wdm_out_callback,
		desc
	);

	req->bRequestType = (USB_DIR_OUT | USB_TYPE_CLASS |
			     USB_RECIP_INTERFACE);
	req->bRequest = USB_CDC_SEND_ENCAPSULATED_COMMAND;
	req->wValue = 0;
	req->wIndex = desc->inum; /* already converted */
	req->wLength = cpu_to_le16(count);
	set_bit(WDM_IN_USE, &desc->flags);
	desc->outbuf = buf;	/* freed by wdm_out_callback() on completion */

	rv = usb_submit_urb(desc->command, GFP_KERNEL);
	if (rv < 0) {
		desc->outbuf = NULL;
		clear_bit(WDM_IN_USE, &desc->flags);
		wake_up_all(&desc->wait); /* for wdm_wait_for_response() */
		dev_err(&desc->intf->dev, "Tx URB error: %d\n", rv);
		rv = usb_translate_errors(rv);
		goto out_free_mem_pm;
	} else {
		dev_dbg(&desc->intf->dev, "Tx URB has been submitted index=%d\n",
			le16_to_cpu(req->wIndex));
	}

	usb_autopm_put_interface(desc->intf);
	mutex_unlock(&desc->wlock);
	return count;

out_free_mem_pm:
	usb_autopm_put_interface(desc->intf);
out_free_mem_lock:
	mutex_unlock(&desc->wlock);
out_free_mem:
	kfree(buf);
	return rv;
}
/*
 * Submit the read urb if resp_count is non-zero.
 *
 * Called with desc->iuspin locked; the lock is dropped around the
 * usb_submit_urb() call and retaken before returning.
 */
static int service_outstanding_interrupt(struct wdm_device *desc)
{
	int rv = 0;

	/* submit read urb only if the device is waiting for it */
	if (!desc->resp_count || !--desc->resp_count)
		goto out;
	/* do not submit in case of a race with disconnect or reset */
	if (test_bit(WDM_DISCONNECTING, &desc->flags)) {
		rv = -ENODEV;
		goto out;
	}
	if (test_bit(WDM_RESETTING, &desc->flags)) {
		rv = -EIO;
		goto out;
	}

	set_bit(WDM_RESPONDING, &desc->flags);
	spin_unlock_irq(&desc->iuspin);
	rv = usb_submit_urb(desc->response, GFP_KERNEL);
	spin_lock_irq(&desc->iuspin);
	if (rv) {
		if (!test_bit(WDM_DISCONNECTING, &desc->flags))
			dev_err(&desc->intf->dev,
				"usb_submit_urb failed with result %d\n", rv);

		/* make sure the next notification trigger a submit */
		clear_bit(WDM_RESPONDING, &desc->flags);
		desc->resp_count = 0;
	}
out:
	return rv;
}
/*
 * read(): block (unless O_NONBLOCK) until response data is buffered in
 * ubuf, copy up to @count bytes to the user and shift any remainder down.
 * Returns bytes copied or a negative errno.
 */
static ssize_t wdm_read
(struct file *file, char __user *buffer, size_t count, loff_t *ppos)
{
	int rv, cntr;
	int i = 0;
	struct wdm_device *desc = file->private_data;

	rv = mutex_lock_interruptible(&desc->rlock); /*concurrent reads */
	if (rv < 0)
		return -ERESTARTSYS;

	cntr = READ_ONCE(desc->length);
	if (cntr == 0) {
		desc->read = 0;
retry:
		if (test_bit(WDM_DISCONNECTING, &desc->flags)) {
			rv = -ENODEV;
			goto err;
		}
		if (test_bit(WDM_OVERFLOW, &desc->flags)) {
			clear_bit(WDM_OVERFLOW, &desc->flags);
			rv = -ENOBUFS;
			goto err;
		}
		i++;
		if (file->f_flags & O_NONBLOCK) {
			if (!test_bit(WDM_READ, &desc->flags)) {
				rv = -EAGAIN;
				goto err;
			}
			rv = 0;
		} else {
			rv = wait_event_interruptible(desc->wait,
				test_bit(WDM_READ, &desc->flags));
		}

		/* may have happened while we slept */
		if (test_bit(WDM_DISCONNECTING, &desc->flags)) {
			rv = -ENODEV;
			goto err;
		}
		if (test_bit(WDM_RESETTING, &desc->flags)) {
			rv = -EIO;
			goto err;
		}
		usb_mark_last_busy(interface_to_usbdev(desc->intf));
		if (rv < 0) {
			/* interrupted while sleeping above */
			rv = -ERESTARTSYS;
			goto err;
		}

		spin_lock_irq(&desc->iuspin);

		if (desc->rerr) { /* read completed, error happened */
			rv = usb_translate_errors(desc->rerr);
			desc->rerr = 0;
			spin_unlock_irq(&desc->iuspin);
			goto err;
		}
		/*
		 * recheck whether we've lost the race
		 * against the completion handler
		 */
		if (!test_bit(WDM_READ, &desc->flags)) { /* lost race */
			spin_unlock_irq(&desc->iuspin);
			goto retry;
		}

		if (!desc->reslength) { /* zero length read */
			dev_dbg(&desc->intf->dev, "zero length - clearing WDM_READ\n");
			clear_bit(WDM_READ, &desc->flags);
			rv = service_outstanding_interrupt(desc);
			spin_unlock_irq(&desc->iuspin);
			if (rv < 0)
				goto err;
			goto retry;
		}
		cntr = desc->length;
		spin_unlock_irq(&desc->iuspin);
	}

	if (cntr > count)
		cntr = count;
	rv = copy_to_user(buffer, desc->ubuf, cntr);
	if (rv > 0) {
		rv = -EFAULT;
		goto err;
	}

	spin_lock_irq(&desc->iuspin);

	/* shift the unread tail of the buffer to the front */
	for (i = 0; i < desc->length - cntr; i++)
		desc->ubuf[i] = desc->ubuf[i + cntr];

	desc->length -= cntr;
	/* in case we had outstanding data */
	if (!desc->length) {
		clear_bit(WDM_READ, &desc->flags);
		service_outstanding_interrupt(desc);
	}
	spin_unlock_irq(&desc->iuspin);
	rv = cntr;

err:
	mutex_unlock(&desc->rlock);
	return rv;
}
/*
 * Wait until the in-flight command completes or the device disconnects,
 * then report the (translated) write error, if any.
 */
static int wdm_wait_for_response(struct file *file, long timeout)
{
	struct wdm_device *desc = file->private_data;
	long rv; /* Use long here because (int) MAX_SCHEDULE_TIMEOUT < 0. */

	/*
	 * Needs both flags. We cannot do with one because resetting it would
	 * cause a race with write() yet we need to signal a disconnect.
	 */
	rv = wait_event_interruptible_timeout(desc->wait,
			      !test_bit(WDM_IN_USE, &desc->flags) ||
			      test_bit(WDM_DISCONNECTING, &desc->flags),
			      timeout);

	/*
	 * To report the correct error. This is best effort.
	 * We are inevitably racing with the hardware.
	 */
	if (test_bit(WDM_DISCONNECTING, &desc->flags))
		return -ENODEV;
	if (!rv)
		return -EIO;	/* timed out */
	if (rv < 0)
		return -EINTR;	/* interrupted by a signal */

	spin_lock_irq(&desc->iuspin);
	rv = desc->werr;
	desc->werr = 0;
	spin_unlock_irq(&desc->iuspin);

	return usb_translate_errors(rv);
}
/*
 * You need to send a signal when you react to malicious or defective hardware.
 * Also, don't abort when fsync() returned -EINVAL, for older kernels which do
 * not implement wdm_flush() will return -EINVAL.
 */
static int wdm_fsync(struct file *file, loff_t start, loff_t end, int datasync)
{
	/* unbounded wait: interruptible only by a signal */
	return wdm_wait_for_response(file, MAX_SCHEDULE_TIMEOUT);
}
/*
 * Same with wdm_fsync(), except it uses finite timeout in order to react to
 * malicious or defective hardware which ceased communication after close() was
 * implicitly called due to process termination.
 */
static int wdm_flush(struct file *file, fl_owner_t id)
{
	return wdm_wait_for_response(file, WDM_FLUSH_TIMEOUT);
}
/* poll(): readable when response data/error pending, writable when idle. */
static __poll_t wdm_poll(struct file *file, struct poll_table_struct *wait)
{
	struct wdm_device *desc = file->private_data;
	unsigned long flags;
	__poll_t mask = 0;

	spin_lock_irqsave(&desc->iuspin, flags);
	if (test_bit(WDM_DISCONNECTING, &desc->flags)) {
		mask = EPOLLHUP | EPOLLERR;
		spin_unlock_irqrestore(&desc->iuspin, flags);
		/* gone device: do not register on the wait queue */
		goto desc_out;
	}
	if (test_bit(WDM_READ, &desc->flags))
		mask = EPOLLIN | EPOLLRDNORM;
	if (desc->rerr || desc->werr)
		mask |= EPOLLERR;
	if (!test_bit(WDM_IN_USE, &desc->flags))
		mask |= EPOLLOUT | EPOLLWRNORM;
	spin_unlock_irqrestore(&desc->iuspin, flags);

	poll_wait(file, &desc->wait, wait);

desc_out:
	return mask;
}
/*
 * open(): refuse if disconnecting or claimed by the WWAN framework;
 * on first open, clear stale errors and start the interrupt URB.
 */
static int wdm_open(struct inode *inode, struct file *file)
{
	int minor = iminor(inode);
	int rv = -ENODEV;
	struct usb_interface *intf;
	struct wdm_device *desc;

	mutex_lock(&wdm_mutex);
	desc = wdm_find_device_by_minor(minor);
	if (!desc)
		goto out;

	intf = desc->intf;
	if (test_bit(WDM_DISCONNECTING, &desc->flags))
		goto out;
	file->private_data = desc;

	if (test_bit(WDM_WWAN_IN_USE, &desc->flags)) {
		rv = -EBUSY;
		goto out;
	}

	rv = usb_autopm_get_interface(desc->intf);
	if (rv < 0) {
		dev_err(&desc->intf->dev, "Error autopm - %d\n", rv);
		goto out;
	}

	/* using write lock to protect desc->count */
	mutex_lock(&desc->wlock);
	if (!desc->count++) {
		desc->werr = 0;
		desc->rerr = 0;
		rv = usb_submit_urb(desc->validity, GFP_KERNEL);
		if (rv < 0) {
			desc->count--;
			dev_err(&desc->intf->dev,
				"Error submitting int urb - %d\n", rv);
			rv = usb_translate_errors(rv);
		}
	} else {
		rv = 0;
	}
	mutex_unlock(&desc->wlock);
	/* enable remote wakeup once the first opener exists */
	if (desc->count == 1)
		desc->manage_power(intf, 1);
	usb_autopm_put_interface(desc->intf);
out:
	mutex_unlock(&wdm_mutex);
	return rv;
}
/*
 * release(): on last close, quiesce the URBs and drop the power reference;
 * if the device is already gone, free the deferred state instead.
 */
static int wdm_release(struct inode *inode, struct file *file)
{
	struct wdm_device *desc = file->private_data;

	mutex_lock(&wdm_mutex);

	/* using write lock to protect desc->count */
	mutex_lock(&desc->wlock);
	desc->count--;
	mutex_unlock(&desc->wlock);

	if (!desc->count) {
		if (!test_bit(WDM_DISCONNECTING, &desc->flags)) {
			dev_dbg(&desc->intf->dev, "wdm_release: cleanup\n");
			poison_urbs(desc);
			spin_lock_irq(&desc->iuspin);
			desc->resp_count = 0;
			clear_bit(WDM_RESPONDING, &desc->flags);
			spin_unlock_irq(&desc->iuspin);
			desc->manage_power(desc->intf, 0);
			unpoison_urbs(desc);
		} else {
			/* must avoid dev_printk here as desc->intf is invalid */
			pr_debug(KBUILD_MODNAME " %s: device gone - cleaning up\n", __func__);
			cleanup(desc);
		}
	}
	mutex_unlock(&wdm_mutex);
	return 0;
}
/* ioctl(): only IOCTL_WDM_MAX_COMMAND — report the maximum message size. */
static long wdm_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	struct wdm_device *desc = file->private_data;
	int rv = 0;

	switch (cmd) {
	case IOCTL_WDM_MAX_COMMAND:
		if (copy_to_user((void __user *)arg, &desc->wMaxCommand, sizeof(desc->wMaxCommand)))
			rv = -EFAULT;
		break;
	default:
		rv = -ENOTTY;
	}
	return rv;
}
/* Character-device file operations for /dev/cdc-wdm%d. */
static const struct file_operations wdm_fops = {
	.owner =	THIS_MODULE,
	.read =		wdm_read,
	.write =	wdm_write,
	.fsync =	wdm_fsync,
	.open =		wdm_open,
	.flush =	wdm_flush,
	.release =	wdm_release,
	.poll =		wdm_poll,
	.unlocked_ioctl = wdm_ioctl,
	.compat_ioctl = compat_ptr_ioctl,
	.llseek =	noop_llseek,
};
/* USB class driver registration: device node name and minor range. */
static struct usb_class_driver wdm_class = {
	.name =		"cdc-wdm%d",
	.fops =		&wdm_fops,
	.minor_base =	WDM_MINOR_BASE,
};
/* --- WWAN framework integration --- */
#ifdef CONFIG_WWAN
static int wdm_wwan_port_start(struct wwan_port *port)
{
	struct wdm_device *desc = wwan_port_get_drvdata(port);

	/* The interface is both exposed via the WWAN framework and as a
	 * legacy usbmisc chardev. If chardev is already open, just fail
	 * to prevent concurrent usage. Otherwise, switch to WWAN mode.
	 */
	mutex_lock(&wdm_mutex);
	if (desc->count) {
		mutex_unlock(&wdm_mutex);
		return -EBUSY;
	}
	set_bit(WDM_WWAN_IN_USE, &desc->flags);
	mutex_unlock(&wdm_mutex);

	desc->manage_power(desc->intf, 1);

	/* tx is allowed */
	wwan_port_txon(port);

	/* Start getting events */
	return usb_submit_urb(desc->validity, GFP_KERNEL);
}
/* Leave WWAN mode: stop all transfers and release the chardev again. */
static void wdm_wwan_port_stop(struct wwan_port *port)
{
	struct wdm_device *desc = wwan_port_get_drvdata(port);

	/* Stop all transfers and disable WWAN mode */
	poison_urbs(desc);
	desc->manage_power(desc->intf, 0);
	clear_bit(WDM_READ, &desc->flags);
	clear_bit(WDM_WWAN_IN_USE, &desc->flags);
	unpoison_urbs(desc);
}
/* Command URB completion in WWAN mode: drop PM ref, re-enable TX, free skb. */
static void wdm_wwan_port_tx_complete(struct urb *urb)
{
	struct sk_buff *skb = urb->context;
	/* desc was stashed in the skb by wdm_wwan_port_tx() */
	struct wdm_device *desc = skb_shinfo(skb)->destructor_arg;

	usb_autopm_put_interface(desc->intf);
	wwan_port_txon(desc->wwanp);
	kfree_skb(skb);
}
/*
 * Transmit one skb as a SEND_ENCAPSULATED_COMMAND control transfer.
 * TX is flow-controlled: it is switched off until the URB completes.
 */
static int wdm_wwan_port_tx(struct wwan_port *port, struct sk_buff *skb)
{
	struct wdm_device *desc = wwan_port_get_drvdata(port);
	struct usb_interface *intf = desc->intf;
	struct usb_ctrlrequest *req = desc->orq;
	int rv;

	rv = usb_autopm_get_interface(intf);
	if (rv)
		return rv;

	usb_fill_control_urb(
		desc->command,
		interface_to_usbdev(intf),
		usb_sndctrlpipe(interface_to_usbdev(intf), 0),
		(unsigned char *)req,
		skb->data,
		skb->len,
		wdm_wwan_port_tx_complete,
		skb
	);

	req->bRequestType = (USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE);
	req->bRequest = USB_CDC_SEND_ENCAPSULATED_COMMAND;
	req->wValue = 0;
	req->wIndex = desc->inum;
	req->wLength = cpu_to_le16(skb->len);

	/* let the completion handler find desc again */
	skb_shinfo(skb)->destructor_arg = desc;

	rv = usb_submit_urb(desc->command, GFP_KERNEL);
	if (rv)
		usb_autopm_put_interface(intf);
	else /* One transfer at a time, stop TX until URB completion */
		wwan_port_txoff(port);

	return rv;
}
/* Operations handed to the WWAN core for ports backed by this driver. */
static const struct wwan_port_ops wdm_wwan_port_ops = {
	.start = wdm_wwan_port_start,
	.stop = wdm_wwan_port_stop,
	.tx = wdm_wwan_port_tx,
};
/* Register a WWAN port for this device when its protocol type is known. */
static void wdm_wwan_init(struct wdm_device *desc)
{
	struct usb_interface *intf = desc->intf;
	struct wwan_port *port;

	/* Only register to WWAN core if protocol/type is known */
	if (desc->wwanp_type == WWAN_PORT_UNKNOWN) {
		dev_info(&intf->dev, "Unknown control protocol\n");
		return;
	}

	port = wwan_create_port(&intf->dev, desc->wwanp_type, &wdm_wwan_port_ops,
				NULL, desc);
	if (IS_ERR(port)) {
		dev_err(&intf->dev, "%s: Unable to create WWAN port\n",
			dev_name(intf->usb_dev));
		return;	/* non-fatal: the chardev path still works */
	}

	desc->wwanp = port;
}
/* Remove the WWAN port, if one was created. */
static void wdm_wwan_deinit(struct wdm_device *desc)
{
	if (!desc->wwanp)
		return;

	wwan_remove_port(desc->wwanp);
	desc->wwanp = NULL;
}
/* Deliver a received response to the WWAN port (called from URB completion). */
static void wdm_wwan_rx(struct wdm_device *desc, int length)
{
	struct wwan_port *port = desc->wwanp;
	struct sk_buff *skb;

	/* Forward data to WWAN port */
	skb = alloc_skb(length, GFP_ATOMIC);
	if (!skb)
		return;	/* best effort: drop on allocation failure */

	skb_put_data(skb, desc->inbuf, length);
	wwan_port_rx(port, skb);

	/* inbuf has been copied, it is safe to check for outstanding data */
	schedule_work(&desc->service_outs_intr);
}
#else /* CONFIG_WWAN */
/* No-op stubs when the WWAN framework is not built in. */
static void wdm_wwan_init(struct wdm_device *desc) {}
static void wdm_wwan_deinit(struct wdm_device *desc) {}
static void wdm_wwan_rx(struct wdm_device *desc, int length) {}
#endif /* CONFIG_WWAN */
/* --- error handling --- */

/* Retry submitting the response URB from process context; reschedules itself on failure. */
static void wdm_rxwork(struct work_struct *work)
{
	struct wdm_device *desc = container_of(work, struct wdm_device, rxwork);
	unsigned long flags;
	int rv = 0;
	int responding;

	spin_lock_irqsave(&desc->iuspin, flags);
	if (test_bit(WDM_DISCONNECTING, &desc->flags)) {
		spin_unlock_irqrestore(&desc->iuspin, flags);
	} else {
		responding = test_and_set_bit(WDM_RESPONDING, &desc->flags);
		spin_unlock_irqrestore(&desc->iuspin, flags);
		if (!responding)
			rv = usb_submit_urb(desc->response, GFP_KERNEL);
		if (rv < 0 && rv != -EPERM) {
			spin_lock_irqsave(&desc->iuspin, flags);
			clear_bit(WDM_RESPONDING, &desc->flags);
			if (!test_bit(WDM_DISCONNECTING, &desc->flags))
				schedule_work(&desc->rxwork);
			spin_unlock_irqrestore(&desc->iuspin, flags);
		}
	}
}
/* Process-context helper: resubmit the response URB and unblock readers. */
static void service_interrupt_work(struct work_struct *work)
{
	struct wdm_device *desc;

	desc = container_of(work, struct wdm_device, service_outs_intr);

	spin_lock_irq(&desc->iuspin);
	service_outstanding_interrupt(desc);
	if (!desc->resp_count) {
		set_bit(WDM_READ, &desc->flags);
		wake_up(&desc->wait);
	}
	spin_unlock_irq(&desc->iuspin);
}
/* --- hotplug --- */

/*
 * Allocate and initialize a wdm_device for @intf, register the chardev and
 * (if the protocol is known) a WWAN port.  Returns 0 or a negative errno;
 * on error everything allocated so far is torn down via cleanup().
 */
static int wdm_create(struct usb_interface *intf, struct usb_endpoint_descriptor *ep,
		u16 bufsize, enum wwan_port_type type,
		int (*manage_power)(struct usb_interface *, int))
{
	int rv = -ENOMEM;
	struct wdm_device *desc;

	desc = kzalloc(sizeof(struct wdm_device), GFP_KERNEL);
	if (!desc)
		goto out;
	INIT_LIST_HEAD(&desc->device_list);
	mutex_init(&desc->rlock);
	mutex_init(&desc->wlock);
	spin_lock_init(&desc->iuspin);
	init_waitqueue_head(&desc->wait);
	desc->wMaxCommand = bufsize;
	/* this will be expanded and needed in hardware endianness */
	desc->inum = cpu_to_le16((u16)intf->cur_altsetting->desc.bInterfaceNumber);
	desc->intf = intf;
	desc->wwanp_type = type;
	INIT_WORK(&desc->rxwork, wdm_rxwork);
	INIT_WORK(&desc->service_outs_intr, service_interrupt_work);

	/* the notification endpoint must be interrupt-IN */
	if (!usb_endpoint_is_int_in(ep)) {
		rv = -EINVAL;
		goto err;
	}

	desc->wMaxPacketSize = usb_endpoint_maxp(ep);

	desc->orq = kmalloc(sizeof(struct usb_ctrlrequest), GFP_KERNEL);
	if (!desc->orq)
		goto err;
	desc->irq = kmalloc(sizeof(struct usb_ctrlrequest), GFP_KERNEL);
	if (!desc->irq)
		goto err;

	desc->validity = usb_alloc_urb(0, GFP_KERNEL);
	if (!desc->validity)
		goto err;

	desc->response = usb_alloc_urb(0, GFP_KERNEL);
	if (!desc->response)
		goto err;

	desc->command = usb_alloc_urb(0, GFP_KERNEL);
	if (!desc->command)
		goto err;

	desc->ubuf = kmalloc(desc->wMaxCommand, GFP_KERNEL);
	if (!desc->ubuf)
		goto err;

	desc->sbuf = kmalloc(desc->wMaxPacketSize, GFP_KERNEL);
	if (!desc->sbuf)
		goto err;

	desc->inbuf = kmalloc(desc->wMaxCommand, GFP_KERNEL);
	if (!desc->inbuf)
		goto err;

	usb_fill_int_urb(
		desc->validity,
		interface_to_usbdev(intf),
		usb_rcvintpipe(interface_to_usbdev(intf), ep->bEndpointAddress),
		desc->sbuf,
		desc->wMaxPacketSize,
		wdm_int_callback,
		desc,
		ep->bInterval
	);

	desc->irq->bRequestType = (USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE);
	desc->irq->bRequest = USB_CDC_GET_ENCAPSULATED_RESPONSE;
	desc->irq->wValue = 0;
	desc->irq->wIndex = desc->inum; /* already converted */
	desc->irq->wLength = cpu_to_le16(desc->wMaxCommand);

	usb_fill_control_urb(
		desc->response,
		interface_to_usbdev(intf),
		/* using common endpoint 0 */
		usb_rcvctrlpipe(interface_to_usbdev(desc->intf), 0),
		(unsigned char *)desc->irq,
		desc->inbuf,
		desc->wMaxCommand,
		wdm_in_callback,
		desc
	);

	desc->manage_power = manage_power;

	spin_lock(&wdm_device_list_lock);
	list_add(&desc->device_list, &wdm_device_list);
	spin_unlock(&wdm_device_list_lock);

	rv = usb_register_dev(intf, &wdm_class);
	if (rv < 0)
		goto err;
	else
		dev_info(&intf->dev, "%s: USB WDM device\n", dev_name(intf->usb_dev));

	wdm_wwan_init(desc);

out:
	return rv;
err:
	spin_lock(&wdm_device_list_lock);
	list_del(&desc->device_list);
	spin_unlock(&wdm_device_list_lock);
	cleanup(desc);
	return rv;
}
/* Default manage_power callback: toggle remote wakeup for autosuspend. */
static int wdm_manage_power(struct usb_interface *intf, int on)
{
	/* need autopm_get/put here to ensure the usbcore sees the new value */
	int rv = usb_autopm_get_interface(intf);

	intf->needs_remote_wakeup = on;
	if (!rv)
		usb_autopm_put_interface(intf);
	return 0;
}
/* probe: parse the CDC DMM descriptor for wMaxCommand and create the device. */
static int wdm_probe(struct usb_interface *intf, const struct usb_device_id *id)
{
	int rv = -EINVAL;
	struct usb_host_interface *iface;
	struct usb_endpoint_descriptor *ep;
	struct usb_cdc_parsed_header hdr;
	u8 *buffer = intf->altsetting->extra;
	int buflen = intf->altsetting->extralen;
	u16 maxcom = WDM_DEFAULT_BUFSIZE;

	if (!buffer)
		goto err;

	cdc_parse_cdc_header(&hdr, intf, buffer, buflen);

	/* use the descriptor's wMaxCommand if present, else the spec default */
	if (hdr.usb_cdc_dmm_desc)
		maxcom = le16_to_cpu(hdr.usb_cdc_dmm_desc->wMaxCommand);

	iface = intf->cur_altsetting;
	if (iface->desc.bNumEndpoints != 1)
		goto err;
	ep = &iface->endpoint[0].desc;

	rv = wdm_create(intf, ep, maxcom, WWAN_PORT_UNKNOWN, &wdm_manage_power);

err:
	return rv;
}
/**
 * usb_cdc_wdm_register - register a WDM subdriver
 * @intf: usb interface the subdriver will associate with
 * @ep: interrupt endpoint to monitor for notifications
 * @bufsize: maximum message size to support for read/write
 * @type: Type/protocol of the transported data (MBIM, QMI...)
 * @manage_power: call-back invoked during open and release to
 *                manage the device's power
 *
 * Create WDM usb class character device and associate it with intf
 * without binding, allowing another driver to manage the interface.
 *
 * The subdriver will manage the given interrupt endpoint exclusively
 * and will issue control requests referring to the given intf. It
 * will otherwise avoid interferring, and in particular not do
 * usb_set_intfdata/usb_get_intfdata on intf.
 *
 * The return value is a pointer to the subdriver's struct usb_driver.
 * The registering driver is responsible for calling this subdriver's
 * disconnect, suspend, resume, pre_reset and post_reset methods from
 * its own.
 *
 * Return: &wdm_driver on success, ERR_PTR() on failure.
 */
struct usb_driver *usb_cdc_wdm_register(struct usb_interface *intf,
					struct usb_endpoint_descriptor *ep,
					int bufsize, enum wwan_port_type type,
					int (*manage_power)(struct usb_interface *, int))
{
	int rv;

	rv = wdm_create(intf, ep, bufsize, type, manage_power);
	if (rv < 0)
		goto err;

	return &wdm_driver;
err:
	return ERR_PTR(rv);
}
EXPORT_SYMBOL(usb_cdc_wdm_register);
/*
 * disconnect: mark the device gone, wake all sleepers, quiesce URBs/work
 * and free the state unless open files still reference it.
 */
static void wdm_disconnect(struct usb_interface *intf)
{
	struct wdm_device *desc;
	unsigned long flags;

	usb_deregister_dev(intf, &wdm_class);
	desc = wdm_find_device(intf);
	mutex_lock(&wdm_mutex);

	wdm_wwan_deinit(desc);

	/* the spinlock makes sure no new urbs are generated in the callbacks */
	spin_lock_irqsave(&desc->iuspin, flags);
	set_bit(WDM_DISCONNECTING, &desc->flags);
	set_bit(WDM_READ, &desc->flags);	/* unblock readers to see -ENODEV */
	spin_unlock_irqrestore(&desc->iuspin, flags);
	wake_up_all(&desc->wait);
	mutex_lock(&desc->rlock);
	mutex_lock(&desc->wlock);
	poison_urbs(desc);
	cancel_work_sync(&desc->rxwork);
	cancel_work_sync(&desc->service_outs_intr);
	mutex_unlock(&desc->wlock);
	mutex_unlock(&desc->rlock);

	/* the desc->intf pointer used as list key is now invalid */
	spin_lock(&wdm_device_list_lock);
	list_del(&desc->device_list);
	spin_unlock(&wdm_device_list_lock);

	if (!desc->count)
		cleanup(desc);
	else
		dev_dbg(&intf->dev, "%d open files - postponing cleanup\n", desc->count);
	mutex_unlock(&wdm_mutex);
}
#ifdef CONFIG_PM
/*
 * suspend: refuse autosuspend while a transfer is in flight; otherwise
 * quiesce URBs and work items.  System suspend takes the r/w mutexes itself.
 */
static int wdm_suspend(struct usb_interface *intf, pm_message_t message)
{
	struct wdm_device *desc = wdm_find_device(intf);
	int rv = 0;

	dev_dbg(&desc->intf->dev, "wdm%d_suspend\n", intf->minor);

	/* if this is an autosuspend the caller does the locking */
	if (!PMSG_IS_AUTO(message)) {
		mutex_lock(&desc->rlock);
		mutex_lock(&desc->wlock);
	}
	spin_lock_irq(&desc->iuspin);

	if (PMSG_IS_AUTO(message) &&
			(test_bit(WDM_IN_USE, &desc->flags)
			|| test_bit(WDM_RESPONDING, &desc->flags))) {
		spin_unlock_irq(&desc->iuspin);
		rv = -EBUSY;
	} else {

		set_bit(WDM_SUSPENDING, &desc->flags);
		spin_unlock_irq(&desc->iuspin);
		/* callback submits work - order is essential */
		poison_urbs(desc);
		cancel_work_sync(&desc->rxwork);
		cancel_work_sync(&desc->service_outs_intr);
		unpoison_urbs(desc);
	}
	if (!PMSG_IS_AUTO(message)) {
		mutex_unlock(&desc->wlock);
		mutex_unlock(&desc->rlock);
	}

	return rv;
}
#endif
/* Resubmit the interrupt URB after resume/reset, but only if still open. */
static int recover_from_urb_loss(struct wdm_device *desc)
{
	int status;

	if (!desc->count)
		return 0;

	status = usb_submit_urb(desc->validity, GFP_NOIO);
	if (status < 0)
		dev_err(&desc->intf->dev,
			"Error resume submitting int urb - %d\n", status);
	return status;
}
#ifdef CONFIG_PM
/* resume (also reset_resume): leave suspend state and restart the int URB. */
static int wdm_resume(struct usb_interface *intf)
{
	struct wdm_device *desc = wdm_find_device(intf);
	int rv;

	dev_dbg(&desc->intf->dev, "wdm%d_resume\n", intf->minor);

	clear_bit(WDM_SUSPENDING, &desc->flags);
	rv = recover_from_urb_loss(desc);

	return rv;
}
#endif
/*
 * pre_reset: flag the reset, unblock all sleepers with errors and quiesce
 * URBs/work.  The r/w mutexes stay held until wdm_post_reset().
 */
static int wdm_pre_reset(struct usb_interface *intf)
{
	struct wdm_device *desc = wdm_find_device(intf);

	/*
	 * we notify everybody using poll of
	 * an exceptional situation
	 * must be done before recovery lest a spontaneous
	 * message from the device is lost
	 */
	spin_lock_irq(&desc->iuspin);
	set_bit(WDM_RESETTING, &desc->flags);	/* inform read/write */
	set_bit(WDM_READ, &desc->flags);	/* unblock read */
	clear_bit(WDM_IN_USE, &desc->flags);	/* unblock write */
	desc->rerr = -EINTR;
	spin_unlock_irq(&desc->iuspin);
	wake_up_all(&desc->wait);
	mutex_lock(&desc->rlock);
	mutex_lock(&desc->wlock);
	poison_urbs(desc);
	cancel_work_sync(&desc->rxwork);
	cancel_work_sync(&desc->service_outs_intr);
	return 0;
}
/*
 * Post-reset callback.
 *
 * Un-poisons the URBs, clears the reset/overflow state, resubmits the
 * interrupt URB and finally drops the mutexes that wdm_pre_reset() took.
 */
static int wdm_post_reset(struct usb_interface *intf)
{
	struct wdm_device *desc = wdm_find_device(intf);
	int rv;

	unpoison_urbs(desc);
	clear_bit(WDM_OVERFLOW, &desc->flags);
	clear_bit(WDM_RESETTING, &desc->flags);
	rv = recover_from_urb_loss(desc);
	/* release the locks acquired in wdm_pre_reset() */
	mutex_unlock(&desc->wlock);
	mutex_unlock(&desc->rlock);
	return rv;
}
/* USB driver glue for the CDC-WDM class driver. */
static struct usb_driver wdm_driver = {
	.name =		"cdc_wdm",
	.probe =	wdm_probe,
	.disconnect =	wdm_disconnect,
#ifdef CONFIG_PM
	.suspend =	wdm_suspend,
	.resume =	wdm_resume,
	.reset_resume =	wdm_resume,	/* lost state is recovered the same way */
#endif
	.pre_reset =	wdm_pre_reset,
	.post_reset =	wdm_post_reset,
	.id_table =	wdm_ids,
	.supports_autosuspend = 1,
	.disable_hub_initiated_lpm = 1,
};

module_usb_driver(wdm_driver);

MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL");
| linux-master | drivers/usb/class/cdc-wdm.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* drivers/usb/class/usbtmc.c - USB Test & Measurement class driver
*
* Copyright (C) 2007 Stefan Kopp, Gechingen, Germany
* Copyright (C) 2008 Novell, Inc.
* Copyright (C) 2008 Greg Kroah-Hartman <[email protected]>
* Copyright (C) 2018 IVI Foundation, Inc.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/uaccess.h>
#include <linux/kref.h>
#include <linux/slab.h>
#include <linux/poll.h>
#include <linux/mutex.h>
#include <linux/usb.h>
#include <linux/compat.h>
#include <linux/usb/tmc.h>
/* Increment API VERSION when changing tmc.h with new flags or ioctls
* or when changing a significant behavior of the driver.
*/
#define USBTMC_API_VERSION (3)
#define USBTMC_HEADER_SIZE 12
#define USBTMC_MINOR_BASE 176
/* Minimum USB timeout (in milliseconds) */
#define USBTMC_MIN_TIMEOUT 100
/* Default USB timeout (in milliseconds) */
#define USBTMC_TIMEOUT 5000
/* Max number of urbs used in write transfers */
#define MAX_URBS_IN_FLIGHT 16
/* I/O buffer size used in generic read/write functions */
#define USBTMC_BUFSIZE (4096)
/*
* Maximum number of read cycles to empty bulk in endpoint during CLEAR and
* ABORT_BULK_IN requests. Ends the loop if (for whatever reason) a short
* packet is never read.
*/
#define USBTMC_MAX_READS_TO_CLEAR_BULK_IN 100
static const struct usb_device_id usbtmc_devices[] = {
{ USB_INTERFACE_INFO(USB_CLASS_APP_SPEC, 3, 0), },
{ USB_INTERFACE_INFO(USB_CLASS_APP_SPEC, 3, 1), },
{ 0, } /* terminating entry */
};
MODULE_DEVICE_TABLE(usb, usbtmc_devices);
/*
* This structure is the capabilities for the device
* See section 4.2.1.8 of the USBTMC specification,
* and section 4.2.2 of the USBTMC usb488 subclass
* specification for details.
*/
struct usbtmc_dev_capabilities {
	__u8 interface_capabilities;		/* USBTMC spec 4.2.1.8 */
	__u8 device_capabilities;		/* USBTMC spec 4.2.1.8 */
	__u8 usb488_interface_capabilities;	/* USB488 subclass 4.2.2 */
	__u8 usb488_device_capabilities;	/* USB488 subclass 4.2.2 */
};
/* This structure holds private data for each USBTMC device. One copy is
* allocated for each USBTMC device in the driver's probe function.
*/
struct usbtmc_device_data {
	const struct usb_device_id *id;
	struct usb_device *usb_dev;
	struct usb_interface *intf;
	struct list_head file_list;	/* open handles, guarded by dev_lock */

	unsigned int bulk_in;		/* Bulk-IN endpoint address */
	unsigned int bulk_out;		/* Bulk-OUT endpoint address */

	u8 bTag;			/* next transfer identifier (never 0) */
	u8 bTag_last_write;	/* needed for abort */
	u8 bTag_last_read;	/* needed for abort */

	/* packet size of IN bulk */
	u16 wMaxPacketSize;

	/* data for interrupt in endpoint handling */
	u8 bNotify1;		/* first notification byte (bTag) */
	u8 bNotify2;		/* second notification byte (e.g. status) */
	u16 ifnum;
	u8 iin_bTag;		/* bTag for interrupt-in requests */
	u8 *iin_buffer;
	atomic_t iin_data_valid;	/* set when a notification arrived */
	unsigned int iin_ep;
	int iin_ep_present;	/* non-zero if interrupt-in endpoint exists */
	int iin_interval;
	struct urb *iin_urb;
	u16 iin_wMaxPacketSize;

	/* coalesced usb488_caps from usbtmc_dev_capabilities */
	__u8 usb488_caps;

	bool zombie; /* fd of disconnected device */

	struct usbtmc_dev_capabilities capabilities;
	struct kref kref;	/* shared by device and its open files */
	struct mutex io_mutex;	/* only one i/o function running at a time */
	wait_queue_head_t waitq;
	struct fasync_struct *fasync;
	spinlock_t dev_lock;	/* lock for file_list */
};
#define to_usbtmc_data(d) container_of(d, struct usbtmc_device_data, kref)
/*
* This structure holds private data for each USBTMC file handle.
*/
struct usbtmc_file_data {
	struct usbtmc_device_data *data;	/* owning device */
	struct list_head file_elem;	/* entry in data->file_list */

	u32 timeout;		/* I/O timeout in milliseconds */
	u8 srq_byte;		/* status byte captured on SRQ */
	atomic_t srq_asserted;	/* non-zero after an SRQ notification */
	atomic_t closing;	/* set by flush to abort blocked waiters */
	u8 bmTransferAttributes; /* member of DEV_DEP_MSG_IN */

	u8 eom_val;		/* EOM flag for writes */
	u8 term_char;		/* termination character (default '\n') */
	bool term_char_enabled;	/* send TermChar flag in MSG_IN requests */
	bool auto_abort;	/* auto-issue abort on failed transfers */

	spinlock_t err_lock;	/* lock for errors */

	struct usb_anchor submitted;	/* in-flight URBs */

	/* data for generic_write */
	struct semaphore limit_write_sem;	/* caps URBs in flight */
	u32 out_transfer_size;	/* bytes written so far */
	int out_status;		/* first write error, 0 if none */

	/* data for generic_read */
	u32 in_transfer_size;	/* bytes received so far */
	int in_status;		/* first read error, 0 if none */
	int in_urbs_used;	/* read URBs currently outstanding */
	struct usb_anchor in_anchor;	/* completed, not yet consumed URBs */
	wait_queue_head_t wait_bulk_in;	/* readers wait here for data */
};
/* Forward declarations */
static struct usb_driver usbtmc_driver;
static void usbtmc_draw_down(struct usbtmc_file_data *file_data);
/*
 * Final kref release: drop the USB device reference and free the
 * per-device private data.  Called when the last file handle and the
 * disconnect path have both put their references.
 */
static void usbtmc_delete(struct kref *kref)
{
	struct usbtmc_device_data *data = to_usbtmc_data(kref);

	usb_put_dev(data->usb_dev);
	kfree(data);
}
/*
 * open() for the usbtmc character device.
 *
 * Allocates a per-handle usbtmc_file_data, takes a reference on the
 * per-device data (dropped in usbtmc_release), initializes per-handle
 * defaults and links the handle into the device's file_list.
 */
static int usbtmc_open(struct inode *inode, struct file *filp)
{
	struct usb_interface *intf;
	struct usbtmc_device_data *data;
	struct usbtmc_file_data *file_data;

	intf = usb_find_interface(&usbtmc_driver, iminor(inode));
	if (!intf) {
		pr_err("can not find device for minor %d", iminor(inode));
		return -ENODEV;
	}

	file_data = kzalloc(sizeof(*file_data), GFP_KERNEL);
	if (!file_data)
		return -ENOMEM;

	spin_lock_init(&file_data->err_lock);
	sema_init(&file_data->limit_write_sem, MAX_URBS_IN_FLIGHT);
	init_usb_anchor(&file_data->submitted);
	init_usb_anchor(&file_data->in_anchor);
	init_waitqueue_head(&file_data->wait_bulk_in);

	data = usb_get_intfdata(intf);
	/* Protect reference to data from file structure until release */
	kref_get(&data->kref);

	mutex_lock(&data->io_mutex);
	file_data->data = data;

	atomic_set(&file_data->closing, 0);

	/* per-handle defaults; can be changed later via ioctls */
	file_data->timeout = USBTMC_TIMEOUT;
	file_data->term_char = '\n';
	file_data->term_char_enabled = 0;
	file_data->auto_abort = 0;
	file_data->eom_val = 1;

	INIT_LIST_HEAD(&file_data->file_elem);
	spin_lock_irq(&data->dev_lock);
	list_add_tail(&file_data->file_elem, &data->file_list);
	spin_unlock_irq(&data->dev_lock);
	mutex_unlock(&data->io_mutex);

	/* Store pointer in file structure's private data field */
	filp->private_data = file_data;

	return 0;
}
/*
* usbtmc_flush - called before file handle is closed
*/
/*
 * usbtmc_flush - called before file handle is closed
 *
 * Marks the handle as closing, waits for in-flight I/O to drain and
 * resets the per-handle transfer bookkeeping so a blocked waiter sees a
 * clean state.
 */
static int usbtmc_flush(struct file *file, fl_owner_t id)
{
	struct usbtmc_file_data *file_data;
	struct usbtmc_device_data *data;

	file_data = file->private_data;
	if (file_data == NULL)
		return -ENODEV;

	atomic_set(&file_data->closing, 1);
	data = file_data->data;

	/* wait for io to stop */
	mutex_lock(&data->io_mutex);

	usbtmc_draw_down(file_data);

	spin_lock_irq(&file_data->err_lock);
	file_data->in_status = 0;
	file_data->in_transfer_size = 0;
	file_data->in_urbs_used = 0;
	file_data->out_status = 0;
	file_data->out_transfer_size = 0;
	spin_unlock_irq(&file_data->err_lock);

	/* wake anyone (e.g. wait_srq) blocked on this handle */
	wake_up_interruptible_all(&data->waitq);
	mutex_unlock(&data->io_mutex);

	return 0;
}
/*
 * release() for the usbtmc character device: unlink the handle from the
 * device's file list, drop the device reference taken in usbtmc_open
 * (may free the device data) and free the handle itself.
 */
static int usbtmc_release(struct inode *inode, struct file *file)
{
	struct usbtmc_file_data *file_data = file->private_data;

	/* prevent IO _AND_ usbtmc_interrupt */
	mutex_lock(&file_data->data->io_mutex);
	spin_lock_irq(&file_data->data->dev_lock);
	list_del(&file_data->file_elem);
	spin_unlock_irq(&file_data->data->dev_lock);
	mutex_unlock(&file_data->data->io_mutex);

	kref_put(&file_data->data->kref, usbtmc_delete);
	file_data->data = NULL;
	kfree(file_data);
	return 0;
}
/*
 * Abort a Bulk-IN transfer identified by @tag.
 *
 * Implements the INITIATE_ABORT_BULK_IN / CHECK_ABORT_BULK_IN_STATUS
 * handshake from the USBTMC specification, draining any data the device
 * still has queued on the Bulk-IN endpoint in between.  Returns 0 on
 * success, -EAGAIN when the status check must be repeated later, or a
 * negative error code.
 */
static int usbtmc_ioctl_abort_bulk_in_tag(struct usbtmc_device_data *data,
					  u8 tag)
{
	u8 *buffer;
	struct device *dev;
	int rv;
	int n;
	int actual;

	dev = &data->intf->dev;
	buffer = kmalloc(USBTMC_BUFSIZE, GFP_KERNEL);
	if (!buffer)
		return -ENOMEM;

	rv = usb_control_msg(data->usb_dev,
			     usb_rcvctrlpipe(data->usb_dev, 0),
			     USBTMC_REQUEST_INITIATE_ABORT_BULK_IN,
			     USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_ENDPOINT,
			     tag, data->bulk_in,
			     buffer, 2, USB_CTRL_GET_TIMEOUT);

	if (rv < 0) {
		dev_err(dev, "usb_control_msg returned %d\n", rv);
		goto exit;
	}

	dev_dbg(dev, "INITIATE_ABORT_BULK_IN returned %x with tag %02x\n",
		buffer[0], buffer[1]);

	if (buffer[0] == USBTMC_STATUS_FAILED) {
		/* No transfer in progress and the Bulk-OUT FIFO is empty. */
		rv = 0;
		goto exit;
	}

	if (buffer[0] == USBTMC_STATUS_TRANSFER_NOT_IN_PROGRESS) {
		/* The device returns this status if either:
		 * - There is a transfer in progress, but the specified bTag
		 *   does not match.
		 * - There is no transfer in progress, but the Bulk-OUT FIFO
		 *   is not empty.
		 */
		rv = -ENOMSG;
		goto exit;
	}

	if (buffer[0] != USBTMC_STATUS_SUCCESS) {
		dev_err(dev, "INITIATE_ABORT_BULK_IN returned %x\n",
			buffer[0]);
		rv = -EPERM;
		goto exit;
	}

	n = 0;

	/* drain pending Bulk-IN data until a short packet is seen */
usbtmc_abort_bulk_in_status:
	dev_dbg(dev, "Reading from bulk in EP\n");

	/* Data must be present. So use low timeout 300 ms */
	actual = 0;
	rv = usb_bulk_msg(data->usb_dev,
			  usb_rcvbulkpipe(data->usb_dev,
					  data->bulk_in),
			  buffer, USBTMC_BUFSIZE,
			  &actual, 300);

	print_hex_dump_debug("usbtmc ", DUMP_PREFIX_NONE, 16, 1,
			     buffer, actual, true);

	n++;

	if (rv < 0) {
		dev_err(dev, "usb_bulk_msg returned %d\n", rv);
		/* a timeout just means the FIFO is empty for now */
		if (rv != -ETIMEDOUT)
			goto exit;
	}

	/* a full-size buffer means more data may be queued */
	if (actual == USBTMC_BUFSIZE)
		goto usbtmc_abort_bulk_in_status;

	if (n >= USBTMC_MAX_READS_TO_CLEAR_BULK_IN) {
		dev_err(dev, "Couldn't clear device buffer within %d cycles\n",
			USBTMC_MAX_READS_TO_CLEAR_BULK_IN);
		rv = -EPERM;
		goto exit;
	}

	rv = usb_control_msg(data->usb_dev,
			     usb_rcvctrlpipe(data->usb_dev, 0),
			     USBTMC_REQUEST_CHECK_ABORT_BULK_IN_STATUS,
			     USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_ENDPOINT,
			     0, data->bulk_in, buffer, 0x08,
			     USB_CTRL_GET_TIMEOUT);

	if (rv < 0) {
		dev_err(dev, "usb_control_msg returned %d\n", rv);
		goto exit;
	}

	dev_dbg(dev, "CHECK_ABORT_BULK_IN returned %x\n", buffer[0]);

	if (buffer[0] == USBTMC_STATUS_SUCCESS) {
		rv = 0;
		goto exit;
	}

	if (buffer[0] != USBTMC_STATUS_PENDING) {
		dev_err(dev, "CHECK_ABORT_BULK_IN returned %x\n", buffer[0]);
		rv = -EPERM;
		goto exit;
	}

	if ((buffer[1] & 1) > 0) {
		/* The device has 1 or more queued packets the Host can read */
		goto usbtmc_abort_bulk_in_status;
	}

	/* The Host must send CHECK_ABORT_BULK_IN_STATUS at a later time. */
	rv = -EAGAIN;
exit:
	kfree(buffer);
	return rv;
}
static int usbtmc_ioctl_abort_bulk_in(struct usbtmc_device_data *data)
{
return usbtmc_ioctl_abort_bulk_in_tag(data, data->bTag_last_read);
}
/*
 * Abort a Bulk-OUT transfer identified by @tag.
 *
 * Implements the INITIATE_ABORT_BULK_OUT / CHECK_ABORT_BULK_OUT_STATUS
 * handshake from the USBTMC specification, then clears the halt on the
 * Bulk-OUT endpoint.  Returns 0 on success or a negative error code.
 */
static int usbtmc_ioctl_abort_bulk_out_tag(struct usbtmc_device_data *data,
					   u8 tag)
{
	struct device *dev;
	u8 *buffer;
	int rv;
	int n;

	dev = &data->intf->dev;

	buffer = kmalloc(8, GFP_KERNEL);
	if (!buffer)
		return -ENOMEM;

	rv = usb_control_msg(data->usb_dev,
			     usb_rcvctrlpipe(data->usb_dev, 0),
			     USBTMC_REQUEST_INITIATE_ABORT_BULK_OUT,
			     USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_ENDPOINT,
			     tag, data->bulk_out,
			     buffer, 2, USB_CTRL_GET_TIMEOUT);

	if (rv < 0) {
		dev_err(dev, "usb_control_msg returned %d\n", rv);
		goto exit;
	}

	dev_dbg(dev, "INITIATE_ABORT_BULK_OUT returned %x\n", buffer[0]);

	if (buffer[0] != USBTMC_STATUS_SUCCESS) {
		dev_err(dev, "INITIATE_ABORT_BULK_OUT returned %x\n",
			buffer[0]);
		rv = -EPERM;
		goto exit;
	}

	n = 0;

	/* poll abort status until SUCCESS or the retry budget is spent */
usbtmc_abort_bulk_out_check_status:
	/* do not stress device with subsequent requests */
	msleep(50);
	rv = usb_control_msg(data->usb_dev,
			     usb_rcvctrlpipe(data->usb_dev, 0),
			     USBTMC_REQUEST_CHECK_ABORT_BULK_OUT_STATUS,
			     USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_ENDPOINT,
			     0, data->bulk_out, buffer, 0x08,
			     USB_CTRL_GET_TIMEOUT);
	n++;
	if (rv < 0) {
		dev_err(dev, "usb_control_msg returned %d\n", rv);
		goto exit;
	}

	dev_dbg(dev, "CHECK_ABORT_BULK_OUT returned %x\n", buffer[0]);

	if (buffer[0] == USBTMC_STATUS_SUCCESS)
		goto usbtmc_abort_bulk_out_clear_halt;

	if ((buffer[0] == USBTMC_STATUS_PENDING) &&
	    (n < USBTMC_MAX_READS_TO_CLEAR_BULK_IN))
		goto usbtmc_abort_bulk_out_check_status;

	rv = -EPERM;
	goto exit;

usbtmc_abort_bulk_out_clear_halt:
	rv = usb_clear_halt(data->usb_dev,
			    usb_sndbulkpipe(data->usb_dev, data->bulk_out));

	if (rv < 0) {
		dev_err(dev, "usb_control_msg returned %d\n", rv);
		goto exit;
	}
	rv = 0;

exit:
	kfree(buffer);
	return rv;
}
static int usbtmc_ioctl_abort_bulk_out(struct usbtmc_device_data *data)
{
return usbtmc_ioctl_abort_bulk_out_tag(data, data->bTag_last_write);
}
/*
 * Read the IEEE 488 status byte via READ_STATUS_BYTE.
 *
 * If the device has an interrupt-in endpoint the STB arrives as a
 * notification (bNotify2) matched against iin_bTag; otherwise it is
 * taken directly from the control transfer response.  NOTE: on success
 * rv stays positive (bytes from usb_control_msg or the remaining wait
 * time) - callers test for rv > 0, not rv == 0.  The interrupt bTag is
 * always bumped on exit, wrapping from 127 back to 2 (1 is reserved for
 * SRQ, see USBTMC-USB488 subclass spec section 4.3.1).
 */
static int usbtmc_get_stb(struct usbtmc_file_data *file_data, __u8 *stb)
{
	struct usbtmc_device_data *data = file_data->data;
	struct device *dev = &data->intf->dev;
	u8 *buffer;
	u8 tag;
	int rv;

	dev_dbg(dev, "Enter ioctl_read_stb iin_ep_present: %d\n",
		data->iin_ep_present);

	buffer = kmalloc(8, GFP_KERNEL);
	if (!buffer)
		return -ENOMEM;

	atomic_set(&data->iin_data_valid, 0);

	rv = usb_control_msg(data->usb_dev,
			     usb_rcvctrlpipe(data->usb_dev, 0),
			     USBTMC488_REQUEST_READ_STATUS_BYTE,
			     USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE,
			     data->iin_bTag,
			     data->ifnum,
			     buffer, 0x03, USB_CTRL_GET_TIMEOUT);
	if (rv < 0) {
		dev_err(dev, "stb usb_control_msg returned %d\n", rv);
		goto exit;
	}

	if (buffer[0] != USBTMC_STATUS_SUCCESS) {
		dev_err(dev, "control status returned %x\n", buffer[0]);
		rv = -EIO;
		goto exit;
	}

	if (data->iin_ep_present) {
		/* wait for the interrupt-in notification carrying the STB */
		rv = wait_event_interruptible_timeout(
			data->waitq,
			atomic_read(&data->iin_data_valid) != 0,
			file_data->timeout);
		if (rv < 0) {
			dev_dbg(dev, "wait interrupted %d\n", rv);
			goto exit;
		}

		if (rv == 0) {
			dev_dbg(dev, "wait timed out\n");
			rv = -ETIMEDOUT;
			goto exit;
		}

		tag = data->bNotify1 & 0x7f;
		if (tag != data->iin_bTag) {
			/* mismatch is logged but the STB is still used */
			dev_err(dev, "expected bTag %x got %x\n",
				data->iin_bTag, tag);
		}

		*stb = data->bNotify2;
	} else {
		*stb = buffer[2];
	}

	dev_dbg(dev, "stb:0x%02x received %d\n", (unsigned int)*stb, rv);

 exit:
	/* bump interrupt bTag */
	data->iin_bTag += 1;
	if (data->iin_bTag > 127)
		/* 1 is for SRQ see USBTMC-USB488 subclass spec section 4.3.1 */
		data->iin_bTag = 2;

	kfree(buffer);
	return rv;
}
/*
 * ioctl: read the status byte and report it to user space.
 *
 * usbtmc_get_stb() returns a positive value on success; in that case the
 * handle's srq_asserted flag is consumed (cleared via atomic_xchg) and,
 * if it was set, the RQS bit (0x40) is OR-ed into the STB.  Returns 0 on
 * success (put_user result) or a negative error.
 */
static int usbtmc488_ioctl_read_stb(struct usbtmc_file_data *file_data,
				void __user *arg)
{
	int srq_asserted = 0;
	__u8 stb;
	int rv;

	rv = usbtmc_get_stb(file_data, &stb);

	if (rv > 0) {
		srq_asserted = atomic_xchg(&file_data->srq_asserted,
					srq_asserted);
		if (srq_asserted)
			stb |= 0x40; /* Set RQS bit */

		rv = put_user(stb, (__u8 __user *)arg);
	}
	return rv;
}
/*
 * ioctl: return the status byte captured at SRQ time, if any.
 *
 * Consumes the handle's srq_asserted flag under dev_lock so the flag and
 * the cached srq_byte are read consistently.  Returns -ENOMSG when no
 * SRQ is pending.
 */
static int usbtmc_ioctl_get_srq_stb(struct usbtmc_file_data *file_data,
				void __user *arg)
{
	struct usbtmc_device_data *data = file_data->data;
	struct device *dev = &data->intf->dev;
	int srq_asserted = 0;
	__u8 stb = 0;
	int rv;

	spin_lock_irq(&data->dev_lock);
	srq_asserted = atomic_xchg(&file_data->srq_asserted, srq_asserted);

	if (srq_asserted) {
		stb = file_data->srq_byte;
		spin_unlock_irq(&data->dev_lock);
		rv = put_user(stb, (__u8 __user *)arg);
	} else {
		spin_unlock_irq(&data->dev_lock);
		rv = -ENOMSG;
	}

	dev_dbg(dev, "stb:0x%02x with srq received %d\n", (unsigned int)stb, rv);

	return rv;
}
/*
 * ioctl: block until an SRQ is asserted, the handle is closed, or the
 * user-supplied timeout (milliseconds, read from @arg) expires.
 *
 * The io_mutex (held by the ioctl dispatcher) is dropped for the
 * duration of the wait so other I/O - and the interrupt handler that
 * sets srq_asserted - can proceed, then re-taken before the state is
 * re-checked.  Requires the interrupt-in endpoint.
 */
static int usbtmc488_ioctl_wait_srq(struct usbtmc_file_data *file_data,
				    __u32 __user *arg)
{
	struct usbtmc_device_data *data = file_data->data;
	struct device *dev = &data->intf->dev;
	int rv;
	u32 timeout;
	unsigned long expire;

	if (!data->iin_ep_present) {
		dev_dbg(dev, "no interrupt endpoint present\n");
		return -EFAULT;
	}

	if (get_user(timeout, arg))
		return -EFAULT;

	expire = msecs_to_jiffies(timeout);

	mutex_unlock(&data->io_mutex);

	rv = wait_event_interruptible_timeout(
			data->waitq,
			atomic_read(&file_data->srq_asserted) != 0 ||
			atomic_read(&file_data->closing),
			expire);

	mutex_lock(&data->io_mutex);

	/* Note! disconnect or close could be called in the meantime */
	if (atomic_read(&file_data->closing) || data->zombie)
		rv = -ENODEV;

	if (rv < 0) {
		/* dev can be invalid now! */
		pr_debug("%s - wait interrupted %d\n", __func__, rv);
		return rv;
	}

	if (rv == 0) {
		dev_dbg(dev, "%s - wait timed out\n", __func__);
		return -ETIMEDOUT;
	}

	dev_dbg(dev, "%s - srq asserted\n", __func__);
	return 0;
}
/*
 * Issue a "simple" USB488 class control request (@cmd) on the interface.
 *
 * Requires the device to advertise USBTMC488_CAPABILITY_SIMPLE.  For
 * REQUEST_REN_CONTROL the wValue is taken from a user-supplied byte at
 * @arg (non-zero enables REN); other requests use wValue 0.  The device
 * must answer with a 1-byte USBTMC_STATUS_SUCCESS.
 */
static int usbtmc488_ioctl_simple(struct usbtmc_device_data *data,
				void __user *arg, unsigned int cmd)
{
	struct device *dev = &data->intf->dev;
	__u8 val;
	u8 *buffer;
	u16 wValue;
	int rv;

	if (!(data->usb488_caps & USBTMC488_CAPABILITY_SIMPLE))
		return -EINVAL;

	buffer = kmalloc(8, GFP_KERNEL);
	if (!buffer)
		return -ENOMEM;

	if (cmd == USBTMC488_REQUEST_REN_CONTROL) {
		rv = copy_from_user(&val, arg, sizeof(val));
		if (rv) {
			rv = -EFAULT;
			goto exit;
		}
		wValue = val ? 1 : 0;
	} else {
		wValue = 0;
	}

	rv = usb_control_msg(data->usb_dev,
			     usb_rcvctrlpipe(data->usb_dev, 0),
			     cmd,
			     USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE,
			     wValue,
			     data->ifnum,
			     buffer, 0x01, USB_CTRL_GET_TIMEOUT);
	if (rv < 0) {
		dev_err(dev, "simple usb_control_msg failed %d\n", rv);
		goto exit;
	} else if (rv != 1) {
		/* exactly one status byte is expected back */
		dev_warn(dev, "simple usb_control_msg returned %d\n", rv);
		rv = -EIO;
		goto exit;
	}

	if (buffer[0] != USBTMC_STATUS_SUCCESS) {
		dev_err(dev, "simple control status returned %x\n", buffer[0]);
		rv = -EIO;
		goto exit;
	}
	rv = 0;

 exit:
	kfree(buffer);
	return rv;
}
/*
* Sends a TRIGGER Bulk-OUT command message
* See the USBTMC-USB488 specification, Table 2.
*
* Also updates bTag_last_write.
*/
static int usbtmc488_ioctl_trigger(struct usbtmc_file_data *file_data)
{
	struct usbtmc_device_data *data = file_data->data;
	int retval;
	u8 *buffer;
	int actual;

	/* header buffer is zeroed; only MsgID and bTag fields are set */
	buffer = kzalloc(USBTMC_HEADER_SIZE, GFP_KERNEL);
	if (!buffer)
		return -ENOMEM;

	buffer[0] = 128;	/* TRIGGER MsgID, USB488 Table 2 */
	buffer[1] = data->bTag;
	buffer[2] = ~data->bTag;	/* bTagInverse */

	retval = usb_bulk_msg(data->usb_dev,
			      usb_sndbulkpipe(data->usb_dev,
					      data->bulk_out),
			      buffer, USBTMC_HEADER_SIZE,
			      &actual, file_data->timeout);

	/* Store bTag (in case we need to abort) */
	data->bTag_last_write = data->bTag;

	/* Increment bTag -- and increment again if zero */
	data->bTag++;
	if (!data->bTag)
		data->bTag++;

	kfree(buffer);
	if (retval < 0) {
		dev_err(&data->intf->dev, "%s returned %d\n",
			__func__, retval);
		return retval;
	}

	return 0;
}
/*
 * Allocate a URB together with a USBTMC_BUFSIZE transfer buffer.
 * URB_FREE_BUFFER makes usb_free_urb() release the buffer as well.
 * Returns NULL on allocation failure.
 */
static struct urb *usbtmc_create_urb(void)
{
	struct urb *urb;
	u8 *buf;

	urb = usb_alloc_urb(0, GFP_KERNEL);
	if (!urb)
		return NULL;

	buf = kmalloc(USBTMC_BUFSIZE, GFP_KERNEL);
	if (!buf) {
		usb_free_urb(urb);
		return NULL;
	}

	urb->transfer_buffer = buf;
	urb->transfer_buffer_length = USBTMC_BUFSIZE;
	urb->transfer_flags |= URB_FREE_BUFFER;
	return urb;
}
/*
 * Completion handler for Bulk-IN URBs submitted by usbtmc_generic_read().
 *
 * Records the first error in in_status, accounts the received bytes,
 * re-anchors the completed URB on in_anchor for the reader to consume,
 * and wakes both the bulk-in waiter and any poll/select waiters.
 * Runs in URB completion (interrupt) context, hence the irqsave locking.
 */
static void usbtmc_read_bulk_cb(struct urb *urb)
{
	struct usbtmc_file_data *file_data = urb->context;
	int status = urb->status;
	unsigned long flags;

	/* sync/async unlink faults aren't errors */
	if (status) {
		if (!(/* status == -ENOENT || */
		    status == -ECONNRESET ||
		    status == -EREMOTEIO || /* Short packet */
		    status == -ESHUTDOWN))
			dev_err(&file_data->data->intf->dev,
				"%s - nonzero read bulk status received: %d\n",
				__func__, status);

		spin_lock_irqsave(&file_data->err_lock, flags);
		if (!file_data->in_status)
			file_data->in_status = status;
		spin_unlock_irqrestore(&file_data->err_lock, flags);
	}

	spin_lock_irqsave(&file_data->err_lock, flags);
	file_data->in_transfer_size += urb->actual_length;
	dev_dbg(&file_data->data->intf->dev,
		"%s - total size: %u current: %d status: %d\n",
		__func__, file_data->in_transfer_size,
		urb->actual_length, status);
	spin_unlock_irqrestore(&file_data->err_lock, flags);
	usb_anchor_urb(urb, &file_data->in_anchor);

	wake_up_interruptible(&file_data->wait_bulk_in);
	wake_up_interruptible(&file_data->data->waitq);
}
/*
 * Wait condition for the synchronous reader: true when a completed URB
 * is available on in_anchor or an error has been recorded.
 */
static inline bool usbtmc_do_transfer(struct usbtmc_file_data *file_data)
{
	bool ready;

	spin_lock_irq(&file_data->err_lock);
	ready = file_data->in_status ||
		!usb_anchor_empty(&file_data->in_anchor);
	spin_unlock_irq(&file_data->err_lock);

	dev_dbg(&file_data->data->intf->dev, "%s: returns %d\n", __func__,
		ready);
	return ready;
}
/*
 * Core Bulk-IN read engine used by the generic read ioctl.
 *
 * Submits up to MAX_URBS_IN_FLIGHT read URBs and copies completed data
 * to @user_buffer, stopping on a short packet/ZLP, error, or when
 * @transfer_size bytes (rounded per @flags) have been read.  With
 * USBTMC_FLAG_ASYNC it never blocks: it returns -EAGAIN when URBs were
 * submitted but no data is ready yet, and 0 once queued data has been
 * drained.  @transferred always receives the number of bytes copied,
 * even on error.  Caller holds io_mutex.  Return: negative errno on
 * error, 1 when the transfer ended on a short packet, otherwise 0.
 */
static ssize_t usbtmc_generic_read(struct usbtmc_file_data *file_data,
				   void __user *user_buffer,
				   u32 transfer_size,
				   u32 *transferred,
				   u32 flags)
{
	struct usbtmc_device_data *data = file_data->data;
	struct device *dev = &data->intf->dev;
	u32 done = 0;
	u32 remaining;
	const u32 bufsize = USBTMC_BUFSIZE;
	int retval = 0;
	u32 max_transfer_size;
	unsigned long expire;
	int bufcount = 1;
	int again = 0;

	/* mutex already locked */

	*transferred = done;

	max_transfer_size = transfer_size;

	if (flags & USBTMC_FLAG_IGNORE_TRAILER) {
		/* The device may send extra alignment bytes (up to
		 * wMaxPacketSize – 1) to avoid sending a zero-length
		 * packet
		 */
		remaining = transfer_size;
		if ((max_transfer_size % data->wMaxPacketSize) == 0)
			max_transfer_size += (data->wMaxPacketSize - 1);
	} else {
		/* round down to bufsize to avoid truncated data left */
		if (max_transfer_size > bufsize) {
			max_transfer_size =
				roundup(max_transfer_size + 1 - bufsize,
					bufsize);
		}
		remaining = max_transfer_size;
	}

	spin_lock_irq(&file_data->err_lock);

	if (file_data->in_status) {
		/* return the very first error */
		retval = file_data->in_status;
		spin_unlock_irq(&file_data->err_lock);
		goto error;
	}

	if (flags & USBTMC_FLAG_ASYNC) {
		/* nothing completed yet: submit URBs, then report -EAGAIN */
		if (usb_anchor_empty(&file_data->in_anchor))
			again = 1;

		if (file_data->in_urbs_used == 0) {
			file_data->in_transfer_size = 0;
			file_data->in_status = 0;
		}
	} else {
		file_data->in_transfer_size = 0;
		file_data->in_status = 0;
	}

	/* compute how many additional URBs to submit, capped by the quota */
	if (max_transfer_size == 0) {
		bufcount = 0;
	} else {
		bufcount = roundup(max_transfer_size, bufsize) / bufsize;
		if (bufcount > file_data->in_urbs_used)
			bufcount -= file_data->in_urbs_used;
		else
			bufcount = 0;

		if (bufcount + file_data->in_urbs_used > MAX_URBS_IN_FLIGHT) {
			bufcount = MAX_URBS_IN_FLIGHT -
				file_data->in_urbs_used;
		}
	}
	spin_unlock_irq(&file_data->err_lock);

	dev_dbg(dev, "%s: requested=%u flags=0x%X size=%u bufs=%d used=%d\n",
		__func__, transfer_size, flags,
		max_transfer_size, bufcount, file_data->in_urbs_used);

	while (bufcount > 0) {
		u8 *dmabuf = NULL;
		struct urb *urb = usbtmc_create_urb();

		if (!urb) {
			retval = -ENOMEM;
			goto error;
		}

		dmabuf = urb->transfer_buffer;

		usb_fill_bulk_urb(urb, data->usb_dev,
			usb_rcvbulkpipe(data->usb_dev, data->bulk_in),
			dmabuf, bufsize,
			usbtmc_read_bulk_cb, file_data);

		usb_anchor_urb(urb, &file_data->submitted);
		retval = usb_submit_urb(urb, GFP_KERNEL);
		/* urb is anchored. We can release our reference. */
		usb_free_urb(urb);
		if (unlikely(retval)) {
			usb_unanchor_urb(urb);
			goto error;
		}
		file_data->in_urbs_used++;
		bufcount--;
	}

	if (again) {
		dev_dbg(dev, "%s: ret=again\n", __func__);
		return -EAGAIN;
	}

	if (user_buffer == NULL)
		return -EINVAL;

	expire = msecs_to_jiffies(file_data->timeout);

	while (max_transfer_size > 0) {
		u32 this_part;
		struct urb *urb = NULL;

		if (!(flags & USBTMC_FLAG_ASYNC)) {
			dev_dbg(dev, "%s: before wait time %lu\n",
				__func__, expire);
			retval = wait_event_interruptible_timeout(
				file_data->wait_bulk_in,
				usbtmc_do_transfer(file_data),
				expire);

			dev_dbg(dev, "%s: wait returned %d\n",
				__func__, retval);

			if (retval <= 0) {
				if (retval == 0)
					retval = -ETIMEDOUT;
				goto error;
			}
		}

		urb = usb_get_from_anchor(&file_data->in_anchor);
		if (!urb) {
			if (!(flags & USBTMC_FLAG_ASYNC)) {
				/* synchronous case: must not happen */
				retval = -EFAULT;
				goto error;
			}

			/* asynchronous case: ready, do not block or wait */
			*transferred = done;
			dev_dbg(dev, "%s: (async) done=%u ret=0\n",
				__func__, done);
			return 0;
		}

		file_data->in_urbs_used--;

		if (max_transfer_size > urb->actual_length)
			max_transfer_size -= urb->actual_length;
		else
			max_transfer_size = 0;

		/* never copy more than the caller asked for (remaining) */
		if (remaining > urb->actual_length)
			this_part = urb->actual_length;
		else
			this_part = remaining;

		print_hex_dump_debug("usbtmc ", DUMP_PREFIX_NONE, 16, 1,
			urb->transfer_buffer, urb->actual_length, true);

		if (copy_to_user(user_buffer + done,
				 urb->transfer_buffer, this_part)) {
			usb_free_urb(urb);
			retval = -EFAULT;
			goto error;
		}

		remaining -= this_part;
		done += this_part;

		spin_lock_irq(&file_data->err_lock);
		if (urb->status) {
			/* return the very first error */
			retval = file_data->in_status;
			spin_unlock_irq(&file_data->err_lock);
			usb_free_urb(urb);
			goto error;
		}
		spin_unlock_irq(&file_data->err_lock);

		if (urb->actual_length < bufsize) {
			/* short packet or ZLP received => ready */
			usb_free_urb(urb);
			retval = 1;
			break;
		}

		if (!(flags & USBTMC_FLAG_ASYNC) &&
		    max_transfer_size > (bufsize * file_data->in_urbs_used)) {
			/* resubmit, since other buffers still not enough */
			usb_anchor_urb(urb, &file_data->submitted);
			retval = usb_submit_urb(urb, GFP_KERNEL);
			if (unlikely(retval)) {
				usb_unanchor_urb(urb);
				usb_free_urb(urb);
				goto error;
			}
			file_data->in_urbs_used++;
		}
		usb_free_urb(urb);
		retval = 0;
	}

error:
	*transferred = done;

	dev_dbg(dev, "%s: before kill\n", __func__);
	/* Attention: killing urbs can take long time (2 ms) */
	usb_kill_anchored_urbs(&file_data->submitted);
	dev_dbg(dev, "%s: after kill\n", __func__);
	usb_scuttle_anchored_urbs(&file_data->in_anchor);
	file_data->in_urbs_used = 0;
	file_data->in_status = 0;	/* no spinlock needed here */
	dev_dbg(dev, "%s: done=%u ret=%d\n", __func__, done, retval);

	return retval;
}
/*
 * ioctl wrapper around usbtmc_generic_read(): copies the usbtmc_message
 * descriptor from user space, runs the read, and writes back the
 * transferred count even when the read itself failed.
 */
static ssize_t usbtmc_ioctl_generic_read(struct usbtmc_file_data *file_data,
					 void __user *arg)
{
	struct usbtmc_message msg;
	ssize_t rv;

	/* mutex already locked */

	if (copy_from_user(&msg, arg, sizeof(struct usbtmc_message)))
		return -EFAULT;

	rv = usbtmc_generic_read(file_data, msg.message,
				 msg.transfer_size, &msg.transferred,
				 msg.flags);

	/* always report how much was actually transferred */
	if (put_user(msg.transferred,
		     &((struct usbtmc_message __user *)arg)->transferred))
		return -EFAULT;

	return rv;
}
/*
 * Completion handler for Bulk-OUT URBs submitted by usbtmc_generic_write().
 *
 * Accounts the written bytes, latches the first error in out_status,
 * releases one slot of the write limit semaphore, and wakes waiters when
 * either all URBs have completed or a new error was recorded.  Runs in
 * URB completion (interrupt) context, hence the irqsave locking.
 */
static void usbtmc_write_bulk_cb(struct urb *urb)
{
	struct usbtmc_file_data *file_data = urb->context;
	int wakeup = 0;
	unsigned long flags;

	spin_lock_irqsave(&file_data->err_lock, flags);
	file_data->out_transfer_size += urb->actual_length;

	/* sync/async unlink faults aren't errors */
	if (urb->status) {
		if (!(urb->status == -ENOENT ||
		    urb->status == -ECONNRESET ||
		    urb->status == -ESHUTDOWN))
			dev_err(&file_data->data->intf->dev,
				"%s - nonzero write bulk status received: %d\n",
				__func__, urb->status);

		if (!file_data->out_status) {
			file_data->out_status = urb->status;
			wakeup = 1;
		}
	}
	spin_unlock_irqrestore(&file_data->err_lock, flags);

	dev_dbg(&file_data->data->intf->dev,
		"%s - write bulk total size: %u\n",
		__func__, file_data->out_transfer_size);

	up(&file_data->limit_write_sem);
	if (usb_anchor_empty(&file_data->submitted) || wakeup)
		wake_up_interruptible(&file_data->data->waitq);
}
/*
 * Core Bulk-OUT write engine used by the generic write ioctl.
 *
 * Splits @transfer_size bytes from @user_buffer into USBTMC_BUFSIZE
 * URBs, each padded to 32-bit alignment per the USBTMC spec.  The
 * limit_write_sem semaphore caps URBs in flight; with USBTMC_FLAG_ASYNC
 * the function never blocks on it (returns -EAGAIN if nothing could be
 * queued), otherwise it waits up to the handle timeout.  Unless
 * USBTMC_FLAG_APPEND is set, the out_status/out_transfer_size state is
 * reset first.  Synchronous calls wait for all URBs to complete.
 * @transferred receives the byte count (bytes actually sent for sync,
 * bytes queued for async).  Caller holds io_mutex.
 */
static ssize_t usbtmc_generic_write(struct usbtmc_file_data *file_data,
				    const void __user *user_buffer,
				    u32 transfer_size,
				    u32 *transferred,
				    u32 flags)
{
	struct usbtmc_device_data *data = file_data->data;
	struct device *dev;
	u32 done = 0;
	u32 remaining;
	unsigned long expire;
	const u32 bufsize = USBTMC_BUFSIZE;
	struct urb *urb = NULL;
	int retval = 0;
	u32 timeout;

	*transferred = 0;

	/* Get pointer to private data structure */
	dev = &data->intf->dev;

	dev_dbg(dev, "%s: size=%u flags=0x%X sema=%u\n",
		__func__, transfer_size, flags,
		file_data->limit_write_sem.count);

	if (flags & USBTMC_FLAG_APPEND) {
		/* appending: keep state, but bail out on a previous error */
		spin_lock_irq(&file_data->err_lock);
		retval = file_data->out_status;
		spin_unlock_irq(&file_data->err_lock);
		if (retval < 0)
			return retval;
	} else {
		spin_lock_irq(&file_data->err_lock);
		file_data->out_transfer_size = 0;
		file_data->out_status = 0;
		spin_unlock_irq(&file_data->err_lock);
	}

	remaining = transfer_size;
	if (remaining > INT_MAX)
		remaining = INT_MAX;

	timeout = file_data->timeout;
	expire = msecs_to_jiffies(timeout);

	while (remaining > 0) {
		u32 this_part, aligned;
		u8 *buffer = NULL;

		if (flags & USBTMC_FLAG_ASYNC) {
			/* non-blocking: give up when no URB slot is free */
			if (down_trylock(&file_data->limit_write_sem)) {
				retval = (done)?(0):(-EAGAIN);
				goto exit;
			}
		} else {
			retval = down_timeout(&file_data->limit_write_sem,
					      expire);
			if (retval < 0) {
				retval = -ETIMEDOUT;
				goto error;
			}
		}

		spin_lock_irq(&file_data->err_lock);
		retval = file_data->out_status;
		spin_unlock_irq(&file_data->err_lock);
		if (retval < 0) {
			up(&file_data->limit_write_sem);
			goto error;
		}

		/* prepare next urb to send */
		urb = usbtmc_create_urb();
		if (!urb) {
			retval = -ENOMEM;
			up(&file_data->limit_write_sem);
			goto error;
		}
		buffer = urb->transfer_buffer;

		if (remaining > bufsize)
			this_part = bufsize;
		else
			this_part = remaining;

		if (copy_from_user(buffer, user_buffer + done, this_part)) {
			retval = -EFAULT;
			up(&file_data->limit_write_sem);
			goto error;
		}

		print_hex_dump_debug("usbtmc ", DUMP_PREFIX_NONE,
			16, 1, buffer, this_part, true);

		/* fill bulk with 32 bit alignment to meet USBTMC specification
		 * (size + 3 & ~3) rounds up and simplifies user code
		 */
		aligned = (this_part + 3) & ~3;
		dev_dbg(dev, "write(size:%u align:%u done:%u)\n",
			(unsigned int)this_part,
			(unsigned int)aligned,
			(unsigned int)done);

		usb_fill_bulk_urb(urb, data->usb_dev,
			usb_sndbulkpipe(data->usb_dev, data->bulk_out),
			urb->transfer_buffer, aligned,
			usbtmc_write_bulk_cb, file_data);

		usb_anchor_urb(urb, &file_data->submitted);
		retval = usb_submit_urb(urb, GFP_KERNEL);
		if (unlikely(retval)) {
			usb_unanchor_urb(urb);
			up(&file_data->limit_write_sem);
			goto error;
		}

		usb_free_urb(urb);
		urb = NULL; /* urb will be finally released by usb driver */

		remaining -= this_part;
		done += this_part;
	}

	/* All urbs are on the fly */
	if (!(flags & USBTMC_FLAG_ASYNC)) {
		if (!usb_wait_anchor_empty_timeout(&file_data->submitted,
						   timeout)) {
			retval = -ETIMEDOUT;
			goto error;
		}
	}

	retval = 0;
	goto exit;

error:
	usb_kill_anchored_urbs(&file_data->submitted);
exit:
	usb_free_urb(urb);

	spin_lock_irq(&file_data->err_lock);
	if (!(flags & USBTMC_FLAG_ASYNC))
		done = file_data->out_transfer_size;
	if (!retval && file_data->out_status)
		retval = file_data->out_status;
	spin_unlock_irq(&file_data->err_lock);

	*transferred = done;

	dev_dbg(dev, "%s: done=%u, retval=%d, urbstat=%d\n",
		__func__, done, retval, file_data->out_status);

	return retval;
}
/*
 * ioctl wrapper around usbtmc_generic_write(): copies the usbtmc_message
 * descriptor from user space, runs the write, and writes back the
 * transferred count even when the write itself failed.
 */
static ssize_t usbtmc_ioctl_generic_write(struct usbtmc_file_data *file_data,
					  void __user *arg)
{
	struct usbtmc_message msg;
	ssize_t rv;

	/* mutex already locked */

	if (copy_from_user(&msg, arg, sizeof(struct usbtmc_message)))
		return -EFAULT;

	rv = usbtmc_generic_write(file_data, msg.message,
				  msg.transfer_size, &msg.transferred,
				  msg.flags);

	/* always report how much was actually transferred */
	if (put_user(msg.transferred,
		     &((struct usbtmc_message __user *)arg)->transferred))
		return -EFAULT;

	return rv;
}
/*
* Get the generic write result
*/
/*
 * Get the generic write result: report the byte count of the last write
 * to user space and return its status code.
 */
static ssize_t usbtmc_ioctl_write_result(struct usbtmc_file_data *file_data,
				void __user *arg)
{
	int status;
	u32 count;

	/* snapshot count and status consistently under the error lock */
	spin_lock_irq(&file_data->err_lock);
	count = file_data->out_transfer_size;
	status = file_data->out_status;
	spin_unlock_irq(&file_data->err_lock);

	if (put_user(count, (__u32 __user *)arg))
		return -EFAULT;

	return status;
}
/*
* Sends a REQUEST_DEV_DEP_MSG_IN message on the Bulk-OUT endpoint.
* @transfer_size: number of bytes to request from the device.
*
* See the USBTMC specification, Table 4.
*
* Also updates bTag_last_write.
*/
static int send_request_dev_dep_msg_in(struct usbtmc_file_data *file_data,
				       u32 transfer_size)
{
	struct usbtmc_device_data *data = file_data->data;
	int retval;
	u8 *buffer;
	int actual;

	buffer = kmalloc(USBTMC_HEADER_SIZE, GFP_KERNEL);
	if (!buffer)
		return -ENOMEM;
	/* Setup IO buffer for REQUEST_DEV_DEP_MSG_IN message
	 * Refer to class specs for details
	 */
	buffer[0] = 2;			/* MsgID: REQUEST_DEV_DEP_MSG_IN */
	buffer[1] = data->bTag;
	buffer[2] = ~data->bTag;	/* bTagInverse */
	buffer[3] = 0; /* Reserved */
	/* TransferSize, little-endian */
	buffer[4] = transfer_size >> 0;
	buffer[5] = transfer_size >> 8;
	buffer[6] = transfer_size >> 16;
	buffer[7] = transfer_size >> 24;
	buffer[8] = file_data->term_char_enabled * 2;
	/* Use term character? */
	buffer[9] = file_data->term_char;
	buffer[10] = 0; /* Reserved */
	buffer[11] = 0; /* Reserved */

	/* Send bulk URB */
	retval = usb_bulk_msg(data->usb_dev,
			      usb_sndbulkpipe(data->usb_dev,
					      data->bulk_out),
			      buffer, USBTMC_HEADER_SIZE,
			      &actual, file_data->timeout);

	/* Store bTag (in case we need to abort) */
	data->bTag_last_write = data->bTag;

	/* Increment bTag -- and increment again if zero */
	data->bTag++;
	if (!data->bTag)
		data->bTag++;

	kfree(buffer);
	if (retval < 0)
		dev_err(&data->intf->dev, "%s returned %d\n",
			__func__, retval);

	return retval;
}
/*
 * usbtmc_read - read() handler for the character device
 *
 * Sends REQUEST_DEV_DEP_MSG_IN for up to @count bytes, reads the first
 * Bulk-IN packet synchronously (12-byte USBTMC header + initial payload),
 * validates the header, then fetches any remainder via
 * usbtmc_generic_read().  Serialized against other I/O by data->io_mutex.
 * Returns the number of payload bytes copied to @buf or a negative errno.
 */
static ssize_t usbtmc_read(struct file *filp, char __user *buf,
			   size_t count, loff_t *f_pos)
{
	struct usbtmc_file_data *file_data;
	struct usbtmc_device_data *data;
	struct device *dev;
	const u32 bufsize = USBTMC_BUFSIZE;
	u32 n_characters;
	u8 *buffer;
	int actual;
	u32 done = 0;
	u32 remaining;
	int retval;

	/* Get pointer to private data structure */
	file_data = filp->private_data;
	data = file_data->data;
	dev = &data->intf->dev;

	buffer = kmalloc(bufsize, GFP_KERNEL);
	if (!buffer)
		return -ENOMEM;

	mutex_lock(&data->io_mutex);
	/* device disconnected while we held a reference */
	if (data->zombie) {
		retval = -ENODEV;
		goto exit;
	}

	if (count > INT_MAX)
		count = INT_MAX;

	dev_dbg(dev, "%s(count:%zu)\n", __func__, count);

	retval = send_request_dev_dep_msg_in(file_data, count);
	if (retval < 0) {
		if (file_data->auto_abort)
			usbtmc_ioctl_abort_bulk_out(data);
		goto exit;
	}

	/* Loop until we have fetched everything we requested */
	remaining = count;
	actual = 0;

	/* Send bulk URB */
	/* NOTE(review): retval is int; the %u specifier below mismatches the
	 * signed type for negative errnos -- debug output only, but verify. */
	retval = usb_bulk_msg(data->usb_dev,
			      usb_rcvbulkpipe(data->usb_dev,
					      data->bulk_in),
			      buffer, bufsize, &actual,
			      file_data->timeout);

	dev_dbg(dev, "%s: bulk_msg retval(%u), actual(%d)\n",
		__func__, retval, actual);

	/* Store bTag (in case we need to abort) */
	/* NOTE(review): data->bTag was already incremented by
	 * send_request_dev_dep_msg_in(); confirm the abort path is meant to
	 * use this (next) tag rather than bTag_last_write. */
	data->bTag_last_read = data->bTag;

	if (retval < 0) {
		if (file_data->auto_abort)
			usbtmc_ioctl_abort_bulk_in(data);
		goto exit;
	}

	/* Sanity checks for the header */
	/* NOTE(review): on the header-validation failures below, retval is
	 * still 0 from the successful usb_bulk_msg(), so read() reports EOF
	 * rather than an error -- confirm this is the intended behavior. */
	if (actual < USBTMC_HEADER_SIZE) {
		dev_err(dev, "Device sent too small first packet: %u < %u\n",
			actual, USBTMC_HEADER_SIZE);
		if (file_data->auto_abort)
			usbtmc_ioctl_abort_bulk_in(data);
		goto exit;
	}

	if (buffer[0] != 2) {
		dev_err(dev, "Device sent reply with wrong MsgID: %u != 2\n",
			buffer[0]);
		if (file_data->auto_abort)
			usbtmc_ioctl_abort_bulk_in(data);
		goto exit;
	}

	if (buffer[1] != data->bTag_last_write) {
		dev_err(dev, "Device sent reply with wrong bTag: %u != %u\n",
			buffer[1], data->bTag_last_write);
		if (file_data->auto_abort)
			usbtmc_ioctl_abort_bulk_in(data);
		goto exit;
	}

	/* How many characters did the instrument send? (little-endian u32) */
	n_characters = buffer[4] +
		       (buffer[5] << 8) +
		       (buffer[6] << 16) +
		       (buffer[7] << 24);

	/* byte 8 carries bmTransferAttributes (e.g. EOM bit) */
	file_data->bmTransferAttributes = buffer[8];

	dev_dbg(dev, "Bulk-IN header: N_characters(%u), bTransAttr(%u)\n",
		n_characters, buffer[8]);

	if (n_characters > remaining) {
		dev_err(dev, "Device wants to return more data than requested: %u > %zu\n",
			n_characters, count);
		if (file_data->auto_abort)
			usbtmc_ioctl_abort_bulk_in(data);
		goto exit;
	}

	print_hex_dump_debug("usbtmc ", DUMP_PREFIX_NONE,
			     16, 1, buffer, actual, true);

	remaining = n_characters;

	/* Remove the USBTMC header */
	actual -= USBTMC_HEADER_SIZE;

	/* Remove padding if it exists */
	if (actual > remaining)
		actual = remaining;

	remaining -= actual;

	/* Copy buffer to user space */
	if (copy_to_user(buf, &buffer[USBTMC_HEADER_SIZE], actual)) {
		/* There must have been an addressing problem */
		retval = -EFAULT;
		goto exit;
	}

	/* first packet filled the buffer completely: more data may follow */
	if ((actual + USBTMC_HEADER_SIZE) == bufsize) {
		retval = usbtmc_generic_read(file_data, buf + actual,
					     remaining,
					     &done,
					     USBTMC_FLAG_IGNORE_TRAILER);
		if (retval < 0)
			goto exit;
	}

	done += actual;

	/* Update file position value */
	*f_pos = *f_pos + done;

	retval = done;

exit:
	mutex_unlock(&data->io_mutex);
	kfree(buffer);
	return retval;
}
/*
 * usbtmc_write - write() handler for the character device
 *
 * Builds the 12-byte DEV_DEP_MSG_OUT header plus as much user data as
 * fits into one URB, submits it asynchronously, then forwards the
 * remainder (if any) to usbtmc_generic_write().  limit_write_sem makes
 * the call fail with -EBUSY while earlier async writes are still in
 * flight.  Returns the number of payload bytes accepted or -errno.
 */
static ssize_t usbtmc_write(struct file *filp, const char __user *buf,
			    size_t count, loff_t *f_pos)
{
	struct usbtmc_file_data *file_data;
	struct usbtmc_device_data *data;
	struct urb *urb = NULL;
	ssize_t retval = 0;
	u8 *buffer;
	u32 remaining, done;
	u32 transfersize, aligned, buflen;

	file_data = filp->private_data;
	data = file_data->data;

	mutex_lock(&data->io_mutex);

	if (data->zombie) {
		retval = -ENODEV;
		goto exit;
	}

	done = 0;

	/* reset the result fields read back via USBTMC_IOCTL_WRITE_RESULT */
	spin_lock_irq(&file_data->err_lock);
	file_data->out_transfer_size = 0;
	file_data->out_status = 0;
	spin_unlock_irq(&file_data->err_lock);

	if (!count)
		goto exit;

	if (down_trylock(&file_data->limit_write_sem)) {
		/* previous calls were async */
		retval = -EBUSY;
		goto exit;
	}

	urb = usbtmc_create_urb();
	if (!urb) {
		retval = -ENOMEM;
		up(&file_data->limit_write_sem);
		goto exit;
	}

	buffer = urb->transfer_buffer;
	buflen = urb->transfer_buffer_length;

	if (count > INT_MAX) {
		/* message too large for one header: EOM must stay clear */
		transfersize = INT_MAX;
		buffer[8] = 0;
	} else {
		transfersize = count;
		buffer[8] = file_data->eom_val;
	}

	/* Setup IO buffer for DEV_DEP_MSG_OUT message */
	buffer[0] = 1;			/* MsgID: DEV_DEP_MSG_OUT */
	buffer[1] = data->bTag;
	buffer[2] = ~data->bTag;	/* bTagInverse */
	buffer[3] = 0; /* Reserved */
	/* TransferSize, little-endian */
	buffer[4] = transfersize >> 0;
	buffer[5] = transfersize >> 8;
	buffer[6] = transfersize >> 16;
	buffer[7] = transfersize >> 24;
	/* buffer[8] is set above... */
	buffer[9] = 0; /* Reserved */
	buffer[10] = 0; /* Reserved */
	buffer[11] = 0; /* Reserved */

	remaining = transfersize;

	/* clamp the first chunk to the URB buffer; pad to a 4-byte boundary */
	if (transfersize + USBTMC_HEADER_SIZE > buflen) {
		transfersize = buflen - USBTMC_HEADER_SIZE;
		aligned = buflen;
	} else {
		aligned = (transfersize + (USBTMC_HEADER_SIZE + 3)) & ~3;
	}

	if (copy_from_user(&buffer[USBTMC_HEADER_SIZE], buf, transfersize)) {
		retval = -EFAULT;
		up(&file_data->limit_write_sem);
		goto exit;
	}

	dev_dbg(&data->intf->dev, "%s(size:%u align:%u)\n", __func__,
		(unsigned int)transfersize, (unsigned int)aligned);

	print_hex_dump_debug("usbtmc ", DUMP_PREFIX_NONE,
			     16, 1, buffer, aligned, true);

	usb_fill_bulk_urb(urb, data->usb_dev,
			  usb_sndbulkpipe(data->usb_dev, data->bulk_out),
			  urb->transfer_buffer, aligned,
			  usbtmc_write_bulk_cb, file_data);

	usb_anchor_urb(urb, &file_data->submitted);
	retval = usb_submit_urb(urb, GFP_KERNEL);
	if (unlikely(retval)) {
		usb_unanchor_urb(urb);
		up(&file_data->limit_write_sem);
		goto exit;
	}

	remaining -= transfersize;

	data->bTag_last_write = data->bTag;
	data->bTag++;

	if (!data->bTag)
		data->bTag++;

	/* call generic_write even when remaining = 0 */
	retval = usbtmc_generic_write(file_data, buf + transfersize, remaining,
				      &done, USBTMC_FLAG_APPEND);
	/* truncate alignment bytes */
	if (done > remaining)
		done = remaining;
	/*add size of first urb*/
	done += transfersize;
	if (retval < 0) {
		usb_kill_anchored_urbs(&file_data->submitted);
		dev_err(&data->intf->dev,
			"Unable to send data, error %d\n", (int)retval);
		if (file_data->auto_abort)
			usbtmc_ioctl_abort_bulk_out(data);
		goto exit;
	}

	retval = done;
exit:
	usb_free_urb(urb);
	mutex_unlock(&data->io_mutex);
	return retval;
}
/*
 * usbtmc_ioctl_clear - implement USBTMC_IOCTL_CLEAR
 *
 * Runs the INITIATE_CLEAR / CHECK_CLEAR_STATUS handshake defined by the
 * USBTMC specification: while status is PENDING and the device reports
 * queued Bulk-IN data, drain that data (bounded by
 * USBTMC_MAX_READS_TO_CLEAR_BULK_IN cycles); finally clear the Bulk-OUT
 * halt.  Returns 0 on success or a negative errno.
 *
 * Fix: the error message inside the drain loop previously claimed
 * "usb_control_msg returned" although the failing call is usb_bulk_msg().
 */
static int usbtmc_ioctl_clear(struct usbtmc_device_data *data)
{
	struct device *dev;
	u8 *buffer;
	int rv;
	int n;
	int actual = 0;

	dev = &data->intf->dev;

	dev_dbg(dev, "Sending INITIATE_CLEAR request\n");

	buffer = kmalloc(USBTMC_BUFSIZE, GFP_KERNEL);
	if (!buffer)
		return -ENOMEM;

	rv = usb_control_msg(data->usb_dev,
			     usb_rcvctrlpipe(data->usb_dev, 0),
			     USBTMC_REQUEST_INITIATE_CLEAR,
			     USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE,
			     0, 0, buffer, 1, USB_CTRL_GET_TIMEOUT);
	if (rv < 0) {
		dev_err(dev, "usb_control_msg returned %d\n", rv);
		goto exit;
	}

	dev_dbg(dev, "INITIATE_CLEAR returned %x\n", buffer[0]);
	if (buffer[0] != USBTMC_STATUS_SUCCESS) {
		dev_err(dev, "INITIATE_CLEAR returned %x\n", buffer[0]);
		rv = -EPERM;
		goto exit;
	}

	n = 0;

usbtmc_clear_check_status:

	dev_dbg(dev, "Sending CHECK_CLEAR_STATUS request\n");

	rv = usb_control_msg(data->usb_dev,
			     usb_rcvctrlpipe(data->usb_dev, 0),
			     USBTMC_REQUEST_CHECK_CLEAR_STATUS,
			     USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE,
			     0, 0, buffer, 2, USB_CTRL_GET_TIMEOUT);
	if (rv < 0) {
		dev_err(dev, "usb_control_msg returned %d\n", rv);
		goto exit;
	}

	dev_dbg(dev, "CHECK_CLEAR_STATUS returned %x\n", buffer[0]);

	if (buffer[0] == USBTMC_STATUS_SUCCESS)
		goto usbtmc_clear_bulk_out_halt;

	if (buffer[0] != USBTMC_STATUS_PENDING) {
		dev_err(dev, "CHECK_CLEAR_STATUS returned %x\n", buffer[0]);
		rv = -EPERM;
		goto exit;
	}

	/* bmClear bit 0 set: the device still has Bulk-IN data queued */
	if ((buffer[1] & 1) != 0) {
		do {
			dev_dbg(dev, "Reading from bulk in EP\n");

			actual = 0;
			rv = usb_bulk_msg(data->usb_dev,
					  usb_rcvbulkpipe(data->usb_dev,
							  data->bulk_in),
					  buffer, USBTMC_BUFSIZE,
					  &actual, USB_CTRL_GET_TIMEOUT);

			print_hex_dump_debug("usbtmc ", DUMP_PREFIX_NONE,
					     16, 1, buffer, actual, true);

			n++;

			if (rv < 0) {
				dev_err(dev, "usb_bulk_msg returned %d\n",
					rv);
				goto exit;
			}
		} while ((actual == USBTMC_BUFSIZE) &&
			 (n < USBTMC_MAX_READS_TO_CLEAR_BULK_IN));
	} else {
		/* do not stress device with subsequent requests */
		msleep(50);
		n++;
	}

	if (n >= USBTMC_MAX_READS_TO_CLEAR_BULK_IN) {
		dev_err(dev, "Couldn't clear device buffer within %d cycles\n",
			USBTMC_MAX_READS_TO_CLEAR_BULK_IN);
		rv = -EPERM;
		goto exit;
	}

	goto usbtmc_clear_check_status;

usbtmc_clear_bulk_out_halt:

	rv = usb_clear_halt(data->usb_dev,
			    usb_sndbulkpipe(data->usb_dev, data->bulk_out));
	if (rv < 0) {
		dev_err(dev, "usb_clear_halt returned %d\n", rv);
		goto exit;
	}
	rv = 0;

exit:
	kfree(buffer);
	return rv;
}
/* USBTMC_IOCTL_CLEAR_OUT_HALT: clear a halt/stall on the Bulk-OUT endpoint */
static int usbtmc_ioctl_clear_out_halt(struct usbtmc_device_data *data)
{
	int retval = usb_clear_halt(data->usb_dev,
				    usb_sndbulkpipe(data->usb_dev,
						    data->bulk_out));

	if (retval < 0)
		dev_err(&data->usb_dev->dev, "%s returned %d\n", __func__,
			retval);
	return retval;
}
/* USBTMC_IOCTL_CLEAR_IN_HALT: clear a halt/stall on the Bulk-IN endpoint */
static int usbtmc_ioctl_clear_in_halt(struct usbtmc_device_data *data)
{
	int retval = usb_clear_halt(data->usb_dev,
				    usb_rcvbulkpipe(data->usb_dev,
						    data->bulk_in));

	if (retval < 0)
		dev_err(&data->usb_dev->dev, "%s returned %d\n", __func__,
			retval);
	return retval;
}
/*
 * USBTMC_IOCTL_CANCEL_IO: mark both directions as canceled and kill all
 * URBs still anchored for this file handle.  Always succeeds.
 */
static int usbtmc_ioctl_cancel_io(struct usbtmc_file_data *file_data)
{
	/* publish -ECANCELED before the completion handlers can run */
	spin_lock_irq(&file_data->err_lock);
	file_data->in_status = -ECANCELED;
	file_data->out_status = -ECANCELED;
	spin_unlock_irq(&file_data->err_lock);

	usb_kill_anchored_urbs(&file_data->submitted);

	return 0;
}
/*
 * USBTMC_IOCTL_CLEANUP_IO: discard all pending I/O state for this file
 * handle.  Kills in-flight URBs first, then drops completed-but-unread
 * Bulk-IN URBs, and finally resets the status/size bookkeeping.
 */
static int usbtmc_ioctl_cleanup_io(struct usbtmc_file_data *file_data)
{
	usb_kill_anchored_urbs(&file_data->submitted);
	usb_scuttle_anchored_urbs(&file_data->in_anchor);
	spin_lock_irq(&file_data->err_lock);
	file_data->in_status = 0;
	file_data->in_transfer_size = 0;
	file_data->out_status = 0;
	file_data->out_transfer_size = 0;
	spin_unlock_irq(&file_data->err_lock);

	file_data->in_urbs_used = 0;
	return 0;
}
/*
 * get_capabilities - issue GET_CAPABILITIES and cache the result
 *
 * Reads the 0x18-byte capabilities structure from the device and stores
 * the interface/device capability bytes (offsets 4, 5, 14, 15) in
 * data->capabilities, plus the condensed USB488 capability bits in
 * data->usb488_caps.  Returns 0 on success or a negative errno.
 */
static int get_capabilities(struct usbtmc_device_data *data)
{
	struct device *dev = &data->usb_dev->dev;
	char *buffer;
	int rv = 0;

	buffer = kmalloc(0x18, GFP_KERNEL);
	if (!buffer)
		return -ENOMEM;

	rv = usb_control_msg(data->usb_dev, usb_rcvctrlpipe(data->usb_dev, 0),
			     USBTMC_REQUEST_GET_CAPABILITIES,
			     USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE,
			     0, 0, buffer, 0x18, USB_CTRL_GET_TIMEOUT);
	if (rv < 0) {
		dev_err(dev, "usb_control_msg returned %d\n", rv);
		goto err_out;
	}

	dev_dbg(dev, "GET_CAPABILITIES returned %x\n", buffer[0]);
	if (buffer[0] != USBTMC_STATUS_SUCCESS) {
		dev_err(dev, "GET_CAPABILITIES returned %x\n", buffer[0]);
		rv = -EPERM;
		goto err_out;
	}
	dev_dbg(dev, "Interface capabilities are %x\n", buffer[4]);
	dev_dbg(dev, "Device capabilities are %x\n", buffer[5]);
	dev_dbg(dev, "USB488 interface capabilities are %x\n", buffer[14]);
	dev_dbg(dev, "USB488 device capabilities are %x\n", buffer[15]);

	data->capabilities.interface_capabilities = buffer[4];
	data->capabilities.device_capabilities = buffer[5];
	data->capabilities.usb488_interface_capabilities = buffer[14];
	data->capabilities.usb488_device_capabilities = buffer[15];
	/* pack interface bits 0-2 and device bits 0-3 into one byte */
	data->usb488_caps = (buffer[14] & 0x07) | ((buffer[15] & 0x0f) << 4);
	rv = 0;

err_out:
	kfree(buffer);
	return rv;
}
/*
 * capability_attribute - generate a read-only sysfs attribute that
 * reports one field of the cached GET_CAPABILITIES result as a decimal
 * number.  One attribute is instantiated per capability byte below.
 */
#define capability_attribute(name) \
static ssize_t name##_show(struct device *dev, \
			   struct device_attribute *attr, char *buf) \
{ \
	struct usb_interface *intf = to_usb_interface(dev); \
	struct usbtmc_device_data *data = usb_get_intfdata(intf); \
	\
	return sprintf(buf, "%d\n", data->capabilities.name); \
} \
static DEVICE_ATTR_RO(name)

capability_attribute(interface_capabilities);
capability_attribute(device_capabilities);
capability_attribute(usb488_interface_capabilities);
capability_attribute(usb488_device_capabilities);

/* grouped under the interface via usbtmc_driver.dev_groups */
static struct attribute *usbtmc_attrs[] = {
	&dev_attr_interface_capabilities.attr,
	&dev_attr_device_capabilities.attr,
	&dev_attr_usb488_interface_capabilities.attr,
	&dev_attr_usb488_device_capabilities.attr,
	NULL,
};
ATTRIBUTE_GROUPS(usbtmc);
/*
 * USBTMC_IOCTL_INDICATOR_PULSE: ask the device to blink its activity
 * indicator.  Returns 0 on success, -EPERM if the device rejects the
 * request, or a negative errno on transport failure.
 */
static int usbtmc_ioctl_indicator_pulse(struct usbtmc_device_data *data)
{
	struct device *dev = &data->intf->dev;
	u8 *buffer;
	int retval;

	buffer = kmalloc(2, GFP_KERNEL);
	if (!buffer)
		return -ENOMEM;

	retval = usb_control_msg(data->usb_dev,
			usb_rcvctrlpipe(data->usb_dev, 0),
			USBTMC_REQUEST_INDICATOR_PULSE,
			USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE,
			0, 0, buffer, 0x01, USB_CTRL_GET_TIMEOUT);
	if (retval < 0) {
		dev_err(dev, "usb_control_msg returned %d\n", retval);
		goto free_buf;
	}

	dev_dbg(dev, "INDICATOR_PULSE returned %x\n", buffer[0]);

	if (buffer[0] != USBTMC_STATUS_SUCCESS) {
		dev_err(dev, "INDICATOR_PULSE returned %x\n", buffer[0]);
		retval = -EPERM;
		goto free_buf;
	}
	retval = 0;

free_buf:
	kfree(buffer);
	return retval;
}
/*
 * usbtmc_ioctl_request - USBTMC_IOCTL_CTRL_REQUEST pass-through
 *
 * Executes an arbitrary user-supplied control request on endpoint 0.
 * wLength is bounded by USBTMC_BUFSIZE; OUT data is copied from user
 * space beforehand, IN data is copied back afterwards.  Returns the
 * usb_control_msg() result (>= 0: bytes transferred) or a negative errno.
 */
static int usbtmc_ioctl_request(struct usbtmc_device_data *data,
				void __user *arg)
{
	struct device *dev = &data->intf->dev;
	struct usbtmc_ctrlrequest request;
	u8 *buffer = NULL;
	int rv;
	unsigned int is_in, pipe;
	unsigned long res;

	res = copy_from_user(&request, arg, sizeof(struct usbtmc_ctrlrequest));
	if (res)
		return -EFAULT;

	if (request.req.wLength > USBTMC_BUFSIZE)
		return -EMSGSIZE;
	if (request.req.wLength == 0)	/* Length-0 requests are never IN */
		request.req.bRequestType &= ~USB_DIR_IN;

	is_in = request.req.bRequestType & USB_DIR_IN;

	if (request.req.wLength) {
		buffer = kmalloc(request.req.wLength, GFP_KERNEL);
		if (!buffer)
			return -ENOMEM;

		if (!is_in) {
			/* Send control data to device */
			res = copy_from_user(buffer, request.data,
					     request.req.wLength);
			if (res) {
				rv = -EFAULT;
				goto exit;
			}
		}
	}

	if (is_in)
		pipe = usb_rcvctrlpipe(data->usb_dev, 0);
	else
		pipe = usb_sndctrlpipe(data->usb_dev, 0);
	rv = usb_control_msg(data->usb_dev,
			pipe,
			request.req.bRequest,
			request.req.bRequestType,
			request.req.wValue,
			request.req.wIndex,
			buffer, request.req.wLength, USB_CTRL_GET_TIMEOUT);
	if (rv < 0) {
		dev_err(dev, "%s failed %d\n", __func__, rv);
		goto exit;
	}

	/* rv > 0: number of bytes actually received */
	if (rv && is_in) {
		/* Read control data from device */
		res = copy_to_user(request.data, buffer, rv);
		if (res)
			rv = -EFAULT;
	}

 exit:
	kfree(buffer);
	return rv;
}
/*
 * USBTMC_IOCTL_GET_TIMEOUT: report this file handle's USB transfer
 * timeout (milliseconds) to user space.
 */
static int usbtmc_ioctl_get_timeout(struct usbtmc_file_data *file_data,
				    void __user *arg)
{
	return put_user((u32)file_data->timeout, (__u32 __user *)arg);
}
/*
 * USBTMC_IOCTL_SET_TIMEOUT: set this file handle's USB transfer timeout
 * (milliseconds).  Values below USBTMC_MIN_TIMEOUT are rejected; a value
 * of 0 would mean MAX_SCHEDULE_TIMEOUT in usb_control_msg and is thus
 * excluded by the minimum check.
 */
static int usbtmc_ioctl_set_timeout(struct usbtmc_file_data *file_data,
				    void __user *arg)
{
	u32 val;

	if (get_user(val, (__u32 __user *)arg))
		return -EFAULT;

	if (val < USBTMC_MIN_TIMEOUT)
		return -EINVAL;

	file_data->timeout = val;

	return 0;
}
/*
 * USBTMC_IOCTL_EOM_ENABLE: enable (1) or disable (0) setting the EOM bit
 * on subsequent write() transfers for this file handle.
 */
static int usbtmc_ioctl_eom_enable(struct usbtmc_file_data *file_data,
				   void __user *arg)
{
	u8 enable;

	if (copy_from_user(&enable, arg, sizeof(enable)))
		return -EFAULT;

	if (enable > 1)
		return -EINVAL;

	file_data->eom_val = enable;

	return 0;
}
/*
 * USBTMC_IOCTL_CONFIG_TERMCHAR: configure the termination character used
 * by read().  Enabling it is only allowed when the device advertises
 * TermChar support (device capability bit 0).
 */
static int usbtmc_ioctl_config_termc(struct usbtmc_file_data *file_data,
				     void __user *arg)
{
	struct usbtmc_termchar termc;

	if (copy_from_user(&termc, arg, sizeof(termc)))
		return -EFAULT;

	if (termc.term_char_enabled > 1)
		return -EINVAL;
	if (termc.term_char_enabled &&
	    !(file_data->data->capabilities.device_capabilities & 1))
		return -EINVAL;

	file_data->term_char = termc.term_char;
	file_data->term_char_enabled = termc.term_char_enabled;

	return 0;
}
/*
 * usbtmc_ioctl - dispatcher for all USBTMC/USB488 ioctls
 *
 * Every handler runs with data->io_mutex held; once the device is a
 * zombie (disconnected), all commands fail with -ENODEV.  Unrecognized
 * commands fall through the switch and return the initial -EBADRQC.
 */
static long usbtmc_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	struct usbtmc_file_data *file_data;
	struct usbtmc_device_data *data;
	int retval = -EBADRQC;
	__u8 tmp_byte;

	file_data = file->private_data;
	data = file_data->data;

	mutex_lock(&data->io_mutex);
	if (data->zombie) {
		retval = -ENODEV;
		goto skip_io_on_zombie;
	}

	switch (cmd) {
	case USBTMC_IOCTL_CLEAR_OUT_HALT:
		retval = usbtmc_ioctl_clear_out_halt(data);
		break;

	case USBTMC_IOCTL_CLEAR_IN_HALT:
		retval = usbtmc_ioctl_clear_in_halt(data);
		break;

	case USBTMC_IOCTL_INDICATOR_PULSE:
		retval = usbtmc_ioctl_indicator_pulse(data);
		break;

	case USBTMC_IOCTL_CLEAR:
		retval = usbtmc_ioctl_clear(data);
		break;

	case USBTMC_IOCTL_ABORT_BULK_OUT:
		retval = usbtmc_ioctl_abort_bulk_out(data);
		break;

	case USBTMC_IOCTL_ABORT_BULK_IN:
		retval = usbtmc_ioctl_abort_bulk_in(data);
		break;

	case USBTMC_IOCTL_CTRL_REQUEST:
		retval = usbtmc_ioctl_request(data, (void __user *)arg);
		break;

	case USBTMC_IOCTL_GET_TIMEOUT:
		retval = usbtmc_ioctl_get_timeout(file_data,
						  (void __user *)arg);
		break;

	case USBTMC_IOCTL_SET_TIMEOUT:
		retval = usbtmc_ioctl_set_timeout(file_data,
						  (void __user *)arg);
		break;

	case USBTMC_IOCTL_EOM_ENABLE:
		retval = usbtmc_ioctl_eom_enable(file_data,
						 (void __user *)arg);
		break;

	case USBTMC_IOCTL_CONFIG_TERMCHAR:
		retval = usbtmc_ioctl_config_termc(file_data,
						   (void __user *)arg);
		break;

	case USBTMC_IOCTL_WRITE:
		retval = usbtmc_ioctl_generic_write(file_data,
						    (void __user *)arg);
		break;

	case USBTMC_IOCTL_READ:
		retval = usbtmc_ioctl_generic_read(file_data,
						   (void __user *)arg);
		break;

	case USBTMC_IOCTL_WRITE_RESULT:
		retval = usbtmc_ioctl_write_result(file_data,
						   (void __user *)arg);
		break;

	case USBTMC_IOCTL_API_VERSION:
		retval = put_user(USBTMC_API_VERSION,
				  (__u32 __user *)arg);
		break;

	case USBTMC488_IOCTL_GET_CAPS:
		retval = put_user(data->usb488_caps,
				  (unsigned char __user *)arg);
		break;

	case USBTMC488_IOCTL_READ_STB:
		retval = usbtmc488_ioctl_read_stb(file_data,
						  (void __user *)arg);
		break;

	case USBTMC488_IOCTL_REN_CONTROL:
		retval = usbtmc488_ioctl_simple(data, (void __user *)arg,
						USBTMC488_REQUEST_REN_CONTROL);
		break;

	case USBTMC488_IOCTL_GOTO_LOCAL:
		retval = usbtmc488_ioctl_simple(data, (void __user *)arg,
						USBTMC488_REQUEST_GOTO_LOCAL);
		break;

	case USBTMC488_IOCTL_LOCAL_LOCKOUT:
		retval = usbtmc488_ioctl_simple(data, (void __user *)arg,
						USBTMC488_REQUEST_LOCAL_LOCKOUT);
		break;

	case USBTMC488_IOCTL_TRIGGER:
		retval = usbtmc488_ioctl_trigger(file_data);
		break;

	case USBTMC488_IOCTL_WAIT_SRQ:
		retval = usbtmc488_ioctl_wait_srq(file_data,
						  (__u32 __user *)arg);
		break;

	case USBTMC_IOCTL_MSG_IN_ATTR:
		retval = put_user(file_data->bmTransferAttributes,
				  (__u8 __user *)arg);
		break;

	case USBTMC_IOCTL_AUTO_ABORT:
		retval = get_user(tmp_byte, (unsigned char __user *)arg);
		if (retval == 0)
			file_data->auto_abort = !!tmp_byte;
		break;

	case USBTMC_IOCTL_GET_STB:
		/* usbtmc_get_stb returns > 0 on success */
		retval = usbtmc_get_stb(file_data, &tmp_byte);
		if (retval > 0)
			retval = put_user(tmp_byte, (__u8 __user *)arg);
		break;

	case USBTMC_IOCTL_GET_SRQ_STB:
		retval = usbtmc_ioctl_get_srq_stb(file_data,
						  (void __user *)arg);
		break;

	case USBTMC_IOCTL_CANCEL_IO:
		retval = usbtmc_ioctl_cancel_io(file_data);
		break;

	case USBTMC_IOCTL_CLEANUP_IO:
		retval = usbtmc_ioctl_cleanup_io(file_data);
		break;
	}

skip_io_on_zombie:
	mutex_unlock(&data->io_mutex);
	return retval;
}
static int usbtmc_fasync(int fd, struct file *file, int on)
{
struct usbtmc_file_data *file_data = file->private_data;
return fasync_helper(fd, file, on, &file_data->data->fasync);
}
/*
 * usbtmc_poll - poll() handler
 *
 * EPOLLPRI: an SRQ was asserted for this handle.  EPOLLOUT: no URBs in
 * flight (write side idle).  EPOLLIN: completed Bulk-IN data waiting in
 * in_anchor.  EPOLLERR: a stored in/out error.  EPOLLHUP|EPOLLERR when
 * the device has been disconnected.
 */
static __poll_t usbtmc_poll(struct file *file, poll_table *wait)
{
	struct usbtmc_file_data *file_data = file->private_data;
	struct usbtmc_device_data *data = file_data->data;
	__poll_t mask;

	mutex_lock(&data->io_mutex);

	if (data->zombie) {
		mask = EPOLLHUP | EPOLLERR;
		goto no_poll;
	}

	poll_wait(file, &data->waitq, wait);

	/* Note that EPOLLPRI is now assigned to SRQ, and
	 * EPOLLIN|EPOLLRDNORM to normal read data.
	 */
	mask = 0;
	if (atomic_read(&file_data->srq_asserted))
		mask |= EPOLLPRI;

	/* Note that the anchor submitted includes all urbs for BULK IN
	 * and OUT. So EPOLLOUT is signaled when BULK OUT is empty and
	 * all BULK IN urbs are completed and moved to in_anchor.
	 */
	if (usb_anchor_empty(&file_data->submitted))
		mask |= (EPOLLOUT | EPOLLWRNORM);
	if (!usb_anchor_empty(&file_data->in_anchor))
		mask |= (EPOLLIN | EPOLLRDNORM);

	spin_lock_irq(&file_data->err_lock);
	if (file_data->in_status || file_data->out_status)
		mask |= EPOLLERR;
	spin_unlock_irq(&file_data->err_lock);

	dev_dbg(&data->intf->dev, "poll mask = %x\n", mask);

no_poll:
	mutex_unlock(&data->io_mutex);
	return mask;
}
/* file operations for the /dev/usbtmc* character devices */
static const struct file_operations fops = {
	.owner		= THIS_MODULE,
	.read		= usbtmc_read,
	.write		= usbtmc_write,
	.open		= usbtmc_open,
	.release	= usbtmc_release,
	.flush		= usbtmc_flush,
	.unlocked_ioctl	= usbtmc_ioctl,
	.compat_ioctl	= compat_ptr_ioctl,
	.fasync		= usbtmc_fasync,
	.poll		= usbtmc_poll,
	.llseek		= default_llseek,
};
/* registers /dev/usbtmc%d minors via usb_register_dev() */
static struct usb_class_driver usbtmc_class = {
	.name		= "usbtmc%d",
	.fops		= &fops,
	.minor_base	= USBTMC_MINOR_BASE,
};
/*
 * usbtmc_interrupt - completion handler for the Interrupt-IN URB
 *
 * Parses USB488 notifications: byte 0 == 0x81 is an SRQ (status byte in
 * byte 1, fanned out to every open file handle); values above 0x81 carry
 * a bNotify1/bNotify2 pair consumed by the READ_STB path.  On success or
 * a recognized notification the URB is resubmitted; on any terminating
 * status the handler returns without resubmitting.
 */
static void usbtmc_interrupt(struct urb *urb)
{
	struct usbtmc_device_data *data = urb->context;
	struct device *dev = &data->intf->dev;
	int status = urb->status;
	int rv;

	dev_dbg(&data->intf->dev, "int status: %d len %d\n",
		status, urb->actual_length);

	switch (status) {
	case 0: /* SUCCESS */
		/* check for valid STB notification */
		if (data->iin_buffer[0] > 0x81) {
			data->bNotify1 = data->iin_buffer[0];
			data->bNotify2 = data->iin_buffer[1];
			atomic_set(&data->iin_data_valid, 1);
			wake_up_interruptible(&data->waitq);
			goto exit;
		}
		/* check for SRQ notification */
		if (data->iin_buffer[0] == 0x81) {
			unsigned long flags;
			struct list_head *elem;

			if (data->fasync)
				kill_fasync(&data->fasync,
					SIGIO, POLL_PRI);

			/* mark SRQ pending on every open file handle */
			spin_lock_irqsave(&data->dev_lock, flags);
			list_for_each(elem, &data->file_list) {
				struct usbtmc_file_data *file_data;

				file_data = list_entry(elem,
						       struct usbtmc_file_data,
						       file_elem);
				file_data->srq_byte = data->iin_buffer[1];
				atomic_set(&file_data->srq_asserted, 1);
			}
			spin_unlock_irqrestore(&data->dev_lock, flags);

			dev_dbg(dev, "srq received bTag %x stb %x\n",
				(unsigned int)data->iin_buffer[0],
				(unsigned int)data->iin_buffer[1]);
			wake_up_interruptible_all(&data->waitq);
			goto exit;
		}
		dev_warn(dev, "invalid notification: %x\n",
			 data->iin_buffer[0]);
		break;
	case -EOVERFLOW:
		dev_err(dev, "overflow with length %d, actual length is %d\n",
			data->iin_wMaxPacketSize, urb->actual_length);
		fallthrough;
	default:
		/* urb terminated, clean up */
		dev_dbg(dev, "urb terminated, status: %d\n", status);
		return;
	}
exit:
	rv = usb_submit_urb(urb, GFP_ATOMIC);
	if (rv)
		dev_err(dev, "usb_submit_urb failed: %d\n", rv);
}
/*
 * usbtmc_free_int - tear down the Interrupt-IN machinery
 *
 * Kills the URB before freeing its buffer, then drops the device
 * reference that was taken in usbtmc_probe() to protect the interrupt
 * endpoint data.  Safe to call when no interrupt URB was ever set up.
 */
static void usbtmc_free_int(struct usbtmc_device_data *data)
{
	if (!data->iin_ep_present || !data->iin_urb)
		return;
	usb_kill_urb(data->iin_urb);
	kfree(data->iin_buffer);
	data->iin_buffer = NULL;
	usb_free_urb(data->iin_urb);
	data->iin_urb = NULL;
	/* balances kref_get() in usbtmc_probe() */
	kref_put(&data->kref, usbtmc_delete);
}
/*
 * usbtmc_probe - bind to a USBTMC interface
 *
 * Allocates the per-device state, locates the bulk (and optional
 * interrupt) endpoints, reads the device capabilities, sets up the
 * Interrupt-IN URB for USB488 notifications when present, and registers
 * a character device minor.  On error, usbtmc_free_int() releases any
 * interrupt resources before the probe reference is dropped.
 */
static int usbtmc_probe(struct usb_interface *intf,
			const struct usb_device_id *id)
{
	struct usbtmc_device_data *data;
	struct usb_host_interface *iface_desc;
	struct usb_endpoint_descriptor *bulk_in, *bulk_out, *int_in;
	int retcode;

	dev_dbg(&intf->dev, "%s called\n", __func__);

	data = kzalloc(sizeof(*data), GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	data->intf = intf;
	data->id = id;
	data->usb_dev = usb_get_dev(interface_to_usbdev(intf));
	usb_set_intfdata(intf, data);
	kref_init(&data->kref);
	mutex_init(&data->io_mutex);
	init_waitqueue_head(&data->waitq);
	atomic_set(&data->iin_data_valid, 0);
	INIT_LIST_HEAD(&data->file_list);
	spin_lock_init(&data->dev_lock);

	data->zombie = 0;

	/* Initialize USBTMC bTag and other fields */
	data->bTag = 1;
	/*  2 <= bTag <= 127   USBTMC-USB488 subclass specification 4.3.1 */
	data->iin_bTag = 2;

	/* USBTMC devices have only one setting, so use that */
	iface_desc = data->intf->cur_altsetting;
	data->ifnum = iface_desc->desc.bInterfaceNumber;

	/* Find bulk endpoints */
	retcode = usb_find_common_endpoints(iface_desc,
			&bulk_in, &bulk_out, NULL, NULL);
	if (retcode) {
		dev_err(&intf->dev, "bulk endpoints not found\n");
		goto err_put;
	}

	retcode = -EINVAL;
	data->bulk_in = bulk_in->bEndpointAddress;
	data->wMaxPacketSize = usb_endpoint_maxp(bulk_in);
	/* a zero max packet size would break all transfer-size math */
	if (!data->wMaxPacketSize)
		goto err_put;
	dev_dbg(&intf->dev, "Found bulk in endpoint at %u\n", data->bulk_in);

	data->bulk_out = bulk_out->bEndpointAddress;
	dev_dbg(&intf->dev, "Found Bulk out endpoint at %u\n", data->bulk_out);

	/* Find int endpoint (optional; needed for USB488 SRQ/STB) */
	retcode = usb_find_int_in_endpoint(iface_desc, &int_in);
	if (!retcode) {
		data->iin_ep_present = 1;
		data->iin_ep = int_in->bEndpointAddress;
		data->iin_wMaxPacketSize = usb_endpoint_maxp(int_in);
		data->iin_interval = int_in->bInterval;
		dev_dbg(&intf->dev, "Found Int in endpoint at %u\n",
			data->iin_ep);
	}

	/* failure to read capabilities is logged but not fatal */
	retcode = get_capabilities(data);
	if (retcode)
		dev_err(&intf->dev, "can't read capabilities\n");

	if (data->iin_ep_present) {
		/* allocate int urb */
		data->iin_urb = usb_alloc_urb(0, GFP_KERNEL);
		if (!data->iin_urb) {
			retcode = -ENOMEM;
			goto error_register;
		}

		/* Protect interrupt in endpoint data until iin_urb is freed */
		kref_get(&data->kref);

		/* allocate buffer for interrupt in */
		data->iin_buffer = kmalloc(data->iin_wMaxPacketSize,
					   GFP_KERNEL);
		if (!data->iin_buffer) {
			retcode = -ENOMEM;
			goto error_register;
		}

		/* fill interrupt urb */
		usb_fill_int_urb(data->iin_urb, data->usb_dev,
				 usb_rcvintpipe(data->usb_dev, data->iin_ep),
				 data->iin_buffer, data->iin_wMaxPacketSize,
				 usbtmc_interrupt,
				 data, data->iin_interval);

		retcode = usb_submit_urb(data->iin_urb, GFP_KERNEL);
		if (retcode) {
			dev_err(&intf->dev, "Failed to submit iin_urb\n");
			goto error_register;
		}
	}

	retcode = usb_register_dev(intf, &usbtmc_class);
	if (retcode) {
		dev_err(&intf->dev, "Not able to get a minor (base %u, slice default): %d\n",
			USBTMC_MINOR_BASE,
			retcode);
		goto error_register;
	}
	dev_dbg(&intf->dev, "Using minor number %d\n", intf->minor);

	return 0;

error_register:
	usbtmc_free_int(data);
err_put:
	kref_put(&data->kref, usbtmc_delete);
	return retcode;
}
/*
 * usbtmc_disconnect - interface unbind
 *
 * Deregisters the character device, marks the device as a zombie under
 * io_mutex (so all subsequent file operations fail with -ENODEV), wakes
 * and cancels all pending I/O on every open file handle, frees the
 * interrupt resources and drops the probe reference.
 */
static void usbtmc_disconnect(struct usb_interface *intf)
{
	struct usbtmc_device_data *data  = usb_get_intfdata(intf);
	struct list_head *elem;

	usb_deregister_dev(intf, &usbtmc_class);
	mutex_lock(&data->io_mutex);
	data->zombie = 1;
	wake_up_interruptible_all(&data->waitq);
	list_for_each(elem, &data->file_list) {
		struct usbtmc_file_data *file_data;

		file_data = list_entry(elem,
				       struct usbtmc_file_data,
				       file_elem);
		usb_kill_anchored_urbs(&file_data->submitted);
		usb_scuttle_anchored_urbs(&file_data->in_anchor);
	}
	mutex_unlock(&data->io_mutex);
	usbtmc_free_int(data);
	/* balances kref_init() in usbtmc_probe() */
	kref_put(&data->kref, usbtmc_delete);
}
/*
 * Quiesce one file handle before suspend: give in-flight URBs up to
 * one second to finish, kill them if they do not, then drop any
 * completed-but-unread Bulk-IN URBs.
 */
static void usbtmc_draw_down(struct usbtmc_file_data *file_data)
{
	if (!usb_wait_anchor_empty_timeout(&file_data->submitted, 1000))
		usb_kill_anchored_urbs(&file_data->submitted);
	usb_scuttle_anchored_urbs(&file_data->in_anchor);
}
/*
 * usbtmc_suspend - PM suspend hook
 *
 * Quiesces every open file handle and stops the Interrupt-IN URB.
 * Always succeeds.
 */
static int usbtmc_suspend(struct usb_interface *intf, pm_message_t message)
{
	struct usbtmc_device_data *data = usb_get_intfdata(intf);
	struct list_head *elem;

	if (!data)
		return 0;

	mutex_lock(&data->io_mutex);
	list_for_each(elem, &data->file_list) {
		struct usbtmc_file_data *file_data;

		file_data = list_entry(elem,
				       struct usbtmc_file_data,
				       file_elem);
		usbtmc_draw_down(file_data);
	}

	if (data->iin_ep_present && data->iin_urb)
		usb_kill_urb(data->iin_urb);

	mutex_unlock(&data->io_mutex);
	return 0;
}
/* PM resume hook: restart the Interrupt-IN URB stopped by suspend */
static int usbtmc_resume(struct usb_interface *intf)
{
	struct usbtmc_device_data *data = usb_get_intfdata(intf);
	int rv = 0;

	if (data->iin_ep_present && data->iin_urb) {
		rv = usb_submit_urb(data->iin_urb, GFP_KERNEL);
		if (rv)
			dev_err(&intf->dev, "Failed to submit iin_urb\n");
	}

	return rv;
}
/*
 * usbtmc_pre_reset - called before the USB core resets the device
 *
 * Cancels all pending I/O on every open file handle.  Note: io_mutex is
 * intentionally left held on return; it is released by
 * usbtmc_post_reset() after the reset completes.
 */
static int usbtmc_pre_reset(struct usb_interface *intf)
{
	struct usbtmc_device_data *data  = usb_get_intfdata(intf);
	struct list_head *elem;

	if (!data)
		return 0;

	mutex_lock(&data->io_mutex);

	list_for_each(elem, &data->file_list) {
		struct usbtmc_file_data *file_data;

		file_data = list_entry(elem,
				       struct usbtmc_file_data,
				       file_elem);
		usbtmc_ioctl_cancel_io(file_data);
	}

	return 0;
}
/*
 * usbtmc_post_reset - called after the USB core reset finishes
 *
 * Releases the io_mutex that usbtmc_pre_reset() left held.
 */
static int usbtmc_post_reset(struct usb_interface *intf)
{
	struct usbtmc_device_data *data = usb_get_intfdata(intf);

	mutex_unlock(&data->io_mutex);

	return 0;
}
/* USB driver glue: probe/disconnect plus PM and reset hooks */
static struct usb_driver usbtmc_driver = {
	.name		= "usbtmc",
	.id_table	= usbtmc_devices,
	.probe		= usbtmc_probe,
	.disconnect	= usbtmc_disconnect,
	.suspend	= usbtmc_suspend,
	.resume		= usbtmc_resume,
	.pre_reset	= usbtmc_pre_reset,
	.post_reset	= usbtmc_post_reset,
	.dev_groups	= usbtmc_groups,
};

module_usb_driver(usbtmc_driver);

MODULE_LICENSE("GPL");
/* ===== end of drivers/usb/class/usbtmc.c ===== */
// SPDX-License-Identifier: GPL-2.0
/*
* gadget.c - DesignWare USB3 DRD Controller Gadget Framework Link
*
* Copyright (C) 2010-2011 Texas Instruments Incorporated - https://www.ti.com
*
* Authors: Felipe Balbi <[email protected]>,
* Sebastian Andrzej Siewior <[email protected]>
*/
#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/list.h>
#include <linux/dma-mapping.h>
#include <linux/usb/ch9.h>
#include <linux/usb/gadget.h>
#include "debug.h"
#include "core.h"
#include "gadget.h"
#include "io.h"
/*
 * Advance the endpoint's frame number by n service intervals and align
 * the result down to an interval boundary (interval is a power of two,
 * so ~(interval - 1) masks off the low bits).
 */
#define DWC3_ALIGN_FRAME(d, n)	(((d)->frame_number + ((d)->interval * (n))) \
					& ~((d)->interval - 1))
/**
 * dwc3_gadget_set_test_mode - enables usb2 test modes
 * @dwc: pointer to our context structure
 * @mode: the mode to set (J, K SE0 NAK, Force Enable)
 *
 * Caller should take care of locking. This function will return 0 on
 * success or -EINVAL if wrong Test Selector is passed.
 */
int dwc3_gadget_set_test_mode(struct dwc3 *dwc, int mode)
{
	u32 reg;

	/* validate the test selector before touching DCTL */
	switch (mode) {
	case USB_TEST_J:
	case USB_TEST_K:
	case USB_TEST_SE0_NAK:
	case USB_TEST_PACKET:
	case USB_TEST_FORCE_ENABLE:
		break;
	default:
		return -EINVAL;
	}

	reg = dwc3_readl(dwc->regs, DWC3_DCTL);
	reg &= ~DWC3_DCTL_TSTCTRL_MASK;
	reg |= mode << 1;
	dwc3_gadget_dctl_write_safe(dwc, reg);

	return 0;
}
/**
 * dwc3_gadget_get_link_state - read the current USB link state
 * @dwc: pointer to our context structure
 *
 * Caller should take care of locking.  Returns the USBLNKST field of
 * the DSTS register, i.e. the current link state (always >= 0).
 */
int dwc3_gadget_get_link_state(struct dwc3 *dwc)
{
	return DWC3_DSTS_USBLNKST(dwc3_readl(dwc->regs, DWC3_DSTS));
}
/**
 * dwc3_gadget_set_link_state - sets usb link to a particular state
 * @dwc: pointer to our context structure
 * @state: the state to put link into
 *
 * Caller should take care of locking. This function will
 * return 0 on success or -ETIMEDOUT.
 */
int dwc3_gadget_set_link_state(struct dwc3 *dwc, enum dwc3_link_state state)
{
	int retries = 10000;
	u32 reg;

	/*
	 * Wait until device controller is ready. Only applies to 1.94a and
	 * later RTL.
	 */
	if (!DWC3_VER_IS_PRIOR(DWC3, 194A)) {
		/* poll DCNRD ("device controller not ready") with 5us delays */
		while (--retries) {
			reg = dwc3_readl(dwc->regs, DWC3_DSTS);
			if (reg & DWC3_DSTS_DCNRD)
				udelay(5);
			else
				break;
		}

		if (retries <= 0)
			return -ETIMEDOUT;
	}

	reg = dwc3_readl(dwc->regs, DWC3_DCTL);
	reg &= ~DWC3_DCTL_ULSTCHNGREQ_MASK;

	/* set no action before sending new link state change */
	dwc3_writel(dwc->regs, DWC3_DCTL, reg);

	/* set requested state */
	reg |= DWC3_DCTL_ULSTCHNGREQ(state);
	dwc3_writel(dwc->regs, DWC3_DCTL, reg);

	/*
	 * The following code is racy when called from dwc3_gadget_wakeup,
	 * and is not needed, at least on newer versions
	 */
	if (!DWC3_VER_IS_PRIOR(DWC3, 194A))
		return 0;

	/* wait for a change in DSTS (old RTL only) */
	retries = 10000;
	while (--retries) {
		reg = dwc3_readl(dwc->regs, DWC3_DSTS);

		if (DWC3_DSTS_USBLNKST(reg) == state)
			return 0;

		udelay(5);
	}

	return -ETIMEDOUT;
}
/*
 * dwc3_ep0_reset_state - force ep0 back to the SETUP phase
 *
 * If a control transfer is in progress, end the active transfer on the
 * correct direction endpoint (the DATA stage direction, or its opposite
 * for the STATUS stage), rewind both ep0 TRB enqueue pointers and
 * stall/restart so the next SETUP packet can be received.
 */
static void dwc3_ep0_reset_state(struct dwc3 *dwc)
{
	unsigned int	dir;

	if (dwc->ep0state != EP0_SETUP_PHASE) {
		dir = !!dwc->ep0_expect_in;
		if (dwc->ep0state == EP0_DATA_PHASE)
			dwc3_ep0_end_control_data(dwc, dwc->eps[dir]);
		else
			dwc3_ep0_end_control_data(dwc, dwc->eps[!dir]);

		dwc->eps[0]->trb_enqueue = 0;
		dwc->eps[1]->trb_enqueue = 0;

		dwc3_ep0_stall_and_restart(dwc);
	}
}
/**
 * dwc3_ep_inc_trb - increment a trb index.
 * @index: Pointer to the TRB index to increment.
 *
 * The index should never point to the link TRB. After incrementing,
 * if it points to the link TRB, wrap around to the beginning. The
 * link TRB is always at the last TRB entry.
 */
static void dwc3_ep_inc_trb(u8 *index)
{
	(*index)++;
	if (*index == (DWC3_TRB_NUM - 1))
		*index = 0;
}
/**
 * dwc3_ep_inc_enq - increment endpoint's enqueue pointer
 * @dep: The endpoint whose enqueue pointer we're incrementing
 */
static void dwc3_ep_inc_enq(struct dwc3_ep *dep)
{
	dwc3_ep_inc_trb(&dep->trb_enqueue);
}
/**
 * dwc3_ep_inc_deq - increment endpoint's dequeue pointer
 * @dep: The endpoint whose dequeue pointer we're incrementing
 */
static void dwc3_ep_inc_deq(struct dwc3_ep *dep)
{
	dwc3_ep_inc_trb(&dep->trb_dequeue);
}
/*
 * dwc3_gadget_del_and_unmap_request - retire a request from an endpoint
 *
 * Removes @req from its list, clears its TRB bookkeeping, records
 * @status (unless the request already holds a final status) and unmaps
 * its DMA buffer.  Drops the pm_runtime reference taken at queue time
 * for non-ep0 endpoints.  Does NOT invoke the ->complete() callback;
 * see dwc3_gadget_giveback() for that.
 */
static void dwc3_gadget_del_and_unmap_request(struct dwc3_ep *dep,
		struct dwc3_request *req, int status)
{
	struct dwc3			*dwc = dep->dwc;

	list_del(&req->list);
	req->remaining = 0;
	req->needs_extra_trb = false;
	req->num_trbs = 0;

	/* -EINPROGRESS means no final status was recorded yet */
	if (req->request.status == -EINPROGRESS)
		req->request.status = status;

	if (req->trb)
		usb_gadget_unmap_request_by_dev(dwc->sysdev,
						&req->request, req->direction);

	req->trb = NULL;
	trace_dwc3_gadget_giveback(req);

	if (dep->number > 1)
		pm_runtime_put(dwc->dev);
}
/**
 * dwc3_gadget_giveback - call struct usb_request's ->complete callback
 * @dep: The endpoint to whom the request belongs to
 * @req: The request we're giving back
 * @status: completion code for the request
 *
 * Must be called with controller's lock held and interrupts disabled. This
 * function will unmap @req and call its ->complete() callback to notify upper
 * layers that it has completed.
 */
void dwc3_gadget_giveback(struct dwc3_ep *dep, struct dwc3_request *req,
		int status)
{
	struct dwc3 *dwc = dep->dwc;

	/* Unlink the request and release its DMA mapping first. */
	dwc3_gadget_del_and_unmap_request(dep, req, status);
	req->status = DWC3_REQUEST_STATUS_COMPLETED;

	/* The ->complete() callback runs without the controller lock held. */
	spin_unlock(&dwc->lock);
	usb_gadget_giveback_request(&dep->endpoint, &req->request);
	spin_lock(&dwc->lock);
}
/**
 * dwc3_send_gadget_generic_command - issue a generic command for the controller
 * @dwc: pointer to the controller context
 * @cmd: the command to be issued
 * @param: command parameter
 *
 * Caller should take care of locking. Issue @cmd with a given @param to @dwc
 * and wait for its completion.
 */
int dwc3_send_gadget_generic_command(struct dwc3 *dwc, unsigned int cmd,
		u32 param)
{
	u32 timeout = 500;
	int status = 0;
	int ret = 0;
	u32 reg;

	/* Write the parameter first, then kick off the command. */
	dwc3_writel(dwc->regs, DWC3_DGCMDPAR, param);
	dwc3_writel(dwc->regs, DWC3_DGCMD, cmd | DWC3_DGCMD_CMDACT);

	/* Busy-poll until the controller clears CMDACT or we give up. */
	do {
		reg = dwc3_readl(dwc->regs, DWC3_DGCMD);
		if (!(reg & DWC3_DGCMD_CMDACT))
			break;
	} while (--timeout);

	if (timeout) {
		/* Command completed; a non-zero status means it failed. */
		status = DWC3_DGCMD_STATUS(reg);
		if (status)
			ret = -EINVAL;
	} else {
		ret = -ETIMEDOUT;
		status = -ETIMEDOUT;
	}

	trace_dwc3_gadget_generic_cmd(cmd, param, status);

	return ret;
}
static int __dwc3_gadget_wakeup(struct dwc3 *dwc, bool async);
/**
 * dwc3_send_gadget_ep_cmd - issue an endpoint command
 * @dep: the endpoint to which the command is going to be issued
 * @cmd: the command to be issued
 * @params: parameters to the command
 *
 * Caller should handle locking. This function will issue @cmd with given
 * @params to @dep and wait for its completion.
 *
 * Returns 0 on success, -EINVAL on command failure, -EAGAIN on isoc bus
 * expiry, -ETIMEDOUT if the controller never completes the command.
 */
int dwc3_send_gadget_ep_cmd(struct dwc3_ep *dep, unsigned int cmd,
		struct dwc3_gadget_ep_cmd_params *params)
{
	const struct usb_endpoint_descriptor *desc = dep->endpoint.desc;
	struct dwc3 *dwc = dep->dwc;
	u32 timeout = 5000;
	u32 saved_config = 0;
	u32 reg;

	int cmd_status = 0;
	int ret = -EINVAL;

	/*
	 * When operating in USB 2.0 speeds (HS/FS), if GUSB2PHYCFG.ENBLSLPM or
	 * GUSB2PHYCFG.SUSPHY is set, it must be cleared before issuing an
	 * endpoint command.
	 *
	 * Save and clear both GUSB2PHYCFG.ENBLSLPM and GUSB2PHYCFG.SUSPHY
	 * settings. Restore them after the command is completed.
	 *
	 * DWC_usb3 3.30a and DWC_usb31 1.90a programming guide section 3.2.2
	 */
	if (dwc->gadget->speed <= USB_SPEED_HIGH ||
	    DWC3_DEPCMD_CMD(cmd) == DWC3_DEPCMD_ENDTRANSFER) {
		reg = dwc3_readl(dwc->regs, DWC3_GUSB2PHYCFG(0));
		if (unlikely(reg & DWC3_GUSB2PHYCFG_SUSPHY)) {
			saved_config |= DWC3_GUSB2PHYCFG_SUSPHY;
			reg &= ~DWC3_GUSB2PHYCFG_SUSPHY;
		}

		if (reg & DWC3_GUSB2PHYCFG_ENBLSLPM) {
			saved_config |= DWC3_GUSB2PHYCFG_ENBLSLPM;
			reg &= ~DWC3_GUSB2PHYCFG_ENBLSLPM;
		}

		/* Only touch the register if something actually changed. */
		if (saved_config)
			dwc3_writel(dwc->regs, DWC3_GUSB2PHYCFG(0), reg);
	}

	if (DWC3_DEPCMD_CMD(cmd) == DWC3_DEPCMD_STARTTRANSFER) {
		int link_state;

		/*
		 * Initiate remote wakeup if the link state is in U3 when
		 * operating in SS/SSP or L1/L2 when operating in HS/FS. If the
		 * link state is in U1/U2, no remote wakeup is needed. The Start
		 * Transfer command will initiate the link recovery.
		 */
		link_state = dwc3_gadget_get_link_state(dwc);
		switch (link_state) {
		case DWC3_LINK_STATE_U2:
			/* U2 needs no wakeup in SuperSpeed; see above. */
			if (dwc->gadget->speed >= USB_SPEED_SUPER)
				break;

			fallthrough;
		case DWC3_LINK_STATE_U3:
			ret = __dwc3_gadget_wakeup(dwc, false);
			dev_WARN_ONCE(dwc->dev, ret, "wakeup failed --> %d\n",
					ret);
			break;
		}
	}

	/*
	 * For some commands such as Update Transfer command, DEPCMDPARn
	 * registers are reserved. Since the driver often sends Update Transfer
	 * command, don't write to DEPCMDPARn to avoid register write delays and
	 * improve performance.
	 */
	if (DWC3_DEPCMD_CMD(cmd) != DWC3_DEPCMD_UPDATETRANSFER) {
		dwc3_writel(dep->regs, DWC3_DEPCMDPAR0, params->param0);
		dwc3_writel(dep->regs, DWC3_DEPCMDPAR1, params->param1);
		dwc3_writel(dep->regs, DWC3_DEPCMDPAR2, params->param2);
	}

	/*
	 * Synopsys Databook 2.60a states in section 6.3.2.5.6 of that if we're
	 * not relying on XferNotReady, we can make use of a special "No
	 * Response Update Transfer" command where we should clear both CmdAct
	 * and CmdIOC bits.
	 *
	 * With this, we don't need to wait for command completion and can
	 * straight away issue further commands to the endpoint.
	 *
	 * NOTICE: We're making an assumption that control endpoints will never
	 * make use of Update Transfer command. This is a safe assumption
	 * because we can never have more than one request at a time with
	 * Control Endpoints. If anybody changes that assumption, this chunk
	 * needs to be updated accordingly.
	 */
	if (DWC3_DEPCMD_CMD(cmd) == DWC3_DEPCMD_UPDATETRANSFER &&
	    !usb_endpoint_xfer_isoc(desc))
		cmd &= ~(DWC3_DEPCMD_CMDIOC | DWC3_DEPCMD_CMDACT);
	else
		cmd |= DWC3_DEPCMD_CMDACT;

	dwc3_writel(dep->regs, DWC3_DEPCMD, cmd);

	/*
	 * No-response commands (CmdAct cleared above), and End Transfer
	 * without interrupt-on-completion, are fire-and-forget: skip the
	 * completion poll entirely.
	 */
	if (!(cmd & DWC3_DEPCMD_CMDACT) ||
	    (DWC3_DEPCMD_CMD(cmd) == DWC3_DEPCMD_ENDTRANSFER &&
	     !(cmd & DWC3_DEPCMD_CMDIOC))) {
		ret = 0;
		goto skip_status;
	}

	/* Busy-poll DEPCMD until the controller clears CMDACT. */
	do {
		reg = dwc3_readl(dep->regs, DWC3_DEPCMD);
		if (!(reg & DWC3_DEPCMD_CMDACT)) {
			cmd_status = DWC3_DEPCMD_STATUS(reg);

			switch (cmd_status) {
			case 0:
				ret = 0;
				break;
			case DEPEVT_TRANSFER_NO_RESOURCE:
				dev_WARN(dwc->dev, "No resource for %s\n",
					 dep->name);
				ret = -EINVAL;
				break;
			case DEPEVT_TRANSFER_BUS_EXPIRY:
				/*
				 * SW issues START TRANSFER command to
				 * isochronous ep with future frame interval. If
				 * future interval time has already passed when
				 * core receives the command, it will respond
				 * with an error status of 'Bus Expiry'.
				 *
				 * Instead of always returning -EINVAL, let's
				 * give a hint to the gadget driver that this is
				 * the case by returning -EAGAIN.
				 */
				ret = -EAGAIN;
				break;
			default:
				dev_WARN(dwc->dev, "UNKNOWN cmd status\n");
			}

			break;
		}
	} while (--timeout);

	if (timeout == 0) {
		ret = -ETIMEDOUT;
		cmd_status = -ETIMEDOUT;
	}

skip_status:
	trace_dwc3_gadget_ep_cmd(dep, cmd, params, cmd_status);

	if (DWC3_DEPCMD_CMD(cmd) == DWC3_DEPCMD_STARTTRANSFER) {
		if (ret == 0)
			dep->flags |= DWC3_EP_TRANSFER_STARTED;

		/* Latch the transfer resource index assigned by the core. */
		if (ret != -ETIMEDOUT)
			dwc3_gadget_ep_get_transfer_index(dep);
	}

	/* Restore the PHY config bits cleared at the top of this function. */
	if (saved_config) {
		reg = dwc3_readl(dwc->regs, DWC3_GUSB2PHYCFG(0));
		reg |= saved_config;
		dwc3_writel(dwc->regs, DWC3_GUSB2PHYCFG(0), reg);
	}

	return ret;
}
/*
 * Issue a Clear Stall endpoint command for @dep.
 *
 * Note: the original text was corrupted by a broken character encoding
 * ("&params" had been mangled into "(pilcrow)ms"); this restores the
 * intended "&params" arguments.
 */
static int dwc3_send_clear_stall_ep_cmd(struct dwc3_ep *dep)
{
	struct dwc3 *dwc = dep->dwc;
	struct dwc3_gadget_ep_cmd_params params;
	u32 cmd = DWC3_DEPCMD_CLEARSTALL;

	/*
	 * As of core revision 2.60a the recommended programming model
	 * is to set the ClearPendIN bit when issuing a Clear Stall EP
	 * command for IN endpoints. This is to prevent an issue where
	 * some (non-compliant) hosts may not send ACK TPs for pending
	 * IN transfers due to a mishandled error condition. Synopsys
	 * STAR 9000614252.
	 */
	if (dep->direction &&
	    !DWC3_VER_IS_PRIOR(DWC3, 260A) &&
	    (dwc->gadget->speed >= USB_SPEED_SUPER))
		cmd |= DWC3_DEPCMD_CLEARPENDIN;

	memset(&params, 0, sizeof(params));

	return dwc3_send_gadget_ep_cmd(dep, cmd, &params);
}
/* Translate a TRB pool entry into the DMA address seen by the controller. */
static dma_addr_t dwc3_trb_dma_offset(struct dwc3_ep *dep,
		struct dwc3_trb *trb)
{
	return dep->trb_pool_dma +
			((char *)trb - (char *)dep->trb_pool);
}
static int dwc3_alloc_trb_pool(struct dwc3_ep *dep)
{
struct dwc3 *dwc = dep->dwc;
if (dep->trb_pool)
return 0;
dep->trb_pool = dma_alloc_coherent(dwc->sysdev,
sizeof(struct dwc3_trb) * DWC3_TRB_NUM,
&dep->trb_pool_dma, GFP_KERNEL);
if (!dep->trb_pool) {
dev_err(dep->dwc->dev, "failed to allocate trb pool for %s\n",
dep->name);
return -ENOMEM;
}
return 0;
}
static void dwc3_free_trb_pool(struct dwc3_ep *dep)
{
struct dwc3 *dwc = dep->dwc;
dma_free_coherent(dwc->sysdev, sizeof(struct dwc3_trb) * DWC3_TRB_NUM,
dep->trb_pool, dep->trb_pool_dma);
dep->trb_pool = NULL;
dep->trb_pool_dma = 0;
}
/*
 * Assign a single transfer resource to @dep via the Set Endpoint
 * Transfer Resource (DEPXFERCFG) command.
 *
 * Fixes encoding corruption in the original: "&params" had been mangled
 * into a pilcrow sequence.
 */
static int dwc3_gadget_set_xfer_resource(struct dwc3_ep *dep)
{
	struct dwc3_gadget_ep_cmd_params params;

	memset(&params, 0x00, sizeof(params));

	params.param0 = DWC3_DEPXFERCFG_NUM_XFER_RES(1);

	return dwc3_send_gadget_ep_cmd(dep, DWC3_DEPCMD_SETTRANSFRESOURCE,
			&params);
}
/**
 * dwc3_gadget_start_config - configure ep resources
 * @dep: endpoint that is being enabled
 *
 * Issue a %DWC3_DEPCMD_DEPSTARTCFG command to @dep. After the command's
 * completion, it will set Transfer Resource for all available endpoints.
 *
 * The assignment of transfer resources cannot perfectly follow the data book
 * due to the fact that the controller driver does not have all knowledge of the
 * configuration in advance. It is given this information piecemeal by the
 * composite gadget framework after every SET_CONFIGURATION and
 * SET_INTERFACE. Trying to follow the databook programming model in this
 * scenario can cause errors. For two reasons:
 *
 * 1) The databook says to do %DWC3_DEPCMD_DEPSTARTCFG for every
 * %USB_REQ_SET_CONFIGURATION and %USB_REQ_SET_INTERFACE (8.1.5). This is
 * incorrect in the scenario of multiple interfaces.
 *
 * 2) The databook does not mention doing more %DWC3_DEPCMD_DEPXFERCFG for new
 * endpoint on alt setting (8.1.6).
 *
 * The following simplified method is used instead:
 *
 * All hardware endpoints can be assigned a transfer resource and this setting
 * will stay persistent until either a core reset or hibernation. So whenever we
 * do a %DWC3_DEPCMD_DEPSTARTCFG(0) we can go ahead and do
 * %DWC3_DEPCMD_DEPXFERCFG for every hardware endpoint as well. We are
 * guaranteed that there are as many transfer resources as endpoints.
 *
 * This function is called for each endpoint when it is being enabled but is
 * triggered only when called for EP0-out, which always happens first, and which
 * should only happen in one of the above conditions.
 */
static int dwc3_gadget_start_config(struct dwc3_ep *dep)
{
	struct dwc3_gadget_ep_cmd_params params;
	struct dwc3 *dwc;
	u32 cmd;
	int i;
	int ret;

	/* Only act when invoked for physical endpoint 0 (EP0-out). */
	if (dep->number)
		return 0;

	memset(&params, 0x00, sizeof(params));
	cmd = DWC3_DEPCMD_DEPSTARTCFG;
	dwc = dep->dwc;

	ret = dwc3_send_gadget_ep_cmd(dep, cmd, &params);
	if (ret)
		return ret;

	/*
	 * Assign a transfer resource to every hardware endpoint. The loop
	 * variable was renamed from "dep" to avoid shadowing the parameter;
	 * the mangled "&params" arguments were also restored.
	 */
	for (i = 0; i < DWC3_ENDPOINTS_NUM; i++) {
		struct dwc3_ep *tmp_dep = dwc->eps[i];

		if (!tmp_dep)
			continue;

		ret = dwc3_gadget_set_xfer_resource(tmp_dep);
		if (ret)
			return ret;
	}

	return 0;
}
static int dwc3_gadget_set_ep_config(struct dwc3_ep *dep, unsigned int action)
{
const struct usb_ss_ep_comp_descriptor *comp_desc;
const struct usb_endpoint_descriptor *desc;
struct dwc3_gadget_ep_cmd_params params;
struct dwc3 *dwc = dep->dwc;
comp_desc = dep->endpoint.comp_desc;
desc = dep->endpoint.desc;
memset(¶ms, 0x00, sizeof(params));
params.param0 = DWC3_DEPCFG_EP_TYPE(usb_endpoint_type(desc))
| DWC3_DEPCFG_MAX_PACKET_SIZE(usb_endpoint_maxp(desc));
/* Burst size is only needed in SuperSpeed mode */
if (dwc->gadget->speed >= USB_SPEED_SUPER) {
u32 burst = dep->endpoint.maxburst;
params.param0 |= DWC3_DEPCFG_BURST_SIZE(burst - 1);
}
params.param0 |= action;
if (action == DWC3_DEPCFG_ACTION_RESTORE)
params.param2 |= dep->saved_state;
if (usb_endpoint_xfer_control(desc))
params.param1 = DWC3_DEPCFG_XFER_COMPLETE_EN;
if (dep->number <= 1 || usb_endpoint_xfer_isoc(desc))
params.param1 |= DWC3_DEPCFG_XFER_NOT_READY_EN;
if (usb_ss_max_streams(comp_desc) && usb_endpoint_xfer_bulk(desc)) {
params.param1 |= DWC3_DEPCFG_STREAM_CAPABLE
| DWC3_DEPCFG_XFER_COMPLETE_EN
| DWC3_DEPCFG_STREAM_EVENT_EN;
dep->stream_capable = true;
}
if (!usb_endpoint_xfer_control(desc))
params.param1 |= DWC3_DEPCFG_XFER_IN_PROGRESS_EN;
/*
* We are doing 1:1 mapping for endpoints, meaning
* Physical Endpoints 2 maps to Logical Endpoint 2 and
* so on. We consider the direction bit as part of the physical
* endpoint number. So USB endpoint 0x81 is 0x03.
*/
params.param1 |= DWC3_DEPCFG_EP_NUMBER(dep->number);
/*
* We must use the lower 16 TX FIFOs even though
* HW might have more
*/
if (dep->direction)
params.param0 |= DWC3_DEPCFG_FIFO_NUMBER(dep->number >> 1);
if (desc->bInterval) {
u8 bInterval_m1;
/*
* Valid range for DEPCFG.bInterval_m1 is from 0 to 13.
*
* NOTE: The programming guide incorrectly stated bInterval_m1
* must be set to 0 when operating in fullspeed. Internally the
* controller does not have this limitation. See DWC_usb3x
* programming guide section 3.2.2.1.
*/
bInterval_m1 = min_t(u8, desc->bInterval - 1, 13);
if (usb_endpoint_type(desc) == USB_ENDPOINT_XFER_INT &&
dwc->gadget->speed == USB_SPEED_FULL)
dep->interval = desc->bInterval;
else
dep->interval = 1 << (desc->bInterval - 1);
params.param1 |= DWC3_DEPCFG_BINTERVAL_M1(bInterval_m1);
}
return dwc3_send_gadget_ep_cmd(dep, DWC3_DEPCMD_SETEPCONFIG, ¶ms);
}
/**
 * dwc3_gadget_calc_tx_fifo_size - calculates the txfifo size value
 * @dwc: pointer to the DWC3 context
 * @mult: multiplier to be used when calculating the fifo_size
 *
 * Calculates the size value based on the equation below:
 *
 * DWC3 revision 280A and prior:
 * fifo_size = mult * (max_packet / mdwidth) + 1;
 *
 * DWC3 revision 290A and onwards:
 * fifo_size = mult * ((max_packet + mdwidth)/mdwidth + 1) + 1
 *
 * The max packet size is set to 1024, as the txfifo requirements mainly apply
 * to super speed USB use cases. However, it is safe to overestimate the fifo
 * allocations for other scenarios, i.e. high speed USB.
 */
static int dwc3_gadget_calc_tx_fifo_size(struct dwc3 *dwc, int mult)
{
	int max_packet = 1024;
	int mdwidth = dwc3_mdwidth(dwc);

	/* MDWIDTH is represented in bits, we need it in bytes */
	mdwidth >>= 3;

	if (DWC3_VER_IS_PRIOR(DWC3, 290A))
		return mult * (max_packet / mdwidth) + 1;

	return mult * ((max_packet + mdwidth) / mdwidth) + 1;
}
/**
 * dwc3_gadget_clear_tx_fifos - Clears txfifo allocation
 * @dwc: pointer to the DWC3 context
 *
 * Iterates through all the endpoint registers and clears the previous txfifo
 * allocations.
 */
void dwc3_gadget_clear_tx_fifos(struct dwc3 *dwc)
{
	struct dwc3_ep *dep;
	int fifo_depth;
	int size;
	int num;

	if (!dwc->do_fifo_resize)
		return;

	/* Read ep0IN related TXFIFO size */
	dep = dwc->eps[1];
	size = dwc3_readl(dwc->regs, DWC3_GTXFIFOSIZ(0));
	fifo_depth = DWC3_IP_IS(DWC3) ? DWC3_GTXFIFOSIZ_TXFDEP(size) :
					DWC31_GTXFIFOSIZ_TXFDEP(size);

	dwc->last_fifo_depth = fifo_depth;

	/* Clear existing TXFIFO for all IN eps except ep0 */
	for (num = 3; num < min_t(int, dwc->num_eps, DWC3_ENDPOINTS_NUM);
	     num += 2) {
		dep = dwc->eps[num];

		/* Don't change TXFRAMNUM on usb31 version */
		size = DWC3_IP_IS(DWC3) ? 0 :
			dwc3_readl(dwc->regs, DWC3_GTXFIFOSIZ(num >> 1)) &
				   DWC31_GTXFIFOSIZ_TXFRAMNUM;

		dwc3_writel(dwc->regs, DWC3_GTXFIFOSIZ(num >> 1), size);
		dep->flags &= ~DWC3_EP_TXFIFO_RESIZED;
	}
	dwc->num_ep_resized = 0;
}
/*
 * dwc3_gadget_resize_tx_fifos - reallocate fifo spaces for current use-case
 * @dep: the IN endpoint whose TXFIFO is being (re)allocated
 *
 * This function will a best effort FIFO allocation in order
 * to improve FIFO usage and throughput, while still allowing
 * us to enable as many endpoints as possible.
 *
 * Keep in mind that this operation will be highly dependent
 * on the configured size for RAM1 - which contains TxFifo -,
 * the amount of endpoints enabled on coreConsultant tool, and
 * the width of the Master Bus.
 *
 * In general, FIFO depths are represented with the following equation:
 *
 * fifo_size = mult * ((max_packet + mdwidth)/mdwidth + 1) + 1
 *
 * In conjunction with dwc3_gadget_check_config(), this resizing logic will
 * ensure that all endpoints will have enough internal memory for one max
 * packet per endpoint.
 */
static int dwc3_gadget_resize_tx_fifos(struct dwc3_ep *dep)
{
	struct dwc3 *dwc = dep->dwc;
	int fifo_0_start;
	int ram1_depth;
	int fifo_size;
	int min_depth;
	int num_in_ep;
	int remaining;
	int num_fifos = 1;
	int fifo;
	int tmp;

	if (!dwc->do_fifo_resize)
		return 0;

	/* resize IN endpoints except ep0 */
	if (!usb_endpoint_dir_in(dep->endpoint.desc) || dep->number <= 1)
		return 0;

	/* bail if already resized */
	if (dep->flags & DWC3_EP_TXFIFO_RESIZED)
		return 0;

	ram1_depth = DWC3_RAM1_DEPTH(dwc->hwparams.hwparams7);

	/* Bulk with burst, and isoc, get multiple FIFOs for throughput. */
	if ((dep->endpoint.maxburst > 1 &&
	     usb_endpoint_xfer_bulk(dep->endpoint.desc)) ||
	    usb_endpoint_xfer_isoc(dep->endpoint.desc))
		num_fifos = 3;

	if (dep->endpoint.maxburst > 6 &&
	    (usb_endpoint_xfer_bulk(dep->endpoint.desc) ||
	     usb_endpoint_xfer_isoc(dep->endpoint.desc)) && DWC3_IP_IS(DWC31))
		num_fifos = dwc->tx_fifo_resize_max_num;

	/* FIFO size for a single buffer */
	fifo = dwc3_gadget_calc_tx_fifo_size(dwc, 1);

	/* Calculate the number of remaining EPs w/o any FIFO */
	num_in_ep = dwc->max_cfg_eps;
	num_in_ep -= dwc->num_ep_resized;

	/* Reserve at least one FIFO for the number of IN EPs */
	min_depth = num_in_ep * (fifo + 1);
	remaining = ram1_depth - min_depth - dwc->last_fifo_depth;
	remaining = max_t(int, 0, remaining);
	/*
	 * We've already reserved 1 FIFO per EP, so check what we can fit in
	 * addition to it. If there is not enough remaining space, allocate
	 * all the remaining space to the EP.
	 */
	fifo_size = (num_fifos - 1) * fifo;
	if (remaining < fifo_size)
		fifo_size = remaining;

	fifo_size += fifo;
	/* Last increment according to the TX FIFO size equation */
	fifo_size++;

	/* Check if TXFIFOs start at non-zero addr */
	tmp = dwc3_readl(dwc->regs, DWC3_GTXFIFOSIZ(0));
	fifo_0_start = DWC3_GTXFIFOSIZ_TXFSTADDR(tmp);

	/* Pack start address (upper half) together with the depth. */
	fifo_size |= (fifo_0_start + (dwc->last_fifo_depth << 16));
	if (DWC3_IP_IS(DWC3))
		dwc->last_fifo_depth += DWC3_GTXFIFOSIZ_TXFDEP(fifo_size);
	else
		dwc->last_fifo_depth += DWC31_GTXFIFOSIZ_TXFDEP(fifo_size);

	/* Check fifo size allocation doesn't exceed available RAM size. */
	if (dwc->last_fifo_depth >= ram1_depth) {
		dev_err(dwc->dev, "Fifosize(%d) > RAM size(%d) %s depth:%d\n",
			dwc->last_fifo_depth, ram1_depth,
			dep->endpoint.name, fifo_size);
		/* Roll back the depth accounting before failing. */
		if (DWC3_IP_IS(DWC3))
			fifo_size = DWC3_GTXFIFOSIZ_TXFDEP(fifo_size);
		else
			fifo_size = DWC31_GTXFIFOSIZ_TXFDEP(fifo_size);

		dwc->last_fifo_depth -= fifo_size;
		return -ENOMEM;
	}

	dwc3_writel(dwc->regs, DWC3_GTXFIFOSIZ(dep->number >> 1), fifo_size);
	dep->flags |= DWC3_EP_TXFIFO_RESIZED;
	dwc->num_ep_resized++;

	return 0;
}
/**
 * __dwc3_gadget_ep_enable - initializes a hw endpoint
 * @dep: endpoint to be initialized
 * @action: one of INIT, MODIFY or RESTORE
 *
 * Caller should take care of locking. Execute all necessary commands to
 * initialize a HW endpoint so it can be used by a gadget driver.
 *
 * Fixes encoding corruption in the original: both "&params" arguments
 * had been mangled into pilcrow sequences.
 */
static int __dwc3_gadget_ep_enable(struct dwc3_ep *dep, unsigned int action)
{
	const struct usb_endpoint_descriptor *desc = dep->endpoint.desc;
	struct dwc3 *dwc = dep->dwc;

	u32 reg;
	int ret;

	if (!(dep->flags & DWC3_EP_ENABLED)) {
		ret = dwc3_gadget_resize_tx_fifos(dep);
		if (ret)
			return ret;

		ret = dwc3_gadget_start_config(dep);
		if (ret)
			return ret;
	}

	ret = dwc3_gadget_set_ep_config(dep, action);
	if (ret)
		return ret;

	if (!(dep->flags & DWC3_EP_ENABLED)) {
		struct dwc3_trb *trb_st_hw;
		struct dwc3_trb *trb_link;

		dep->type = usb_endpoint_type(desc);
		dep->flags |= DWC3_EP_ENABLED;

		/* Enable the endpoint in the Device Active Endpoint bitmap. */
		reg = dwc3_readl(dwc->regs, DWC3_DALEPENA);
		reg |= DWC3_DALEPENA_EP(dep->number);
		dwc3_writel(dwc->regs, DWC3_DALEPENA, reg);

		dep->trb_dequeue = 0;
		dep->trb_enqueue = 0;

		if (usb_endpoint_xfer_control(desc))
			goto out;

		/* Initialize the TRB ring */
		memset(dep->trb_pool, 0,
		       sizeof(struct dwc3_trb) * DWC3_TRB_NUM);

		/* Link TRB. The HWO bit is never reset */
		trb_st_hw = &dep->trb_pool[0];

		trb_link = &dep->trb_pool[DWC3_TRB_NUM - 1];
		trb_link->bpl = lower_32_bits(dwc3_trb_dma_offset(dep, trb_st_hw));
		trb_link->bph = upper_32_bits(dwc3_trb_dma_offset(dep, trb_st_hw));
		trb_link->ctrl |= DWC3_TRBCTL_LINK_TRB;
		trb_link->ctrl |= DWC3_TRB_CTRL_HWO;
	}

	/*
	 * Issue StartTransfer here with no-op TRB so we can always rely on No
	 * Response Update Transfer command.
	 */
	if (usb_endpoint_xfer_bulk(desc) ||
	    usb_endpoint_xfer_int(desc)) {
		struct dwc3_gadget_ep_cmd_params params;
		struct dwc3_trb *trb;
		dma_addr_t trb_dma;
		u32 cmd;

		memset(&params, 0, sizeof(params));
		trb = &dep->trb_pool[0];
		trb_dma = dwc3_trb_dma_offset(dep, trb);

		params.param0 = upper_32_bits(trb_dma);
		params.param1 = lower_32_bits(trb_dma);

		cmd = DWC3_DEPCMD_STARTTRANSFER;

		ret = dwc3_send_gadget_ep_cmd(dep, cmd, &params);
		if (ret < 0)
			return ret;

		if (dep->stream_capable) {
			/*
			 * For streams, at start, there maybe a race where the
			 * host primes the endpoint before the function driver
			 * queues a request to initiate a stream. In that case,
			 * the controller will not see the prime to generate the
			 * ERDY and start stream. To workaround this, issue a
			 * no-op TRB as normal, but end it immediately. As a
			 * result, when the function driver queues the request,
			 * the next START_TRANSFER command will cause the
			 * controller to generate an ERDY to initiate the
			 * stream.
			 */
			dwc3_stop_active_transfer(dep, true, true);

			/*
			 * All stream eps will reinitiate stream on NoStream
			 * rejection until we can determine that the host can
			 * prime after the first transfer.
			 *
			 * However, if the controller is capable of
			 * TXF_FLUSH_BYPASS, then IN direction endpoints will
			 * automatically restart the stream without the driver
			 * initiation.
			 */
			if (!dep->direction ||
			    !(dwc->hwparams.hwparams9 &
			      DWC3_GHWPARAMS9_DEV_TXF_FLUSH_BYPASS))
				dep->flags |= DWC3_EP_FORCE_RESTART_STREAM;
		}
	}

out:
	trace_dwc3_gadget_ep_enable(dep);

	return 0;
}
/* Give back every request on @list to the gadget driver with @status. */
static void dwc3_gadget_giveback_list(struct dwc3_ep *dep,
		struct list_head *list, int status)
{
	while (!list_empty(list))
		dwc3_gadget_giveback(dep, next_request(list), status);
}

/*
 * Stop any active transfer on @dep and hand every queued request back
 * to the gadget driver with the given completion @status.
 */
void dwc3_remove_requests(struct dwc3 *dwc, struct dwc3_ep *dep, int status)
{
	dwc3_stop_active_transfer(dep, true, false);

	/* If endxfer is delayed, avoid unmapping requests */
	if (dep->flags & DWC3_EP_DELAY_STOP)
		return;

	/* - giveback all requests to gadget driver */
	dwc3_gadget_giveback_list(dep, &dep->started_list, status);
	dwc3_gadget_giveback_list(dep, &dep->pending_list, status);
	dwc3_gadget_giveback_list(dep, &dep->cancelled_list, status);
}
/**
 * __dwc3_gadget_ep_disable - disables a hw endpoint
 * @dep: the endpoint to disable
 *
 * This function undoes what __dwc3_gadget_ep_enable did and also removes
 * requests which are currently being processed by the hardware and those which
 * are not yet scheduled.
 *
 * Caller should take care of locking.
 */
static int __dwc3_gadget_ep_disable(struct dwc3_ep *dep)
{
	struct dwc3 *dwc = dep->dwc;
	u32 reg;
	u32 mask;

	trace_dwc3_gadget_ep_disable(dep);

	/* make sure HW endpoint isn't stalled */
	if (dep->flags & DWC3_EP_STALL)
		__dwc3_gadget_ep_set_halt(dep, 0, false);

	/* Remove the endpoint from the Device Active Endpoint bitmap. */
	reg = dwc3_readl(dwc->regs, DWC3_DALEPENA);
	reg &= ~DWC3_DALEPENA_EP(dep->number);
	dwc3_writel(dwc->regs, DWC3_DALEPENA, reg);

	/* Complete all queued requests with -ESHUTDOWN. */
	dwc3_remove_requests(dwc, dep, -ESHUTDOWN);

	dep->stream_capable = false;
	dep->type = 0;
	/* Flags to preserve across disable; everything else is cleared. */
	mask = DWC3_EP_TXFIFO_RESIZED;
	/*
	 * dwc3_remove_requests() can exit early if DWC3 EP delayed stop is
	 * set. Do not clear DEP flags, so that the end transfer command will
	 * be reattempted during the next SETUP stage.
	 */
	if (dep->flags & DWC3_EP_DELAY_STOP)
		mask |= (DWC3_EP_DELAY_STOP | DWC3_EP_TRANSFER_STARTED);
	dep->flags &= mask;

	/* Clear out the ep descriptors for non-ep0 */
	if (dep->number > 1) {
		dep->endpoint.comp_desc = NULL;
		dep->endpoint.desc = NULL;
	}

	return 0;
}
/* -------------------------------------------------------------------------- */
/* ep0 is managed internally by the driver; gadget drivers may not enable it. */
static int dwc3_gadget_ep0_enable(struct usb_ep *ep,
		const struct usb_endpoint_descriptor *desc)
{
	return -EINVAL;
}
/* ep0 is managed internally by the driver; gadget drivers may not disable it. */
static int dwc3_gadget_ep0_disable(struct usb_ep *ep)
{
	return -EINVAL;
}
/* -------------------------------------------------------------------------- */
/*
 * usb_ep_ops ->enable for non-control endpoints. Validates the
 * descriptor, then enables the hardware endpoint under the controller
 * lock. Returns 0 on success (including when already enabled) or a
 * negative errno.
 */
static int dwc3_gadget_ep_enable(struct usb_ep *ep,
		const struct usb_endpoint_descriptor *desc)
{
	struct dwc3_ep *dep;
	struct dwc3 *dwc;
	unsigned long irqflags;
	int err;

	if (!ep || !desc || desc->bDescriptorType != USB_DT_ENDPOINT) {
		pr_debug("dwc3: invalid parameters\n");
		return -EINVAL;
	}

	if (!desc->wMaxPacketSize) {
		pr_debug("dwc3: missing wMaxPacketSize\n");
		return -EINVAL;
	}

	dep = to_dwc3_ep(ep);
	dwc = dep->dwc;

	/* Enabling twice is a caller bug; warn once and succeed quietly. */
	if (dev_WARN_ONCE(dwc->dev, dep->flags & DWC3_EP_ENABLED,
					"%s is already enabled\n",
					dep->name))
		return 0;

	spin_lock_irqsave(&dwc->lock, irqflags);
	err = __dwc3_gadget_ep_enable(dep, DWC3_DEPCFG_ACTION_INIT);
	spin_unlock_irqrestore(&dwc->lock, irqflags);

	return err;
}
/*
 * usb_ep_ops ->disable for non-control endpoints. Disables the hardware
 * endpoint under the controller lock. Returns 0 on success (including
 * when already disabled) or a negative errno.
 */
static int dwc3_gadget_ep_disable(struct usb_ep *ep)
{
	struct dwc3_ep *dep;
	struct dwc3 *dwc;
	unsigned long irqflags;
	int err;

	if (!ep) {
		pr_debug("dwc3: invalid parameters\n");
		return -EINVAL;
	}

	dep = to_dwc3_ep(ep);
	dwc = dep->dwc;

	/* Disabling twice is a caller bug; warn once and succeed quietly. */
	if (dev_WARN_ONCE(dwc->dev, !(dep->flags & DWC3_EP_ENABLED),
					"%s is already disabled\n",
					dep->name))
		return 0;

	spin_lock_irqsave(&dwc->lock, irqflags);
	err = __dwc3_gadget_ep_disable(dep);
	spin_unlock_irqrestore(&dwc->lock, irqflags);

	return err;
}
/* usb_ep_ops ->alloc_request: allocate and pre-populate a dwc3 request. */
static struct usb_request *dwc3_gadget_ep_alloc_request(struct usb_ep *ep,
		gfp_t gfp_flags)
{
	struct dwc3_ep *dep = to_dwc3_ep(ep);
	struct dwc3_request *req;

	req = kzalloc(sizeof(*req), gfp_flags);
	if (!req)
		return NULL;

	req->dep = dep;
	req->epnum = dep->number;
	req->direction = dep->direction;
	req->status = DWC3_REQUEST_STATUS_UNKNOWN;

	trace_dwc3_alloc_request(req);

	return &req->request;
}
/* usb_ep_ops ->free_request: release a request from ->alloc_request. */
static void dwc3_gadget_ep_free_request(struct usb_ep *ep,
		struct usb_request *request)
{
	struct dwc3_request *req = to_dwc3_request(request);

	trace_dwc3_free_request(req);

	kfree(req);
}
/**
 * dwc3_ep_prev_trb - returns the previous TRB in the ring
 * @dep: The endpoint with the TRB ring
 * @index: The index of the current TRB in the ring
 *
 * Returns the TRB prior to the one pointed to by the index. If the
 * index is 0, we will wrap backwards, skip the link TRB, and return
 * the one just before that.
 */
static struct dwc3_trb *dwc3_ep_prev_trb(struct dwc3_ep *dep, u8 index)
{
	/* Wrapping backwards from slot 0 skips over the trailing link TRB. */
	if (!index)
		index = DWC3_TRB_NUM - 1;

	return &dep->trb_pool[index - 1];
}
/* Number of free TRB slots remaining in @dep's ring. */
static u32 dwc3_calc_trbs_left(struct dwc3_ep *dep)
{
	u8 avail;

	/*
	 * Equal enqueue and dequeue pointers mean the ring is either
	 * completely empty or completely full (DWC3_TRB_NUM - 1 TRBs
	 * pending); a non-empty started list disambiguates the two.
	 */
	if (dep->trb_enqueue == dep->trb_dequeue) {
		if (!list_empty(&dep->started_list))
			return 0;

		return DWC3_TRB_NUM - 1;
	}

	avail = dep->trb_dequeue - dep->trb_enqueue;
	avail &= (DWC3_TRB_NUM - 1);

	/* When enqueue is ahead, one slot is consumed by the link TRB wrap. */
	if (dep->trb_dequeue < dep->trb_enqueue)
		avail--;

	return avail;
}
/**
 * dwc3_prepare_one_trb - setup one TRB from one request
 * @dep: endpoint for which this request is prepared
 * @req: dwc3_request pointer
 * @trb_length: buffer size of the TRB
 * @chain: should this TRB be chained to the next?
 * @node: only for isochronous endpoints. First TRB needs different type.
 * @use_bounce_buffer: set to use bounce buffer
 * @must_interrupt: set to interrupt on TRB completion
 */
static void dwc3_prepare_one_trb(struct dwc3_ep *dep,
		struct dwc3_request *req, unsigned int trb_length,
		unsigned int chain, unsigned int node, bool use_bounce_buffer,
		bool must_interrupt)
{
	struct dwc3_trb *trb;
	dma_addr_t dma;
	unsigned int stream_id = req->request.stream_id;
	unsigned int short_not_ok = req->request.short_not_ok;
	unsigned int no_interrupt = req->request.no_interrupt;
	unsigned int is_last = req->request.is_last;
	struct dwc3 *dwc = dep->dwc;
	struct usb_gadget *gadget = dwc->gadget;
	enum usb_device_speed speed = gadget->speed;

	/* Pick the buffer address: bounce buffer, current SG entry, or dma. */
	if (use_bounce_buffer)
		dma = dep->dwc->bounce_addr;
	else if (req->request.num_sgs > 0)
		dma = sg_dma_address(req->start_sg);
	else
		dma = req->request.dma;

	trb = &dep->trb_pool[dep->trb_enqueue];

	/* First TRB of this request: move it to the started list. */
	if (!req->trb) {
		dwc3_gadget_move_started_request(req);
		req->trb = trb;
		req->trb_dma = dwc3_trb_dma_offset(dep, trb);
	}

	req->num_trbs++;

	trb->size = DWC3_TRB_SIZE_LENGTH(trb_length);
	trb->bpl = lower_32_bits(dma);
	trb->bph = upper_32_bits(dma);

	switch (usb_endpoint_type(dep->endpoint.desc)) {
	case USB_ENDPOINT_XFER_CONTROL:
		trb->ctrl = DWC3_TRBCTL_CONTROL_SETUP;
		break;

	case USB_ENDPOINT_XFER_ISOC:
		if (!node) {
			trb->ctrl = DWC3_TRBCTL_ISOCHRONOUS_FIRST;

			/*
			 * USB Specification 2.0 Section 5.9.2 states that: "If
			 * there is only a single transaction in the microframe,
			 * only a DATA0 data packet PID is used. If there are
			 * two transactions per microframe, DATA1 is used for
			 * the first transaction data packet and DATA0 is used
			 * for the second transaction data packet. If there are
			 * three transactions per microframe, DATA2 is used for
			 * the first transaction data packet, DATA1 is used for
			 * the second, and DATA0 is used for the third."
			 *
			 * IOW, we should satisfy the following cases:
			 *
			 * 1) length <= maxpacket
			 *	- DATA0
			 *
			 * 2) maxpacket < length <= (2 * maxpacket)
			 *	- DATA1, DATA0
			 *
			 * 3) (2 * maxpacket) < length <= (3 * maxpacket)
			 *	- DATA2, DATA1, DATA0
			 */
			if (speed == USB_SPEED_HIGH) {
				struct usb_ep *ep = &dep->endpoint;
				unsigned int mult = 2;
				unsigned int maxp = usb_endpoint_maxp(ep->desc);

				if (req->request.length <= (2 * maxp))
					mult--;

				if (req->request.length <= maxp)
					mult--;

				trb->size |= DWC3_TRB_SIZE_PCM1(mult);
			}
		} else {
			trb->ctrl = DWC3_TRBCTL_ISOCHRONOUS;
		}

		if (!no_interrupt && !chain)
			trb->ctrl |= DWC3_TRB_CTRL_ISP_IMI;
		break;

	case USB_ENDPOINT_XFER_BULK:
	case USB_ENDPOINT_XFER_INT:
		trb->ctrl = DWC3_TRBCTL_NORMAL;
		break;
	default:
		/*
		 * This is only possible with faulty memory because we
		 * checked it already :)
		 */
		dev_WARN(dwc->dev, "Unknown endpoint type %d\n",
				usb_endpoint_type(dep->endpoint.desc));
	}

	/*
	 * Enable Continue on Short Packet
	 * when endpoint is not a stream capable
	 */
	if (usb_endpoint_dir_out(dep->endpoint.desc)) {
		if (!dep->stream_capable)
			trb->ctrl |= DWC3_TRB_CTRL_CSP;

		if (short_not_ok)
			trb->ctrl |= DWC3_TRB_CTRL_ISP_IMI;
	}

	/* All TRBs setup for MST must set CSP=1 when LST=0 */
	if (dep->stream_capable && DWC3_MST_CAPABLE(&dwc->hwparams))
		trb->ctrl |= DWC3_TRB_CTRL_CSP;

	if ((!no_interrupt && !chain) || must_interrupt)
		trb->ctrl |= DWC3_TRB_CTRL_IOC;

	if (chain)
		trb->ctrl |= DWC3_TRB_CTRL_CHN;
	else if (dep->stream_capable && is_last &&
		 !DWC3_MST_CAPABLE(&dwc->hwparams))
		trb->ctrl |= DWC3_TRB_CTRL_LST;

	if (usb_endpoint_xfer_bulk(dep->endpoint.desc) && dep->stream_capable)
		trb->ctrl |= DWC3_TRB_CTRL_SID_SOFN(stream_id);

	/*
	 * As per data book 4.2.3.2TRB Control Bit Rules section
	 *
	 * The controller autonomously checks the HWO field of a TRB to determine if the
	 * entire TRB is valid. Therefore, software must ensure that the rest of the TRB
	 * is valid before setting the HWO field to '1'. In most systems, this means that
	 * software must update the fourth DWORD of a TRB last.
	 *
	 * However there is a possibility of CPU re-ordering here which can cause
	 * controller to observe the HWO bit set prematurely.
	 * Add a write memory barrier to prevent CPU re-ordering.
	 */
	wmb();
	trb->ctrl |= DWC3_TRB_CTRL_HWO;

	dwc3_ep_inc_enq(dep);

	trace_dwc3_prepare_trb(dep, trb);
}
/*
 * True when @req requires a second TRB: either an explicit ZLP on a
 * non-isoc transfer whose length is a multiple of maxpacket, or an OUT
 * transfer whose length needs padding up to maxpacket alignment.
 */
static bool dwc3_needs_extra_trb(struct dwc3_ep *dep, struct dwc3_request *req)
{
	unsigned int maxp = usb_endpoint_maxp(dep->endpoint.desc);
	unsigned int rem = req->request.length % maxp;

	/* ZLP case: non-empty, maxpacket-aligned, zero requested, not isoc. */
	if (req->request.length && req->request.zero && !rem &&
	    !usb_endpoint_xfer_isoc(dep->endpoint.desc))
		return true;

	/* OUT alignment case: unaligned OUT transfers need a pad TRB. */
	return !req->direction && rem;
}
/**
 * dwc3_prepare_last_sg - prepare TRBs for the last SG entry
 * @dep: The endpoint that the request belongs to
 * @req: The request to prepare
 * @entry_length: The last SG entry size
 * @node: Indicates whether this is not the first entry (for isoc only)
 *
 * Return the number of TRBs prepared.
 */
static int dwc3_prepare_last_sg(struct dwc3_ep *dep,
		struct dwc3_request *req, unsigned int entry_length,
		unsigned int node)
{
	unsigned int maxp = usb_endpoint_maxp(dep->endpoint.desc);
	unsigned int rem = req->request.length % maxp;
	unsigned int num_trbs = dwc3_needs_extra_trb(dep, req) ? 2 : 1;

	/* Not enough free ring slots: prepare nothing. */
	if (dwc3_calc_trbs_left(dep) < num_trbs)
		return 0;

	req->needs_extra_trb = num_trbs > 1;

	/* Prepare a normal TRB */
	if (req->direction || req->request.length)
		dwc3_prepare_one_trb(dep, req, entry_length,
				req->needs_extra_trb, node, false, false);

	/* Prepare extra TRBs for ZLP and MPS OUT transfer alignment */
	if ((!req->direction && !req->request.length) || req->needs_extra_trb)
		dwc3_prepare_one_trb(dep, req,
				req->direction ? 0 : maxp - rem,
				false, 1, true, false);

	return num_trbs;
}
/*
 * dwc3_prepare_trbs_sg - map a scatter-gather request onto TRBs
 * @dep: endpoint to prepare TRBs for
 * @req: the (possibly partially queued) request
 *
 * Walks the not-yet-queued SG entries starting at req->start_sg and prepares
 * one TRB per entry, chaining all but the last. May stop early when the TRB
 * ring fills up; in that case the bookkeeping fields (start_sg,
 * num_queued_sgs, num_pending_sgs) let a later call resume where this one
 * stopped.
 *
 * Returns the number of TRBs prepared by this call.
 */
static int dwc3_prepare_trbs_sg(struct dwc3_ep *dep,
		struct dwc3_request *req)
{
	struct scatterlist *sg = req->start_sg;
	struct scatterlist *s;
	int i;
	unsigned int length = req->request.length;
	unsigned int remaining = req->request.num_mapped_sgs
		- req->num_queued_sgs;
	unsigned int num_trbs = req->num_trbs;
	bool needs_extra_trb = dwc3_needs_extra_trb(dep, req);

	/*
	 * If we resume preparing the request, then get the remaining length of
	 * the request and resume where we left off.
	 */
	for_each_sg(req->request.sg, s, req->num_queued_sgs, i)
		length -= sg_dma_len(s);

	for_each_sg(sg, s, remaining, i) {
		unsigned int num_trbs_left = dwc3_calc_trbs_left(dep);
		unsigned int trb_length;
		bool must_interrupt = false;
		bool last_sg = false;

		/* Never queue more than the request's remaining byte count */
		trb_length = min_t(unsigned int, length, sg_dma_len(s));

		length -= trb_length;

		/*
		 * IOMMU driver is coalescing the list of sgs which shares a
		 * page boundary into one and giving it to USB driver. With
		 * this the number of sgs mapped is not equal to the number of
		 * sgs passed. So mark the chain bit to false if it is the last
		 * mapped sg.
		 */
		if ((i == remaining - 1) || !length)
			last_sg = true;

		if (!num_trbs_left)
			break;

		if (last_sg) {
			if (!dwc3_prepare_last_sg(dep, req, trb_length, i))
				break;
		} else {
			/*
			 * Look ahead to check if we have enough TRBs for the
			 * next SG entry. If not, set interrupt on this TRB to
			 * resume preparing the next SG entry when more TRBs are
			 * free.
			 */
			if (num_trbs_left == 1 || (needs_extra_trb &&
					num_trbs_left <= 2 &&
					sg_dma_len(sg_next(s)) >= length)) {
				struct dwc3_request *r;

				/* Check if previous requests already set IOC */
				list_for_each_entry(r, &dep->started_list, list) {
					if (r != req && !r->request.no_interrupt)
						break;

					if (r == req)
						must_interrupt = true;
				}
			}

			dwc3_prepare_one_trb(dep, req, trb_length, 1, i, false,
					must_interrupt);
		}

		/*
		 * There can be a situation where all sgs in sglist are not
		 * queued because of insufficient trb number. To handle this
		 * case, update start_sg to next sg to be queued, so that
		 * we have free trbs we can continue queuing from where we
		 * previously stopped
		 */
		if (!last_sg)
			req->start_sg = sg_next(s);

		req->num_queued_sgs++;
		req->num_pending_sgs--;

		/*
		 * The number of pending SG entries may not correspond to the
		 * number of mapped SG entries. If all the data are queued, then
		 * don't include unused SG entries.
		 */
		if (length == 0) {
			req->num_pending_sgs = 0;
			break;
		}

		if (must_interrupt)
			break;
	}

	/* Report how many TRBs this call actually consumed */
	return req->num_trbs - num_trbs;
}
/*
 * dwc3_prepare_trbs_linear - prepare TRBs for a non-scatter-gather request
 * @dep: endpoint to prepare TRBs for
 * @req: the request to map
 *
 * A linear buffer is equivalent to a single "last" SG entry spanning the
 * whole request. Returns the number of TRBs prepared.
 */
static int dwc3_prepare_trbs_linear(struct dwc3_ep *dep,
		struct dwc3_request *req)
{
	unsigned int full_length = req->request.length;

	return dwc3_prepare_last_sg(dep, req, full_length, 0);
}
/*
 * dwc3_prepare_trbs - setup TRBs from requests
 * @dep: endpoint for which requests are being prepared
 *
 * The function goes through the requests list and sets up TRBs for the
 * transfers. The function returns once there are no more TRBs available or
 * it runs out of requests.
 *
 * Returns the number of TRBs prepared or negative errno.
 */
static int dwc3_prepare_trbs(struct dwc3_ep *dep)
{
	struct dwc3_request *req, *n;
	int ret = 0;

	/* dwc3_calc_trbs_left()/enq/deq math relies on a power-of-2 ring */
	BUILD_BUG_ON_NOT_POWER_OF_2(DWC3_TRB_NUM);

	/*
	 * We can get in a situation where there's a request in the started list
	 * but there weren't enough TRBs to fully kick it in the first time
	 * around, so it has been waiting for more TRBs to be freed up.
	 *
	 * In that case, we should check if we have a request with pending_sgs
	 * in the started list and prepare TRBs for that request first,
	 * otherwise we will prepare TRBs completely out of order and that will
	 * break things.
	 */
	list_for_each_entry(req, &dep->started_list, list) {
		if (req->num_pending_sgs > 0) {
			ret = dwc3_prepare_trbs_sg(dep, req);
			/* Ring full or request still incomplete: stop here */
			if (!ret || req->num_pending_sgs)
				return ret;
		}

		if (!dwc3_calc_trbs_left(dep))
			return ret;

		/*
		 * Don't prepare beyond a transfer. In DWC_usb32, its transfer
		 * burst capability may try to read and use TRBs beyond the
		 * active transfer instead of stopping.
		 */
		if (dep->stream_capable && req->request.is_last &&
		    !DWC3_MST_CAPABLE(&dep->dwc->hwparams))
			return ret;
	}

	/* Started backlog drained; now map and prepare new pending requests */
	list_for_each_entry_safe(req, n, &dep->pending_list, list) {
		struct dwc3 *dwc = dep->dwc;

		ret = usb_gadget_map_request_by_dev(dwc->sysdev, &req->request,
						    dep->direction);
		if (ret)
			return ret;

		req->sg			= req->request.sg;
		req->start_sg		= req->sg;
		req->num_queued_sgs	= 0;
		req->num_pending_sgs	= req->request.num_mapped_sgs;

		if (req->num_pending_sgs > 0) {
			ret = dwc3_prepare_trbs_sg(dep, req);
			if (req->num_pending_sgs)
				return ret;
		} else {
			ret = dwc3_prepare_trbs_linear(dep, req);
		}

		/* Stop when nothing was prepared or the ring is exhausted */
		if (!ret || !dwc3_calc_trbs_left(dep))
			return ret;

		/*
		 * Don't prepare beyond a transfer. In DWC_usb32, its transfer
		 * burst capability may try to read and use TRBs beyond the
		 * active transfer instead of stopping.
		 */
		if (dep->stream_capable && req->request.is_last &&
		    !DWC3_MST_CAPABLE(&dwc->hwparams))
			return ret;
	}

	return ret;
}
static void dwc3_gadget_ep_cleanup_cancelled_requests(struct dwc3_ep *dep);
/*
 * __dwc3_gadget_kick_transfer - prepare TRBs and (re)start the endpoint
 * @dep: endpoint to kick
 *
 * Prepares TRBs for queued requests, then issues either Start Transfer (if no
 * transfer is active) or Update Transfer to the controller.
 *
 * Fix: the command-parameter references were corrupted to "¶ms" by a
 * character-encoding mangle of "&params"; restored so the code compiles.
 *
 * Returns 0 on success or a negative errno from TRB preparation or the
 * endpoint command.
 */
static int __dwc3_gadget_kick_transfer(struct dwc3_ep *dep)
{
	struct dwc3_gadget_ep_cmd_params params;
	struct dwc3_request *req;
	int starting;
	int ret;
	u32 cmd;

	/*
	 * Note that it's normal to have no new TRBs prepared (i.e. ret == 0).
	 * This happens when we need to stop and restart a transfer such as in
	 * the case of reinitiating a stream or retrying an isoc transfer.
	 */
	ret = dwc3_prepare_trbs(dep);
	if (ret < 0)
		return ret;

	starting = !(dep->flags & DWC3_EP_TRANSFER_STARTED);

	/*
	 * If there's no new TRB prepared and we don't need to restart a
	 * transfer, there's no need to update the transfer.
	 */
	if (!ret && !starting)
		return ret;

	req = next_request(&dep->started_list);
	if (!req) {
		dep->flags |= DWC3_EP_PENDING_REQUEST;
		return 0;
	}

	memset(&params, 0, sizeof(params));

	if (starting) {
		/* Start Transfer takes the TRB ring's DMA address */
		params.param0 = upper_32_bits(req->trb_dma);
		params.param1 = lower_32_bits(req->trb_dma);
		cmd = DWC3_DEPCMD_STARTTRANSFER;

		if (dep->stream_capable)
			cmd |= DWC3_DEPCMD_PARAM(req->request.stream_id);

		/* Isoc transfers must be scheduled at a (micro)frame number */
		if (usb_endpoint_xfer_isoc(dep->endpoint.desc))
			cmd |= DWC3_DEPCMD_PARAM(dep->frame_number);
	} else {
		cmd = DWC3_DEPCMD_UPDATETRANSFER |
			DWC3_DEPCMD_PARAM(dep->resource_index);
	}

	ret = dwc3_send_gadget_ep_cmd(dep, cmd, &params);
	if (ret < 0) {
		struct dwc3_request *tmp;

		/* -EAGAIN (bus expiry) is handled by the caller's retry loop */
		if (ret == -EAGAIN)
			return ret;

		dwc3_stop_active_transfer(dep, true, true);

		list_for_each_entry_safe(req, tmp, &dep->started_list, list)
			dwc3_gadget_move_cancelled_request(req, DWC3_REQUEST_STATUS_DEQUEUED);

		/* If ep isn't started, then there's no end transfer pending */
		if (!(dep->flags & DWC3_EP_END_TRANSFER_PENDING))
			dwc3_gadget_ep_cleanup_cancelled_requests(dep);

		return ret;
	}

	if (dep->stream_capable && req->request.is_last &&
	    !DWC3_MST_CAPABLE(&dep->dwc->hwparams))
		dep->flags |= DWC3_EP_WAIT_TRANSFER_COMPLETE;

	return 0;
}
/*
 * __dwc3_gadget_get_frame - read the current (micro)frame number
 * @dwc: controller context
 *
 * The controller reports the lower frame-number bits in DSTS.SOFFN.
 */
static int __dwc3_gadget_get_frame(struct dwc3 *dwc)
{
	return DWC3_DSTS_SOFFN(dwc3_readl(dwc->regs, DWC3_DSTS));
}
/**
 * __dwc3_stop_active_transfer - stop the current active transfer
 * @dep: isoc endpoint
 * @force: set forcerm bit in the command
 * @interrupt: command complete interrupt after End Transfer command
 *
 * When setting force, the ForceRM bit will be set. In that case
 * the controller won't update the TRB progress on command
 * completion. It also won't clear the HWO bit in the TRB.
 * The command will also not complete immediately in that case.
 *
 * Fix: the command-parameter references were corrupted to "¶ms" by a
 * character-encoding mangle of "&params"; restored so the code compiles.
 */
static int __dwc3_stop_active_transfer(struct dwc3_ep *dep, bool force, bool interrupt)
{
	struct dwc3 *dwc = dep->dwc;
	struct dwc3_gadget_ep_cmd_params params;
	u32 cmd;
	int ret;

	cmd = DWC3_DEPCMD_ENDTRANSFER;
	cmd |= force ? DWC3_DEPCMD_HIPRI_FORCERM : 0;
	cmd |= interrupt ? DWC3_DEPCMD_CMDIOC : 0;
	cmd |= DWC3_DEPCMD_PARAM(dep->resource_index);
	memset(&params, 0, sizeof(params));
	ret = dwc3_send_gadget_ep_cmd(dep, cmd, &params);
	/*
	 * If the End Transfer command was timed out while the device is
	 * not in SETUP phase, it's possible that an incoming Setup packet
	 * may prevent the command's completion. Let's retry when the
	 * ep0state returns to EP0_SETUP_PHASE.
	 */
	if (ret == -ETIMEDOUT && dep->dwc->ep0state != EP0_SETUP_PHASE) {
		dep->flags |= DWC3_EP_DELAY_STOP;
		return 0;
	}
	WARN_ON_ONCE(ret);
	dep->resource_index = 0;

	if (!interrupt) {
		/* Old DWC3 cores need time to settle before the next command */
		if (!DWC3_IP_IS(DWC3) || DWC3_VER_IS_PRIOR(DWC3, 310A))
			mdelay(1);
		dep->flags &= ~DWC3_EP_TRANSFER_STARTED;
	} else if (!ret) {
		/* Completion will arrive via the command-complete interrupt */
		dep->flags |= DWC3_EP_END_TRANSFER_PENDING;
	}

	dep->flags &= ~DWC3_EP_DELAY_STOP;
	return ret;
}
/**
 * dwc3_gadget_start_isoc_quirk - workaround invalid frame number
 * @dep: isoc endpoint
 *
 * This function tests for the correct combination of BIT[15:14] from the 16-bit
 * microframe number reported by the XferNotReady event for the future frame
 * number to start the isoc transfer.
 *
 * In DWC_usb31 version 1.70a-ea06 and prior, for highspeed and fullspeed
 * isochronous IN, BIT[15:14] of the 16-bit microframe number reported by the
 * XferNotReady event are invalid. The driver uses this number to schedule the
 * isochronous transfer and passes it to the START TRANSFER command. Because
 * this number is invalid, the command may fail. If BIT[15:14] matches the
 * internal 16-bit microframe, the START TRANSFER command will pass and the
 * transfer will start at the scheduled time, if it is off by 1, the command
 * will still pass, but the transfer will start 2 seconds in the future. For all
 * other conditions, the START TRANSFER command will fail with bus-expiry.
 *
 * In order to workaround this issue, we can test for the correct combination of
 * BIT[15:14] by sending START TRANSFER commands with different values of
 * BIT[15:14]: 'b00, 'b01, 'b10, and 'b11. Each combination is 2^14 uframe apart
 * (or 2 seconds). 4 seconds into the future will result in a bus-expiry status.
 * As the result, within the 4 possible combinations for BIT[15:14], there will
 * be 2 successful and 2 failure START COMMAND status. One of the 2 successful
 * command status will result in a 2-second delay start. The smaller BIT[15:14]
 * value is the correct combination.
 *
 * Since there are only 4 outcomes and the results are ordered, we can simply
 * test 2 START TRANSFER commands with BIT[15:14] combinations 'b00 and 'b01 to
 * deduce the smaller successful combination.
 *
 * Let test0 = test status for combination 'b00 and test1 = test status for 'b01
 * of BIT[15:14]. The correct combination is as follow:
 *
 * if test0 fails and test1 passes, BIT[15:14] is 'b01
 * if test0 fails and test1 fails, BIT[15:14] is 'b10
 * if test0 passes and test1 fails, BIT[15:14] is 'b11
 * if test0 passes and test1 passes, BIT[15:14] is 'b00
 *
 * Synopsys STAR 9001202023: Wrong microframe number for isochronous IN
 * endpoints.
 *
 * Fix: the command-parameter reference was corrupted to "¶ms" by a
 * character-encoding mangle of "&params"; restored so the code compiles.
 */
static int dwc3_gadget_start_isoc_quirk(struct dwc3_ep *dep)
{
	int cmd_status = 0;
	bool test0;
	bool test1;

	while (dep->combo_num < 2) {
		struct dwc3_gadget_ep_cmd_params params;
		u32 test_frame_number;
		u32 cmd;

		/*
		 * Check if we can start isoc transfer on the next interval or
		 * 4 uframes in the future with BIT[15:14] as dep->combo_num
		 */
		test_frame_number = dep->frame_number & DWC3_FRNUMBER_MASK;
		test_frame_number |= dep->combo_num << 14;
		test_frame_number += max_t(u32, 4, dep->interval);

		/* Point the test transfer at the controller's bounce buffer */
		params.param0 = upper_32_bits(dep->dwc->bounce_addr);
		params.param1 = lower_32_bits(dep->dwc->bounce_addr);

		cmd = DWC3_DEPCMD_STARTTRANSFER;
		cmd |= DWC3_DEPCMD_PARAM(test_frame_number);
		cmd_status = dwc3_send_gadget_ep_cmd(dep, cmd, &params);

		/* Redo if some other failure beside bus-expiry is received */
		if (cmd_status && cmd_status != -EAGAIN) {
			dep->start_cmd_status = 0;
			dep->combo_num = 0;
			return 0;
		}

		/* Store the first test status */
		if (dep->combo_num == 0)
			dep->start_cmd_status = cmd_status;

		dep->combo_num++;

		/*
		 * End the transfer if the START_TRANSFER command is successful
		 * to wait for the next XferNotReady to test the command again
		 */
		if (cmd_status == 0) {
			dwc3_stop_active_transfer(dep, true, true);
			return 0;
		}
	}

	/* test0 and test1 are both completed at this point */
	test0 = (dep->start_cmd_status == 0);
	test1 = (cmd_status == 0);

	if (!test0 && test1)
		dep->combo_num = 1;
	else if (!test0 && !test1)
		dep->combo_num = 2;
	else if (test0 && !test1)
		dep->combo_num = 3;
	else if (test0 && test1)
		dep->combo_num = 0;

	/* Schedule the real transfer using the deduced BIT[15:14] value */
	dep->frame_number &= DWC3_FRNUMBER_MASK;
	dep->frame_number |= dep->combo_num << 14;
	dep->frame_number += max_t(u32, 4, dep->interval);

	/* Reinitialize test variables */
	dep->start_cmd_status = 0;
	dep->combo_num = 0;

	return __dwc3_gadget_kick_transfer(dep);
}
/*
 * __dwc3_gadget_start_isoc - schedule the start of an isochronous transfer
 * @dep: isoc endpoint
 *
 * Called once a XferNotReady event has provided a reference frame number.
 * Updates dep->frame_number from DSTS, then tries to start the transfer at
 * successively later intervals, retrying on bus-expiry (-EAGAIN).
 */
static int __dwc3_gadget_start_isoc(struct dwc3_ep *dep)
{
	const struct usb_endpoint_descriptor *desc = dep->endpoint.desc;
	struct dwc3 *dwc = dep->dwc;
	int ret;
	int i;

	/* Nothing to start yet; wait for a request to be queued */
	if (list_empty(&dep->pending_list) &&
	    list_empty(&dep->started_list)) {
		dep->flags |= DWC3_EP_PENDING_REQUEST;
		return -EAGAIN;
	}

	/* Affected DWC_usb31 revisions need the frame-number probe quirk */
	if (!dwc->dis_start_transfer_quirk &&
	    (DWC3_VER_IS_PRIOR(DWC31, 170A) ||
	     DWC3_VER_TYPE_IS_WITHIN(DWC31, 170A, EA01, EA06))) {
		if (dwc->gadget->speed <= USB_SPEED_HIGH && dep->direction)
			return dwc3_gadget_start_isoc_quirk(dep);
	}

	if (desc->bInterval <= 14 &&
	    dwc->gadget->speed >= USB_SPEED_HIGH) {
		u32 frame = __dwc3_gadget_get_frame(dwc);
		bool rollover = frame <
			(dep->frame_number & DWC3_FRNUMBER_MASK);

		/*
		 * frame_number is set from XferNotReady and may be already
		 * out of date. DSTS only provides the lower 14 bit of the
		 * current frame number. So add the upper two bits of
		 * frame_number and handle a possible rollover.
		 * This will provide the correct frame_number unless more than
		 * rollover has happened since XferNotReady.
		 */
		dep->frame_number = (dep->frame_number & ~DWC3_FRNUMBER_MASK) |
				    frame;
		if (rollover)
			dep->frame_number += BIT(14);
	}

	for (i = 0; i < DWC3_ISOC_MAX_RETRIES; i++) {
		int future_interval = i + 1;

		/* Give the controller at least 500us to schedule transfers */
		if (desc->bInterval < 3)
			future_interval += 3 - desc->bInterval;

		dep->frame_number = DWC3_ALIGN_FRAME(dep, future_interval);

		ret = __dwc3_gadget_kick_transfer(dep);
		if (ret != -EAGAIN)
			break;
	}

	/*
	 * After a number of unsuccessful start attempts due to bus-expiry
	 * status, issue END_TRANSFER command and retry on the next XferNotReady
	 * event.
	 */
	if (ret == -EAGAIN)
		ret = __dwc3_stop_active_transfer(dep, false, true);

	return ret;
}
/*
 * __dwc3_gadget_ep_queue - queue a request on an endpoint (lock held)
 * @dep: target endpoint
 * @req: request to queue
 *
 * Validates the request, appends it to the pending list, and kicks the
 * transfer unless a stall/end-transfer/stream-wait condition forces the
 * start to be deferred. Caller must hold dwc->lock.
 *
 * Returns 0 on success, -ESHUTDOWN if the endpoint is disabled, -EINVAL on
 * misuse (wrong endpoint or request already in flight).
 */
static int __dwc3_gadget_ep_queue(struct dwc3_ep *dep, struct dwc3_request *req)
{
	struct dwc3 *dwc = dep->dwc;

	if (!dep->endpoint.desc || !dwc->pullups_connected || !dwc->connected) {
		dev_dbg(dwc->dev, "%s: can't queue to disabled endpoint\n",
			dep->name);
		return -ESHUTDOWN;
	}

	if (WARN(req->dep != dep, "request %pK belongs to '%s'\n",
	    &req->request, req->dep->name))
		return -EINVAL;

	if (WARN(req->status < DWC3_REQUEST_STATUS_COMPLETED,
	    "%s: request %pK already in flight\n",
	    dep->name, &req->request))
		return -EINVAL;

	/* Balanced by pm_runtime_put when the request is given back */
	pm_runtime_get(dwc->dev);

	req->request.actual	= 0;
	req->request.status	= -EINPROGRESS;

	trace_dwc3_ep_queue(req);

	list_add_tail(&req->list, &dep->pending_list);
	req->status = DWC3_REQUEST_STATUS_QUEUED;

	/* A stream transfer must complete before the next one is started */
	if (dep->flags & DWC3_EP_WAIT_TRANSFER_COMPLETE)
		return 0;

	/*
	 * Start the transfer only after the END_TRANSFER is completed
	 * and endpoint STALL is cleared.
	 */
	if ((dep->flags & DWC3_EP_END_TRANSFER_PENDING) ||
	    (dep->flags & DWC3_EP_WEDGE) ||
	    (dep->flags & DWC3_EP_DELAY_STOP) ||
	    (dep->flags & DWC3_EP_STALL)) {
		dep->flags |= DWC3_EP_DELAY_START;
		return 0;
	}

	/*
	 * NOTICE: Isochronous endpoints should NEVER be prestarted. We must
	 * wait for a XferNotReady event so we will know what's the current
	 * (micro-)frame number.
	 *
	 * Without this trick, we are very, very likely gonna get Bus Expiry
	 * errors which will force us issue EndTransfer command.
	 */
	if (usb_endpoint_xfer_isoc(dep->endpoint.desc)) {
		if (!(dep->flags & DWC3_EP_TRANSFER_STARTED)) {
			if ((dep->flags & DWC3_EP_PENDING_REQUEST))
				return __dwc3_gadget_start_isoc(dep);

			return 0;
		}
	}

	__dwc3_gadget_kick_transfer(dep);

	return 0;
}
/*
 * dwc3_gadget_ep_queue - usb_ep_ops .queue entry point
 * @ep: gadget-framework endpoint
 * @request: request to submit
 * @gfp_flags: unused; allocation was done at request-alloc time
 *
 * Locked wrapper around __dwc3_gadget_ep_queue().
 */
static int dwc3_gadget_ep_queue(struct usb_ep *ep, struct usb_request *request,
		gfp_t gfp_flags)
{
	struct dwc3_ep *dep = to_dwc3_ep(ep);
	struct dwc3_request *req = to_dwc3_request(request);
	struct dwc3 *dwc = dep->dwc;
	unsigned long flags;
	int status;

	spin_lock_irqsave(&dwc->lock, flags);
	status = __dwc3_gadget_ep_queue(dep, req);
	spin_unlock_irqrestore(&dwc->lock, flags);

	return status;
}
/*
 * dwc3_gadget_ep_skip_trbs - release all TRBs owned by a request
 * @dep: endpoint owning the TRB ring
 * @req: request whose TRBs should be skipped
 *
 * If request was already started, this means we had to
 * stop the transfer. With that we also need to ignore
 * all TRBs used by the request, however TRBs can only
 * be modified after completion of END_TRANSFER
 * command. So what we do here is that we wait for
 * END_TRANSFER completion and only after that, we jump
 * over TRBs by clearing HWO and incrementing dequeue
 * pointer.
 */
static void dwc3_gadget_ep_skip_trbs(struct dwc3_ep *dep, struct dwc3_request *req)
{
	unsigned int remaining;

	/* If req->trb is not set, then the request has not started */
	if (!req->trb)
		return;

	remaining = req->num_trbs;
	while (remaining--) {
		struct dwc3_trb *trb = &dep->trb_pool[dep->trb_dequeue];

		trb->ctrl &= ~DWC3_TRB_CTRL_HWO;
		dwc3_ep_inc_deq(dep);
	}

	req->num_trbs = 0;
}
/*
 * dwc3_gadget_ep_cleanup_cancelled_requests - give back all cancelled requests
 * @dep: endpoint whose cancelled_list should be drained
 *
 * Skips each cancelled request's TRBs and returns it to the gadget driver
 * with an error code derived from the cancellation reason. Stops early if
 * the endpoint gets disabled during a giveback (dwc3_remove_requests()
 * finishes the cleanup in that case).
 */
static void dwc3_gadget_ep_cleanup_cancelled_requests(struct dwc3_ep *dep)
{
	struct dwc3_request *req;
	struct dwc3 *dwc = dep->dwc;

	while (!list_empty(&dep->cancelled_list)) {
		int status = -ECONNRESET;

		req = next_request(&dep->cancelled_list);
		dwc3_gadget_ep_skip_trbs(dep, req);

		/* Map the cancellation reason onto the giveback status */
		if (req->status == DWC3_REQUEST_STATUS_DISCONNECTED)
			status = -ESHUTDOWN;
		else if (req->status == DWC3_REQUEST_STATUS_STALLED)
			status = -EPIPE;
		else if (req->status != DWC3_REQUEST_STATUS_DEQUEUED)
			dev_err(dwc->dev, "request cancelled with wrong reason:%d\n", req->status);

		dwc3_gadget_giveback(dep, req, status);

		/*
		 * The endpoint is disabled, let the dwc3_remove_requests()
		 * handle the cleanup.
		 */
		if (!dep->endpoint.desc)
			break;
	}
}
/*
 * dwc3_gadget_ep_dequeue - usb_ep_ops .dequeue entry point
 * @ep: gadget-framework endpoint
 * @request: request to cancel
 *
 * Locates the request on the cancelled, pending, or started list (checked in
 * that order). A pending request is given back immediately; a started one
 * requires stopping the active transfer first and moving all started
 * requests to the cancelled list for deferred giveback.
 *
 * Returns 0 on success, -EINVAL if the request was never queued here.
 */
static int dwc3_gadget_ep_dequeue(struct usb_ep *ep,
		struct usb_request *request)
{
	struct dwc3_request *req = to_dwc3_request(request);
	struct dwc3_request *r = NULL;

	struct dwc3_ep *dep = to_dwc3_ep(ep);
	struct dwc3 *dwc = dep->dwc;

	unsigned long flags;
	int ret = 0;

	trace_dwc3_ep_dequeue(req);

	spin_lock_irqsave(&dwc->lock, flags);

	/* Already cancelled: nothing more to do */
	list_for_each_entry(r, &dep->cancelled_list, list) {
		if (r == req)
			goto out;
	}

	list_for_each_entry(r, &dep->pending_list, list) {
		if (r == req) {
			/* Not yet on the hardware; give it back directly */
			dwc3_gadget_giveback(dep, req, -ECONNRESET);
			goto out;
		}
	}

	list_for_each_entry(r, &dep->started_list, list) {
		if (r == req) {
			struct dwc3_request *t;

			/* wait until it is processed */
			dwc3_stop_active_transfer(dep, true, true);

			/*
			 * Remove any started request if the transfer is
			 * cancelled.
			 */
			list_for_each_entry_safe(r, t, &dep->started_list, list)
				dwc3_gadget_move_cancelled_request(r,
						DWC3_REQUEST_STATUS_DEQUEUED);

			dep->flags &= ~DWC3_EP_WAIT_TRANSFER_COMPLETE;

			goto out;
		}
	}

	dev_err(dwc->dev, "request %pK was not queued to %s\n",
		request, ep->name);
	ret = -EINVAL;
out:
	spin_unlock_irqrestore(&dwc->lock, flags);

	return ret;
}
/*
 * __dwc3_gadget_ep_set_halt - set or clear endpoint STALL (lock held)
 * @dep: endpoint to (un)halt
 * @value: nonzero to set STALL, zero to clear it
 * @protocol: nonzero when requested by a protocol (SETUP) transfer
 *
 * Fix: the command-parameter references were corrupted to "¶ms" by a
 * character-encoding mangle of "&params"; restored so the code compiles.
 *
 * Returns 0 on success, -EINVAL for isoc endpoints, -EAGAIN when a
 * function-initiated stall races with an in-flight transfer, or the
 * endpoint-command error.
 */
int __dwc3_gadget_ep_set_halt(struct dwc3_ep *dep, int value, int protocol)
{
	struct dwc3_gadget_ep_cmd_params params;
	struct dwc3 *dwc = dep->dwc;
	struct dwc3_request *req;
	struct dwc3_request *tmp;
	int ret;

	/* Isochronous endpoints have no halt feature */
	if (usb_endpoint_xfer_isoc(dep->endpoint.desc)) {
		dev_err(dwc->dev, "%s is of Isochronous type\n", dep->name);
		return -EINVAL;
	}

	memset(&params, 0x00, sizeof(params));

	if (value) {
		struct dwc3_trb *trb;

		unsigned int transfer_in_flight;
		unsigned int started;

		/* Inspect the most recently queued TRB for the HWO bit */
		if (dep->number > 1)
			trb = dwc3_ep_prev_trb(dep, dep->trb_enqueue);
		else
			trb = &dwc->ep0_trb[dep->trb_enqueue];

		transfer_in_flight = trb->ctrl & DWC3_TRB_CTRL_HWO;
		started = !list_empty(&dep->started_list);

		/* A function-initiated stall must not clobber active I/O */
		if (!protocol && ((dep->direction && transfer_in_flight) ||
				(!dep->direction && started))) {
			return -EAGAIN;
		}

		ret = dwc3_send_gadget_ep_cmd(dep, DWC3_DEPCMD_SETSTALL,
				&params);
		if (ret)
			dev_err(dwc->dev, "failed to set STALL on %s\n",
					dep->name);
		else
			dep->flags |= DWC3_EP_STALL;
	} else {
		/*
		 * Don't issue CLEAR_STALL command to control endpoints. The
		 * controller automatically clears the STALL when it receives
		 * the SETUP token.
		 */
		if (dep->number <= 1) {
			dep->flags &= ~(DWC3_EP_STALL | DWC3_EP_WEDGE);
			return 0;
		}

		dwc3_stop_active_transfer(dep, true, true);

		list_for_each_entry_safe(req, tmp, &dep->started_list, list)
			dwc3_gadget_move_cancelled_request(req, DWC3_REQUEST_STATUS_STALLED);

		/* Defer the clear until the End Transfer completes */
		if (dep->flags & DWC3_EP_END_TRANSFER_PENDING ||
		    (dep->flags & DWC3_EP_DELAY_STOP)) {
			dep->flags |= DWC3_EP_PENDING_CLEAR_STALL;
			if (protocol)
				dwc->clear_stall_protocol = dep->number;

			return 0;
		}

		dwc3_gadget_ep_cleanup_cancelled_requests(dep);

		ret = dwc3_send_clear_stall_ep_cmd(dep);
		if (ret) {
			dev_err(dwc->dev, "failed to clear STALL on %s\n",
					dep->name);
			return ret;
		}

		dep->flags &= ~(DWC3_EP_STALL | DWC3_EP_WEDGE);

		/* Restart anything that queued while the endpoint was stalled */
		if ((dep->flags & DWC3_EP_DELAY_START) &&
		    !usb_endpoint_xfer_isoc(dep->endpoint.desc))
			__dwc3_gadget_kick_transfer(dep);

		dep->flags &= ~DWC3_EP_DELAY_START;
	}

	return ret;
}
/*
 * dwc3_gadget_ep_set_halt - usb_ep_ops .set_halt entry point
 * @ep: gadget-framework endpoint
 * @value: nonzero to stall, zero to un-stall
 *
 * Locked, non-protocol wrapper around __dwc3_gadget_ep_set_halt().
 */
static int dwc3_gadget_ep_set_halt(struct usb_ep *ep, int value)
{
	struct dwc3_ep *dep = to_dwc3_ep(ep);
	struct dwc3 *dwc = dep->dwc;
	unsigned long flags;
	int status;

	spin_lock_irqsave(&dwc->lock, flags);
	status = __dwc3_gadget_ep_set_halt(dep, value, false);
	spin_unlock_irqrestore(&dwc->lock, flags);

	return status;
}
/*
 * dwc3_gadget_ep_set_wedge - usb_ep_ops .set_wedge entry point
 * @ep: gadget-framework endpoint
 *
 * Marks the endpoint wedged (stall that the host cannot clear via
 * CLEAR_FEATURE) and stalls it through the appropriate ep0/non-ep0 path.
 */
static int dwc3_gadget_ep_set_wedge(struct usb_ep *ep)
{
	struct dwc3_ep *dep = to_dwc3_ep(ep);
	struct dwc3 *dwc = dep->dwc;
	unsigned long flags;
	int status;

	spin_lock_irqsave(&dwc->lock, flags);
	dep->flags |= DWC3_EP_WEDGE;

	/* Endpoints 0/1 form ep0 and use the dedicated control-path halt */
	if (dep->number == 0 || dep->number == 1)
		status = __dwc3_gadget_ep0_set_halt(ep, 1);
	else
		status = __dwc3_gadget_ep_set_halt(dep, 1, false);
	spin_unlock_irqrestore(&dwc->lock, flags);

	return status;
}
/* -------------------------------------------------------------------------- */
/* Software-provided descriptor for ep0; wMaxPacketSize is filled at init */
static struct usb_endpoint_descriptor dwc3_gadget_ep0_desc = {
	.bLength	= USB_DT_ENDPOINT_SIZE,
	.bDescriptorType = USB_DT_ENDPOINT,
	.bmAttributes	= USB_ENDPOINT_XFER_CONTROL,
};

/* Endpoint operations for the bidirectional control endpoint (ep0) */
static const struct usb_ep_ops dwc3_gadget_ep0_ops = {
	.enable		= dwc3_gadget_ep0_enable,
	.disable	= dwc3_gadget_ep0_disable,
	.alloc_request	= dwc3_gadget_ep_alloc_request,
	.free_request	= dwc3_gadget_ep_free_request,
	.queue		= dwc3_gadget_ep0_queue,
	.dequeue	= dwc3_gadget_ep_dequeue,
	.set_halt	= dwc3_gadget_ep0_set_halt,
	.set_wedge	= dwc3_gadget_ep_set_wedge,
};

/* Endpoint operations for all non-control endpoints */
static const struct usb_ep_ops dwc3_gadget_ep_ops = {
	.enable		= dwc3_gadget_ep_enable,
	.disable	= dwc3_gadget_ep_disable,
	.alloc_request	= dwc3_gadget_ep_alloc_request,
	.free_request	= dwc3_gadget_ep_free_request,
	.queue		= dwc3_gadget_ep_queue,
	.dequeue	= dwc3_gadget_ep_dequeue,
	.set_halt	= dwc3_gadget_ep_set_halt,
	.set_wedge	= dwc3_gadget_ep_set_wedge,
};
/* -------------------------------------------------------------------------- */
/*
 * dwc3_gadget_enable_linksts_evts - toggle link-state-change events
 * @dwc: controller context
 * @set: true to enable ULStCngEn in DEVTEN, false to disable it
 *
 * No-op on cores older than 2.50a (event enable not configurable there).
 */
static void dwc3_gadget_enable_linksts_evts(struct dwc3 *dwc, bool set)
{
	u32 reg;

	if (DWC3_VER_IS_PRIOR(DWC3, 250A))
		return;

	/* Read-modify-write just the ULSTCNGEN bit */
	reg = dwc3_readl(dwc->regs, DWC3_DEVTEN) & ~DWC3_DEVTEN_ULSTCNGEN;
	if (set)
		reg |= DWC3_DEVTEN_ULSTCNGEN;

	dwc3_writel(dwc->regs, DWC3_DEVTEN, reg);
}
/*
 * dwc3_gadget_get_frame - usb_gadget_ops .get_frame entry point
 * @g: gadget framework handle
 */
static int dwc3_gadget_get_frame(struct usb_gadget *g)
{
	return __dwc3_gadget_get_frame(gadget_to_dwc(g));
}
/*
 * __dwc3_gadget_wakeup - issue a remote-wakeup (link recovery) request
 * @dwc: controller context
 * @async: when true, rely on a link-state-change event for completion
 *         instead of polling DSTS for U0
 *
 * Caller must hold dwc->lock. Returns 0 on success, -EINVAL when the link
 * is not in a state from which wakeup may be requested or when polling
 * never observes U0.
 */
static int __dwc3_gadget_wakeup(struct dwc3 *dwc, bool async)
{
	int retries;

	int ret;
	u32 reg;

	u8 link_state;

	/*
	 * According to the Databook Remote wakeup request should
	 * be issued only when the device is in early suspend state.
	 *
	 * We can check that via USB Link State bits in DSTS register.
	 */
	reg = dwc3_readl(dwc->regs, DWC3_DSTS);

	link_state = DWC3_DSTS_USBLNKST(reg);

	switch (link_state) {
	case DWC3_LINK_STATE_RESET:
	case DWC3_LINK_STATE_RX_DET:	/* in HS, means Early Suspend */
	case DWC3_LINK_STATE_U3:	/* in HS, means SUSPEND */
	case DWC3_LINK_STATE_U2:	/* in HS, means Sleep (L1) */
	case DWC3_LINK_STATE_U1:
	case DWC3_LINK_STATE_RESUME:
		break;
	default:
		return -EINVAL;
	}

	/* Async completion is signalled via a ULStChng event */
	if (async)
		dwc3_gadget_enable_linksts_evts(dwc, true);

	ret = dwc3_gadget_set_link_state(dwc, DWC3_LINK_STATE_RECOV);
	if (ret < 0) {
		dev_err(dwc->dev, "failed to put link in Recovery\n");
		/* Disable the events again on failure */
		dwc3_gadget_enable_linksts_evts(dwc, false);
		return ret;
	}

	/* Recent versions do this automatically */
	if (DWC3_VER_IS_PRIOR(DWC3, 194A)) {
		/* write zeroes to Link Change Request */
		reg = dwc3_readl(dwc->regs, DWC3_DCTL);
		reg &= ~DWC3_DCTL_ULSTCHNGREQ_MASK;
		dwc3_writel(dwc->regs, DWC3_DCTL, reg);
	}

	/*
	 * Since link status change events are enabled we will receive
	 * an U0 event when wakeup is successful. So bail out.
	 */
	if (async)
		return 0;

	/* poll until Link State changes to ON */
	retries = 20000;

	while (retries--) {
		reg = dwc3_readl(dwc->regs, DWC3_DSTS);

		/* in HS, means ON */
		if (DWC3_DSTS_USBLNKST(reg) == DWC3_LINK_STATE_U0)
			break;
	}

	if (DWC3_DSTS_USBLNKST(reg) != DWC3_LINK_STATE_U0) {
		dev_err(dwc->dev, "failed to send remote wakeup\n");
		return -EINVAL;
	}

	return 0;
}
/*
 * dwc3_gadget_wakeup - usb_gadget_ops .wakeup entry point
 * @g: gadget framework handle
 *
 * Performs an asynchronous device-initiated remote wakeup, provided the
 * host both configured remote wakeup and armed the device for it.
 */
static int dwc3_gadget_wakeup(struct usb_gadget *g)
{
	struct dwc3 *dwc = gadget_to_dwc(g);
	unsigned long flags;
	int ret = -EINVAL;

	if (!dwc->wakeup_configured) {
		dev_err(dwc->dev, "remote wakeup not configured\n");
		return -EINVAL;
	}

	spin_lock_irqsave(&dwc->lock, flags);
	if (dwc->gadget->wakeup_armed)
		ret = __dwc3_gadget_wakeup(dwc, true);
	else
		dev_err(dwc->dev, "not armed for remote wakeup\n");
	spin_unlock_irqrestore(&dwc->lock, flags);

	return ret;
}
static void dwc3_resume_gadget(struct dwc3 *dwc);
/*
 * dwc3_gadget_func_wakeup - send a function remote-wakeup notification
 * @g: gadget framework handle
 * @intf_id: interface number to name in the device notification
 *
 * If the link is suspended (U3), first performs a synchronous remote wakeup
 * and marks the gadget resumed, then issues the Device Notification generic
 * command carrying the function-wake payload.
 */
static int dwc3_gadget_func_wakeup(struct usb_gadget *g, int intf_id)
{
	struct dwc3 *dwc = gadget_to_dwc(g);
	unsigned long flags;
	int ret;
	int link_state;

	if (!dwc->wakeup_configured) {
		dev_err(dwc->dev, "remote wakeup not configured\n");
		return -EINVAL;
	}

	spin_lock_irqsave(&dwc->lock, flags);
	/*
	 * If the link is in U3, signal for remote wakeup and wait for the
	 * link to transition to U0 before sending device notification.
	 */
	link_state = dwc3_gadget_get_link_state(dwc);
	if (link_state == DWC3_LINK_STATE_U3) {
		/* Synchronous wakeup: poll until the link reaches U0 */
		ret = __dwc3_gadget_wakeup(dwc, false);
		if (ret) {
			spin_unlock_irqrestore(&dwc->lock, flags);
			return -EINVAL;
		}
		dwc3_resume_gadget(dwc);
		dwc->suspended = false;
		dwc->link_state = DWC3_LINK_STATE_U0;
	}

	ret = dwc3_send_gadget_generic_command(dwc, DWC3_DGCMD_DEV_NOTIFICATION,
					       DWC3_DGCMDPAR_DN_FUNC_WAKE |
					       DWC3_DGCMDPAR_INTF_SEL(intf_id));
	if (ret)
		dev_err(dwc->dev, "function remote wakeup failed, ret:%d\n", ret);

	spin_unlock_irqrestore(&dwc->lock, flags);

	return ret;
}
/*
 * dwc3_gadget_set_remote_wakeup - usb_gadget_ops .set_remote_wakeup
 * @g: gadget framework handle
 * @set: nonzero when the host enabled the remote-wakeup feature
 *
 * Records whether the host configured remote wakeup; always succeeds.
 */
static int dwc3_gadget_set_remote_wakeup(struct usb_gadget *g, int set)
{
	struct dwc3 *dwc = gadget_to_dwc(g);
	unsigned long flags;

	spin_lock_irqsave(&dwc->lock, flags);
	dwc->wakeup_configured = (set != 0);
	spin_unlock_irqrestore(&dwc->lock, flags);

	return 0;
}
/*
 * dwc3_gadget_set_selfpowered - usb_gadget_ops .set_selfpowered
 * @g: gadget framework handle
 * @is_selfpowered: nonzero when the gadget reports self-powered status
 *
 * Updates the gadget's self-powered flag under the controller lock.
 */
static int dwc3_gadget_set_selfpowered(struct usb_gadget *g,
		int is_selfpowered)
{
	struct dwc3 *dwc = gadget_to_dwc(g);
	unsigned long flags;

	spin_lock_irqsave(&dwc->lock, flags);
	g->is_selfpowered = (is_selfpowered != 0);
	spin_unlock_irqrestore(&dwc->lock, flags);

	return 0;
}
/*
 * dwc3_stop_active_transfers - tear down requests on all non-control endpoints
 * @dwc: controller context
 *
 * Removes queued requests (with -ESHUTDOWN) from every allocated endpoint.
 * Endpoints 0 and 1 (ep0) are intentionally skipped.
 */
static void dwc3_stop_active_transfers(struct dwc3 *dwc)
{
	u32 epnum;

	for (epnum = 2; epnum < dwc->num_eps; epnum++) {
		struct dwc3_ep *dep = dwc->eps[epnum];

		if (dep)
			dwc3_remove_requests(dwc, dep, -ESHUTDOWN);
	}
}
/*
 * __dwc3_gadget_set_ssp_rate - program DCFG for a SuperSpeed Plus rate
 * @dwc: controller context
 *
 * Selects between Gen1x2 (programmed as SUPERSPEED with two lanes) and
 * Gen2 (SUPERSPEED_PLUS) based on the requested and maximum SSP rates,
 * then writes the speed and lane-count fields of DCFG.
 */
static void __dwc3_gadget_set_ssp_rate(struct dwc3 *dwc)
{
	enum usb_ssp_rate ssp_rate = dwc->gadget_ssp_rate;
	u32 reg;

	/* Fall back to the hardware's maximum when no rate was requested */
	if (ssp_rate == USB_SSP_GEN_UNKNOWN)
		ssp_rate = dwc->max_ssp_rate;

	reg = dwc3_readl(dwc->regs, DWC3_DCFG);
	reg &= ~DWC3_DCFG_SPEED_MASK;
	reg &= ~DWC3_DCFG_NUMLANES(~0);

	if (ssp_rate == USB_SSP_GEN_1x2)
		reg |= DWC3_DCFG_SUPERSPEED;
	else if (dwc->max_ssp_rate != USB_SSP_GEN_1x2)
		reg |= DWC3_DCFG_SUPERSPEED_PLUS;

	/* Dual-lane operation for everything except Gen2x1 */
	if (ssp_rate != USB_SSP_GEN_2x1 &&
	    dwc->max_ssp_rate != USB_SSP_GEN_2x1)
		reg |= DWC3_DCFG_NUMLANES(1);

	dwc3_writel(dwc->regs, DWC3_DCFG, reg);
}
/*
 * __dwc3_gadget_set_speed - program DCFG's maximum-speed field
 * @dwc: controller context
 *
 * Clamps the gadget's requested speed to the hardware maximum, delegates
 * SuperSpeed Plus on DWC_usb32 to __dwc3_gadget_set_ssp_rate(), and
 * otherwise writes the matching DCFG speed encoding, honoring the pre-2.20a
 * metastability erratum that forces SuperSpeed.
 */
static void __dwc3_gadget_set_speed(struct dwc3 *dwc)
{
	enum usb_device_speed speed;
	u32 reg;

	speed = dwc->gadget_max_speed;
	if (speed == USB_SPEED_UNKNOWN || speed > dwc->maximum_speed)
		speed = dwc->maximum_speed;

	/* DWC_usb32 encodes SSP via a separate rate/lane programming path */
	if (speed == USB_SPEED_SUPER_PLUS &&
	    DWC3_IP_IS(DWC32)) {
		__dwc3_gadget_set_ssp_rate(dwc);
		return;
	}

	reg = dwc3_readl(dwc->regs, DWC3_DCFG);
	reg &= ~(DWC3_DCFG_SPEED_MASK);

	/*
	 * WORKAROUND: DWC3 revision < 2.20a have an issue
	 * which would cause metastability state on Run/Stop
	 * bit if we try to force the IP to USB2-only mode.
	 *
	 * Because of that, we cannot configure the IP to any
	 * speed other than the SuperSpeed
	 *
	 * Refers to:
	 *
	 * STAR#9000525659: Clock Domain Crossing on DCTL in
	 * USB 2.0 Mode
	 */
	if (DWC3_VER_IS_PRIOR(DWC3, 220A) &&
	    !dwc->dis_metastability_quirk) {
		reg |= DWC3_DCFG_SUPERSPEED;
	} else {
		switch (speed) {
		case USB_SPEED_FULL:
			reg |= DWC3_DCFG_FULLSPEED;
			break;
		case USB_SPEED_HIGH:
			reg |= DWC3_DCFG_HIGHSPEED;
			break;
		case USB_SPEED_SUPER:
			reg |= DWC3_DCFG_SUPERSPEED;
			break;
		case USB_SPEED_SUPER_PLUS:
			/* DWC_usb3 cores top out at SuperSpeed */
			if (DWC3_IP_IS(DWC3))
				reg |= DWC3_DCFG_SUPERSPEED;
			else
				reg |= DWC3_DCFG_SUPERSPEED_PLUS;
			break;
		default:
			dev_err(dwc->dev, "invalid speed (%d)\n", speed);

			if (DWC3_IP_IS(DWC3))
				reg |= DWC3_DCFG_SUPERSPEED;
			else
				reg |= DWC3_DCFG_SUPERSPEED_PLUS;
		}
	}

	/* Sub-SSP speeds on DWC_usb32 must not request extra lanes */
	if (DWC3_IP_IS(DWC32) &&
	    speed > USB_SPEED_UNKNOWN &&
	    speed < USB_SPEED_SUPER_PLUS)
		reg &= ~DWC3_DCFG_NUMLANES(~0);

	dwc3_writel(dwc->regs, DWC3_DCFG, reg);
}
/*
 * dwc3_gadget_run_stop - set or clear DCTL.Run/Stop and wait for the core
 * @dwc: controller context
 * @is_on: nonzero to start the controller, zero to halt it
 *
 * Programs the speed and connection state when starting, then polls
 * DSTS.DevCtrlHlt (up to ~2s) until the controller reflects the requested
 * state. Returns 0 on success or -ETIMEDOUT.
 */
static int dwc3_gadget_run_stop(struct dwc3 *dwc, int is_on)
{
	u32 reg;
	u32 timeout = 2000;

	/* A runtime-suspended controller is already halted */
	if (pm_runtime_suspended(dwc->dev))
		return 0;

	reg = dwc3_readl(dwc->regs, DWC3_DCTL);
	if (is_on) {
		/* Old cores need the link target forced to Rx.Detect */
		if (DWC3_VER_IS_WITHIN(DWC3, ANY, 187A)) {
			reg &= ~DWC3_DCTL_TRGTULST_MASK;
			reg |= DWC3_DCTL_TRGTULST_RX_DET;
		}

		if (!DWC3_VER_IS_PRIOR(DWC3, 194A))
			reg &= ~DWC3_DCTL_KEEP_CONNECT;
		reg |= DWC3_DCTL_RUN_STOP;

		__dwc3_gadget_set_speed(dwc);
		dwc->pullups_connected = true;
	} else {
		reg &= ~DWC3_DCTL_RUN_STOP;

		dwc->pullups_connected = false;
	}

	dwc3_gadget_dctl_write_safe(dwc, reg);

	/* Poll DSTS until the halted bit matches the requested state */
	do {
		usleep_range(1000, 2000);
		reg = dwc3_readl(dwc->regs, DWC3_DSTS);
		reg &= DWC3_DSTS_DEVCTRLHLT;
	} while (--timeout && !(!is_on ^ !reg));

	if (!timeout)
		return -ETIMEDOUT;

	return 0;
}
static void dwc3_gadget_disable_irq(struct dwc3 *dwc);
static void __dwc3_gadget_stop(struct dwc3 *dwc);
static int __dwc3_gadget_start(struct dwc3 *dwc);
/*
 * dwc3_gadget_soft_disconnect - perform a device-initiated disconnect
 * @dwc: controller context
 *
 * Ends active transfers, brings ep0 back to the SETUP phase if needed,
 * halts the controller (Run/Stop = 0), and stops the gadget. Returns the
 * result of the run/stop poll.
 */
static int dwc3_gadget_soft_disconnect(struct dwc3 *dwc)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&dwc->lock, flags);
	dwc->connected = false;

	/*
	 * Attempt to end pending SETUP status phase, and not wait for the
	 * function to do so.
	 */
	if (dwc->delayed_status)
		dwc3_ep0_send_delayed_status(dwc);

	/*
	 * In the Synopsys DesignWare Cores USB3 Databook Rev. 3.30a
	 * Section 4.1.8 Table 4-7, it states that for a device-initiated
	 * disconnect, the SW needs to ensure that it sends "a DEPENDXFER
	 * command for any active transfers" before clearing the RunStop
	 * bit.
	 */
	dwc3_stop_active_transfers(dwc);
	spin_unlock_irqrestore(&dwc->lock, flags);

	/*
	 * Per databook, when we want to stop the gadget, if a control transfer
	 * is still in process, complete it and get the core into setup phase.
	 * In case the host is unresponsive to a SETUP transaction, forcefully
	 * stall the transfer, and move back to the SETUP phase, so that any
	 * pending endxfers can be executed.
	 */
	if (dwc->ep0state != EP0_SETUP_PHASE) {
		reinit_completion(&dwc->ep0_in_setup);

		ret = wait_for_completion_timeout(&dwc->ep0_in_setup,
				msecs_to_jiffies(DWC3_PULL_UP_TIMEOUT));
		if (ret == 0) {
			dev_warn(dwc->dev, "wait for SETUP phase timed out\n");
			spin_lock_irqsave(&dwc->lock, flags);
			dwc3_ep0_reset_state(dwc);
			spin_unlock_irqrestore(&dwc->lock, flags);
		}
	}

	/*
	 * Note: if the GEVNTCOUNT indicates events in the event buffer, the
	 * driver needs to acknowledge them before the controller can halt.
	 * Simply let the interrupt handler acknowledges and handle the
	 * remaining event generated by the controller while polling for
	 * DSTS.DEVCTLHLT.
	 */
	ret = dwc3_gadget_run_stop(dwc, false);

	/*
	 * Stop the gadget after controller is halted, so that if needed, the
	 * events to update EP0 state can still occur while the run/stop
	 * routine polls for the halted state. DEVTEN is cleared as part of
	 * gadget stop.
	 */
	spin_lock_irqsave(&dwc->lock, flags);
	__dwc3_gadget_stop(dwc);
	spin_unlock_irqrestore(&dwc->lock, flags);

	return ret;
}
/*
 * Re-enable the controller after a device-initiated disconnect.
 * Returns 0 on success or a negative error code from the soft reset or
 * the run/stop request.
 */
static int dwc3_gadget_soft_connect(struct dwc3 *dwc)
{
	int ret;

	/*
	 * In the Synopsys DWC_usb31 1.90a programming guide section
	 * 4.1.9, it specifies that for a reconnect after a
	 * device-initiated disconnect requires a core soft reset
	 * (DCTL.CSftRst) before enabling the run/stop bit.
	 */
	ret = dwc3_core_soft_reset(dwc);
	if (ret)
		return ret;

	/* re-program event buffers and restart the gadget before RunStop */
	dwc3_event_buffers_setup(dwc);
	__dwc3_gadget_start(dwc);
	return dwc3_gadget_run_stop(dwc, true);
}
/*
 * usb_gadget_ops.pullup handler: perform a soft connect (is_on != 0) or
 * soft disconnect (is_on == 0). Returns 0 on success or a negative error.
 */
static int dwc3_gadget_pullup(struct usb_gadget *g, int is_on)
{
	struct dwc3 *dwc = gadget_to_dwc(g);
	int ret;

	is_on = !!is_on;
	dwc->softconnect = is_on;
	/*
	 * Avoid issuing a runtime resume if the device is already in the
	 * suspended state during gadget disconnect. DWC3 gadget was already
	 * halted/stopped during runtime suspend.
	 */
	if (!is_on) {
		pm_runtime_barrier(dwc->dev);
		if (pm_runtime_suspended(dwc->dev))
			return 0;
	}
	/*
	 * Check the return value for successful resume, or error. For a
	 * successful resume, the DWC3 runtime PM resume routine will handle
	 * the run stop sequence, so avoid duplicate operations here.
	 */
	ret = pm_runtime_get_sync(dwc->dev);
	if (!ret || ret < 0) {
		pm_runtime_put(dwc->dev);
		if (ret < 0)
			pm_runtime_set_suspended(dwc->dev);
		return ret;
	}
	/* nothing to do when the pullup state already matches the request */
	if (dwc->pullups_connected == is_on) {
		pm_runtime_put(dwc->dev);
		return 0;
	}
	/* wait for any in-flight gadget interrupt handler to finish */
	synchronize_irq(dwc->irq_gadget);
	if (!is_on)
		ret = dwc3_gadget_soft_disconnect(dwc);
	else
		ret = dwc3_gadget_soft_connect(dwc);
	pm_runtime_put(dwc->dev);
	return ret;
}
/* Program DEVTEN with the device events the driver wants to receive. */
static void dwc3_gadget_enable_irq(struct dwc3 *dwc)
{
	u32 reg;

	/* Enable all but Start and End of Frame IRQs */
	reg = (DWC3_DEVTEN_EVNTOVERFLOWEN |
			DWC3_DEVTEN_CMDCMPLTEN |
			DWC3_DEVTEN_ERRTICERREN |
			DWC3_DEVTEN_WKUPEVTEN |
			DWC3_DEVTEN_CONNECTDONEEN |
			DWC3_DEVTEN_USBRSTEN |
			DWC3_DEVTEN_DISCONNEVTEN);
	/* link state change events only on cores older than 2.50a */
	if (DWC3_VER_IS_PRIOR(DWC3, 250A))
		reg |= DWC3_DEVTEN_ULSTCNGEN;
	/* On 2.30a and above this bit enables U3/L2-L1 Suspend Events */
	if (!DWC3_VER_IS_PRIOR(DWC3, 230A))
		reg |= DWC3_DEVTEN_U3L2L1SUSPEN;
	dwc3_writel(dwc->regs, DWC3_DEVTEN, reg);
}
/* Mask all device events by clearing DEVTEN. */
static void dwc3_gadget_disable_irq(struct dwc3 *dwc)
{
	/* mask all interrupts */
	dwc3_writel(dwc->regs, DWC3_DEVTEN, 0x00);
}
static irqreturn_t dwc3_interrupt(int irq, void *_dwc);
static irqreturn_t dwc3_thread_interrupt(int irq, void *_dwc);
/**
 * dwc3_gadget_setup_nump - calculate and initialize NUMP field of %DWC3_DCFG
 * @dwc: pointer to our context structure
 *
 * Program how many packets the controller may burst at once on OUT
 * transfers, derived from the RxFIFO capacity. Two hardware parameters
 * feed the calculation:
 *
 *  MDWIDTH    = width, in bits, of the internal memory bus
 *  RAM2_DEPTH = depth, in MDWIDTH units, of internal RAM2 (where the
 *               RxFIFO sits)
 *
 * From those:
 *
 *  RxFIFO Size = (RAM2_DEPTH * MDWIDTH / 8) - 24 - 16
 *
 * where 24 bytes are reserved for 3x SETUP packets and 16 bytes are a
 * clock domain crossing tolerance. Finally:
 *
 *  NUMP = RxFIFO Size / 1024, capped at 16.
 */
static void dwc3_gadget_setup_nump(struct dwc3 *dwc)
{
	u32 fifo_bytes;
	u32 nump;
	u32 dcfg;

	fifo_bytes = DWC3_GHWPARAMS7_RAM2_DEPTH(dwc->hwparams.hwparams7) *
			dwc3_mdwidth(dwc) / 8;
	nump = (fifo_bytes - 24 - 16) / 1024;
	if (nump > 16)
		nump = 16;

	/* update NumP */
	dcfg = dwc3_readl(dwc->regs, DWC3_DCFG);
	dcfg &= ~DWC3_DCFG_NUMP_MASK;
	dcfg |= nump << DWC3_DCFG_NUMP_SHIFT;
	dwc3_writel(dwc->regs, DWC3_DCFG, dcfg);
}
/*
 * Program the controller for gadget operation, enable both halves of
 * physical endpoint 0, arm reception of SETUP packets, and unmask device
 * interrupts. Returns 0 on success or the ep-enable error.
 */
static int __dwc3_gadget_start(struct dwc3 *dwc)
{
	struct dwc3_ep *dep;
	int ret = 0;
	u32 reg;

	/*
	 * Use IMOD if enabled via dwc->imod_interval. Otherwise, if
	 * the core supports IMOD, disable it.
	 */
	if (dwc->imod_interval) {
		dwc3_writel(dwc->regs, DWC3_DEV_IMOD(0), dwc->imod_interval);
		dwc3_writel(dwc->regs, DWC3_GEVNTCOUNT(0), DWC3_GEVNTCOUNT_EHB);
	} else if (dwc3_has_imod(dwc)) {
		dwc3_writel(dwc->regs, DWC3_DEV_IMOD(0), 0);
	}
	/*
	 * We are telling dwc3 that we want to use DCFG.NUMP as ACK TP's NUMP
	 * field instead of letting dwc3 itself calculate that automatically.
	 *
	 * This way, we maximize the chances that we'll be able to get several
	 * bursts of data without going through any sort of endpoint throttling.
	 */
	reg = dwc3_readl(dwc->regs, DWC3_GRXTHRCFG);
	if (DWC3_IP_IS(DWC3))
		reg &= ~DWC3_GRXTHRCFG_PKTCNTSEL;
	else
		reg &= ~DWC31_GRXTHRCFG_PKTCNTSEL;
	dwc3_writel(dwc->regs, DWC3_GRXTHRCFG, reg);
	dwc3_gadget_setup_nump(dwc);
	/*
	 * Currently the controller handles single stream only. So, Ignore
	 * Packet Pending bit for stream selection and don't search for another
	 * stream if the host sends Data Packet with PP=0 (for OUT direction) or
	 * ACK with NumP=0 and PP=0 (for IN direction). This slightly improves
	 * the stream performance.
	 */
	reg = dwc3_readl(dwc->regs, DWC3_DCFG);
	reg |= DWC3_DCFG_IGNSTRMPP;
	dwc3_writel(dwc->regs, DWC3_DCFG, reg);
	/* Enable MST by default if the device is capable of MST */
	if (DWC3_MST_CAPABLE(&dwc->hwparams)) {
		reg = dwc3_readl(dwc->regs, DWC3_DCFG1);
		reg &= ~DWC3_DCFG1_DIS_MST_ENH;
		dwc3_writel(dwc->regs, DWC3_DCFG1, reg);
	}
	/* Start with SuperSpeed Default */
	dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(512);
	/* physical endpoint 0: ep0 OUT */
	dep = dwc->eps[0];
	dep->flags = 0;
	ret = __dwc3_gadget_ep_enable(dep, DWC3_DEPCFG_ACTION_INIT);
	if (ret) {
		dev_err(dwc->dev, "failed to enable %s\n", dep->name);
		goto err0;
	}
	/* physical endpoint 1: ep0 IN */
	dep = dwc->eps[1];
	dep->flags = 0;
	ret = __dwc3_gadget_ep_enable(dep, DWC3_DEPCFG_ACTION_INIT);
	if (ret) {
		dev_err(dwc->dev, "failed to enable %s\n", dep->name);
		goto err1;
	}
	/* begin to receive SETUP packets */
	dwc->ep0state = EP0_SETUP_PHASE;
	dwc->ep0_bounced = false;
	dwc->link_state = DWC3_LINK_STATE_SS_DIS;
	dwc->delayed_status = false;
	dwc3_ep0_out_start(dwc);
	dwc3_gadget_enable_irq(dwc);
	return 0;
err1:
	__dwc3_gadget_ep_disable(dwc->eps[0]);
err0:
	return ret;
}
/*
 * usb_gadget_ops.udc_start handler: request the gadget IRQ and record the
 * bound gadget driver. Returns 0 or the request_threaded_irq() error.
 */
static int dwc3_gadget_start(struct usb_gadget *g,
		struct usb_gadget_driver *driver)
{
	struct dwc3 *dwc = gadget_to_dwc(g);
	unsigned long flags;
	int ret;
	int irq;

	irq = dwc->irq_gadget;
	/* IRQ is shared; ev_buf serves as the per-device cookie */
	ret = request_threaded_irq(irq, dwc3_interrupt, dwc3_thread_interrupt,
			IRQF_SHARED, "dwc3", dwc->ev_buf);
	if (ret) {
		dev_err(dwc->dev, "failed to request irq #%d --> %d\n",
				irq, ret);
		return ret;
	}
	spin_lock_irqsave(&dwc->lock, flags);
	dwc->gadget_driver = driver;
	spin_unlock_irqrestore(&dwc->lock, flags);
	return 0;
}
/* Disable device interrupts and both halves of physical endpoint 0. */
static void __dwc3_gadget_stop(struct dwc3 *dwc)
{
	dwc3_gadget_disable_irq(dwc);
	__dwc3_gadget_ep_disable(dwc->eps[0]);
	__dwc3_gadget_ep_disable(dwc->eps[1]);
}
/*
 * usb_gadget_ops.udc_stop handler: unbind the gadget driver, reset the
 * recorded endpoint count, and release the gadget IRQ. Always returns 0.
 */
static int dwc3_gadget_stop(struct usb_gadget *g)
{
	struct dwc3 *dwc = gadget_to_dwc(g);
	unsigned long flags;

	spin_lock_irqsave(&dwc->lock, flags);
	dwc->gadget_driver = NULL;
	dwc->max_cfg_eps = 0;
	spin_unlock_irqrestore(&dwc->lock, flags);
	free_irq(dwc->irq_gadget, dwc->ev_buf);
	return 0;
}
/*
 * usb_gadget_ops.get_config_params handler: report the BESL values and
 * the U1/U2 device exit latencies, honoring the quirk flags.
 */
static void dwc3_gadget_config_params(struct usb_gadget *g,
		struct usb_dcd_config_params *params)
{
	struct dwc3 *dwc = gadget_to_dwc(g);

	params->besl_baseline = USB_DEFAULT_BESL_UNSPECIFIED;
	params->besl_deep = USB_DEFAULT_BESL_UNSPECIFIED;
	/* Recommended BESL */
	if (!dwc->dis_enblslpm_quirk) {
		/*
		 * If the recommended BESL baseline is 0 or if the BESL deep is
		 * less than 2, Microsoft's Windows 10 host usb stack will issue
		 * a usb reset immediately after it receives the extended BOS
		 * descriptor and the enumeration will fail. To maintain
		 * compatibility with the Windows' usb stack, let's set the
		 * recommended BESL baseline to 1 and clamp the BESL deep to be
		 * within 2 to 15.
		 */
		params->besl_baseline = 1;
		if (dwc->is_utmi_l1_suspend)
			params->besl_deep =
				clamp_t(u8, dwc->hird_threshold, 2, 15);
	}
	/* U1 Device exit Latency */
	if (dwc->dis_u1_entry_quirk)
		params->bU1devExitLat = 0;
	else
		params->bU1devExitLat = DWC3_DEFAULT_U1_DEV_EXIT_LAT;
	/* U2 Device exit Latency */
	if (dwc->dis_u2_entry_quirk)
		params->bU2DevExitLat = 0;
	else
		params->bU2DevExitLat =
				cpu_to_le16(DWC3_DEFAULT_U2_DEV_EXIT_LAT);
}
/* usb_gadget_ops.udc_set_speed handler: record the gadget's maximum speed. */
static void dwc3_gadget_set_speed(struct usb_gadget *g,
				  enum usb_device_speed speed)
{
	struct dwc3 *dwc = gadget_to_dwc(g);
	unsigned long flags;

	spin_lock_irqsave(&dwc->lock, flags);
	dwc->gadget_max_speed = speed;
	spin_unlock_irqrestore(&dwc->lock, flags);
}
/*
 * usb_gadget_ops.udc_set_ssp_rate handler: record the requested
 * SuperSpeed Plus signaling rate (which implies max speed SSP).
 */
static void dwc3_gadget_set_ssp_rate(struct usb_gadget *g,
				     enum usb_ssp_rate rate)
{
	struct dwc3 *dwc = gadget_to_dwc(g);
	unsigned long flags;

	spin_lock_irqsave(&dwc->lock, flags);
	dwc->gadget_max_speed = USB_SPEED_SUPER_PLUS;
	dwc->gadget_ssp_rate = rate;
	spin_unlock_irqrestore(&dwc->lock, flags);
}
/*
 * usb_gadget_ops.vbus_draw handler: forward the configured current draw
 * to the USB2 PHY when present, otherwise to the power supply. Returns
 * 0 on success, -EOPNOTSUPP when neither sink exists, or the underlying
 * error code.
 */
static int dwc3_gadget_vbus_draw(struct usb_gadget *g, unsigned int mA)
{
	struct dwc3 *dwc = gadget_to_dwc(g);
	union power_supply_propval val = {0};

	/* the USB2 PHY, when present, takes precedence */
	if (dwc->usb2_phy)
		return usb_phy_set_power(dwc->usb2_phy, mA);

	if (!dwc->usb_psy)
		return -EOPNOTSUPP;

	/* power-supply property is expressed in uA */
	val.intval = 1000 * mA;
	return power_supply_set_property(dwc->usb_psy,
			POWER_SUPPLY_PROP_INPUT_CURRENT_LIMIT, &val);
}
/**
 * dwc3_gadget_check_config - ensure dwc3 can support the USB configuration
 * @g: pointer to the USB gadget
 *
 * Used to record the maximum number of endpoints being used in a USB composite
 * device. (across all configurations) This is to be used in the calculation
 * of the TXFIFO sizes when resizing internal memory for individual endpoints.
 * It will help ensured that the resizing logic reserves enough space for at
 * least one max packet.
 *
 * Return: 0 when the configuration fits (or FIFO resizing is disabled),
 * -ENOMEM when the required TxFIFO space exceeds RAM1 depth.
 */
static int dwc3_gadget_check_config(struct usb_gadget *g)
{
	struct dwc3 *dwc = gadget_to_dwc(g);
	struct usb_ep *ep;
	int fifo_size = 0;
	int ram1_depth;
	int ep_num = 0;

	if (!dwc->do_fifo_resize)
		return 0;
	list_for_each_entry(ep, &g->ep_list, ep_list) {
		/* Only interested in the IN endpoints */
		if (ep->claimed && (ep->address & USB_DIR_IN))
			ep_num++;
	}
	/* a smaller composition cannot raise the recorded maximum */
	if (ep_num <= dwc->max_cfg_eps)
		return 0;
	/* Update the max number of eps in the composition */
	dwc->max_cfg_eps = ep_num;
	fifo_size = dwc3_gadget_calc_tx_fifo_size(dwc, dwc->max_cfg_eps);
	/* Based on the equation, increment by one for every ep */
	fifo_size += dwc->max_cfg_eps;
	/* Check if we can fit a single fifo per endpoint */
	ram1_depth = DWC3_RAM1_DEPTH(dwc->hwparams.hwparams7);
	if (fifo_size > ram1_depth)
		return -ENOMEM;
	return 0;
}
/*
 * usb_gadget_ops.udc_async_callbacks handler: gate whether the driver may
 * invoke gadget driver callbacks from interrupt context.
 */
static void dwc3_gadget_async_callbacks(struct usb_gadget *g, bool enable)
{
	struct dwc3 *dwc = gadget_to_dwc(g);
	unsigned long flags;

	spin_lock_irqsave(&dwc->lock, flags);
	dwc->async_callbacks = enable;
	spin_unlock_irqrestore(&dwc->lock, flags);
}
/* usb_gadget_ops vtable wiring the UDC core to this driver's handlers. */
static const struct usb_gadget_ops dwc3_gadget_ops = {
	.get_frame		= dwc3_gadget_get_frame,
	.wakeup			= dwc3_gadget_wakeup,
	.func_wakeup		= dwc3_gadget_func_wakeup,
	.set_remote_wakeup	= dwc3_gadget_set_remote_wakeup,
	.set_selfpowered	= dwc3_gadget_set_selfpowered,
	.pullup			= dwc3_gadget_pullup,
	.udc_start		= dwc3_gadget_start,
	.udc_stop		= dwc3_gadget_stop,
	.udc_set_speed		= dwc3_gadget_set_speed,
	.udc_set_ssp_rate	= dwc3_gadget_set_ssp_rate,
	.get_config_params	= dwc3_gadget_config_params,
	.vbus_draw		= dwc3_gadget_vbus_draw,
	.check_config		= dwc3_gadget_check_config,
	.udc_async_callbacks	= dwc3_gadget_async_callbacks,
};
/* -------------------------------------------------------------------------- */
/*
 * Initialize one half of physical endpoint 0. Called for both directions;
 * only the OUT half is exposed as gadget->ep0. Always returns 0.
 */
static int dwc3_gadget_init_control_endpoint(struct dwc3_ep *dep)
{
	struct dwc3 *dwc = dep->dwc;

	/* 512 matches the SuperSpeed default used in __dwc3_gadget_start() */
	usb_ep_set_maxpacket_limit(&dep->endpoint, 512);
	dep->endpoint.maxburst = 1;
	dep->endpoint.ops = &dwc3_gadget_ep0_ops;
	if (!dep->direction)
		dwc->gadget->ep0 = &dep->endpoint;
	dep->endpoint.caps.type_control = true;
	return 0;
}
/*
 * Initialize a non-control IN endpoint: derive its max packet limit from
 * the endpoint's TxFIFO depth and allocate its TRB pool. Returns 0 on
 * success or the TRB-pool allocation error.
 */
static int dwc3_gadget_init_in_endpoint(struct dwc3_ep *dep)
{
	struct dwc3 *dwc = dep->dwc;
	u32 mdwidth;
	int size;
	int maxpacket;

	mdwidth = dwc3_mdwidth(dwc);
	/* MDWIDTH is represented in bits, we need it in bytes */
	mdwidth /= 8;
	size = dwc3_readl(dwc->regs, DWC3_GTXFIFOSIZ(dep->number >> 1));
	if (DWC3_IP_IS(DWC3))
		size = DWC3_GTXFIFOSIZ_TXFDEP(size);
	else
		size = DWC31_GTXFIFOSIZ_TXFDEP(size);
	/*
	 * maxpacket size is determined as part of the following, after assuming
	 * a mult value of one maxpacket:
	 * DWC3 revision 280A and prior:
	 * fifo_size = mult * (max_packet / mdwidth) + 1;
	 * maxpacket = mdwidth * (fifo_size - 1);
	 *
	 * DWC3 revision 290A and onwards:
	 * fifo_size = mult * ((max_packet + mdwidth)/mdwidth + 1) + 1
	 * maxpacket = mdwidth * ((fifo_size - 1) - 1) - mdwidth;
	 */
	if (DWC3_VER_IS_PRIOR(DWC3, 290A))
		maxpacket = mdwidth * (size - 1);
	else
		maxpacket = mdwidth * ((size - 1) - 1) - mdwidth;
	/* Functionally, space for one max packet is sufficient */
	size = min_t(int, maxpacket, 1024);
	usb_ep_set_maxpacket_limit(&dep->endpoint, size);
	dep->endpoint.max_streams = 16;
	dep->endpoint.ops = &dwc3_gadget_ep_ops;
	list_add_tail(&dep->endpoint.ep_list,
			&dwc->gadget->ep_list);
	/* IN endpoints may serve isoc, bulk or interrupt transfers */
	dep->endpoint.caps.type_iso = true;
	dep->endpoint.caps.type_bulk = true;
	dep->endpoint.caps.type_int = true;
	return dwc3_alloc_trb_pool(dep);
}
/*
 * Initialize a non-control OUT endpoint: derive its max packet limit from
 * the shared RxFIFO depth and allocate its TRB pool. Returns 0 on success
 * or the TRB-pool allocation error.
 */
static int dwc3_gadget_init_out_endpoint(struct dwc3_ep *dep)
{
	struct dwc3 *dwc = dep->dwc;
	u32 mdwidth;
	int size;

	mdwidth = dwc3_mdwidth(dwc);
	/* MDWIDTH is represented in bits, convert to bytes */
	mdwidth /= 8;
	/* All OUT endpoints share a single RxFIFO space */
	size = dwc3_readl(dwc->regs, DWC3_GRXFIFOSIZ(0));
	if (DWC3_IP_IS(DWC3))
		size = DWC3_GRXFIFOSIZ_RXFDEP(size);
	else
		size = DWC31_GRXFIFOSIZ_RXFDEP(size);
	/* FIFO depth is in MDWDITH bytes */
	size *= mdwidth;
	/*
	 * To meet performance requirement, a minimum recommended RxFIFO size
	 * is defined as follow:
	 * RxFIFO size >= (3 x MaxPacketSize) +
	 * (3 x 8 bytes setup packets size) + (16 bytes clock crossing margin)
	 *
	 * Then calculate the max packet limit as below.
	 */
	size -= (3 * 8) + 16;
	if (size < 0)
		size = 0;
	else
		size /= 3;
	usb_ep_set_maxpacket_limit(&dep->endpoint, size);
	dep->endpoint.max_streams = 16;
	dep->endpoint.ops = &dwc3_gadget_ep_ops;
	list_add_tail(&dep->endpoint.ep_list,
			&dwc->gadget->ep_list);
	/* OUT endpoints may serve isoc, bulk or interrupt transfers */
	dep->endpoint.caps.type_iso = true;
	dep->endpoint.caps.type_bulk = true;
	dep->endpoint.caps.type_int = true;
	return dwc3_alloc_trb_pool(dep);
}
/*
 * Allocate and initialize one physical endpoint. Even @epnum values are
 * OUT endpoints, odd values are IN. Returns 0 on success or a negative
 * error code (-ENOMEM, or the type-specific init error).
 */
static int dwc3_gadget_init_endpoint(struct dwc3 *dwc, u8 epnum)
{
	struct dwc3_ep *dep;
	bool is_in = epnum & 1;
	u8 num = epnum >> 1;
	int ret;

	dep = kzalloc(sizeof(*dep), GFP_KERNEL);
	if (!dep)
		return -ENOMEM;

	dep->dwc = dwc;
	dep->number = epnum;
	dep->direction = is_in;
	dep->regs = dwc->regs + DWC3_DEP_BASE(epnum);
	dwc->eps[epnum] = dep;
	dep->combo_num = 0;
	dep->start_cmd_status = 0;

	snprintf(dep->name, sizeof(dep->name), "ep%u%s", num,
			is_in ? "in" : "out");
	dep->endpoint.name = dep->name;

	/* physical endpoints 0 and 1 together form USB control endpoint 0 */
	if (dep->number <= 1) {
		dep->endpoint.desc = &dwc3_gadget_ep0_desc;
		dep->endpoint.comp_desc = NULL;
	}

	if (num == 0)
		ret = dwc3_gadget_init_control_endpoint(dep);
	else if (is_in)
		ret = dwc3_gadget_init_in_endpoint(dep);
	else
		ret = dwc3_gadget_init_out_endpoint(dep);
	if (ret)
		return ret;

	dep->endpoint.caps.dir_in = is_in;
	dep->endpoint.caps.dir_out = !is_in;

	INIT_LIST_HEAD(&dep->pending_list);
	INIT_LIST_HEAD(&dep->started_list);
	INIT_LIST_HEAD(&dep->cancelled_list);

	dwc3_debugfs_create_endpoint_dir(dep);

	return 0;
}
/*
 * Allocate and initialize @total physical endpoints, populating the
 * gadget's endpoint list. Returns 0 or the first per-endpoint error.
 */
static int dwc3_gadget_init_endpoints(struct dwc3 *dwc, u8 total)
{
	u8 epnum;
	int ret;

	INIT_LIST_HEAD(&dwc->gadget->ep_list);

	for (epnum = 0; epnum < total; epnum++) {
		ret = dwc3_gadget_init_endpoint(dwc, epnum);
		if (ret)
			return ret;
	}

	return 0;
}
/* Tear down everything allocated by dwc3_gadget_init_endpoints(). */
static void dwc3_gadget_free_endpoints(struct dwc3 *dwc)
{
	struct dwc3_ep *dep;
	u8 epnum;

	for (epnum = 0; epnum < DWC3_ENDPOINTS_NUM; epnum++) {
		dep = dwc->eps[epnum];
		if (!dep)
			continue;
		/*
		 * Physical endpoints 0 and 1 are special; they form the
		 * bi-directional USB endpoint 0.
		 *
		 * For those two physical endpoints, we don't allocate a TRB
		 * pool nor do we add them the endpoints list. Due to that, we
		 * shouldn't do these two operations otherwise we would end up
		 * with all sorts of bugs when removing dwc3.ko.
		 */
		if (epnum != 0 && epnum != 1) {
			dwc3_free_trb_pool(dep);
			list_del(&dep->endpoint.ep_list);
		}
		dwc3_debugfs_remove_endpoint_dir(dep);
		kfree(dep);
	}
}
/* -------------------------------------------------------------------------- */
/*
 * Account one completed TRB against @req: advance the dequeue pointer and
 * add the TRB's residual byte count to req->remaining.
 *
 * Returns non-zero when processing of this request's TRBs must stop
 * (bounce-buffer TRB, HWO still set, short packet on the last TRB of a
 * chain, missed isoc, or a TRB with IOC/LST set); returns 0 to continue
 * reclaiming subsequent TRBs.
 */
static int dwc3_gadget_ep_reclaim_completed_trb(struct dwc3_ep *dep,
		struct dwc3_request *req, struct dwc3_trb *trb,
		const struct dwc3_event_depevt *event, int status, int chain)
{
	unsigned int count;

	dwc3_ep_inc_deq(dep);
	trace_dwc3_complete_trb(dep, trb);
	req->num_trbs--;
	/*
	 * If we're in the middle of series of chained TRBs and we
	 * receive a short transfer along the way, DWC3 will skip
	 * through all TRBs including the last TRB in the chain (the
	 * where CHN bit is zero. DWC3 will also avoid clearing HWO
	 * bit and SW has to do it manually.
	 *
	 * We're going to do that here to avoid problems of HW trying
	 * to use bogus TRBs for transfers.
	 */
	if (chain && (trb->ctrl & DWC3_TRB_CTRL_HWO))
		trb->ctrl &= ~DWC3_TRB_CTRL_HWO;
	/*
	 * For isochronous transfers, the first TRB in a service interval must
	 * have the Isoc-First type. Track and report its interval frame number.
	 */
	if (usb_endpoint_xfer_isoc(dep->endpoint.desc) &&
	    (trb->ctrl & DWC3_TRBCTL_ISOCHRONOUS_FIRST)) {
		unsigned int frame_number;

		frame_number = DWC3_TRB_CTRL_GET_SID_SOFN(trb->ctrl);
		frame_number &= ~(dep->interval - 1);
		req->request.frame_number = frame_number;
	}
	/*
	 * We use bounce buffer for requests that needs extra TRB or OUT ZLP. If
	 * this TRB points to the bounce buffer address, it's a MPS alignment
	 * TRB. Don't add it to req->remaining calculation.
	 */
	if (trb->bpl == lower_32_bits(dep->dwc->bounce_addr) &&
	    trb->bph == upper_32_bits(dep->dwc->bounce_addr)) {
		trb->ctrl &= ~DWC3_TRB_CTRL_HWO;
		return 1;
	}
	count = trb->size & DWC3_TRB_SIZE_MASK;
	req->remaining += count;
	/* hardware still owns this TRB; stop unless we're shutting down */
	if ((trb->ctrl & DWC3_TRB_CTRL_HWO) && status != -ESHUTDOWN)
		return 1;
	if (event->status & DEPEVT_STATUS_SHORT && !chain)
		return 1;
	if ((trb->ctrl & DWC3_TRB_CTRL_ISP_IMI) &&
	    DWC3_TRB_SIZE_TRBSTS(trb->size) == DWC3_TRBSTS_MISSED_ISOC)
		return 1;
	if ((trb->ctrl & DWC3_TRB_CTRL_IOC) ||
	    (trb->ctrl & DWC3_TRB_CTRL_LST))
		return 1;
	return 0;
}
/*
 * Reclaim the completed TRBs backing a scatter-gather request.
 *
 * Walks the queued scatterlist entries in dequeue order, reclaiming one
 * TRB per entry, until dwc3_gadget_ep_reclaim_completed_trb() reports
 * that processing must stop or all queued entries are consumed.
 *
 * Returns the last value from dwc3_gadget_ep_reclaim_completed_trb():
 * non-zero when no further TRBs of this request should be processed.
 */
static int dwc3_gadget_ep_reclaim_trb_sg(struct dwc3_ep *dep,
		struct dwc3_request *req, const struct dwc3_event_depevt *event,
		int status)
{
	/*
	 * No initializer: trb is assigned from the current dequeue position
	 * on every loop iteration, so a value here would be dead code.
	 */
	struct dwc3_trb *trb;
	struct scatterlist *sg = req->sg;
	struct scatterlist *s;
	unsigned int num_queued = req->num_queued_sgs;
	unsigned int i;
	int ret = 0;

	for_each_sg(sg, s, num_queued, i) {
		trb = &dep->trb_pool[dep->trb_dequeue];

		/* advance the request's scatterlist cursor */
		req->sg = sg_next(s);
		req->num_queued_sgs--;

		ret = dwc3_gadget_ep_reclaim_completed_trb(dep, req,
				trb, event, status, true);
		if (ret)
			break;
	}

	return ret;
}
/*
 * Reclaim the single TRB at the dequeue pointer for a linear (non-sg)
 * request. Returns the reclaim helper's stop/continue indication.
 */
static int dwc3_gadget_ep_reclaim_trb_linear(struct dwc3_ep *dep,
		struct dwc3_request *req, const struct dwc3_event_depevt *event,
		int status)
{
	struct dwc3_trb *trb = &dep->trb_pool[dep->trb_dequeue];

	return dwc3_gadget_ep_reclaim_completed_trb(dep, req, trb,
			event, status, false);
}
/* A request is complete once no scatterlist entries remain pending or queued. */
static bool dwc3_gadget_ep_request_completed(struct dwc3_request *req)
{
	return !(req->num_pending_sgs || req->num_queued_sgs);
}
/*
 * Reclaim all completed TRBs of @req, update request.actual, and — if the
 * request has fully completed — give it back to the gadget driver with an
 * appropriate status. Returns non-zero when TRB processing for this
 * endpoint should stop.
 */
static int dwc3_gadget_ep_cleanup_completed_request(struct dwc3_ep *dep,
		const struct dwc3_event_depevt *event,
		struct dwc3_request *req, int status)
{
	int request_status;
	int ret;

	if (req->request.num_mapped_sgs)
		ret = dwc3_gadget_ep_reclaim_trb_sg(dep, req, event,
				status);
	else
		ret = dwc3_gadget_ep_reclaim_trb_linear(dep, req, event,
				status);
	req->request.actual = req->request.length - req->remaining;
	if (!dwc3_gadget_ep_request_completed(req))
		goto out;
	/* also reclaim the extra (MPS-alignment/ZLP) TRB, if one was queued */
	if (req->needs_extra_trb) {
		ret = dwc3_gadget_ep_reclaim_trb_linear(dep, req, event,
				status);
		req->needs_extra_trb = false;
	}
	/*
	 * The event status only reflects the status of the TRB with IOC set.
	 * For the requests that don't set interrupt on completion, the driver
	 * needs to check and return the status of the completed TRBs associated
	 * with the request. Use the status of the last TRB of the request.
	 */
	if (req->request.no_interrupt) {
		struct dwc3_trb *trb;

		trb = dwc3_ep_prev_trb(dep, dep->trb_dequeue);
		switch (DWC3_TRB_SIZE_TRBSTS(trb->size)) {
		case DWC3_TRBSTS_MISSED_ISOC:
			/* Isoc endpoint only */
			request_status = -EXDEV;
			break;
		case DWC3_TRB_STS_XFER_IN_PROG:
			/* Applicable when End Transfer with ForceRM=0 */
		case DWC3_TRBSTS_SETUP_PENDING:
			/* Control endpoint only */
		case DWC3_TRBSTS_OK:
		default:
			request_status = 0;
			break;
		}
	} else {
		request_status = status;
	}
	dwc3_gadget_giveback(dep, req, request_status);
out:
	return ret;
}
/* Walk the started list, giving back every request that has completed. */
static void dwc3_gadget_ep_cleanup_completed_requests(struct dwc3_ep *dep,
		const struct dwc3_event_depevt *event, int status)
{
	struct dwc3_request *req;

	while (!list_empty(&dep->started_list)) {
		int ret;

		req = next_request(&dep->started_list);
		ret = dwc3_gadget_ep_cleanup_completed_request(dep, event,
				req, status);
		/* non-zero means no further TRBs should be processed */
		if (ret)
			break;
		/*
		 * The endpoint is disabled, let the dwc3_remove_requests()
		 * handle the cleanup.
		 */
		if (!dep->endpoint.desc)
			break;
	}
}
/*
 * Decide whether a transfer should be (re)kicked on @dep: only while the
 * endpoint is enabled, pullups are on and we're connected, and only if
 * requests are pending or the first started request is not yet complete.
 */
static bool dwc3_gadget_ep_should_continue(struct dwc3_ep *dep)
{
	struct dwc3_request *req;
	struct dwc3 *dwc = dep->dwc;

	if (!dep->endpoint.desc || !dwc->pullups_connected ||
	    !dwc->connected)
		return false;
	if (!list_empty(&dep->pending_list))
		return true;
	/*
	 * We only need to check the first entry of the started list. We can
	 * assume the completed requests are removed from the started list.
	 */
	req = next_request(&dep->started_list);
	if (!req)
		return false;
	return !dwc3_gadget_ep_request_completed(req);
}
/* Record the frame number carried in the endpoint event's parameters. */
static void dwc3_gadget_endpoint_frame_from_event(struct dwc3_ep *dep,
		const struct dwc3_event_depevt *event)
{
	dep->frame_number = event->parameters;
}
/*
 * Post-process TRB completion on @dep: give back finished requests, then
 * either stop the active transfer (drained isoc) or kick a new one, and
 * apply the second half of the pre-1.83a U1/U2 workaround. Returns true
 * when no newly-started TRBs are outstanding.
 */
static bool dwc3_gadget_endpoint_trbs_complete(struct dwc3_ep *dep,
		const struct dwc3_event_depevt *event, int status)
{
	struct dwc3 *dwc = dep->dwc;
	bool no_started_trb = true;

	dwc3_gadget_ep_cleanup_completed_requests(dep, event, status);
	if (dep->flags & DWC3_EP_END_TRANSFER_PENDING)
		goto out;
	/* endpoint got disabled during cleanup */
	if (!dep->endpoint.desc)
		return no_started_trb;
	if (usb_endpoint_xfer_isoc(dep->endpoint.desc) &&
		list_empty(&dep->started_list) &&
		(list_empty(&dep->pending_list) || status == -EXDEV))
		dwc3_stop_active_transfer(dep, true, true);
	else if (dwc3_gadget_ep_should_continue(dep))
		if (__dwc3_gadget_kick_transfer(dep) == 0)
			no_started_trb = false;
out:
	/*
	 * WORKAROUND: This is the 2nd half of U1/U2 -> U0 workaround.
	 * See dwc3_gadget_linksts_change_interrupt() for 1st half.
	 */
	if (DWC3_VER_IS_PRIOR(DWC3, 183A)) {
		u32 reg;
		int i;

		/* only re-enable U1/U2 once every endpoint is idle */
		for (i = 0; i < DWC3_ENDPOINTS_NUM; i++) {
			dep = dwc->eps[i];
			if (!(dep->flags & DWC3_EP_ENABLED))
				continue;
			if (!list_empty(&dep->started_list))
				return no_started_trb;
		}
		reg = dwc3_readl(dwc->regs, DWC3_DCTL);
		reg |= dwc->u1u2;
		dwc3_writel(dwc->regs, DWC3_DCTL, reg);
		dwc->u1u2 = 0;
	}
	return no_started_trb;
}
/*
 * Handle the XferInProgress endpoint event: translate the event status
 * into a request status and complete the finished TRBs.
 */
static void dwc3_gadget_endpoint_transfer_in_progress(struct dwc3_ep *dep,
		const struct dwc3_event_depevt *event)
{
	int status = 0;

	/* endpoint may have been disabled meanwhile */
	if (!dep->endpoint.desc)
		return;
	if (usb_endpoint_xfer_isoc(dep->endpoint.desc))
		dwc3_gadget_endpoint_frame_from_event(dep, event);
	if (event->status & DEPEVT_STATUS_BUSERR)
		status = -ECONNRESET;
	if (event->status & DEPEVT_STATUS_MISSED_ISOC)
		status = -EXDEV;
	dwc3_gadget_endpoint_trbs_complete(dep, event, status);
}
/*
 * Handle the XferComplete endpoint event: the transfer is no longer
 * started; complete the finished TRBs and, when nothing remains started,
 * clear the wait-for-transfer-complete flag.
 */
static void dwc3_gadget_endpoint_transfer_complete(struct dwc3_ep *dep,
		const struct dwc3_event_depevt *event)
{
	int status = 0;

	dep->flags &= ~DWC3_EP_TRANSFER_STARTED;
	if (event->status & DEPEVT_STATUS_BUSERR)
		status = -ECONNRESET;
	if (dwc3_gadget_endpoint_trbs_complete(dep, event, status))
		dep->flags &= ~DWC3_EP_WAIT_TRANSFER_COMPLETE;
}
/*
 * Handle the XferNotReady endpoint event: record the frame number and
 * start the isochronous transfer.
 */
static void dwc3_gadget_endpoint_transfer_not_ready(struct dwc3_ep *dep,
		const struct dwc3_event_depevt *event)
{
	dwc3_gadget_endpoint_frame_from_event(dep, event);
	/*
	 * The XferNotReady event is generated only once before the endpoint
	 * starts. It will be generated again when END_TRANSFER command is
	 * issued. For some controller versions, the XferNotReady event may be
	 * generated while the END_TRANSFER command is still in process. Ignore
	 * it and wait for the next XferNotReady event after the command is
	 * completed.
	 */
	if (dep->flags & DWC3_EP_END_TRANSFER_PENDING)
		return;
	(void) __dwc3_gadget_start_isoc(dep);
}
/*
 * Handle the Endpoint Command Complete event. Only End Transfer
 * completions need work here: clear transfer state, clean up cancelled
 * requests, finish a deferred Clear Stall if one is pending, and kick a
 * delayed (non-isoc) transfer start.
 */
static void dwc3_gadget_endpoint_command_complete(struct dwc3_ep *dep,
		const struct dwc3_event_depevt *event)
{
	u8 cmd = DEPEVT_PARAMETER_CMD(event->parameters);

	if (cmd != DWC3_DEPCMD_ENDTRANSFER)
		return;
	/*
	 * The END_TRANSFER command will cause the controller to generate a
	 * NoStream Event, and it's not due to the host DP NoStream rejection.
	 * Ignore the next NoStream event.
	 */
	if (dep->stream_capable)
		dep->flags |= DWC3_EP_IGNORE_NEXT_NOSTREAM;
	dep->flags &= ~DWC3_EP_END_TRANSFER_PENDING;
	dep->flags &= ~DWC3_EP_TRANSFER_STARTED;
	dwc3_gadget_ep_cleanup_cancelled_requests(dep);
	if (dep->flags & DWC3_EP_PENDING_CLEAR_STALL) {
		struct dwc3 *dwc = dep->dwc;

		dep->flags &= ~DWC3_EP_PENDING_CLEAR_STALL;
		if (dwc3_send_clear_stall_ep_cmd(dep)) {
			struct usb_ep *ep0 = &dwc->eps[0]->endpoint;

			dev_err(dwc->dev, "failed to clear STALL on %s\n", dep->name);
			if (dwc->delayed_status)
				__dwc3_gadget_ep0_set_halt(ep0, 1);
			return;
		}
		dep->flags &= ~(DWC3_EP_STALL | DWC3_EP_WEDGE);
		if (dwc->clear_stall_protocol == dep->number)
			dwc3_ep0_send_delayed_status(dwc);
	}
	if ((dep->flags & DWC3_EP_DELAY_START) &&
	    !usb_endpoint_xfer_isoc(dep->endpoint.desc))
		__dwc3_gadget_kick_transfer(dep);
	dep->flags &= ~DWC3_EP_DELAY_START;
}
/*
 * Handle stream events on a stream-capable endpoint: track stream-found /
 * prime state, and work around hosts that never re-prime after a NoStream
 * rejection by forcing the device to reinitiate the stream.
 */
static void dwc3_gadget_endpoint_stream_event(struct dwc3_ep *dep,
		const struct dwc3_event_depevt *event)
{
	struct dwc3 *dwc = dep->dwc;

	if (event->status == DEPEVT_STREAMEVT_FOUND) {
		dep->flags |= DWC3_EP_FIRST_STREAM_PRIMED;
		goto out;
	}
	/* Note: NoStream rejection event param value is 0 and not 0xFFFF */
	switch (event->parameters) {
	case DEPEVT_STREAM_PRIME:
		/*
		 * If the host can properly transition the endpoint state from
		 * idle to prime after a NoStream rejection, there's no need to
		 * force restarting the endpoint to reinitiate the stream. To
		 * simplify the check, assume the host follows the USB spec if
		 * it primed the endpoint more than once.
		 */
		if (dep->flags & DWC3_EP_FORCE_RESTART_STREAM) {
			if (dep->flags & DWC3_EP_FIRST_STREAM_PRIMED)
				dep->flags &= ~DWC3_EP_FORCE_RESTART_STREAM;
			else
				dep->flags |= DWC3_EP_FIRST_STREAM_PRIMED;
		}
		break;
	case DEPEVT_STREAM_NOSTREAM:
		if ((dep->flags & DWC3_EP_IGNORE_NEXT_NOSTREAM) ||
		    !(dep->flags & DWC3_EP_FORCE_RESTART_STREAM) ||
		    (!DWC3_MST_CAPABLE(&dwc->hwparams) &&
		     !(dep->flags & DWC3_EP_WAIT_TRANSFER_COMPLETE)))
			break;
		/*
		 * If the host rejects a stream due to no active stream, by the
		 * USB and xHCI spec, the endpoint will be put back to idle
		 * state. When the host is ready (buffer added/updated), it will
		 * prime the endpoint to inform the usb device controller. This
		 * triggers the device controller to issue ERDY to restart the
		 * stream. However, some hosts don't follow this and keep the
		 * endpoint in the idle state. No prime will come despite host
		 * streams are updated, and the device controller will not be
		 * triggered to generate ERDY to move the next stream data. To
		 * workaround this and maintain compatibility with various
		 * hosts, force to reinitiate the stream until the host is ready
		 * instead of waiting for the host to prime the endpoint.
		 */
		if (DWC3_VER_IS_WITHIN(DWC32, 100A, ANY)) {
			unsigned int cmd = DWC3_DGCMD_SET_ENDPOINT_PRIME;

			dwc3_send_gadget_generic_command(dwc, cmd, dep->number);
		} else {
			dep->flags |= DWC3_EP_DELAY_START;
			dwc3_stop_active_transfer(dep, true, true);
			return;
		}
		break;
	}
out:
	dep->flags &= ~DWC3_EP_IGNORE_NEXT_NOSTREAM;
}
/*
 * Dispatch one endpoint event to its handler. Events for disabled
 * endpoints are mostly ignored (command completions and EP0 transfer
 * completions excepted); physical endpoints 0/1 are routed to the ep0
 * handler.
 */
static void dwc3_endpoint_interrupt(struct dwc3 *dwc,
		const struct dwc3_event_depevt *event)
{
	struct dwc3_ep *dep;
	u8 epnum = event->endpoint_number;

	dep = dwc->eps[epnum];
	if (!(dep->flags & DWC3_EP_ENABLED)) {
		if ((epnum > 1) && !(dep->flags & DWC3_EP_TRANSFER_STARTED))
			return;
		/* Handle only EPCMDCMPLT when EP disabled */
		if ((event->endpoint_event != DWC3_DEPEVT_EPCMDCMPLT) &&
			!(epnum <= 1 && event->endpoint_event == DWC3_DEPEVT_XFERCOMPLETE))
			return;
	}
	if (epnum == 0 || epnum == 1) {
		dwc3_ep0_interrupt(dwc, event);
		return;
	}
	switch (event->endpoint_event) {
	case DWC3_DEPEVT_XFERINPROGRESS:
		dwc3_gadget_endpoint_transfer_in_progress(dep, event);
		break;
	case DWC3_DEPEVT_XFERNOTREADY:
		dwc3_gadget_endpoint_transfer_not_ready(dep, event);
		break;
	case DWC3_DEPEVT_EPCMDCMPLT:
		dwc3_gadget_endpoint_command_complete(dep, event);
		break;
	case DWC3_DEPEVT_XFERCOMPLETE:
		dwc3_gadget_endpoint_transfer_complete(dep, event);
		break;
	case DWC3_DEPEVT_STREAMEVT:
		dwc3_gadget_endpoint_stream_event(dep, event);
		break;
	case DWC3_DEPEVT_RXTXFIFOEVT:
		/* intentionally ignored */
		break;
	default:
		dev_err(dwc->dev, "unknown endpoint event %d\n", event->endpoint_event);
		break;
	}
}
/*
 * Notify the gadget driver of a disconnect, if async callbacks are
 * enabled, dropping dwc->lock around the callback.
 */
static void dwc3_disconnect_gadget(struct dwc3 *dwc)
{
	if (dwc->async_callbacks && dwc->gadget_driver->disconnect) {
		spin_unlock(&dwc->lock);
		dwc->gadget_driver->disconnect(dwc->gadget);
		spin_lock(&dwc->lock);
	}
}
/*
 * Notify the gadget driver of a bus suspend, if async callbacks are
 * enabled, dropping dwc->lock around the callback.
 */
static void dwc3_suspend_gadget(struct dwc3 *dwc)
{
	if (dwc->async_callbacks && dwc->gadget_driver->suspend) {
		spin_unlock(&dwc->lock);
		dwc->gadget_driver->suspend(dwc->gadget);
		spin_lock(&dwc->lock);
	}
}
/*
 * Notify the gadget driver of a bus resume, if async callbacks are
 * enabled, dropping dwc->lock around the callback.
 */
static void dwc3_resume_gadget(struct dwc3 *dwc)
{
	if (dwc->async_callbacks && dwc->gadget_driver->resume) {
		spin_unlock(&dwc->lock);
		dwc->gadget_driver->resume(dwc->gadget);
		spin_lock(&dwc->lock);
	}
}
/*
 * Notify the UDC core of a bus reset (which resets the gadget driver),
 * dropping dwc->lock around the call. No-op if no driver is bound or the
 * gadget was never connected.
 */
static void dwc3_reset_gadget(struct dwc3 *dwc)
{
	if (!dwc->gadget_driver)
		return;
	if (dwc->async_callbacks && dwc->gadget->speed != USB_SPEED_UNKNOWN) {
		spin_unlock(&dwc->lock);
		usb_gadget_udc_reset(dwc->gadget, dwc->gadget_driver);
		spin_lock(&dwc->lock);
	}
}
/*
 * dwc3_stop_active_transfer - issue End Transfer for @dep when it is safe
 * @dep: endpoint whose transfer should be ended
 * @force: passed through to __dwc3_stop_active_transfer()
 * @interrupt: passed through to __dwc3_stop_active_transfer()
 *
 * Skips endpoints with nothing started or an End Transfer already in
 * flight, and may defer the command (DWC3_EP_DELAY_STOP) while a Setup
 * packet has not yet been fetched by DMA.
 */
void dwc3_stop_active_transfer(struct dwc3_ep *dep, bool force,
	bool interrupt)
{
	struct dwc3 *dwc = dep->dwc;

	/*
	 * Only issue End Transfer command to the control endpoint of a started
	 * Data Phase. Typically we should only do so in error cases such as
	 * invalid/unexpected direction as described in the control transfer
	 * flow of the programming guide.
	 */
	if (dep->number <= 1 && dwc->ep0state != EP0_DATA_PHASE)
		return;
	if (interrupt && (dep->flags & DWC3_EP_DELAY_STOP))
		return;
	if (!(dep->flags & DWC3_EP_TRANSFER_STARTED) ||
	    (dep->flags & DWC3_EP_END_TRANSFER_PENDING))
		return;
	/*
	 * If a Setup packet is received but yet to DMA out, the controller will
	 * not process the End Transfer command of any endpoint. Polling of its
	 * DEPCMD.CmdAct may block setting up TRB for Setup packet, causing a
	 * timeout. Delay issuing the End Transfer command until the Setup TRB is
	 * prepared.
	 */
	if (dwc->ep0state != EP0_SETUP_PHASE && !dwc->delayed_status) {
		dep->flags |= DWC3_EP_DELAY_STOP;
		return;
	}
	/*
	 * NOTICE: We are violating what the Databook says about the
	 * EndTransfer command. Ideally we would _always_ wait for the
	 * EndTransfer Command Completion IRQ, but that's causing too
	 * much trouble synchronizing between us and gadget driver.
	 *
	 * We have discussed this with the IP Provider and it was
	 * suggested to giveback all requests here.
	 *
	 * Note also that a similar handling was tested by Synopsys
	 * (thanks a lot Paul) and nothing bad has come out of it.
	 * In short, what we're doing is issuing EndTransfer with
	 * CMDIOC bit set and delay kicking transfer until the
	 * EndTransfer command had completed.
	 *
	 * As of IP version 3.10a of the DWC_usb3 IP, the controller
	 * supports a mode to work around the above limitation. The
	 * software can poll the CMDACT bit in the DEPCMD register
	 * after issuing a EndTransfer command. This mode is enabled
	 * by writing GUCTL2[14]. This polling is already done in the
	 * dwc3_send_gadget_ep_cmd() function so if the mode is
	 * enabled, the EndTransfer command will have completed upon
	 * returning from this function.
	 *
	 * This mode is NOT available on the DWC_usb31 IP. In this
	 * case, if the IOC bit is not set, then delay by 1ms
	 * after issuing the EndTransfer command. This allows for the
	 * controller to handle the command completely before DWC3
	 * remove requests attempts to unmap USB request buffers.
	 */
	__dwc3_stop_active_transfer(dep, force, interrupt);
}
/*
 * Clear the STALL state on every endpoint (except physical endpoint 0)
 * that currently carries it, issuing a Clear Stall command per endpoint.
 */
static void dwc3_clear_stall_all_ep(struct dwc3 *dwc)
{
	u32 epnum;

	for (epnum = 1; epnum < DWC3_ENDPOINTS_NUM; epnum++) {
		struct dwc3_ep *dep = dwc->eps[epnum];
		int ret;

		if (!dep || !(dep->flags & DWC3_EP_STALL))
			continue;

		dep->flags &= ~DWC3_EP_STALL;

		ret = dwc3_send_clear_stall_ep_cmd(dep);
		WARN_ON_ONCE(ret);
	}
}
/*
 * dwc3_gadget_disconnect_interrupt - handle a Disconnect device event
 * @dwc: controller context
 *
 * Moves the link to RX_DET, stops initiating U1/U2, notifies the gadget
 * driver of the disconnect and resets per-connection state, finally
 * reporting USB_STATE_NOTATTACHED and putting EP0 back into SETUP phase.
 */
static void dwc3_gadget_disconnect_interrupt(struct dwc3 *dwc)
{
	int reg;
	dwc->suspended = false;
	dwc3_gadget_set_link_state(dwc, DWC3_LINK_STATE_RX_DET);
	/* Stop initiating U1/U2 transitions while disconnected */
	reg = dwc3_readl(dwc->regs, DWC3_DCTL);
	reg &= ~DWC3_DCTL_INITU1ENA;
	reg &= ~DWC3_DCTL_INITU2ENA;
	dwc3_gadget_dctl_write_safe(dwc, reg);
	dwc->connected = false;
	/* Tell the gadget driver the cable/session is gone */
	dwc3_disconnect_gadget(dwc);
	dwc->gadget->speed = USB_SPEED_UNKNOWN;
	dwc->setup_packet_pending = false;
	dwc->gadget->wakeup_armed = false;
	dwc3_gadget_enable_linksts_evts(dwc, false);
	usb_gadget_set_state(dwc->gadget, USB_STATE_NOTATTACHED);
	/* Prepare EP0 for the next connection's SETUP */
	dwc3_ep0_reset_state(dwc);
}
/*
 * dwc3_gadget_reset_interrupt - handle a USB Reset device event
 * @dwc: controller context
 *
 * Quiesces active transfers and EP0, clears test mode and stall state on
 * all endpoints, and resets the device address to zero as required by a
 * bus reset.
 */
static void dwc3_gadget_reset_interrupt(struct dwc3 *dwc)
{
	u32 reg;
	dwc->suspended = false;
	/*
	 * Ideally, dwc3_reset_gadget() would trigger the function
	 * drivers to stop any active transfers through ep disable.
	 * However, for functions which defer ep disable, such as mass
	 * storage, we will need to rely on the call to stop active
	 * transfers here, and avoid allowing of request queuing.
	 */
	dwc->connected = false;
	/*
	 * WORKAROUND: DWC3 revisions <1.88a have an issue which
	 * would cause a missing Disconnect Event if there's a
	 * pending Setup Packet in the FIFO.
	 *
	 * There's no suggested workaround on the official Bug
	 * report, which states that "unless the driver/application
	 * is doing any special handling of a disconnect event,
	 * there is no functional issue".
	 *
	 * Unfortunately, it turns out that we _do_ some special
	 * handling of a disconnect event, namely complete all
	 * pending transfers, notify gadget driver of the
	 * disconnection, and so on.
	 *
	 * Our suggested workaround is to follow the Disconnect
	 * Event steps here, instead, based on a setup_packet_pending
	 * flag. Such flag gets set whenever we have a SETUP_PENDING
	 * status for EP0 TRBs and gets cleared on XferComplete for the
	 * same endpoint.
	 *
	 * Refers to:
	 *
	 * STAR#9000466709: RTL: Device : Disconnect event not
	 * generated if setup packet pending in FIFO
	 */
	if (DWC3_VER_IS_PRIOR(DWC3, 188A)) {
		if (dwc->setup_packet_pending)
			dwc3_gadget_disconnect_interrupt(dwc);
	}
	dwc3_reset_gadget(dwc);
	/*
	 * From SNPS databook section 8.1.2, the EP0 should be in setup
	 * phase. So ensure that EP0 is in setup phase by issuing a stall
	 * and restart if EP0 is not in setup phase.
	 */
	dwc3_ep0_reset_state(dwc);
	/*
	 * In the Synopsis DesignWare Cores USB3 Databook Rev. 3.30a
	 * Section 4.1.2 Table 4-2, it states that during a USB reset, the SW
	 * needs to ensure that it sends "a DEPENDXFER command for any active
	 * transfers."
	 */
	dwc3_stop_active_transfers(dwc);
	dwc->connected = true;
	/* Leave any test mode that may have been entered */
	reg = dwc3_readl(dwc->regs, DWC3_DCTL);
	reg &= ~DWC3_DCTL_TSTCTRL_MASK;
	dwc3_gadget_dctl_write_safe(dwc, reg);
	dwc->test_mode = false;
	dwc->gadget->wakeup_armed = false;
	dwc3_gadget_enable_linksts_evts(dwc, false);
	dwc3_clear_stall_all_ep(dwc);
	/* Reset device address to zero */
	reg = dwc3_readl(dwc->regs, DWC3_DCFG);
	reg &= ~(DWC3_DCFG_DEVADDR_MASK);
	dwc3_writel(dwc->regs, DWC3_DCFG, reg);
}
/*
 * dwc3_gadget_conndone_interrupt - handle a Connect Done device event
 * @dwc: controller context
 *
 * Reads the negotiated speed out of DSTS, programs EP0's max packet size
 * accordingly, configures USB2 LPM capability where applicable, and
 * re-enables both directions of the default control endpoint.
 */
static void dwc3_gadget_conndone_interrupt(struct dwc3 *dwc)
{
	struct dwc3_ep *dep;
	int ret;
	u32 reg;
	u8 lanes = 1;
	u8 speed;
	if (!dwc->softconnect)
		return;
	reg = dwc3_readl(dwc->regs, DWC3_DSTS);
	speed = reg & DWC3_DSTS_CONNECTSPD;
	dwc->speed = speed;
	/* Only DWC_usb32 reports the number of connected lanes */
	if (DWC3_IP_IS(DWC32))
		lanes = DWC3_DSTS_CONNLANES(reg) + 1;
	dwc->gadget->ssp_rate = USB_SSP_GEN_UNKNOWN;
	/*
	 * RAMClkSel is reset to 0 after USB reset, so it must be reprogrammed
	 * each time on Connect Done.
	 *
	 * Currently we always use the reset value. If any platform
	 * wants to set this to a different value, we need to add a
	 * setting and update GCTL.RAMCLKSEL here.
	 */
	switch (speed) {
	case DWC3_DSTS_SUPERSPEED_PLUS:
		dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(512);
		dwc->gadget->ep0->maxpacket = 512;
		dwc->gadget->speed = USB_SPEED_SUPER_PLUS;
		if (lanes > 1)
			dwc->gadget->ssp_rate = USB_SSP_GEN_2x2;
		else
			dwc->gadget->ssp_rate = USB_SSP_GEN_2x1;
		break;
	case DWC3_DSTS_SUPERSPEED:
		/*
		 * WORKAROUND: DWC3 revisions <1.90a have an issue which
		 * would cause a missing USB3 Reset event.
		 *
		 * In such situations, we should force a USB3 Reset
		 * event by calling our dwc3_gadget_reset_interrupt()
		 * routine.
		 *
		 * Refers to:
		 *
		 * STAR#9000483510: RTL: SS : USB3 reset event may
		 * not be generated always when the link enters poll
		 */
		if (DWC3_VER_IS_PRIOR(DWC3, 190A))
			dwc3_gadget_reset_interrupt(dwc);
		dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(512);
		dwc->gadget->ep0->maxpacket = 512;
		dwc->gadget->speed = USB_SPEED_SUPER;
		if (lanes > 1) {
			dwc->gadget->speed = USB_SPEED_SUPER_PLUS;
			dwc->gadget->ssp_rate = USB_SSP_GEN_1x2;
		}
		break;
	case DWC3_DSTS_HIGHSPEED:
		dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(64);
		dwc->gadget->ep0->maxpacket = 64;
		dwc->gadget->speed = USB_SPEED_HIGH;
		break;
	case DWC3_DSTS_FULLSPEED:
		dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(64);
		dwc->gadget->ep0->maxpacket = 64;
		dwc->gadget->speed = USB_SPEED_FULL;
		break;
	}
	/* Keep eps[1] (presumably ep0's other direction) in sync with ep0 */
	dwc->eps[1]->endpoint.maxpacket = dwc->gadget->ep0->maxpacket;
	/* Enable USB2 LPM Capability */
	if (!DWC3_VER_IS_WITHIN(DWC3, ANY, 194A) &&
	    !dwc->usb2_gadget_lpm_disable &&
	    (speed != DWC3_DSTS_SUPERSPEED) &&
	    (speed != DWC3_DSTS_SUPERSPEED_PLUS)) {
		reg = dwc3_readl(dwc->regs, DWC3_DCFG);
		reg |= DWC3_DCFG_LPM_CAP;
		dwc3_writel(dwc->regs, DWC3_DCFG, reg);
		reg = dwc3_readl(dwc->regs, DWC3_DCTL);
		reg &= ~(DWC3_DCTL_HIRD_THRES_MASK | DWC3_DCTL_L1_HIBER_EN);
		reg |= DWC3_DCTL_HIRD_THRES(dwc->hird_threshold |
					    (dwc->is_utmi_l1_suspend << 4));
		/*
		 * When dwc3 revisions >= 2.40a, LPM Erratum is enabled and
		 * DCFG.LPMCap is set, core responses with an ACK and the
		 * BESL value in the LPM token is less than or equal to LPM
		 * NYET threshold.
		 */
		WARN_ONCE(DWC3_VER_IS_PRIOR(DWC3, 240A) && dwc->has_lpm_erratum,
				"LPM Erratum not available on dwc3 revisions < 2.40a\n");
		if (dwc->has_lpm_erratum && !DWC3_VER_IS_PRIOR(DWC3, 240A))
			reg |= DWC3_DCTL_NYET_THRES(dwc->lpm_nyet_threshold);
		dwc3_gadget_dctl_write_safe(dwc, reg);
	} else {
		if (dwc->usb2_gadget_lpm_disable) {
			reg = dwc3_readl(dwc->regs, DWC3_DCFG);
			reg &= ~DWC3_DCFG_LPM_CAP;
			dwc3_writel(dwc->regs, DWC3_DCFG, reg);
		}
		reg = dwc3_readl(dwc->regs, DWC3_DCTL);
		reg &= ~DWC3_DCTL_HIRD_THRES_MASK;
		dwc3_gadget_dctl_write_safe(dwc, reg);
	}
	/* Re-enable both EP0 directions with the new max packet size */
	dep = dwc->eps[0];
	ret = __dwc3_gadget_ep_enable(dep, DWC3_DEPCFG_ACTION_MODIFY);
	if (ret) {
		dev_err(dwc->dev, "failed to enable %s\n", dep->name);
		return;
	}
	dep = dwc->eps[1];
	ret = __dwc3_gadget_ep_enable(dep, DWC3_DEPCFG_ACTION_MODIFY);
	if (ret) {
		dev_err(dwc->dev, "failed to enable %s\n", dep->name);
		return;
	}
	/*
	 * Configure PHY via GUSB3PIPECTLn if required.
	 *
	 * Update GTXFIFOSIZn
	 *
	 * In both cases reset values should be sufficient.
	 */
}
/*
 * dwc3_gadget_wakeup_interrupt - handle a Wakeup device event
 * @dwc: controller context
 * @evtinfo: raw event info; low bits carry the new link state
 *
 * Forwards ->resume() to the gadget driver (dropping dwc->lock around the
 * callback) and records the new link state.
 */
static void dwc3_gadget_wakeup_interrupt(struct dwc3 *dwc, unsigned int evtinfo)
{
	dwc->suspended = false;
	/*
	 * TODO take core out of low power mode when that's
	 * implemented.
	 */
	if (dwc->async_callbacks && dwc->gadget_driver->resume) {
		spin_unlock(&dwc->lock);
		dwc->gadget_driver->resume(dwc->gadget);
		spin_lock(&dwc->lock);
	}
	dwc->link_state = evtinfo & DWC3_LINK_STATE_MASK;
}
/*
 * dwc3_gadget_linksts_change_interrupt - handle a Link State Change event
 * @dwc: controller context
 * @evtinfo: raw event info; low bits carry the next link state
 *
 * Applies two revision-specific workarounds, then suspends/resumes the
 * gadget depending on the new link state and records it in dwc->link_state.
 */
static void dwc3_gadget_linksts_change_interrupt(struct dwc3 *dwc,
		unsigned int evtinfo)
{
	enum dwc3_link_state next = evtinfo & DWC3_LINK_STATE_MASK;
	unsigned int pwropt;
	/*
	 * WORKAROUND: DWC3 < 2.50a have an issue when configured without
	 * Hibernation mode enabled which would show up when device detects
	 * host-initiated U3 exit.
	 *
	 * In that case, device will generate a Link State Change Interrupt
	 * from U3 to RESUME which is only necessary if Hibernation is
	 * configured in.
	 *
	 * There are no functional changes due to such spurious event and we
	 * just need to ignore it.
	 *
	 * Refers to:
	 *
	 * STAR#9000570034 RTL: SS Resume event generated in non-Hibernation
	 * operational mode
	 */
	pwropt = DWC3_GHWPARAMS1_EN_PWROPT(dwc->hwparams.hwparams1);
	if (DWC3_VER_IS_PRIOR(DWC3, 250A) &&
			(pwropt != DWC3_GHWPARAMS1_EN_PWROPT_HIB)) {
		if ((dwc->link_state == DWC3_LINK_STATE_U3) &&
				(next == DWC3_LINK_STATE_RESUME)) {
			return;
		}
	}
	/*
	 * WORKAROUND: DWC3 Revisions <1.83a have an issue which, depending
	 * on the link partner, the USB session might do multiple entry/exit
	 * of low power states before a transfer takes place.
	 *
	 * Due to this problem, we might experience lower throughput. The
	 * suggested workaround is to disable DCTL[12:9] bits if we're
	 * transitioning from U1/U2 to U0 and enable those bits again
	 * after a transfer completes and there are no pending transfers
	 * on any of the enabled endpoints.
	 *
	 * This is the first half of that workaround.
	 *
	 * Refers to:
	 *
	 * STAR#9000446952: RTL: Device SS : if U1/U2 ->U0 takes >128us
	 * core send LGO_Ux entering U0
	 */
	if (DWC3_VER_IS_PRIOR(DWC3, 183A)) {
		if (next == DWC3_LINK_STATE_U0) {
			u32 u1u2;
			u32 reg;
			switch (dwc->link_state) {
			case DWC3_LINK_STATE_U1:
			case DWC3_LINK_STATE_U2:
				reg = dwc3_readl(dwc->regs, DWC3_DCTL);
				u1u2 = reg & (DWC3_DCTL_INITU2ENA
						| DWC3_DCTL_ACCEPTU2ENA
						| DWC3_DCTL_INITU1ENA
						| DWC3_DCTL_ACCEPTU1ENA);
				/* Remember the bits we cleared so they can be restored later */
				if (!dwc->u1u2)
					dwc->u1u2 = reg & u1u2;
				reg &= ~u1u2;
				dwc3_gadget_dctl_write_safe(dwc, reg);
				break;
			default:
				/* do nothing */
				break;
			}
		}
	}
	switch (next) {
	case DWC3_LINK_STATE_U0:
		/* Remote-wakeup completed: stop link events and resume */
		if (dwc->gadget->wakeup_armed) {
			dwc3_gadget_enable_linksts_evts(dwc, false);
			dwc3_resume_gadget(dwc);
			dwc->suspended = false;
		}
		break;
	case DWC3_LINK_STATE_U1:
		if (dwc->speed == USB_SPEED_SUPER)
			dwc3_suspend_gadget(dwc);
		break;
	case DWC3_LINK_STATE_U2:
	case DWC3_LINK_STATE_U3:
		dwc3_suspend_gadget(dwc);
		break;
	case DWC3_LINK_STATE_RESUME:
		dwc3_resume_gadget(dwc);
		break;
	default:
		/* do nothing */
		break;
	}
	dwc->link_state = next;
}
/*
 * dwc3_gadget_suspend_interrupt - handle a (U3/Suspend) device event
 * @dwc: controller context
 * @evtinfo: raw event info; low bits carry the new link state
 *
 * Notifies the gadget driver only once per suspend, guarded by
 * dwc->suspended, and records the new link state.
 */
static void dwc3_gadget_suspend_interrupt(struct dwc3 *dwc,
		unsigned int evtinfo)
{
	enum dwc3_link_state next = evtinfo & DWC3_LINK_STATE_MASK;
	if (!dwc->suspended && next == DWC3_LINK_STATE_U3) {
		dwc->suspended = true;
		dwc3_suspend_gadget(dwc);
	}
	dwc->link_state = next;
}
/*
 * dwc3_gadget_interrupt - dispatch a device-specific event to its handler
 * @dwc: controller context
 * @event: decoded device event
 */
static void dwc3_gadget_interrupt(struct dwc3 *dwc,
		const struct dwc3_event_devt *event)
{
	switch (event->type) {
	case DWC3_DEVICE_EVENT_DISCONNECT:
		dwc3_gadget_disconnect_interrupt(dwc);
		break;
	case DWC3_DEVICE_EVENT_RESET:
		dwc3_gadget_reset_interrupt(dwc);
		break;
	case DWC3_DEVICE_EVENT_CONNECT_DONE:
		dwc3_gadget_conndone_interrupt(dwc);
		break;
	case DWC3_DEVICE_EVENT_WAKEUP:
		dwc3_gadget_wakeup_interrupt(dwc, event->event_info);
		break;
	case DWC3_DEVICE_EVENT_HIBER_REQ:
		/* Hibernation is not supported here; flag it loudly */
		dev_WARN_ONCE(dwc->dev, true, "unexpected hibernation event\n");
		break;
	case DWC3_DEVICE_EVENT_LINK_STATUS_CHANGE:
		dwc3_gadget_linksts_change_interrupt(dwc, event->event_info);
		break;
	case DWC3_DEVICE_EVENT_SUSPEND:
		/* It changed to be suspend event for version 2.30a and above */
		if (!DWC3_VER_IS_PRIOR(DWC3, 230A))
			dwc3_gadget_suspend_interrupt(dwc, event->event_info);
		break;
	case DWC3_DEVICE_EVENT_SOF:
	case DWC3_DEVICE_EVENT_ERRATIC_ERROR:
	case DWC3_DEVICE_EVENT_CMD_CMPL:
	case DWC3_DEVICE_EVENT_OVERFLOW:
		/* Intentionally ignored */
		break;
	default:
		dev_WARN(dwc->dev, "UNKNOWN IRQ %d\n", event->type);
	}
}
/*
 * Route one raw event-buffer entry: endpoint events (is_devspec == 0) go
 * to the endpoint handler, device-wide events to the gadget handler, and
 * anything else is reported as unknown.
 */
static void dwc3_process_event_entry(struct dwc3 *dwc,
		const union dwc3_event *event)
{
	trace_dwc3_event(event->raw, dwc);

	if (!event->type.is_devspec) {
		dwc3_endpoint_interrupt(dwc, &event->depevt);
		return;
	}

	if (event->type.type == DWC3_EVENT_TYPE_DEV)
		dwc3_gadget_interrupt(dwc, &event->devt);
	else
		dev_err(dwc->dev, "UNKNOWN IRQ type %d\n", event->raw);
}
/*
 * dwc3_process_event_buf - drain all cached events from one event buffer
 * @evt: event buffer previously snapshotted by dwc3_check_event_buf()
 *
 * Walks the cached ring 4 bytes at a time, then unmasks the interrupt and
 * reprograms interrupt moderation. DWC3_EVENT_PENDING is cleared last so
 * the hard IRQ handler does not cache new events concurrently.
 */
static irqreturn_t dwc3_process_event_buf(struct dwc3_event_buffer *evt)
{
	struct dwc3 *dwc = evt->dwc;
	irqreturn_t ret = IRQ_NONE;
	int left;
	left = evt->count;
	if (!(evt->flags & DWC3_EVENT_PENDING))
		return IRQ_NONE;
	while (left > 0) {
		union dwc3_event event;
		event.raw = *(u32 *) (evt->cache + evt->lpos);
		dwc3_process_event_entry(dwc, &event);
		/*
		 * FIXME we wrap around correctly to the next entry as
		 * almost all entries are 4 bytes in size. There is one
		 * entry which has 12 bytes which is a regular entry
		 * followed by 8 bytes data. ATM I don't know how
		 * things are organized if we get next to the a
		 * boundary so I worry about that once we try to handle
		 * that.
		 */
		evt->lpos = (evt->lpos + 4) % evt->length;
		left -= 4;
	}
	evt->count = 0;
	ret = IRQ_HANDLED;
	/* Unmask interrupt */
	dwc3_writel(dwc->regs, DWC3_GEVNTSIZ(0),
			DWC3_GEVNTSIZ_SIZE(evt->length));
	if (dwc->imod_interval) {
		dwc3_writel(dwc->regs, DWC3_GEVNTCOUNT(0), DWC3_GEVNTCOUNT_EHB);
		dwc3_writel(dwc->regs, DWC3_DEV_IMOD(0), dwc->imod_interval);
	}
	/* Keep the clearing of DWC3_EVENT_PENDING at the end */
	evt->flags &= ~DWC3_EVENT_PENDING;
	return ret;
}
/*
 * dwc3_thread_interrupt - threaded half of the gadget IRQ
 * @irq: interrupt number (unused)
 * @_evt: the event buffer passed at request_threaded_irq() time
 *
 * Processes the cached events under dwc->lock with bottom halves disabled.
 */
static irqreturn_t dwc3_thread_interrupt(int irq, void *_evt)
{
	struct dwc3_event_buffer *evt = _evt;
	struct dwc3 *dwc = evt->dwc;
	unsigned long flags;
	irqreturn_t ret = IRQ_NONE;
	local_bh_disable();
	spin_lock_irqsave(&dwc->lock, flags);
	ret = dwc3_process_event_buf(evt);
	spin_unlock_irqrestore(&dwc->lock, flags);
	local_bh_enable();
	return ret;
}
/*
 * dwc3_check_event_buf - hard-IRQ half of the gadget IRQ
 * @evt: event buffer to check
 *
 * Snapshots pending events into evt->cache (handling ring wrap-around),
 * masks the event interrupt and acknowledges the count, then wakes the
 * handler thread. When the device is runtime-suspended it instead records
 * pending work and triggers a runtime resume.
 */
static irqreturn_t dwc3_check_event_buf(struct dwc3_event_buffer *evt)
{
	struct dwc3 *dwc = evt->dwc;
	u32 amount;
	u32 count;
	if (pm_runtime_suspended(dwc->dev)) {
		dwc->pending_events = true;
		/*
		 * Trigger runtime resume. The get() function will be balanced
		 * after processing the pending events in dwc3_process_pending
		 * events().
		 */
		pm_runtime_get(dwc->dev);
		disable_irq_nosync(dwc->irq_gadget);
		return IRQ_HANDLED;
	}
	/*
	 * With PCIe legacy interrupt, test shows that top-half irq handler can
	 * be called again after HW interrupt deassertion. Check if bottom-half
	 * irq event handler completes before caching new event to prevent
	 * losing events.
	 */
	if (evt->flags & DWC3_EVENT_PENDING)
		return IRQ_HANDLED;
	count = dwc3_readl(dwc->regs, DWC3_GEVNTCOUNT(0));
	count &= DWC3_GEVNTCOUNT_MASK;
	if (!count)
		return IRQ_NONE;
	evt->count = count;
	evt->flags |= DWC3_EVENT_PENDING;
	/* Mask interrupt */
	dwc3_writel(dwc->regs, DWC3_GEVNTSIZ(0),
			DWC3_GEVNTSIZ_INTMASK | DWC3_GEVNTSIZ_SIZE(evt->length));
	/* Copy in up to two pieces if the events wrap past the ring end */
	amount = min(count, evt->length - evt->lpos);
	memcpy(evt->cache + evt->lpos, evt->buf + evt->lpos, amount);
	if (amount < count)
		memcpy(evt->cache, evt->buf, count - amount);
	/* Acknowledge the events we just consumed */
	dwc3_writel(dwc->regs, DWC3_GEVNTCOUNT(0), count);
	return IRQ_WAKE_THREAD;
}
/* Top-half IRQ entry point: just forward to dwc3_check_event_buf(). */
static irqreturn_t dwc3_interrupt(int irq, void *_evt)
{
	return dwc3_check_event_buf((struct dwc3_event_buffer *)_evt);
}
/*
 * Look up the peripheral IRQ: try the "peripheral" resource name first,
 * then the legacy "dwc_usb3" name, and finally fall back to index 0.
 * -EPROBE_DEFER from either named lookup is propagated immediately.
 */
static int dwc3_gadget_get_irq(struct dwc3 *dwc)
{
	struct platform_device *pdev = to_platform_device(dwc->dev);
	int irq;

	irq = platform_get_irq_byname_optional(pdev, "peripheral");
	if (irq > 0 || irq == -EPROBE_DEFER)
		return irq;

	irq = platform_get_irq_byname_optional(pdev, "dwc_usb3");
	if (irq > 0 || irq == -EPROBE_DEFER)
		return irq;

	return platform_get_irq(pdev, 0);
}
/*
 * dwc_gadget_release - gadget device ->release callback
 * @dev: the embedded gadget device
 *
 * Frees the usb_gadget allocated in dwc3_gadget_init().
 */
static void dwc_gadget_release(struct device *dev)
{
	struct usb_gadget *gadget = container_of(dev, struct usb_gadget, dev);
	kfree(gadget);
}
/**
 * dwc3_gadget_init - initializes gadget related registers
 * @dwc: pointer to our controller context structure
 *
 * Allocates the EP0 TRBs, setup buffer and bounce buffer, creates and
 * registers the usb_gadget, initializes endpoints and programs the
 * maximum speed. On failure, everything is unwound in reverse order via
 * the numbered error labels.
 *
 * Returns 0 on success otherwise negative errno.
 */
int dwc3_gadget_init(struct dwc3 *dwc)
{
	int ret;
	int irq;
	struct device *dev;
	irq = dwc3_gadget_get_irq(dwc);
	if (irq < 0) {
		ret = irq;
		goto err0;
	}
	dwc->irq_gadget = irq;
	/* Two TRBs: enough for EP0's control transfer handling */
	dwc->ep0_trb = dma_alloc_coherent(dwc->sysdev,
					  sizeof(*dwc->ep0_trb) * 2,
					  &dwc->ep0_trb_addr, GFP_KERNEL);
	if (!dwc->ep0_trb) {
		dev_err(dwc->dev, "failed to allocate ep0 trb\n");
		ret = -ENOMEM;
		goto err0;
	}
	dwc->setup_buf = kzalloc(DWC3_EP0_SETUP_SIZE, GFP_KERNEL);
	if (!dwc->setup_buf) {
		ret = -ENOMEM;
		goto err1;
	}
	dwc->bounce = dma_alloc_coherent(dwc->sysdev, DWC3_BOUNCE_SIZE,
			&dwc->bounce_addr, GFP_KERNEL);
	if (!dwc->bounce) {
		ret = -ENOMEM;
		goto err2;
	}
	init_completion(&dwc->ep0_in_setup);
	/* Freed by dwc_gadget_release() when the last reference drops */
	dwc->gadget = kzalloc(sizeof(struct usb_gadget), GFP_KERNEL);
	if (!dwc->gadget) {
		ret = -ENOMEM;
		goto err3;
	}
	usb_initialize_gadget(dwc->dev, dwc->gadget, dwc_gadget_release);
	dev = &dwc->gadget->dev;
	dev->platform_data = dwc;
	dwc->gadget->ops = &dwc3_gadget_ops;
	dwc->gadget->speed = USB_SPEED_UNKNOWN;
	dwc->gadget->ssp_rate = USB_SSP_GEN_UNKNOWN;
	dwc->gadget->sg_supported = true;
	dwc->gadget->name = "dwc3-gadget";
	dwc->gadget->lpm_capable = !dwc->usb2_gadget_lpm_disable;
	dwc->gadget->wakeup_capable = true;
	/*
	 * FIXME We might be setting max_speed to <SUPER, however versions
	 * <2.20a of dwc3 have an issue with metastability (documented
	 * elsewhere in this driver) which tells us we can't set max speed to
	 * anything lower than SUPER.
	 *
	 * Because gadget.max_speed is only used by composite.c and function
	 * drivers (i.e. it won't go into dwc3's registers) we are allowing this
	 * to happen so we avoid sending SuperSpeed Capability descriptor
	 * together with our BOS descriptor as that could confuse host into
	 * thinking we can handle super speed.
	 *
	 * Note that, in fact, we won't even support GetBOS requests when speed
	 * is less than super speed because we don't have means, yet, to tell
	 * composite.c that we are USB 2.0 + LPM ECN.
	 */
	if (DWC3_VER_IS_PRIOR(DWC3, 220A) &&
	    !dwc->dis_metastability_quirk)
		dev_info(dwc->dev, "changing max_speed on rev %08x\n",
				dwc->revision);
	dwc->gadget->max_speed = dwc->maximum_speed;
	dwc->gadget->max_ssp_rate = dwc->max_ssp_rate;
	/*
	 * REVISIT: Here we should clear all pending IRQs to be
	 * sure we're starting from a well known location.
	 */
	ret = dwc3_gadget_init_endpoints(dwc, dwc->num_eps);
	if (ret)
		goto err4;
	ret = usb_add_gadget(dwc->gadget);
	if (ret) {
		dev_err(dwc->dev, "failed to add gadget\n");
		goto err5;
	}
	if (DWC3_IP_IS(DWC32) && dwc->maximum_speed == USB_SPEED_SUPER_PLUS)
		dwc3_gadget_set_ssp_rate(dwc->gadget, dwc->max_ssp_rate);
	else
		dwc3_gadget_set_speed(dwc->gadget, dwc->maximum_speed);
	return 0;
	/* Error labels unwind allocations in reverse order */
err5:
	dwc3_gadget_free_endpoints(dwc);
err4:
	usb_put_gadget(dwc->gadget);
	dwc->gadget = NULL;
err3:
	dma_free_coherent(dwc->sysdev, DWC3_BOUNCE_SIZE, dwc->bounce,
			dwc->bounce_addr);
err2:
	kfree(dwc->setup_buf);
err1:
	dma_free_coherent(dwc->sysdev, sizeof(*dwc->ep0_trb) * 2,
			dwc->ep0_trb, dwc->ep0_trb_addr);
err0:
	return ret;
}
/* -------------------------------------------------------------------------- */
/*
 * dwc3_gadget_exit - tear down everything dwc3_gadget_init() set up
 * @dwc: pointer to our controller context structure
 *
 * Safe to call when gadget init never ran (dwc->gadget is NULL).
 */
void dwc3_gadget_exit(struct dwc3 *dwc)
{
	if (!dwc->gadget)
		return;
	usb_del_gadget(dwc->gadget);
	dwc3_gadget_free_endpoints(dwc);
	usb_put_gadget(dwc->gadget);
	dma_free_coherent(dwc->sysdev, DWC3_BOUNCE_SIZE, dwc->bounce,
			  dwc->bounce_addr);
	kfree(dwc->setup_buf);
	dma_free_coherent(dwc->sysdev, sizeof(*dwc->ep0_trb) * 2,
			  dwc->ep0_trb, dwc->ep0_trb_addr);
}
/*
 * dwc3_gadget_suspend - quiesce the gadget for suspend
 * @dwc: pointer to our controller context structure
 *
 * Soft-disconnects the controller and notifies the gadget driver. On
 * failure the controller is soft-connected again (when softconnect is
 * still requested) so the host can recover it with a port reset.
 *
 * Returns 0 on success otherwise negative errno.
 */
int dwc3_gadget_suspend(struct dwc3 *dwc)
{
	unsigned long flags;
	int ret;
	if (!dwc->gadget_driver)
		return 0;
	ret = dwc3_gadget_soft_disconnect(dwc);
	if (ret)
		goto err;
	spin_lock_irqsave(&dwc->lock, flags);
	dwc3_disconnect_gadget(dwc);
	spin_unlock_irqrestore(&dwc->lock, flags);
	return 0;
err:
	/*
	 * Attempt to reset the controller's state. Likely no
	 * communication can be established until the host
	 * performs a port reset.
	 */
	if (dwc->softconnect)
		dwc3_gadget_soft_connect(dwc);
	return ret;
}
/*
 * dwc3_gadget_resume - undo dwc3_gadget_suspend()
 * @dwc: pointer to our controller context structure
 *
 * Soft-connects the controller again, but only when a gadget driver is
 * bound and soft-connect is requested.
 */
int dwc3_gadget_resume(struct dwc3 *dwc)
{
	if (dwc->gadget_driver && dwc->softconnect)
		return dwc3_gadget_soft_connect(dwc);

	return 0;
}
/*
 * dwc3_gadget_process_pending_events - replay events deferred while suspended
 * @dwc: pointer to our controller context structure
 *
 * Runs both IRQ halves by hand, balances the pm_runtime_get() taken in
 * dwc3_check_event_buf() and re-enables the gadget IRQ.
 *
 * NOTE(review): pending_events is cleared only after the replay and the
 * runtime PM put; confirm no new hard IRQ can set it in between, otherwise
 * an event could be dropped.
 */
void dwc3_gadget_process_pending_events(struct dwc3 *dwc)
{
	if (dwc->pending_events) {
		dwc3_interrupt(dwc->irq_gadget, dwc->ev_buf);
		dwc3_thread_interrupt(dwc->irq_gadget, dwc->ev_buf);
		pm_runtime_put(dwc->dev);
		dwc->pending_events = false;
		enable_irq(dwc->irq_gadget);
	}
}
| linux-master | drivers/usb/dwc3/gadget.c |
// SPDX-License-Identifier: GPL-2.0
/*
* trace.c - DesignWare USB3 DRD Controller Trace Support
*
* Copyright (C) 2014 Texas Instruments Incorporated - https://www.ti.com
*
* Author: Felipe Balbi <[email protected]>
*/
#define CREATE_TRACE_POINTS
#include "trace.h"
| linux-master | drivers/usb/dwc3/trace.c |
// SPDX-License-Identifier: GPL-2.0
/*
* dwc3-exynos.c - Samsung Exynos DWC3 Specific Glue layer
*
* Copyright (c) 2012 Samsung Electronics Co., Ltd.
* http://www.samsung.com
*
* Author: Anton Tikhomirov <[email protected]>
*/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/platform_device.h>
#include <linux/clk.h>
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/regulator/consumer.h>
#define DWC3_EXYNOS_MAX_CLOCKS 4
/* Per-SoC match data: clock names, how many, and which (if any) is the
 * dedicated suspend clock (-1 when there is none). */
struct dwc3_exynos_driverdata {
	const char *clk_names[DWC3_EXYNOS_MAX_CLOCKS];
	int num_clks;
	int suspend_clk_idx;
};
/* Runtime state for one Exynos glue instance */
struct dwc3_exynos {
	struct device *dev;
	const char **clk_names;
	struct clk *clks[DWC3_EXYNOS_MAX_CLOCKS];
	int num_clks;
	int suspend_clk_idx;
	struct regulator *vdd33;
	struct regulator *vdd10;
};
/*
 * Probe: pick up the per-SoC clock list from match data, enable clocks
 * and the VDD33/VDD10 regulators, then populate the child dwc3 core node.
 * Error paths unwind in reverse order of setup.
 */
static int dwc3_exynos_probe(struct platform_device *pdev)
{
	struct dwc3_exynos *exynos;
	struct device *dev = &pdev->dev;
	struct device_node *node = dev->of_node;
	const struct dwc3_exynos_driverdata *driver_data;
	int i, ret;
	exynos = devm_kzalloc(dev, sizeof(*exynos), GFP_KERNEL);
	if (!exynos)
		return -ENOMEM;
	/*
	 * NOTE(review): driver_data is used unchecked; every entry in
	 * exynos_dwc3_match provides .data, but a NULL check would guard
	 * against future table entries without it.
	 */
	driver_data = of_device_get_match_data(dev);
	exynos->dev = dev;
	exynos->num_clks = driver_data->num_clks;
	exynos->clk_names = (const char **)driver_data->clk_names;
	exynos->suspend_clk_idx = driver_data->suspend_clk_idx;
	platform_set_drvdata(pdev, exynos);
	for (i = 0; i < exynos->num_clks; i++) {
		exynos->clks[i] = devm_clk_get(dev, exynos->clk_names[i]);
		if (IS_ERR(exynos->clks[i])) {
			dev_err(dev, "failed to get clock: %s\n",
				exynos->clk_names[i]);
			return PTR_ERR(exynos->clks[i]);
		}
	}
	for (i = 0; i < exynos->num_clks; i++) {
		ret = clk_prepare_enable(exynos->clks[i]);
		if (ret) {
			/* Roll back the clocks already enabled */
			while (i-- > 0)
				clk_disable_unprepare(exynos->clks[i]);
			return ret;
		}
	}
	/* The suspend clock gets an extra enable so it keeps running */
	if (exynos->suspend_clk_idx >= 0)
		clk_prepare_enable(exynos->clks[exynos->suspend_clk_idx]);
	exynos->vdd33 = devm_regulator_get(dev, "vdd33");
	if (IS_ERR(exynos->vdd33)) {
		ret = PTR_ERR(exynos->vdd33);
		goto vdd33_err;
	}
	ret = regulator_enable(exynos->vdd33);
	if (ret) {
		dev_err(dev, "Failed to enable VDD33 supply\n");
		goto vdd33_err;
	}
	exynos->vdd10 = devm_regulator_get(dev, "vdd10");
	if (IS_ERR(exynos->vdd10)) {
		ret = PTR_ERR(exynos->vdd10);
		goto vdd10_err;
	}
	ret = regulator_enable(exynos->vdd10);
	if (ret) {
		dev_err(dev, "Failed to enable VDD10 supply\n");
		goto vdd10_err;
	}
	if (node) {
		ret = of_platform_populate(node, NULL, NULL, dev);
		if (ret) {
			dev_err(dev, "failed to add dwc3 core\n");
			goto populate_err;
		}
	} else {
		dev_err(dev, "no device node, failed to add dwc3 core\n");
		ret = -ENODEV;
		goto populate_err;
	}
	return 0;
	/* Unwind in reverse order of the setup above */
populate_err:
	regulator_disable(exynos->vdd10);
vdd10_err:
	regulator_disable(exynos->vdd33);
vdd33_err:
	for (i = exynos->num_clks - 1; i >= 0; i--)
		clk_disable_unprepare(exynos->clks[i]);
	if (exynos->suspend_clk_idx >= 0)
		clk_disable_unprepare(exynos->clks[exynos->suspend_clk_idx]);
	return ret;
}
/*
 * Remove: depopulate the child dwc3 core, then release the clocks
 * (including the extra suspend-clock enable taken at probe) and the
 * regulators.
 */
static void dwc3_exynos_remove(struct platform_device *pdev)
{
	struct dwc3_exynos *exynos = platform_get_drvdata(pdev);
	int i;
	of_platform_depopulate(&pdev->dev);
	for (i = exynos->num_clks - 1; i >= 0; i--)
		clk_disable_unprepare(exynos->clks[i]);
	if (exynos->suspend_clk_idx >= 0)
		clk_disable_unprepare(exynos->clks[exynos->suspend_clk_idx]);
	regulator_disable(exynos->vdd33);
	regulator_disable(exynos->vdd10);
}
/* Per-SoC clock configurations; suspend_clk_idx == -1 means no dedicated
 * suspend clock. */
static const struct dwc3_exynos_driverdata exynos5250_drvdata = {
	.clk_names = { "usbdrd30" },
	.num_clks = 1,
	.suspend_clk_idx = -1,
};
static const struct dwc3_exynos_driverdata exynos5433_drvdata = {
	.clk_names = { "aclk", "susp_clk", "pipe_pclk", "phyclk" },
	.num_clks = 4,
	.suspend_clk_idx = 1,
};
static const struct dwc3_exynos_driverdata exynos7_drvdata = {
	.clk_names = { "usbdrd30", "usbdrd30_susp_clk", "usbdrd30_axius_clk" },
	.num_clks = 3,
	.suspend_clk_idx = 1,
};
static const struct dwc3_exynos_driverdata exynos850_drvdata = {
	.clk_names = { "bus_early", "ref" },
	.num_clks = 2,
	.suspend_clk_idx = -1,
};
/* OF match table; every entry carries per-SoC driver data */
static const struct of_device_id exynos_dwc3_match[] = {
	{
		.compatible = "samsung,exynos5250-dwusb3",
		.data = &exynos5250_drvdata,
	}, {
		.compatible = "samsung,exynos5433-dwusb3",
		.data = &exynos5433_drvdata,
	}, {
		.compatible = "samsung,exynos7-dwusb3",
		.data = &exynos7_drvdata,
	}, {
		.compatible = "samsung,exynos850-dwusb3",
		.data = &exynos850_drvdata,
	}, {
	}
};
MODULE_DEVICE_TABLE(of, exynos_dwc3_match);
#ifdef CONFIG_PM_SLEEP
/*
 * System suspend: gate the bus clocks and drop the regulators.
 * NOTE(review): the dedicated suspend clock (if any) is deliberately not
 * disabled here — presumably it must keep running across suspend; confirm
 * against the SoC documentation.
 */
static int dwc3_exynos_suspend(struct device *dev)
{
	struct dwc3_exynos *exynos = dev_get_drvdata(dev);
	int i;
	for (i = exynos->num_clks - 1; i >= 0; i--)
		clk_disable_unprepare(exynos->clks[i]);
	regulator_disable(exynos->vdd33);
	regulator_disable(exynos->vdd10);
	return 0;
}
/*
 * System resume: re-enable the regulators and clocks released in
 * dwc3_exynos_suspend().
 *
 * Fix over the previous version: on a partial failure the already-enabled
 * regulators are disabled again, so the regulator enable counts stay
 * balanced and the device is left in its suspended state.
 */
static int dwc3_exynos_resume(struct device *dev)
{
	struct dwc3_exynos *exynos = dev_get_drvdata(dev);
	int i, ret;
	ret = regulator_enable(exynos->vdd33);
	if (ret) {
		dev_err(dev, "Failed to enable VDD33 supply\n");
		return ret;
	}
	ret = regulator_enable(exynos->vdd10);
	if (ret) {
		dev_err(dev, "Failed to enable VDD10 supply\n");
		goto err_vdd33;
	}
	for (i = 0; i < exynos->num_clks; i++) {
		ret = clk_prepare_enable(exynos->clks[i]);
		if (ret) {
			/* Roll back the clocks already enabled */
			while (i-- > 0)
				clk_disable_unprepare(exynos->clks[i]);
			goto err_vdd10;
		}
	}
	return 0;
	/* Unwind the regulator enables in reverse order */
err_vdd10:
	regulator_disable(exynos->vdd10);
err_vdd33:
	regulator_disable(exynos->vdd33);
	return ret;
}
static const struct dev_pm_ops dwc3_exynos_dev_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(dwc3_exynos_suspend, dwc3_exynos_resume)
};
/* Only wire up PM callbacks when sleep support is configured in */
#define DEV_PM_OPS	(&dwc3_exynos_dev_pm_ops)
#else
#define DEV_PM_OPS	NULL
#endif /* CONFIG_PM_SLEEP */
/* Platform driver glue for the Exynos dwc3 wrapper */
static struct platform_driver dwc3_exynos_driver = {
	.probe		= dwc3_exynos_probe,
	.remove_new	= dwc3_exynos_remove,
	.driver		= {
		.name	= "exynos-dwc3",
		.of_match_table = exynos_dwc3_match,
		.pm	= DEV_PM_OPS,
	},
};
module_platform_driver(dwc3_exynos_driver);
MODULE_AUTHOR("Anton Tikhomirov <[email protected]>");
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("DesignWare USB3 Exynos Glue Layer");
| linux-master | drivers/usb/dwc3/dwc3-exynos.c |
// SPDX-License-Identifier: GPL-2.0
/*
* dwc3-of-simple.c - OF glue layer for simple integrations
*
* Copyright (c) 2015 Texas Instruments Incorporated - https://www.ti.com
*
* Author: Felipe Balbi <[email protected]>
*
* This is a combination of the old dwc3-qcom.c by Ivan T. Ivanov
* <[email protected]> and the original patch adding support for Xilinx' SoC
* by Subbaraya Sundeep Bhatta <[email protected]>
*/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
#include <linux/clk.h>
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/pm_runtime.h>
#include <linux/reset.h>
/* State for one "simple" dwc3 glue instance: its clocks, resets, and
 * whether the resets must be toggled across system suspend/resume. */
struct dwc3_of_simple {
	struct device *dev;
	struct clk_bulk_data *clks;
	int num_clocks;
	struct reset_control *resets;
	bool need_reset;
};
/*
 * Probe: deassert the (optional, shared) reset lines, enable all clocks
 * described in DT, populate the child dwc3 core node and mark the device
 * runtime-PM active.
 */
static int dwc3_of_simple_probe(struct platform_device *pdev)
{
	struct dwc3_of_simple *simple;
	struct device *dev = &pdev->dev;
	struct device_node *np = dev->of_node;
	int ret;
	simple = devm_kzalloc(dev, sizeof(*simple), GFP_KERNEL);
	if (!simple)
		return -ENOMEM;
	platform_set_drvdata(pdev, simple);
	simple->dev = dev;
	/*
	 * Some controllers need to toggle the usb3-otg reset before trying to
	 * initialize the PHY, otherwise the PHY times out.
	 */
	if (of_device_is_compatible(np, "rockchip,rk3399-dwc3"))
		simple->need_reset = true;
	simple->resets = of_reset_control_array_get(np, false, true,
						    true);
	if (IS_ERR(simple->resets)) {
		ret = PTR_ERR(simple->resets);
		dev_err(dev, "failed to get device resets, err=%d\n", ret);
		return ret;
	}
	ret = reset_control_deassert(simple->resets);
	if (ret)
		goto err_resetc_put;
	ret = clk_bulk_get_all(simple->dev, &simple->clks);
	if (ret < 0)
		goto err_resetc_assert;
	simple->num_clocks = ret;
	ret = clk_bulk_prepare_enable(simple->num_clocks, simple->clks);
	if (ret)
		goto err_resetc_assert;
	ret = of_platform_populate(np, NULL, NULL, dev);
	if (ret)
		goto err_clk_put;
	pm_runtime_set_active(dev);
	pm_runtime_enable(dev);
	pm_runtime_get_sync(dev);
	return 0;
	/* Unwind in reverse order of acquisition */
err_clk_put:
	clk_bulk_disable_unprepare(simple->num_clocks, simple->clks);
	clk_bulk_put_all(simple->num_clocks, simple->clks);
err_resetc_assert:
	reset_control_assert(simple->resets);
err_resetc_put:
	reset_control_put(simple->resets);
	return ret;
}
/*
 * Common teardown shared by remove and shutdown: depopulate the child
 * core, release clocks and resets, and rebalance runtime PM (the get_sync
 * taken in probe).
 */
static void __dwc3_of_simple_teardown(struct dwc3_of_simple *simple)
{
	of_platform_depopulate(simple->dev);
	clk_bulk_disable_unprepare(simple->num_clocks, simple->clks);
	clk_bulk_put_all(simple->num_clocks, simple->clks);
	simple->num_clocks = 0;
	reset_control_assert(simple->resets);
	reset_control_put(simple->resets);
	pm_runtime_disable(simple->dev);
	pm_runtime_put_noidle(simple->dev);
	pm_runtime_set_suspended(simple->dev);
}
/* Driver remove: full teardown of the glue instance. */
static void dwc3_of_simple_remove(struct platform_device *pdev)
{
	__dwc3_of_simple_teardown(platform_get_drvdata(pdev));
}
/* System shutdown: same full teardown as remove. */
static void dwc3_of_simple_shutdown(struct platform_device *pdev)
{
	struct dwc3_of_simple *simple = platform_get_drvdata(pdev);
	__dwc3_of_simple_teardown(simple);
}
/* Runtime suspend: gate the clocks (they stay prepared). */
static int __maybe_unused dwc3_of_simple_runtime_suspend(struct device *dev)
{
	struct dwc3_of_simple *simple = dev_get_drvdata(dev);
	clk_bulk_disable(simple->num_clocks, simple->clks);
	return 0;
}
/* Runtime resume: ungate the (already prepared) clocks. */
static int __maybe_unused dwc3_of_simple_runtime_resume(struct device *dev)
{
	struct dwc3_of_simple *simple = dev_get_drvdata(dev);
	return clk_bulk_enable(simple->num_clocks, simple->clks);
}
/* System suspend: assert the resets on SoCs that require a reset toggle
 * (see need_reset in probe). */
static int __maybe_unused dwc3_of_simple_suspend(struct device *dev)
{
	struct dwc3_of_simple *simple = dev_get_drvdata(dev);
	if (simple->need_reset)
		reset_control_assert(simple->resets);
	return 0;
}
/* System resume: release the resets asserted in suspend. */
static int __maybe_unused dwc3_of_simple_resume(struct device *dev)
{
	struct dwc3_of_simple *simple = dev_get_drvdata(dev);
	if (simple->need_reset)
		reset_control_deassert(simple->resets);
	return 0;
}
/* PM callbacks: sleep ops toggle resets, runtime ops gate clocks */
static const struct dev_pm_ops dwc3_of_simple_dev_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(dwc3_of_simple_suspend, dwc3_of_simple_resume)
	SET_RUNTIME_PM_OPS(dwc3_of_simple_runtime_suspend,
			dwc3_of_simple_runtime_resume, NULL)
};
/* SoCs whose dwc3 wrapper needs only clocks and resets */
static const struct of_device_id of_dwc3_simple_match[] = {
	{ .compatible = "rockchip,rk3399-dwc3" },
	{ .compatible = "sprd,sc9860-dwc3" },
	{ .compatible = "allwinner,sun50i-h6-dwc3" },
	{ .compatible = "hisilicon,hi3670-dwc3" },
	{ .compatible = "intel,keembay-dwc3" },
	{ /* Sentinel */ }
};
MODULE_DEVICE_TABLE(of, of_dwc3_simple_match);
/* Platform driver glue for the generic dwc3 wrapper */
static struct platform_driver dwc3_of_simple_driver = {
	.probe		= dwc3_of_simple_probe,
	.remove_new	= dwc3_of_simple_remove,
	.shutdown	= dwc3_of_simple_shutdown,
	.driver		= {
		.name	= "dwc3-of-simple",
		.of_match_table = of_dwc3_simple_match,
		.pm	= &dwc3_of_simple_dev_pm_ops,
	},
};
module_platform_driver(dwc3_of_simple_driver);
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("DesignWare USB3 OF Simple Glue Layer");
MODULE_AUTHOR("Felipe Balbi <[email protected]>");
| linux-master | drivers/usb/dwc3/dwc3-of-simple.c |
// SPDX-License-Identifier: GPL-2.0
/*
* ulpi.c - DesignWare USB3 Controller's ULPI PHY interface
*
* Copyright (C) 2015 Intel Corporation
*
* Author: Heikki Krogerus <[email protected]>
*/
#include <linux/delay.h>
#include <linux/time64.h>
#include <linux/ulpi/regs.h>
#include "core.h"
#include "io.h"
/* Encode a ULPI register address into GUSB2PHYACC; addresses in the
 * extended-vendor range use the two-part extended access encoding. */
#define DWC3_ULPI_ADDR(a) \
		((a >= ULPI_EXT_VENDOR_SPECIFIC) ? \
		DWC3_GUSB2PHYACC_ADDR(ULPI_ACCESS_EXTENDED) | \
		DWC3_GUSB2PHYACC_EXTEND_ADDR(a) : DWC3_GUSB2PHYACC_ADDR(a))
/* One ULPI clock period in ns, assuming a 60 MHz ULPI clock */
#define DWC3_ULPI_BASE_DELAY	DIV_ROUND_UP(NSEC_PER_SEC, 60000000L)
/*
 * dwc3_ulpi_busyloop - wait for a ULPI register access to complete
 * @dwc: controller context
 * @addr: ULPI register address (extended accesses take longer)
 * @read: true for a read access (reads take one extra ULPI cycle)
 *
 * Polls GUSB2PHYACC.DONE, delaying between polls by an estimate of the
 * access time in ULPI clock cycles. Returns 0 on completion or -ETIMEDOUT.
 */
static int dwc3_ulpi_busyloop(struct dwc3 *dwc, u8 addr, bool read)
{
	unsigned long ns = 5L * DWC3_ULPI_BASE_DELAY;
	unsigned int count = 10000;
	u32 reg;
	if (addr >= ULPI_EXT_VENDOR_SPECIFIC)
		ns += DWC3_ULPI_BASE_DELAY;
	if (read)
		ns += DWC3_ULPI_BASE_DELAY;
	/* Give the PHY time to come out of suspend before polling */
	reg = dwc3_readl(dwc->regs, DWC3_GUSB2PHYCFG(0));
	if (reg & DWC3_GUSB2PHYCFG_SUSPHY)
		usleep_range(1000, 1200);
	while (count--) {
		ndelay(ns);
		reg = dwc3_readl(dwc->regs, DWC3_GUSB2PHYACC(0));
		if (reg & DWC3_GUSB2PHYACC_DONE)
			return 0;
		cpu_relax();
	}
	return -ETIMEDOUT;
}
/*
 * dwc3_ulpi_read - read one ULPI PHY register through GUSB2PHYACC
 * @dev: dwc3 device (drvdata is the dwc3 context)
 * @addr: ULPI register address
 *
 * Returns the register value on success or a negative errno on timeout.
 */
static int dwc3_ulpi_read(struct device *dev, u8 addr)
{
	struct dwc3 *dwc = dev_get_drvdata(dev);
	u32 reg;
	int ret;
	reg = DWC3_GUSB2PHYACC_NEWREGREQ | DWC3_ULPI_ADDR(addr);
	dwc3_writel(dwc->regs, DWC3_GUSB2PHYACC(0), reg);
	ret = dwc3_ulpi_busyloop(dwc, addr, true);
	if (ret)
		return ret;
	reg = dwc3_readl(dwc->regs, DWC3_GUSB2PHYACC(0));
	return DWC3_GUSB2PHYACC_DATA(reg);
}
/*
 * dwc3_ulpi_write - write one ULPI PHY register through GUSB2PHYACC
 * @dev: dwc3 device (drvdata is the dwc3 context)
 * @addr: ULPI register address
 * @val: value to write
 *
 * Returns 0 on success or a negative errno on timeout.
 */
static int dwc3_ulpi_write(struct device *dev, u8 addr, u8 val)
{
	struct dwc3 *dwc = dev_get_drvdata(dev);
	u32 reg;
	reg = DWC3_GUSB2PHYACC_NEWREGREQ | DWC3_ULPI_ADDR(addr);
	reg |= DWC3_GUSB2PHYACC_WRITE | val;
	dwc3_writel(dwc->regs, DWC3_GUSB2PHYACC(0), reg);
	return dwc3_ulpi_busyloop(dwc, addr, false);
}
/* Accessors handed to the ULPI bus core */
static const struct ulpi_ops dwc3_ulpi_ops = {
	.read = dwc3_ulpi_read,
	.write = dwc3_ulpi_write,
};
/*
 * Register this controller's ULPI interface with the ULPI bus so the PHY
 * framework can bind a PHY driver to it.
 *
 * Returns 0 on success or the PTR_ERR() of the failed registration.
 */
int dwc3_ulpi_init(struct dwc3 *dwc)
{
	/* Register the interface */
	dwc->ulpi = ulpi_register_interface(dwc->dev, &dwc3_ulpi_ops);
	if (IS_ERR(dwc->ulpi)) {
		/* Newline added: kernel log messages must be terminated. */
		dev_err(dwc->dev, "failed to register ULPI interface\n");
		return PTR_ERR(dwc->ulpi);
	}

	return 0;
}
/* Tear down the ULPI interface, if one was registered. Safe to call twice. */
void dwc3_ulpi_exit(struct dwc3 *dwc)
{
	if (!dwc->ulpi)
		return;

	ulpi_unregister_interface(dwc->ulpi);
	dwc->ulpi = NULL;	/* guard against double-unregister */
}
| linux-master | drivers/usb/dwc3/ulpi.c |
// SPDX-License-Identifier: GPL-2.0
/*
* debugfs.c - DesignWare USB3 DRD Controller DebugFS file
*
* Copyright (C) 2010-2011 Texas Instruments Incorporated - https://www.ti.com
*
* Authors: Felipe Balbi <[email protected]>,
* Sebastian Andrzej Siewior <[email protected]>
*/
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/ptrace.h>
#include <linux/types.h>
#include <linux/spinlock.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <linux/delay.h>
#include <linux/uaccess.h>
#include <linux/usb/ch9.h>
#include "core.h"
#include "gadget.h"
#include "io.h"
#include "debug.h"
/* Sentinel: no LSP mux selection has been written via debugfs yet. */
#define DWC3_LSP_MUX_UNSELECTED 0xfffff

/* Build one debugfs_reg32 entry for a global/device register. */
#define dump_register(nm)				\
{							\
	.name	= __stringify(nm),			\
	.offset	= DWC3_ ##nm,				\
}

/* Build the four per-endpoint DEPCMD* entries for endpoint n. */
#define dump_ep_register_set(n)			\
	{					\
		.name = "DEPCMDPAR2("__stringify(n)")",	\
		.offset = DWC3_DEP_BASE(n) +	\
			DWC3_DEPCMDPAR2,	\
	},					\
	{					\
		.name = "DEPCMDPAR1("__stringify(n)")",	\
		.offset = DWC3_DEP_BASE(n) +	\
			DWC3_DEPCMDPAR1,	\
	},					\
	{					\
		.name = "DEPCMDPAR0("__stringify(n)")",	\
		.offset = DWC3_DEP_BASE(n) +	\
			DWC3_DEPCMDPAR0,	\
	},					\
	{					\
		.name = "DEPCMD("__stringify(n)")",	\
		.offset = DWC3_DEP_BASE(n) +	\
			DWC3_DEPCMD,		\
	}
/*
 * Register map exposed through the debugfs "regdump" file. Offsets are
 * relative to the regset base set up in dwc3_debugfs_init().
 */
static const struct debugfs_reg32 dwc3_regs[] = {
	/* Global registers */
	dump_register(GSBUSCFG0),
	dump_register(GSBUSCFG1),
	dump_register(GTXTHRCFG),
	dump_register(GRXTHRCFG),
	dump_register(GCTL),
	dump_register(GEVTEN),
	dump_register(GSTS),
	dump_register(GUCTL1),
	dump_register(GSNPSID),
	dump_register(GGPIO),
	dump_register(GUID),
	dump_register(GUCTL),
	dump_register(GBUSERRADDR0),
	dump_register(GBUSERRADDR1),
	dump_register(GPRTBIMAP0),
	dump_register(GPRTBIMAP1),
	dump_register(GHWPARAMS0),
	dump_register(GHWPARAMS1),
	dump_register(GHWPARAMS2),
	dump_register(GHWPARAMS3),
	dump_register(GHWPARAMS4),
	dump_register(GHWPARAMS5),
	dump_register(GHWPARAMS6),
	dump_register(GHWPARAMS7),
	dump_register(GDBGFIFOSPACE),
	dump_register(GDBGLTSSM),
	dump_register(GDBGBMU),
	dump_register(GPRTBIMAP_HS0),
	dump_register(GPRTBIMAP_HS1),
	dump_register(GPRTBIMAP_FS0),
	dump_register(GPRTBIMAP_FS1),
	dump_register(GUCTL2),
	dump_register(VER_NUMBER),
	dump_register(VER_TYPE),

	/* Per-port USB2 PHY configuration */
	dump_register(GUSB2PHYCFG(0)),
	dump_register(GUSB2PHYCFG(1)),
	dump_register(GUSB2PHYCFG(2)),
	dump_register(GUSB2PHYCFG(3)),
	dump_register(GUSB2PHYCFG(4)),
	dump_register(GUSB2PHYCFG(5)),
	dump_register(GUSB2PHYCFG(6)),
	dump_register(GUSB2PHYCFG(7)),
	dump_register(GUSB2PHYCFG(8)),
	dump_register(GUSB2PHYCFG(9)),
	dump_register(GUSB2PHYCFG(10)),
	dump_register(GUSB2PHYCFG(11)),
	dump_register(GUSB2PHYCFG(12)),
	dump_register(GUSB2PHYCFG(13)),
	dump_register(GUSB2PHYCFG(14)),
	dump_register(GUSB2PHYCFG(15)),

	dump_register(GUSB2I2CCTL(0)),
	dump_register(GUSB2I2CCTL(1)),
	dump_register(GUSB2I2CCTL(2)),
	dump_register(GUSB2I2CCTL(3)),
	dump_register(GUSB2I2CCTL(4)),
	dump_register(GUSB2I2CCTL(5)),
	dump_register(GUSB2I2CCTL(6)),
	dump_register(GUSB2I2CCTL(7)),
	dump_register(GUSB2I2CCTL(8)),
	dump_register(GUSB2I2CCTL(9)),
	dump_register(GUSB2I2CCTL(10)),
	dump_register(GUSB2I2CCTL(11)),
	dump_register(GUSB2I2CCTL(12)),
	dump_register(GUSB2I2CCTL(13)),
	dump_register(GUSB2I2CCTL(14)),
	dump_register(GUSB2I2CCTL(15)),

	dump_register(GUSB2PHYACC(0)),
	dump_register(GUSB2PHYACC(1)),
	dump_register(GUSB2PHYACC(2)),
	dump_register(GUSB2PHYACC(3)),
	dump_register(GUSB2PHYACC(4)),
	dump_register(GUSB2PHYACC(5)),
	dump_register(GUSB2PHYACC(6)),
	dump_register(GUSB2PHYACC(7)),
	dump_register(GUSB2PHYACC(8)),
	dump_register(GUSB2PHYACC(9)),
	dump_register(GUSB2PHYACC(10)),
	dump_register(GUSB2PHYACC(11)),
	dump_register(GUSB2PHYACC(12)),
	dump_register(GUSB2PHYACC(13)),
	dump_register(GUSB2PHYACC(14)),
	dump_register(GUSB2PHYACC(15)),

	/* Per-port USB3 pipe control */
	dump_register(GUSB3PIPECTL(0)),
	dump_register(GUSB3PIPECTL(1)),
	dump_register(GUSB3PIPECTL(2)),
	dump_register(GUSB3PIPECTL(3)),
	dump_register(GUSB3PIPECTL(4)),
	dump_register(GUSB3PIPECTL(5)),
	dump_register(GUSB3PIPECTL(6)),
	dump_register(GUSB3PIPECTL(7)),
	dump_register(GUSB3PIPECTL(8)),
	dump_register(GUSB3PIPECTL(9)),
	dump_register(GUSB3PIPECTL(10)),
	dump_register(GUSB3PIPECTL(11)),
	dump_register(GUSB3PIPECTL(12)),
	dump_register(GUSB3PIPECTL(13)),
	dump_register(GUSB3PIPECTL(14)),
	dump_register(GUSB3PIPECTL(15)),

	/* TX/RX FIFO sizing */
	dump_register(GTXFIFOSIZ(0)),
	dump_register(GTXFIFOSIZ(1)),
	dump_register(GTXFIFOSIZ(2)),
	dump_register(GTXFIFOSIZ(3)),
	dump_register(GTXFIFOSIZ(4)),
	dump_register(GTXFIFOSIZ(5)),
	dump_register(GTXFIFOSIZ(6)),
	dump_register(GTXFIFOSIZ(7)),
	dump_register(GTXFIFOSIZ(8)),
	dump_register(GTXFIFOSIZ(9)),
	dump_register(GTXFIFOSIZ(10)),
	dump_register(GTXFIFOSIZ(11)),
	dump_register(GTXFIFOSIZ(12)),
	dump_register(GTXFIFOSIZ(13)),
	dump_register(GTXFIFOSIZ(14)),
	dump_register(GTXFIFOSIZ(15)),
	dump_register(GTXFIFOSIZ(16)),
	dump_register(GTXFIFOSIZ(17)),
	dump_register(GTXFIFOSIZ(18)),
	dump_register(GTXFIFOSIZ(19)),
	dump_register(GTXFIFOSIZ(20)),
	dump_register(GTXFIFOSIZ(21)),
	dump_register(GTXFIFOSIZ(22)),
	dump_register(GTXFIFOSIZ(23)),
	dump_register(GTXFIFOSIZ(24)),
	dump_register(GTXFIFOSIZ(25)),
	dump_register(GTXFIFOSIZ(26)),
	dump_register(GTXFIFOSIZ(27)),
	dump_register(GTXFIFOSIZ(28)),
	dump_register(GTXFIFOSIZ(29)),
	dump_register(GTXFIFOSIZ(30)),
	dump_register(GTXFIFOSIZ(31)),

	dump_register(GRXFIFOSIZ(0)),
	dump_register(GRXFIFOSIZ(1)),
	dump_register(GRXFIFOSIZ(2)),
	dump_register(GRXFIFOSIZ(3)),
	dump_register(GRXFIFOSIZ(4)),
	dump_register(GRXFIFOSIZ(5)),
	dump_register(GRXFIFOSIZ(6)),
	dump_register(GRXFIFOSIZ(7)),
	dump_register(GRXFIFOSIZ(8)),
	dump_register(GRXFIFOSIZ(9)),
	dump_register(GRXFIFOSIZ(10)),
	dump_register(GRXFIFOSIZ(11)),
	dump_register(GRXFIFOSIZ(12)),
	dump_register(GRXFIFOSIZ(13)),
	dump_register(GRXFIFOSIZ(14)),
	dump_register(GRXFIFOSIZ(15)),
	dump_register(GRXFIFOSIZ(16)),
	dump_register(GRXFIFOSIZ(17)),
	dump_register(GRXFIFOSIZ(18)),
	dump_register(GRXFIFOSIZ(19)),
	dump_register(GRXFIFOSIZ(20)),
	dump_register(GRXFIFOSIZ(21)),
	dump_register(GRXFIFOSIZ(22)),
	dump_register(GRXFIFOSIZ(23)),
	dump_register(GRXFIFOSIZ(24)),
	dump_register(GRXFIFOSIZ(25)),
	dump_register(GRXFIFOSIZ(26)),
	dump_register(GRXFIFOSIZ(27)),
	dump_register(GRXFIFOSIZ(28)),
	dump_register(GRXFIFOSIZ(29)),
	dump_register(GRXFIFOSIZ(30)),
	dump_register(GRXFIFOSIZ(31)),

	/* Event buffer 0 */
	dump_register(GEVNTADRLO(0)),
	dump_register(GEVNTADRHI(0)),
	dump_register(GEVNTSIZ(0)),
	dump_register(GEVNTCOUNT(0)),

	dump_register(GHWPARAMS8),
	dump_register(GUCTL3),
	dump_register(GFLADJ),

	/* Device registers */
	dump_register(DCFG),
	dump_register(DCTL),
	dump_register(DEVTEN),
	dump_register(DSTS),
	dump_register(DGCMDPAR),
	dump_register(DGCMD),
	dump_register(DALEPENA),

	/* Per-endpoint command registers */
	dump_ep_register_set(0),
	dump_ep_register_set(1),
	dump_ep_register_set(2),
	dump_ep_register_set(3),
	dump_ep_register_set(4),
	dump_ep_register_set(5),
	dump_ep_register_set(6),
	dump_ep_register_set(7),
	dump_ep_register_set(8),
	dump_ep_register_set(9),
	dump_ep_register_set(10),
	dump_ep_register_set(11),
	dump_ep_register_set(12),
	dump_ep_register_set(13),
	dump_ep_register_set(14),
	dump_ep_register_set(15),
	dump_ep_register_set(16),
	dump_ep_register_set(17),
	dump_ep_register_set(18),
	dump_ep_register_set(19),
	dump_ep_register_set(20),
	dump_ep_register_set(21),
	dump_ep_register_set(22),
	dump_ep_register_set(23),
	dump_ep_register_set(24),
	dump_ep_register_set(25),
	dump_ep_register_set(26),
	dump_ep_register_set(27),
	dump_ep_register_set(28),
	dump_ep_register_set(29),
	dump_ep_register_set(30),
	dump_ep_register_set(31),

	/* OTG registers */
	dump_register(OCFG),
	dump_register(OCTL),
	dump_register(OEVT),
	dump_register(OEVTEN),
	dump_register(OSTS),
};
/*
 * Print the host-side link state probe (LSP) register for the mux selection
 * previously written through the "lsp_dump" debugfs file. Also dumps the
 * Debug Capability (DbC) view when the hardware supports it.
 */
static void dwc3_host_lsp(struct seq_file *s)
{
	struct dwc3 *dwc = s->private;
	u32 sel = dwc->dbg_lsp_select;
	bool has_dbc;
	u32 mux;
	u32 lsp;

	has_dbc = !!(dwc->hwparams.hwparams1 & DWC3_GHWPARAMS1_ENDBC);

	if (sel == DWC3_LSP_MUX_UNSELECTED) {
		seq_puts(s, "Write LSP selection to print for host\n");
		return;
	}

	/* Select the probe, then read the latched LSP value. */
	mux = DWC3_GDBGLSPMUX_HOSTSELECT(sel);
	dwc3_writel(dwc->regs, DWC3_GDBGLSPMUX, mux);
	lsp = dwc3_readl(dwc->regs, DWC3_GDBGLSP);
	seq_printf(s, "GDBGLSP[%d] = 0x%08x\n", sel, lsp);

	if (has_dbc && sel < 256) {
		mux |= DWC3_GDBGLSPMUX_ENDBC;
		dwc3_writel(dwc->regs, DWC3_GDBGLSPMUX, mux);
		lsp = dwc3_readl(dwc->regs, DWC3_GDBGLSP);
		seq_printf(s, "GDBGLSP_DBC[%d] = 0x%08x\n", sel, lsp);
	}
}
/* Dump all 16 device-mode LSP mux selections, one line each. */
static void dwc3_gadget_lsp(struct seq_file *s)
{
	struct dwc3 *dwc = s->private;
	u32 val;
	int sel;

	for (sel = 0; sel < 16; sel++) {
		dwc3_writel(dwc->regs, DWC3_GDBGLSPMUX,
			    DWC3_GDBGLSPMUX_DEVSELECT(sel));
		val = dwc3_readl(dwc->regs, DWC3_GDBGLSP);
		seq_printf(s, "GDBGLSP[%d] = 0x%08x\n", sel, val);
	}
}
/*
 * "lsp_dump" read handler: dispatch to the host or gadget dump depending on
 * the controller's current operating mode.
 */
static int dwc3_lsp_show(struct seq_file *s, void *unused)
{
	struct dwc3 *dwc = s->private;
	unsigned long flags;
	unsigned int mode;
	u32 gsts;
	int ret;

	ret = pm_runtime_resume_and_get(dwc->dev);
	if (ret < 0)
		return ret;

	spin_lock_irqsave(&dwc->lock, flags);
	gsts = dwc3_readl(dwc->regs, DWC3_GSTS);
	mode = DWC3_GSTS_CURMOD(gsts);

	switch (mode) {
	case DWC3_GSTS_CURMOD_HOST:
		dwc3_host_lsp(s);
		break;
	case DWC3_GSTS_CURMOD_DEVICE:
		dwc3_gadget_lsp(s);
		break;
	default:
		seq_puts(s, "Mode is unknown, no LSP register printed\n");
		break;
	}
	spin_unlock_irqrestore(&dwc->lock, flags);

	pm_runtime_put_sync(dwc->dev);

	return 0;
}
/* seq_file open hook; i_private carries the struct dwc3 *. */
static int dwc3_lsp_open(struct inode *inode, struct file *file)
{
	return single_open(file, dwc3_lsp_show, inode->i_private);
}
/*
 * "lsp_dump" write handler: parse an unsigned integer from userspace and
 * store it as the LSP mux selection used by the next read.
 */
static ssize_t dwc3_lsp_write(struct file *file, const char __user *ubuf,
		size_t count, loff_t *ppos)
{
	struct seq_file *s = file->private_data;
	struct dwc3 *dwc = s->private;
	unsigned long flags;
	char input[32] = { 0 };	/* zero-filled so kstrtouint sees a string */
	u32 sel;
	int err;

	if (copy_from_user(&input, ubuf, min_t(size_t, sizeof(input) - 1, count)))
		return -EFAULT;

	err = kstrtouint(input, 0, &sel);
	if (err)
		return err;

	spin_lock_irqsave(&dwc->lock, flags);
	dwc->dbg_lsp_select = sel;
	spin_unlock_irqrestore(&dwc->lock, flags);

	return count;
}
/* File operations for the "lsp_dump" debugfs entry (seq_file + write). */
static const struct file_operations dwc3_lsp_fops = {
	.open = dwc3_lsp_open,
	.write = dwc3_lsp_write,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};
/* "mode" read handler: report the current PRTCAP setting from GCTL. */
static int dwc3_mode_show(struct seq_file *s, void *unused)
{
	struct dwc3 *dwc = s->private;
	unsigned long flags;
	u32 gctl;
	int ret;

	ret = pm_runtime_resume_and_get(dwc->dev);
	if (ret < 0)
		return ret;

	spin_lock_irqsave(&dwc->lock, flags);
	gctl = dwc3_readl(dwc->regs, DWC3_GCTL);
	spin_unlock_irqrestore(&dwc->lock, flags);

	switch (DWC3_GCTL_PRTCAP(gctl)) {
	case DWC3_GCTL_PRTCAP_HOST:
		seq_puts(s, "host\n");
		break;
	case DWC3_GCTL_PRTCAP_DEVICE:
		seq_puts(s, "device\n");
		break;
	case DWC3_GCTL_PRTCAP_OTG:
		seq_puts(s, "otg\n");
		break;
	default:
		seq_printf(s, "UNKNOWN %08x\n", DWC3_GCTL_PRTCAP(gctl));
	}

	pm_runtime_put_sync(dwc->dev);

	return 0;
}
/* seq_file open hook for the "mode" debugfs entry. */
static int dwc3_mode_open(struct inode *inode, struct file *file)
{
	return single_open(file, dwc3_mode_show, inode->i_private);
}
/*
 * "mode" write handler: switch PRTCAP between host/device/otg on dual-role
 * builds. Accepts the strings "host", "device" or "otg"; anything else
 * requests mode 0 (unchanged upstream behavior).
 *
 * Fix: buf was previously uninitialized; a write shorter than the compared
 * prefix let strncmp() read leftover stack bytes. Zero-initialize it, as
 * dwc3_lsp_write() already does.
 */
static ssize_t dwc3_mode_write(struct file *file,
		const char __user *ubuf, size_t count, loff_t *ppos)
{
	struct seq_file *s = file->private_data;
	struct dwc3 *dwc = s->private;
	u32 mode = 0;
	char buf[32] = { 0 };

	if (copy_from_user(&buf, ubuf, min_t(size_t, sizeof(buf) - 1, count)))
		return -EFAULT;

	/* Mode switching only makes sense on OTG-capable configurations. */
	if (dwc->dr_mode != USB_DR_MODE_OTG)
		return count;

	if (!strncmp(buf, "host", 4))
		mode = DWC3_GCTL_PRTCAP_HOST;

	if (!strncmp(buf, "device", 6))
		mode = DWC3_GCTL_PRTCAP_DEVICE;

	if (!strncmp(buf, "otg", 3))
		mode = DWC3_GCTL_PRTCAP_OTG;

	dwc3_set_mode(dwc, mode);

	return count;
}
/* File operations for the "mode" debugfs entry. */
static const struct file_operations dwc3_mode_fops = {
	.open = dwc3_mode_open,
	.write = dwc3_mode_write,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};
/* "testmode" read handler: decode DCTL's TSTCTRL field into a name. */
static int dwc3_testmode_show(struct seq_file *s, void *unused)
{
	struct dwc3 *dwc = s->private;
	unsigned long flags;
	u32 mode;
	int ret;

	ret = pm_runtime_resume_and_get(dwc->dev);
	if (ret < 0)
		return ret;

	spin_lock_irqsave(&dwc->lock, flags);
	/* TSTCTRL occupies bits [4:1] of DCTL. */
	mode = (dwc3_readl(dwc->regs, DWC3_DCTL) & DWC3_DCTL_TSTCTRL_MASK) >> 1;
	spin_unlock_irqrestore(&dwc->lock, flags);

	switch (mode) {
	case 0:
		seq_puts(s, "no test\n");
		break;
	case USB_TEST_J:
		seq_puts(s, "test_j\n");
		break;
	case USB_TEST_K:
		seq_puts(s, "test_k\n");
		break;
	case USB_TEST_SE0_NAK:
		seq_puts(s, "test_se0_nak\n");
		break;
	case USB_TEST_PACKET:
		seq_puts(s, "test_packet\n");
		break;
	case USB_TEST_FORCE_ENABLE:
		seq_puts(s, "test_force_enable\n");
		break;
	default:
		seq_printf(s, "UNKNOWN %d\n", mode);
	}

	pm_runtime_put_sync(dwc->dev);

	return 0;
}
/* seq_file open hook for the "testmode" debugfs entry. */
static int dwc3_testmode_open(struct inode *inode, struct file *file)
{
	return single_open(file, dwc3_testmode_show, inode->i_private);
}
/*
 * "testmode" write handler: program a USB2 electrical test mode into DCTL.
 * Unrecognized input selects mode 0 (no test).
 *
 * Fix: buf was previously uninitialized; a short userspace write let
 * strncmp() compare against leftover stack bytes. Zero-initialize it.
 */
static ssize_t dwc3_testmode_write(struct file *file,
		const char __user *ubuf, size_t count, loff_t *ppos)
{
	struct seq_file *s = file->private_data;
	struct dwc3 *dwc = s->private;
	unsigned long flags;
	u32 testmode = 0;
	char buf[32] = { 0 };
	int ret;

	if (copy_from_user(&buf, ubuf, min_t(size_t, sizeof(buf) - 1, count)))
		return -EFAULT;

	if (!strncmp(buf, "test_j", 6))
		testmode = USB_TEST_J;
	else if (!strncmp(buf, "test_k", 6))
		testmode = USB_TEST_K;
	else if (!strncmp(buf, "test_se0_nak", 12))
		testmode = USB_TEST_SE0_NAK;
	else if (!strncmp(buf, "test_packet", 11))
		testmode = USB_TEST_PACKET;
	else if (!strncmp(buf, "test_force_enable", 17))
		testmode = USB_TEST_FORCE_ENABLE;
	else
		testmode = 0;

	ret = pm_runtime_resume_and_get(dwc->dev);
	if (ret < 0)
		return ret;

	spin_lock_irqsave(&dwc->lock, flags);
	dwc3_gadget_set_test_mode(dwc, testmode);
	spin_unlock_irqrestore(&dwc->lock, flags);

	pm_runtime_put_sync(dwc->dev);

	return count;
}
/* File operations for the "testmode" debugfs entry. */
static const struct file_operations dwc3_testmode_fops = {
	.open = dwc3_testmode_open,
	.write = dwc3_testmode_write,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};
/*
 * "link_state" read handler: print the device-mode USB link state.
 * Only meaningful while the controller is in device mode; the SS and
 * HS/FS link state encodings differ, so pick the string table by speed.
 */
static int dwc3_link_state_show(struct seq_file *s, void *unused)
{
	struct dwc3 *dwc = s->private;
	unsigned long flags;
	enum dwc3_link_state state;
	u32 reg;
	u8 speed;
	int ret;

	ret = pm_runtime_resume_and_get(dwc->dev);
	if (ret < 0)
		return ret;

	spin_lock_irqsave(&dwc->lock, flags);
	reg = dwc3_readl(dwc->regs, DWC3_GSTS);
	/* DSTS.USBLNKST is only valid in device mode. */
	if (DWC3_GSTS_CURMOD(reg) != DWC3_GSTS_CURMOD_DEVICE) {
		seq_puts(s, "Not available\n");
		spin_unlock_irqrestore(&dwc->lock, flags);
		pm_runtime_put_sync(dwc->dev);
		return 0;
	}

	reg = dwc3_readl(dwc->regs, DWC3_DSTS);
	state = DWC3_DSTS_USBLNKST(reg);
	speed = reg & DWC3_DSTS_CONNECTSPD;

	seq_printf(s, "%s\n", (speed >= DWC3_DSTS_SUPERSPEED) ?
		   dwc3_gadget_link_string(state) :
		   dwc3_gadget_hs_link_string(state));
	spin_unlock_irqrestore(&dwc->lock, flags);

	pm_runtime_put_sync(dwc->dev);

	return 0;
}
/* seq_file open hook for the "link_state" debugfs entry. */
static int dwc3_link_state_open(struct inode *inode, struct file *file)
{
	return single_open(file, dwc3_link_state_show, inode->i_private);
}
/*
 * "link_state" write handler: request a device-mode link state transition.
 * Rejects the request unless the controller is in device mode, and (below
 * SuperSpeed) only allows a transition to Recovery.
 *
 * Fix: buf was previously uninitialized; a short userspace write let
 * strncmp() compare against leftover stack bytes. Zero-initialize it.
 */
static ssize_t dwc3_link_state_write(struct file *file,
		const char __user *ubuf, size_t count, loff_t *ppos)
{
	struct seq_file *s = file->private_data;
	struct dwc3 *dwc = s->private;
	unsigned long flags;
	enum dwc3_link_state state = 0;
	char buf[32] = { 0 };
	u32 reg;
	u8 speed;
	int ret;

	if (copy_from_user(&buf, ubuf, min_t(size_t, sizeof(buf) - 1, count)))
		return -EFAULT;

	if (!strncmp(buf, "SS.Disabled", 11))
		state = DWC3_LINK_STATE_SS_DIS;
	else if (!strncmp(buf, "Rx.Detect", 9))
		state = DWC3_LINK_STATE_RX_DET;
	else if (!strncmp(buf, "SS.Inactive", 11))
		state = DWC3_LINK_STATE_SS_INACT;
	else if (!strncmp(buf, "Recovery", 8))
		state = DWC3_LINK_STATE_RECOV;
	else if (!strncmp(buf, "Compliance", 10))
		state = DWC3_LINK_STATE_CMPLY;
	else if (!strncmp(buf, "Loopback", 8))
		state = DWC3_LINK_STATE_LPBK;
	else
		return -EINVAL;

	ret = pm_runtime_resume_and_get(dwc->dev);
	if (ret < 0)
		return ret;

	spin_lock_irqsave(&dwc->lock, flags);
	reg = dwc3_readl(dwc->regs, DWC3_GSTS);
	if (DWC3_GSTS_CURMOD(reg) != DWC3_GSTS_CURMOD_DEVICE) {
		spin_unlock_irqrestore(&dwc->lock, flags);
		pm_runtime_put_sync(dwc->dev);
		return -EINVAL;
	}

	reg = dwc3_readl(dwc->regs, DWC3_DSTS);
	speed = reg & DWC3_DSTS_CONNECTSPD;

	/* Sub-SuperSpeed links may only be forced into Recovery. */
	if (speed < DWC3_DSTS_SUPERSPEED &&
	    state != DWC3_LINK_STATE_RECOV) {
		spin_unlock_irqrestore(&dwc->lock, flags);
		pm_runtime_put_sync(dwc->dev);
		return -EINVAL;
	}

	dwc3_gadget_set_link_state(dwc, state);
	spin_unlock_irqrestore(&dwc->lock, flags);

	pm_runtime_put_sync(dwc->dev);

	return count;
}
/* File operations for the "link_state" debugfs entry. */
static const struct file_operations dwc3_link_state_fops = {
	.open = dwc3_link_state_open,
	.write = dwc3_link_state_write,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};
/* Pairs a per-endpoint debugfs file name with its file operations. */
struct dwc3_ep_file_map {
	const char name[25];
	const struct file_operations *const fops;
};
/* Report the endpoint's free TX FIFO space, converted to bytes. */
static int dwc3_tx_fifo_size_show(struct seq_file *s, void *unused)
{
	struct dwc3_ep *dep = s->private;
	struct dwc3 *dwc = dep->dwc;
	unsigned long flags;
	u32 nbytes;
	int ret;

	ret = pm_runtime_resume_and_get(dwc->dev);
	if (ret < 0)
		return ret;

	spin_lock_irqsave(&dwc->lock, flags);
	/* Hardware reports mdwidth-sized words; scale to bytes (/8 bits). */
	nbytes = dwc3_core_fifo_space(dep, DWC3_TXFIFO);
	nbytes = (nbytes * dwc3_mdwidth(dwc)) >> 3;
	seq_printf(s, "%u\n", nbytes);
	spin_unlock_irqrestore(&dwc->lock, flags);

	pm_runtime_put_sync(dwc->dev);

	return 0;
}
/* Report the endpoint's free RX FIFO space, converted to bytes. */
static int dwc3_rx_fifo_size_show(struct seq_file *s, void *unused)
{
	struct dwc3_ep *dep = s->private;
	struct dwc3 *dwc = dep->dwc;
	unsigned long flags;
	u32 nbytes;
	int ret;

	ret = pm_runtime_resume_and_get(dwc->dev);
	if (ret < 0)
		return ret;

	spin_lock_irqsave(&dwc->lock, flags);
	/* Hardware reports mdwidth-sized words; scale to bytes (/8 bits). */
	nbytes = dwc3_core_fifo_space(dep, DWC3_RXFIFO);
	nbytes = (nbytes * dwc3_mdwidth(dwc)) >> 3;
	seq_printf(s, "%u\n", nbytes);
	spin_unlock_irqrestore(&dwc->lock, flags);

	pm_runtime_put_sync(dwc->dev);

	return 0;
}
/* Report the endpoint's TX request queue occupancy. */
static int dwc3_tx_request_queue_show(struct seq_file *s, void *unused)
{
	struct dwc3_ep *dep = s->private;
	struct dwc3 *dwc = dep->dwc;
	unsigned long flags;
	u32 depth;
	int ret;

	ret = pm_runtime_resume_and_get(dwc->dev);
	if (ret < 0)
		return ret;

	spin_lock_irqsave(&dwc->lock, flags);
	depth = dwc3_core_fifo_space(dep, DWC3_TXREQQ);
	seq_printf(s, "%u\n", depth);
	spin_unlock_irqrestore(&dwc->lock, flags);

	pm_runtime_put_sync(dwc->dev);

	return 0;
}
/* Report the endpoint's RX request queue occupancy. */
static int dwc3_rx_request_queue_show(struct seq_file *s, void *unused)
{
	struct dwc3_ep *dep = s->private;
	struct dwc3 *dwc = dep->dwc;
	unsigned long flags;
	u32 depth;
	int ret;

	ret = pm_runtime_resume_and_get(dwc->dev);
	if (ret < 0)
		return ret;

	spin_lock_irqsave(&dwc->lock, flags);
	depth = dwc3_core_fifo_space(dep, DWC3_RXREQQ);
	seq_printf(s, "%u\n", depth);
	spin_unlock_irqrestore(&dwc->lock, flags);

	pm_runtime_put_sync(dwc->dev);

	return 0;
}
/* Report the endpoint's RX info queue occupancy. */
static int dwc3_rx_info_queue_show(struct seq_file *s, void *unused)
{
	struct dwc3_ep *dep = s->private;
	struct dwc3 *dwc = dep->dwc;
	unsigned long flags;
	u32 depth;
	int ret;

	ret = pm_runtime_resume_and_get(dwc->dev);
	if (ret < 0)
		return ret;

	spin_lock_irqsave(&dwc->lock, flags);
	depth = dwc3_core_fifo_space(dep, DWC3_RXINFOQ);
	seq_printf(s, "%u\n", depth);
	spin_unlock_irqrestore(&dwc->lock, flags);

	pm_runtime_put_sync(dwc->dev);

	return 0;
}
/* Report the endpoint's descriptor fetch queue occupancy. */
static int dwc3_descriptor_fetch_queue_show(struct seq_file *s, void *unused)
{
	struct dwc3_ep *dep = s->private;
	struct dwc3 *dwc = dep->dwc;
	unsigned long flags;
	u32 depth;
	int ret;

	ret = pm_runtime_resume_and_get(dwc->dev);
	if (ret < 0)
		return ret;

	spin_lock_irqsave(&dwc->lock, flags);
	depth = dwc3_core_fifo_space(dep, DWC3_DESCFETCHQ);
	seq_printf(s, "%u\n", depth);
	spin_unlock_irqrestore(&dwc->lock, flags);

	pm_runtime_put_sync(dwc->dev);

	return 0;
}
/* Report the endpoint's event queue occupancy. */
static int dwc3_event_queue_show(struct seq_file *s, void *unused)
{
	struct dwc3_ep *dep = s->private;
	struct dwc3 *dwc = dep->dwc;
	unsigned long flags;
	u32 depth;
	int ret;

	ret = pm_runtime_resume_and_get(dwc->dev);
	if (ret < 0)
		return ret;

	spin_lock_irqsave(&dwc->lock, flags);
	depth = dwc3_core_fifo_space(dep, DWC3_EVENTQ);
	seq_printf(s, "%u\n", depth);
	spin_unlock_irqrestore(&dwc->lock, flags);

	pm_runtime_put_sync(dwc->dev);

	return 0;
}
/* Report the endpoint's USB transfer type, or "--" if not configured. */
static int dwc3_transfer_type_show(struct seq_file *s, void *unused)
{
	struct dwc3_ep *dep = s->private;
	struct dwc3 *dwc = dep->dwc;
	unsigned long flags;
	const char *type = "--";

	spin_lock_irqsave(&dwc->lock, flags);
	/* A disabled endpoint has no (valid) descriptor to classify. */
	if ((dep->flags & DWC3_EP_ENABLED) && dep->endpoint.desc) {
		switch (usb_endpoint_type(dep->endpoint.desc)) {
		case USB_ENDPOINT_XFER_CONTROL:
			type = "control";
			break;
		case USB_ENDPOINT_XFER_ISOC:
			type = "isochronous";
			break;
		case USB_ENDPOINT_XFER_BULK:
			type = "bulk";
			break;
		case USB_ENDPOINT_XFER_INT:
			type = "interrupt";
			break;
		default:
			break;
		}
	}
	seq_printf(s, "%s\n", type);
	spin_unlock_irqrestore(&dwc->lock, flags);

	return 0;
}
/*
 * Dump the endpoint's TRB ring as CSV, one TRB per line, marking the
 * enqueue ('E') and dequeue ('D') positions. ep0/ep1 share a single TRB
 * managed elsewhere, so they print "--".
 */
static int dwc3_trb_ring_show(struct seq_file *s, void *unused)
{
	struct dwc3_ep *dep = s->private;
	struct dwc3 *dwc = dep->dwc;
	unsigned long flags;
	int i;
	int ret;

	ret = pm_runtime_resume_and_get(dwc->dev);
	if (ret < 0)
		return ret;

	spin_lock_irqsave(&dwc->lock, flags);
	if (dep->number <= 1) {
		seq_puts(s, "--\n");
		goto out;
	}

	seq_puts(s, "buffer_addr,size,type,ioc,isp_imi,csp,chn,lst,hwo\n");

	for (i = 0; i < DWC3_TRB_NUM; i++) {
		struct dwc3_trb *trb = &dep->trb_pool[i];
		unsigned int type = DWC3_TRBCTL_TYPE(trb->ctrl);

		seq_printf(s, "%08x%08x,%d,%s,%d,%d,%d,%d,%d,%d %c%c\n",
				trb->bph, trb->bpl, trb->size,
				dwc3_trb_type_string(type),
				!!(trb->ctrl & DWC3_TRB_CTRL_IOC),
				!!(trb->ctrl & DWC3_TRB_CTRL_ISP_IMI),
				!!(trb->ctrl & DWC3_TRB_CTRL_CSP),
				!!(trb->ctrl & DWC3_TRB_CTRL_CHN),
				!!(trb->ctrl & DWC3_TRB_CTRL_LST),
				!!(trb->ctrl & DWC3_TRB_CTRL_HWO),
				dep->trb_enqueue == i ? 'E' : ' ',
				dep->trb_dequeue == i ? 'D' : ' ');
	}

out:
	spin_unlock_irqrestore(&dwc->lock, flags);

	pm_runtime_put_sync(dwc->dev);

	return 0;
}
static int dwc3_ep_info_register_show(struct seq_file *s, void *unused)
{
struct dwc3_ep *dep = s->private;
struct dwc3 *dwc = dep->dwc;
unsigned long flags;
u64 ep_info;
u32 lower_32_bits;
u32 upper_32_bits;
u32 reg;
int ret;
ret = pm_runtime_resume_and_get(dwc->dev);
if (ret < 0)
return ret;
spin_lock_irqsave(&dwc->lock, flags);
reg = DWC3_GDBGLSPMUX_EPSELECT(dep->number);
dwc3_writel(dwc->regs, DWC3_GDBGLSPMUX, reg);
lower_32_bits = dwc3_readl(dwc->regs, DWC3_GDBGEPINFO0);
upper_32_bits = dwc3_readl(dwc->regs, DWC3_GDBGEPINFO1);
ep_info = ((u64)upper_32_bits << 32) | lower_32_bits;
seq_printf(s, "0x%016llx\n", ep_info);
spin_unlock_irqrestore(&dwc->lock, flags);
pm_runtime_put_sync(dwc->dev);
return 0;
}
/* Generate <name>_fops (read-only seq_file) for each show function above. */
DEFINE_SHOW_ATTRIBUTE(dwc3_tx_fifo_size);
DEFINE_SHOW_ATTRIBUTE(dwc3_rx_fifo_size);
DEFINE_SHOW_ATTRIBUTE(dwc3_tx_request_queue);
DEFINE_SHOW_ATTRIBUTE(dwc3_rx_request_queue);
DEFINE_SHOW_ATTRIBUTE(dwc3_rx_info_queue);
DEFINE_SHOW_ATTRIBUTE(dwc3_descriptor_fetch_queue);
DEFINE_SHOW_ATTRIBUTE(dwc3_event_queue);
DEFINE_SHOW_ATTRIBUTE(dwc3_transfer_type);
DEFINE_SHOW_ATTRIBUTE(dwc3_trb_ring);
DEFINE_SHOW_ATTRIBUTE(dwc3_ep_info_register);
/* Files created under each endpoint's debugfs directory. */
static const struct dwc3_ep_file_map dwc3_ep_file_map[] = {
	{ "tx_fifo_size", &dwc3_tx_fifo_size_fops, },
	{ "rx_fifo_size", &dwc3_rx_fifo_size_fops, },
	{ "tx_request_queue", &dwc3_tx_request_queue_fops, },
	{ "rx_request_queue", &dwc3_rx_request_queue_fops, },
	{ "rx_info_queue", &dwc3_rx_info_queue_fops, },
	{ "descriptor_fetch_queue", &dwc3_descriptor_fetch_queue_fops, },
	{ "event_queue", &dwc3_event_queue_fops, },
	{ "transfer_type", &dwc3_transfer_type_fops, },
	{ "trb_ring", &dwc3_trb_ring_fops, },
	{ "GDBGEPINFO", &dwc3_ep_info_register_fops, },
};
void dwc3_debugfs_create_endpoint_dir(struct dwc3_ep *dep)
{
struct dentry *dir;
int i;
dir = debugfs_create_dir(dep->name, dep->dwc->debug_root);
for (i = 0; i < ARRAY_SIZE(dwc3_ep_file_map); i++) {
const struct file_operations *fops = dwc3_ep_file_map[i].fops;
const char *name = dwc3_ep_file_map[i].name;
debugfs_create_file(name, 0444, dir, dep, fops);
}
}
/* Remove the endpoint's debugfs directory created above. */
void dwc3_debugfs_remove_endpoint_dir(struct dwc3_ep *dep)
{
	debugfs_lookup_and_remove(dep->name, dep->dwc->debug_root);
}
/*
 * Create the controller's debugfs directory and its files. Mode and
 * testmode/link_state files are only created for configurations where they
 * make sense (dual-role resp. gadget-capable builds). Silently bails out
 * if the regset allocation fails (debugfs is best-effort).
 */
void dwc3_debugfs_init(struct dwc3 *dwc)
{
	struct dentry *root;

	dwc->regset = kzalloc(sizeof(*dwc->regset), GFP_KERNEL);
	if (!dwc->regset)
		return;

	dwc->dbg_lsp_select = DWC3_LSP_MUX_UNSELECTED;

	dwc->regset->regs = dwc3_regs;
	dwc->regset->nregs = ARRAY_SIZE(dwc3_regs);
	/* Offsets in dwc3_regs include the globals block; rebase for it. */
	dwc->regset->base = dwc->regs - DWC3_GLOBALS_REGS_START;
	dwc->regset->dev = dwc->dev;

	root = debugfs_create_dir(dev_name(dwc->dev), usb_debug_root);
	dwc->debug_root = root;
	debugfs_create_regset32("regdump", 0444, root, dwc->regset);
	debugfs_create_file("lsp_dump", 0644, root, dwc, &dwc3_lsp_fops);

	if (IS_ENABLED(CONFIG_USB_DWC3_DUAL_ROLE))
		debugfs_create_file("mode", 0644, root, dwc,
				    &dwc3_mode_fops);

	if (IS_ENABLED(CONFIG_USB_DWC3_DUAL_ROLE) ||
	    IS_ENABLED(CONFIG_USB_DWC3_GADGET)) {
		debugfs_create_file("testmode", 0644, root, dwc,
				    &dwc3_testmode_fops);
		debugfs_create_file("link_state", 0644, root, dwc,
				    &dwc3_link_state_fops);
	}
}
/* Remove the controller's debugfs tree and free the register set. */
void dwc3_debugfs_exit(struct dwc3 *dwc)
{
	debugfs_lookup_and_remove(dev_name(dwc->dev), usb_debug_root);
	kfree(dwc->regset);
}
| linux-master | drivers/usb/dwc3/debugfs.c |
// SPDX-License-Identifier: GPL-2.0
/*
* ep0.c - DesignWare USB3 DRD Controller Endpoint 0 Handling
*
* Copyright (C) 2010-2011 Texas Instruments Incorporated - https://www.ti.com
*
* Authors: Felipe Balbi <[email protected]>,
* Sebastian Andrzej Siewior <[email protected]>
*/
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/list.h>
#include <linux/dma-mapping.h>
#include <linux/usb/ch9.h>
#include <linux/usb/gadget.h>
#include <linux/usb/composite.h>
#include "core.h"
#include "debug.h"
#include "gadget.h"
#include "io.h"
static void __dwc3_ep0_do_control_status(struct dwc3 *dwc, struct dwc3_ep *dep);
static void __dwc3_ep0_do_control_data(struct dwc3 *dwc,
struct dwc3_ep *dep, struct dwc3_request *req);
static int dwc3_ep0_delegate_req(struct dwc3 *dwc,
struct usb_ctrlrequest *ctrl);
/*
 * Fill in one ep0 TRB at the current enqueue position.
 *
 * @buf_dma: DMA address of the data buffer
 * @len:     buffer length in bytes
 * @type:    DWC3_TRBCTL_* control-transfer TRB type
 * @chain:   true to chain to the next TRB (advances trb_enqueue and sets
 *           CHN); false marks it last (IOC + LST so hardware interrupts
 *           on completion)
 */
static void dwc3_ep0_prepare_one_trb(struct dwc3_ep *dep,
		dma_addr_t buf_dma, u32 len, u32 type, bool chain)
{
	struct dwc3_trb *trb;
	struct dwc3 *dwc;

	dwc = dep->dwc;
	trb = &dwc->ep0_trb[dep->trb_enqueue];

	if (chain)
		dep->trb_enqueue++;

	trb->bpl = lower_32_bits(buf_dma);
	trb->bph = upper_32_bits(buf_dma);
	trb->size = len;
	trb->ctrl = type;

	/* HWO hands the TRB to hardware; set it together with the flags. */
	trb->ctrl |= (DWC3_TRB_CTRL_HWO
			| DWC3_TRB_CTRL_ISP_IMI);

	if (chain)
		trb->ctrl |= DWC3_TRB_CTRL_CHN;
	else
		trb->ctrl |= (DWC3_TRB_CTRL_IOC
				| DWC3_TRB_CTRL_LST);

	trace_dwc3_prepare_trb(dep, trb);
}
/*
 * Issue a Start Transfer command for the shared ep0 TRB. No-op if a
 * transfer is already running on this endpoint. On success the driver
 * expects a transfer-complete event next.
 */
static int dwc3_ep0_start_trans(struct dwc3_ep *dep)
{
	struct dwc3_gadget_ep_cmd_params params;
	struct dwc3 *dwc = dep->dwc;
	int ret;

	if (dep->flags & DWC3_EP_TRANSFER_STARTED)
		return 0;

	memset(&params, 0, sizeof(params));
	params.param0 = upper_32_bits(dwc->ep0_trb_addr);
	params.param1 = lower_32_bits(dwc->ep0_trb_addr);

	ret = dwc3_send_gadget_ep_cmd(dep, DWC3_DEPCMD_STARTTRANSFER, &params);
	if (ret < 0)
		return ret;

	dwc->ep0_next_event = DWC3_EP0_COMPLETE;

	return 0;
}
/*
 * Queue a control request on ep0 (caller holds dwc->lock). Depending on
 * where the ep0 state machine is, this may kick a pending DATA phase,
 * release a delayed STATUS phase, or start the DATA phase immediately.
 */
static int __dwc3_gadget_ep0_queue(struct dwc3_ep *dep,
		struct dwc3_request *req)
{
	struct dwc3 *dwc = dep->dwc;

	req->request.actual = 0;
	req->request.status = -EINPROGRESS;
	req->epnum = dep->number;

	list_add_tail(&req->list, &dep->pending_list);

	/*
	 * Gadget driver might not be quick enough to queue a request
	 * before we get a Transfer Not Ready event on this endpoint.
	 *
	 * In that case, we will set DWC3_EP_PENDING_REQUEST. When that
	 * flag is set, it's telling us that as soon as Gadget queues the
	 * required request, we should kick the transfer here because the
	 * IRQ we were waiting for is long gone.
	 */
	if (dep->flags & DWC3_EP_PENDING_REQUEST) {
		unsigned int direction;

		direction = !!(dep->flags & DWC3_EP0_DIR_IN);

		if (dwc->ep0state != EP0_DATA_PHASE) {
			dev_WARN(dwc->dev, "Unexpected pending request\n");
			return 0;
		}

		__dwc3_ep0_do_control_data(dwc, dwc->eps[direction], req);

		dep->flags &= ~(DWC3_EP_PENDING_REQUEST |
				DWC3_EP0_DIR_IN);

		return 0;
	}

	/*
	 * In case gadget driver asked us to delay the STATUS phase,
	 * handle it here.
	 */
	if (dwc->delayed_status) {
		unsigned int direction;

		direction = !dwc->ep0_expect_in;
		dwc->delayed_status = false;
		usb_gadget_set_state(dwc->gadget, USB_STATE_CONFIGURED);

		if (dwc->ep0state == EP0_STATUS_PHASE)
			__dwc3_ep0_do_control_status(dwc, dwc->eps[direction]);

		return 0;
	}

	/*
	 * Unfortunately we have uncovered a limitation wrt the Data Phase.
	 *
	 * Section 9.4 says we can wait for the XferNotReady(DATA) event to
	 * come before issueing Start Transfer command, but if we do, we will
	 * miss situations where the host starts another SETUP phase instead of
	 * the DATA phase. Such cases happen at least on TD.7.6 of the Link
	 * Layer Compliance Suite.
	 *
	 * The problem surfaces due to the fact that in case of back-to-back
	 * SETUP packets there will be no XferNotReady(DATA) generated and we
	 * will be stuck waiting for XferNotReady(DATA) forever.
	 *
	 * By looking at tables 9-13 and 9-14 of the Databook, we can see that
	 * it tells us to start Data Phase right away. It also mentions that if
	 * we receive a SETUP phase instead of the DATA phase, core will issue
	 * XferComplete for the DATA phase, before actually initiating it in
	 * the wire, with the TRB's status set to "SETUP_PENDING". Such status
	 * can only be used to print some debugging logs, as the core expects
	 * us to go through to the STATUS phase and start a CONTROL_STATUS TRB,
	 * just so it completes right away, without transferring anything and,
	 * only then, we can go back to the SETUP phase.
	 *
	 * Because of this scenario, SNPS decided to change the programming
	 * model of control transfers and support on-demand transfers only for
	 * the STATUS phase. To fix the issue we have now, we will always wait
	 * for gadget driver to queue the DATA phase's struct usb_request, then
	 * start it right away.
	 *
	 * If we're actually in a 2-stage transfer, we will wait for
	 * XferNotReady(STATUS).
	 */
	if (dwc->three_stage_setup) {
		unsigned int direction;

		direction = dwc->ep0_expect_in;
		dwc->ep0state = EP0_DATA_PHASE;

		__dwc3_ep0_do_control_data(dwc, dwc->eps[direction], req);

		dep->flags &= ~DWC3_EP0_DIR_IN;
	}

	return 0;
}
/*
 * usb_ep_ops.queue for ep0: validate endpoint state and hand off to
 * __dwc3_gadget_ep0_queue() under the controller lock.
 */
int dwc3_gadget_ep0_queue(struct usb_ep *ep, struct usb_request *request,
		gfp_t gfp_flags)
{
	struct dwc3_request *req = to_dwc3_request(request);
	struct dwc3_ep *dep = to_dwc3_ep(ep);
	struct dwc3 *dwc = dep->dwc;
	unsigned long flags;
	int ret = -EBUSY;

	spin_lock_irqsave(&dwc->lock, flags);
	if (!dep->endpoint.desc || !dwc->pullups_connected || !dwc->connected) {
		dev_err(dwc->dev, "%s: can't queue to disabled endpoint\n",
				dep->name);
		ret = -ESHUTDOWN;
		goto out;
	}

	/* ep0/ep1 share one TRB, so only a single request may be pending. */
	if (!list_empty(&dep->pending_list))
		goto out;

	ret = __dwc3_gadget_ep0_queue(dep, req);

out:
	spin_unlock_irqrestore(&dwc->lock, flags);

	return ret;
}
/*
 * Stall ep0 and restart the control state machine at the SETUP phase.
 * Any request still pending on ep0 is given back with -ECONNRESET.
 */
void dwc3_ep0_stall_and_restart(struct dwc3 *dwc)
{
	struct dwc3_ep *dep;

	/* reinitialize physical ep1 */
	dep = dwc->eps[1];
	dep->flags = DWC3_EP_ENABLED;

	/* stall is always issued on EP0 */
	dep = dwc->eps[0];
	__dwc3_gadget_ep_set_halt(dep, 1, false);
	dep->flags = DWC3_EP_ENABLED;
	dwc->delayed_status = false;

	if (!list_empty(&dep->pending_list)) {
		struct dwc3_request *req;

		req = next_request(&dep->pending_list);
		dwc3_gadget_giveback(dep, req, -ECONNRESET);
	}

	/* Rewind the shared TRB and re-arm for the next SETUP packet. */
	dwc->eps[0]->trb_enqueue = 0;
	dwc->eps[1]->trb_enqueue = 0;
	dwc->ep0state = EP0_SETUP_PHASE;
	dwc3_ep0_out_start(dwc);
}
/*
 * Lock-free ep0 halt: on ep0 a halt always stalls the current transfer and
 * restarts the SETUP phase, so @value is ignored. Caller holds dwc->lock.
 */
int __dwc3_gadget_ep0_set_halt(struct usb_ep *ep, int value)
{
	struct dwc3_ep *dep = to_dwc3_ep(ep);
	struct dwc3 *dwc = dep->dwc;

	dwc3_ep0_stall_and_restart(dwc);

	return 0;
}
/* usb_ep_ops.set_halt for ep0: take the lock and delegate. */
int dwc3_gadget_ep0_set_halt(struct usb_ep *ep, int value)
{
	struct dwc3_ep *dep = to_dwc3_ep(ep);
	struct dwc3 *dwc = dep->dwc;
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&dwc->lock, flags);
	ret = __dwc3_gadget_ep0_set_halt(ep, value);
	spin_unlock_irqrestore(&dwc->lock, flags);

	return ret;
}
/*
 * Arm ep0 OUT for the next 8-byte SETUP packet and flush any endpoints
 * whose stop was deferred (DWC3_EP_DELAY_STOP) until ep0 became idle.
 */
void dwc3_ep0_out_start(struct dwc3 *dwc)
{
	struct dwc3_ep *dep;
	int ret;
	int i;

	/* Wake anyone waiting for ep0 to be back in the SETUP phase. */
	complete(&dwc->ep0_in_setup);

	dep = dwc->eps[0];
	dwc3_ep0_prepare_one_trb(dep, dwc->ep0_trb_addr, 8,
			DWC3_TRBCTL_CONTROL_SETUP, false);
	ret = dwc3_ep0_start_trans(dep);
	WARN_ON(ret < 0);

	/* Non-control endpoints start at index 2. */
	for (i = 2; i < DWC3_ENDPOINTS_NUM; i++) {
		struct dwc3_ep *dwc3_ep;

		dwc3_ep = dwc->eps[i];
		if (!dwc3_ep)
			continue;

		if (!(dwc3_ep->flags & DWC3_EP_DELAY_STOP))
			continue;

		dwc3_ep->flags &= ~DWC3_EP_DELAY_STOP;
		if (dwc->connected)
			dwc3_stop_active_transfer(dwc3_ep, true, true);
		else
			dwc3_remove_requests(dwc, dwc3_ep, -ESHUTDOWN);
	}
}
/*
 * Translate a control request's wIndex endpoint selector into its
 * dwc3_ep. Physical slot = (endpoint number << 1) | direction.
 * Returns NULL when the slot is empty or the endpoint is disabled.
 */
static struct dwc3_ep *dwc3_wIndex_to_dep(struct dwc3 *dwc, __le16 wIndex_le)
{
	u32 windex = le16_to_cpu(wIndex_le);
	struct dwc3_ep *dep;
	u32 phys;

	phys = (windex & USB_ENDPOINT_NUMBER_MASK) << 1;
	if ((windex & USB_ENDPOINT_DIR_MASK) == USB_DIR_IN)
		phys |= 1;

	dep = dwc->eps[phys];
	if (dep && (dep->flags & DWC3_EP_ENABLED))
		return dep;

	return NULL;
}
/* Completion callback for the GetStatus response; nothing to do. */
static void dwc3_ep0_status_cmpl(struct usb_ep *ep, struct usb_request *req)
{
}
/*
* ch 9.4.5
*/
/*
 * dwc3_ep0_handle_status - handle the chapter 9 GetStatus request
 *
 * Builds the two-byte status word for device, interface or endpoint
 * recipients and queues it on ep0 as the data stage. Interface status
 * (function remote wakeup bits) is delegated to the gadget driver.
 * Returns negative errno on a malformed request.
 */
static int dwc3_ep0_handle_status(struct dwc3 *dwc,
		struct usb_ctrlrequest *ctrl)
{
	struct dwc3_ep *dep;
	u32 recip;
	u32 value;
	u32 reg;
	u16 usb_status = 0;
	__le16 *response_pkt;

	/* We don't support PTM_STATUS */
	value = le16_to_cpu(ctrl->wValue);
	if (value != 0)
		return -EINVAL;

	recip = ctrl->bRequestType & USB_RECIP_MASK;
	switch (recip) {
	case USB_RECIP_DEVICE:
		/*
		 * LTM will be set once we know how to set this in HW.
		 */
		usb_status |= dwc->gadget->is_selfpowered;

		if ((dwc->speed == DWC3_DSTS_SUPERSPEED) ||
		    (dwc->speed == DWC3_DSTS_SUPERSPEED_PLUS)) {
			/* SuperSpeed: report the U1/U2 enable state */
			reg = dwc3_readl(dwc->regs, DWC3_DCTL);
			if (reg & DWC3_DCTL_INITU1ENA)
				usb_status |= 1 << USB_DEV_STAT_U1_ENABLED;
			if (reg & DWC3_DCTL_INITU2ENA)
				usb_status |= 1 << USB_DEV_STAT_U2_ENABLED;
		} else {
			/* HS and below: report remote wakeup arming */
			usb_status |= dwc->gadget->wakeup_armed <<
					USB_DEVICE_REMOTE_WAKEUP;
		}
		break;

	case USB_RECIP_INTERFACE:
		/*
		 * Function Remote Wake Capable	D0
		 * Function Remote Wakeup	D1
		 */
		return dwc3_ep0_delegate_req(dwc, ctrl);

	case USB_RECIP_ENDPOINT:
		dep = dwc3_wIndex_to_dep(dwc, ctrl->wIndex);
		if (!dep)
			return -EINVAL;

		if (dep->flags & DWC3_EP_STALL)
			usb_status = 1 << USB_ENDPOINT_HALT;
		break;
	default:
		return -EINVAL;
	}

	/* queue the two-byte response as the control data stage */
	response_pkt = (__le16 *) dwc->setup_buf;
	*response_pkt = cpu_to_le16(usb_status);

	dep = dwc->eps[0];
	dwc->ep0_usb_req.dep = dep;
	dwc->ep0_usb_req.request.length = sizeof(*response_pkt);
	dwc->ep0_usb_req.request.buf = dwc->setup_buf;
	dwc->ep0_usb_req.request.complete = dwc3_ep0_status_cmpl;

	return __dwc3_gadget_ep0_queue(dep, &dwc->ep0_usb_req);
}
/*
 * Set or clear U1 entry initiation. Only legal in the configured
 * state at SuperSpeed(+), and enabling is refused when the quirk
 * disables U1 entry.
 */
static int dwc3_ep0_handle_u1(struct dwc3 *dwc, enum usb_device_state state,
		int set)
{
	u32 dctl;

	if (state != USB_STATE_CONFIGURED)
		return -EINVAL;

	if (dwc->speed != DWC3_DSTS_SUPERSPEED &&
	    dwc->speed != DWC3_DSTS_SUPERSPEED_PLUS)
		return -EINVAL;

	if (set && dwc->dis_u1_entry_quirk)
		return -EINVAL;

	dctl = dwc3_readl(dwc->regs, DWC3_DCTL);
	if (set)
		dctl |= DWC3_DCTL_INITU1ENA;
	else
		dctl &= ~DWC3_DCTL_INITU1ENA;
	dwc3_writel(dwc->regs, DWC3_DCTL, dctl);

	return 0;
}
/*
 * Set or clear U2 entry initiation. Mirrors dwc3_ep0_handle_u1() for
 * the U2 enable bit and its quirk.
 */
static int dwc3_ep0_handle_u2(struct dwc3 *dwc, enum usb_device_state state,
		int set)
{
	u32 dctl;

	if (state != USB_STATE_CONFIGURED)
		return -EINVAL;

	if (dwc->speed != DWC3_DSTS_SUPERSPEED &&
	    dwc->speed != DWC3_DSTS_SUPERSPEED_PLUS)
		return -EINVAL;

	if (set && dwc->dis_u2_entry_quirk)
		return -EINVAL;

	dctl = dwc3_readl(dwc->regs, DWC3_DCTL);
	if (set)
		dctl |= DWC3_DCTL_INITU2ENA;
	else
		dctl &= ~DWC3_DCTL_INITU2ENA;
	dwc3_writel(dwc->regs, DWC3_DCTL, dctl);

	return 0;
}
/*
 * Arm a USB test mode from SetFeature(TEST_MODE). The mode selector
 * lives in the high byte of wIndex; the low byte must be zero and the
 * feature cannot be cleared. The mode itself is entered later, in the
 * status-phase completion.
 */
static int dwc3_ep0_handle_test(struct dwc3 *dwc, enum usb_device_state state,
		u32 wIndex, int set)
{
	u32 mode = wIndex >> 8;

	if ((wIndex & 0xff) || !set)
		return -EINVAL;

	switch (mode) {
	case USB_TEST_J:
	case USB_TEST_K:
	case USB_TEST_SE0_NAK:
	case USB_TEST_PACKET:
	case USB_TEST_FORCE_ENABLE:
		break;
	default:
		return -EINVAL;
	}

	dwc->test_mode_nr = mode;
	dwc->test_mode = true;

	return 0;
}
/*
 * dwc3_ep0_handle_device - Set/ClearFeature with a device recipient
 * @set: 1 for SetFeature, 0 for ClearFeature
 *
 * Handles REMOTE_WAKEUP, U1/U2_ENABLE and TEST_MODE. LTM_ENABLE and
 * any other selector is rejected with -EINVAL.
 */
static int dwc3_ep0_handle_device(struct dwc3 *dwc,
		struct usb_ctrlrequest *ctrl, int set)
{
	enum usb_device_state state;
	u32 wValue;
	u32 wIndex;
	int ret = 0;

	wValue = le16_to_cpu(ctrl->wValue);
	wIndex = le16_to_cpu(ctrl->wIndex);
	state = dwc->gadget->state;

	switch (wValue) {
	case USB_DEVICE_REMOTE_WAKEUP:
		/* only honoured once the function configured wakeup */
		if (dwc->wakeup_configured)
			dwc->gadget->wakeup_armed = set;
		else
			ret = -EINVAL;
		break;
	/*
	 * 9.4.1 says only for SS, in AddressState only for
	 * default control pipe
	 */
	case USB_DEVICE_U1_ENABLE:
		ret = dwc3_ep0_handle_u1(dwc, state, set);
		break;
	case USB_DEVICE_U2_ENABLE:
		ret = dwc3_ep0_handle_u2(dwc, state, set);
		break;
	case USB_DEVICE_LTM_ENABLE:
		ret = -EINVAL;
		break;
	case USB_DEVICE_TEST_MODE:
		ret = dwc3_ep0_handle_test(dwc, state, wIndex, set);
		break;
	default:
		ret = -EINVAL;
	}

	return ret;
}
/*
 * Set/ClearFeature with an interface recipient. FUNCTION_SUSPEND is
 * the only selector we recognize; it is forwarded to the gadget
 * driver, everything else fails.
 */
static int dwc3_ep0_handle_intf(struct dwc3 *dwc,
		struct usb_ctrlrequest *ctrl, int set)
{
	if (le16_to_cpu(ctrl->wValue) == USB_INTRF_FUNC_SUSPEND)
		return dwc3_ep0_delegate_req(dwc, ctrl);

	return -EINVAL;
}
/*
 * dwc3_ep0_handle_endpoint - Set/ClearFeature(ENDPOINT_HALT)
 * @set: 1 to stall the endpoint, 0 to clear the stall
 *
 * A ClearFeature on a wedged endpoint is silently ignored. May return
 * USB_GADGET_DELAYED_STATUS when the halt cannot be fully cleared
 * until a pending End Transfer completes.
 */
static int dwc3_ep0_handle_endpoint(struct dwc3 *dwc,
		struct usb_ctrlrequest *ctrl, int set)
{
	struct dwc3_ep *dep;
	u32 wValue;
	int ret;

	wValue = le16_to_cpu(ctrl->wValue);
	switch (wValue) {
	case USB_ENDPOINT_HALT:
		dep = dwc3_wIndex_to_dep(dwc, ctrl->wIndex);
		if (!dep)
			return -EINVAL;

		/* wedged endpoints stay halted */
		if (set == 0 && (dep->flags & DWC3_EP_WEDGE))
			break;

		ret = __dwc3_gadget_ep_set_halt(dep, set, true);
		if (ret)
			return -EINVAL;

		/* ClearFeature(Halt) may need delayed status */
		if (!set && (dep->flags & DWC3_EP_END_TRANSFER_PENDING))
			return USB_GADGET_DELAYED_STATUS;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}
/* Dispatch Set/ClearFeature by its recipient field. */
static int dwc3_ep0_handle_feature(struct dwc3 *dwc,
		struct usb_ctrlrequest *ctrl, int set)
{
	u32 recipient = ctrl->bRequestType & USB_RECIP_MASK;

	if (recipient == USB_RECIP_DEVICE)
		return dwc3_ep0_handle_device(dwc, ctrl, set);
	if (recipient == USB_RECIP_INTERFACE)
		return dwc3_ep0_handle_intf(dwc, ctrl, set);
	if (recipient == USB_RECIP_ENDPOINT)
		return dwc3_ep0_handle_endpoint(dwc, ctrl, set);

	return -EINVAL;
}
/*
 * Handle SetAddress: validate the 7-bit address and the device state,
 * program DWC3_DCFG and move between DEFAULT and ADDRESS states.
 */
static int dwc3_ep0_set_address(struct dwc3 *dwc, struct usb_ctrlrequest *ctrl)
{
	u32 addr = le16_to_cpu(ctrl->wValue);
	u32 dcfg;

	if (addr > 127) {
		dev_err(dwc->dev, "invalid device address %d\n", addr);
		return -EINVAL;
	}

	if (dwc->gadget->state == USB_STATE_CONFIGURED) {
		dev_err(dwc->dev, "can't SetAddress() from Configured State\n");
		return -EINVAL;
	}

	dcfg = dwc3_readl(dwc->regs, DWC3_DCFG);
	dcfg &= ~(DWC3_DCFG_DEVADDR_MASK);
	dcfg |= DWC3_DCFG_DEVADDR(addr);
	dwc3_writel(dwc->regs, DWC3_DCFG, dcfg);

	usb_gadget_set_state(dwc->gadget, addr ? USB_STATE_ADDRESS
					       : USB_STATE_DEFAULT);

	return 0;
}
/*
 * dwc3_ep0_delegate_req - forward a control request to the gadget driver
 *
 * dwc->lock is released for the duration of the setup() callback and
 * re-taken afterwards. Returns -EINVAL when async callbacks are not
 * currently allowed.
 */
static int dwc3_ep0_delegate_req(struct dwc3 *dwc, struct usb_ctrlrequest *ctrl)
{
	int ret = -EINVAL;

	if (dwc->async_callbacks) {
		spin_unlock(&dwc->lock);
		ret = dwc->gadget_driver->setup(dwc->gadget, ctrl);
		spin_lock(&dwc->lock);
	}

	return ret;
}
/*
 * dwc3_ep0_set_config - handle SetConfiguration
 *
 * Delegates the request to the gadget driver and tracks the chapter 9
 * state transitions (ADDRESS <-> CONFIGURED). On a successful non-zero
 * configuration from ADDRESS state, TX FIFOs are cleared and U1/U2
 * acceptance is enabled unless quirk-disabled.
 */
static int dwc3_ep0_set_config(struct dwc3 *dwc, struct usb_ctrlrequest *ctrl)
{
	enum usb_device_state state = dwc->gadget->state;
	u32 cfg;
	int ret;
	u32 reg;

	cfg = le16_to_cpu(ctrl->wValue);

	switch (state) {
	case USB_STATE_DEFAULT:
		return -EINVAL;

	case USB_STATE_ADDRESS:
		dwc3_gadget_clear_tx_fifos(dwc);

		ret = dwc3_ep0_delegate_req(dwc, ctrl);
		/* if the cfg matches and the cfg is non zero */
		if (cfg && (!ret || (ret == USB_GADGET_DELAYED_STATUS))) {
			/*
			 * only change state if set_config has already
			 * been processed. If gadget driver returns
			 * USB_GADGET_DELAYED_STATUS, we will wait
			 * to change the state on the next usb_ep_queue()
			 */
			if (ret == 0)
				usb_gadget_set_state(dwc->gadget,
						USB_STATE_CONFIGURED);

			/*
			 * Enable transition to U1/U2 state when
			 * nothing is pending from application.
			 */
			reg = dwc3_readl(dwc->regs, DWC3_DCTL);
			if (!dwc->dis_u1_entry_quirk)
				reg |= DWC3_DCTL_ACCEPTU1ENA;
			if (!dwc->dis_u2_entry_quirk)
				reg |= DWC3_DCTL_ACCEPTU2ENA;
			dwc3_writel(dwc->regs, DWC3_DCTL, reg);
		}
		break;

	case USB_STATE_CONFIGURED:
		ret = dwc3_ep0_delegate_req(dwc, ctrl);
		/* SetConfiguration(0) drops back to ADDRESS state */
		if (!cfg && !ret)
			usb_gadget_set_state(dwc->gadget,
					USB_STATE_ADDRESS);
		break;
	default:
		ret = -EINVAL;
	}
	return ret;
}
/*
 * dwc3_ep0_set_sel_cmpl - data-stage completion for SET_SEL
 *
 * Copies the six latency bytes out of the request buffer, caches them
 * in @dwc, and programs the active exit latency (U1 or U2 PEL) into
 * the controller via the Set Periodic Parameters generic command.
 */
static void dwc3_ep0_set_sel_cmpl(struct usb_ep *ep, struct usb_request *req)
{
	struct dwc3_ep *dep = to_dwc3_ep(ep);
	struct dwc3 *dwc = dep->dwc;
	u32 param = 0;
	u32 reg;

	/* wire format of the SET_SEL payload */
	struct timing {
		u8 u1sel;
		u8 u1pel;
		__le16 u2sel;
		__le16 u2pel;
	} __packed timing;

	int ret;

	memcpy(&timing, req->buf, sizeof(timing));

	dwc->u1sel = timing.u1sel;
	dwc->u1pel = timing.u1pel;
	dwc->u2sel = le16_to_cpu(timing.u2sel);
	dwc->u2pel = le16_to_cpu(timing.u2pel);

	/* pick the PEL matching whichever low-power state is enabled */
	reg = dwc3_readl(dwc->regs, DWC3_DCTL);
	if (reg & DWC3_DCTL_INITU2ENA)
		param = dwc->u2pel;
	if (reg & DWC3_DCTL_INITU1ENA)
		param = dwc->u1pel;

	/*
	 * According to Synopsys Databook, if parameter is
	 * greater than 125, a value of zero should be
	 * programmed in the register.
	 */
	if (param > 125)
		param = 0;

	/* now that we have the time, issue DGCMD Set Sel */
	ret = dwc3_send_gadget_generic_command(dwc,
			DWC3_DGCMD_SET_PERIODIC_PAR, param);
	WARN_ON(ret < 0);
}
/*
 * dwc3_ep0_set_sel - handle SET_SEL (System Exit Latency)
 *
 * Validates wLength (must be 6) and queues an OUT request for the data
 * stage; the received values are consumed in dwc3_ep0_set_sel_cmpl().
 */
static int dwc3_ep0_set_sel(struct dwc3 *dwc, struct usb_ctrlrequest *ctrl)
{
	struct dwc3_ep *dep;
	enum usb_device_state state = dwc->gadget->state;
	u16 wLength;

	if (state == USB_STATE_DEFAULT)
		return -EINVAL;

	wLength = le16_to_cpu(ctrl->wLength);

	if (wLength != 6) {
		dev_err(dwc->dev, "Set SEL should be 6 bytes, got %d\n",
				wLength);
		return -EINVAL;
	}

	/*
	 * To handle Set SEL we need to receive 6 bytes from Host. So let's
	 * queue a usb_request for 6 bytes.
	 *
	 * Remember, though, this controller can't handle non-wMaxPacketSize
	 * aligned transfers on the OUT direction, so we queue a request for
	 * wMaxPacketSize instead.
	 */
	dep = dwc->eps[0];
	dwc->ep0_usb_req.dep = dep;
	dwc->ep0_usb_req.request.length = dep->endpoint.maxpacket;
	dwc->ep0_usb_req.request.buf = dwc->setup_buf;
	dwc->ep0_usb_req.request.complete = dwc3_ep0_set_sel_cmpl;

	return __dwc3_gadget_ep0_queue(dep, &dwc->ep0_usb_req);
}
/*
 * Handle SET_ISOCH_DELAY: the delay is carried in wValue; wIndex and
 * wLength must both be zero.
 */
static int dwc3_ep0_set_isoch_delay(struct dwc3 *dwc, struct usb_ctrlrequest *ctrl)
{
	if (le16_to_cpu(ctrl->wIndex) || le16_to_cpu(ctrl->wLength))
		return -EINVAL;

	dwc->gadget->isoch_delay = le16_to_cpu(ctrl->wValue);

	return 0;
}
/*
 * Dispatch a chapter 9 standard request. Anything the controller
 * driver does not implement itself is handed to the gadget driver.
 */
static int dwc3_ep0_std_request(struct dwc3 *dwc, struct usb_ctrlrequest *ctrl)
{
	switch (ctrl->bRequest) {
	case USB_REQ_GET_STATUS:
		return dwc3_ep0_handle_status(dwc, ctrl);
	case USB_REQ_CLEAR_FEATURE:
		return dwc3_ep0_handle_feature(dwc, ctrl, 0);
	case USB_REQ_SET_FEATURE:
		return dwc3_ep0_handle_feature(dwc, ctrl, 1);
	case USB_REQ_SET_ADDRESS:
		return dwc3_ep0_set_address(dwc, ctrl);
	case USB_REQ_SET_CONFIGURATION:
		return dwc3_ep0_set_config(dwc, ctrl);
	case USB_REQ_SET_SEL:
		return dwc3_ep0_set_sel(dwc, ctrl);
	case USB_REQ_SET_ISOCH_DELAY:
		return dwc3_ep0_set_isoch_delay(dwc, ctrl);
	default:
		return dwc3_ep0_delegate_req(dwc, ctrl);
	}
}
/*
 * dwc3_ep0_inspect_setup - decode a freshly received SETUP packet
 *
 * Determines two- vs three-stage transfer and the expected data
 * direction, then routes standard requests to dwc3_ep0_std_request()
 * and everything else to the gadget driver. Any failure stalls EP0 and
 * re-arms it for a new SETUP.
 */
static void dwc3_ep0_inspect_setup(struct dwc3 *dwc,
		const struct dwc3_event_depevt *event)
{
	struct usb_ctrlrequest *ctrl = (void *) dwc->ep0_trb;
	int ret = -EINVAL;
	u32 len;

	if (!dwc->gadget_driver || !dwc->softconnect || !dwc->connected)
		goto out;

	trace_dwc3_ctrl_req(ctrl);

	len = le16_to_cpu(ctrl->wLength);
	if (!len) {
		/* no data stage: next not-ready event is the STATUS phase */
		dwc->three_stage_setup = false;
		dwc->ep0_expect_in = false;
		dwc->ep0_next_event = DWC3_EP0_NRDY_STATUS;
	} else {
		dwc->three_stage_setup = true;
		dwc->ep0_expect_in = !!(ctrl->bRequestType & USB_DIR_IN);
		dwc->ep0_next_event = DWC3_EP0_NRDY_DATA;
	}

	if ((ctrl->bRequestType & USB_TYPE_MASK) == USB_TYPE_STANDARD)
		ret = dwc3_ep0_std_request(dwc, ctrl);
	else
		ret = dwc3_ep0_delegate_req(dwc, ctrl);

	if (ret == USB_GADGET_DELAYED_STATUS)
		dwc->delayed_status = true;

out:
	if (ret < 0)
		dwc3_ep0_stall_and_restart(dwc);
}
/*
 * dwc3_ep0_complete_data - finish the DATA phase of a control transfer
 *
 * Computes the number of bytes actually transferred from the TRB,
 * reclaims the chained bounce/ZLP TRB when one was used, and either
 * gives the request back or stalls EP0 when an IN transfer came up
 * short.
 */
static void dwc3_ep0_complete_data(struct dwc3 *dwc,
		const struct dwc3_event_depevt *event)
{
	struct dwc3_request *r;
	struct usb_request *ur;
	struct dwc3_trb *trb;
	struct dwc3_ep *ep0;
	u32 transferred = 0;
	u32 status;
	u32 length;
	u8 epnum;

	epnum = event->endpoint_number;
	ep0 = dwc->eps[0];

	dwc->ep0_next_event = DWC3_EP0_NRDY_STATUS;
	trb = dwc->ep0_trb;
	trace_dwc3_complete_trb(ep0, trb);

	r = next_request(&ep0->pending_list);
	if (!r)
		return;

	status = DWC3_TRB_SIZE_TRBSTS(trb->size);
	if (status == DWC3_TRBSTS_SETUP_PENDING) {
		/* a new SETUP arrived mid-transfer: abort this request */
		dwc->setup_packet_pending = true;
		if (r)
			dwc3_gadget_giveback(ep0, r, -ECONNRESET);
		return;
	}

	ur = &r->request;

	/* TRB size counts down; the residue gives us bytes transferred */
	length = trb->size & DWC3_TRB_SIZE_MASK;
	transferred = ur->length - length;
	ur->actual += transferred;

	if ((IS_ALIGNED(ur->length, ep0->endpoint.maxpacket) &&
	     ur->length && ur->zero) || dwc->ep0_bounced) {
		/* a second (ZLP/bounce) TRB was chained: reclaim it */
		trb++;
		trb->ctrl &= ~DWC3_TRB_CTRL_HWO;
		trace_dwc3_complete_trb(ep0, trb);

		if (r->direction)
			dwc->eps[1]->trb_enqueue = 0;
		else
			dwc->eps[0]->trb_enqueue = 0;

		dwc->ep0_bounced = false;
	}

	/* short IN transfer is a protocol error: stall and restart */
	if ((epnum & 1) && ur->actual < ur->length)
		dwc3_ep0_stall_and_restart(dwc);
	else
		dwc3_gadget_giveback(ep0, r, 0);
}
/*
 * dwc3_ep0_complete_status - finish the STATUS phase
 *
 * Gives back any request still pending on ep0, enters the USB test
 * mode if one was armed via SetFeature(TEST_MODE), notes a pending
 * SETUP if the controller flagged one, and re-arms ep0 for the next
 * SETUP packet.
 */
static void dwc3_ep0_complete_status(struct dwc3 *dwc,
		const struct dwc3_event_depevt *event)
{
	struct dwc3_request *r;
	struct dwc3_ep *dep;
	struct dwc3_trb *trb;
	u32 status;

	dep = dwc->eps[0];
	trb = dwc->ep0_trb;

	trace_dwc3_complete_trb(dep, trb);

	if (!list_empty(&dep->pending_list)) {
		r = next_request(&dep->pending_list);

		dwc3_gadget_giveback(dep, r, 0);
	}

	if (dwc->test_mode) {
		int ret;

		ret = dwc3_gadget_set_test_mode(dwc, dwc->test_mode_nr);
		if (ret < 0) {
			dev_err(dwc->dev, "invalid test #%d\n",
					dwc->test_mode_nr);
			dwc3_ep0_stall_and_restart(dwc);
			return;
		}
	}

	status = DWC3_TRB_SIZE_TRBSTS(trb->size);
	if (status == DWC3_TRBSTS_SETUP_PENDING)
		dwc->setup_packet_pending = true;

	dwc->ep0state = EP0_SETUP_PHASE;
	dwc3_ep0_out_start(dwc);
}
/*
 * Handle XferComplete on a control endpoint: clear the hardware
 * bookkeeping for the endpoint and hand the event to the handler for
 * the current EP0 phase.
 */
static void dwc3_ep0_xfer_complete(struct dwc3 *dwc,
			const struct dwc3_event_depevt *event)
{
	struct dwc3_ep *dep = dwc->eps[event->endpoint_number];

	dep->flags &= ~DWC3_EP_TRANSFER_STARTED;
	dep->resource_index = 0;
	dwc->setup_packet_pending = false;

	if (dwc->ep0state == EP0_SETUP_PHASE)
		dwc3_ep0_inspect_setup(dwc, event);
	else if (dwc->ep0state == EP0_DATA_PHASE)
		dwc3_ep0_complete_data(dwc, event);
	else if (dwc->ep0state == EP0_STATUS_PHASE)
		dwc3_ep0_complete_status(dwc, event);
	else
		WARN(true, "UNKNOWN ep0state %d\n", dwc->ep0state);
}
/*
 * __dwc3_ep0_do_control_data - program TRBs for the control DATA phase
 *
 * Four cases:
 *  1. zero-length request: single TRB from the bounce buffer (sized to
 *     maxpacket for OUT);
 *  2. OUT request not aligned to wMaxPacketSize: chain an extra bounce
 *     TRB to pad the transfer up to packet alignment;
 *  3. aligned non-empty request with 'zero' set: chain an extra TRB
 *     for the zero-length packet;
 *  4. everything else: a single TRB straight from the request buffer.
 */
static void __dwc3_ep0_do_control_data(struct dwc3 *dwc,
		struct dwc3_ep *dep, struct dwc3_request *req)
{
	unsigned int trb_length = 0;
	int ret;

	/* physical endpoint 1 is the IN direction */
	req->direction = !!dep->number;

	if (req->request.length == 0) {
		if (!req->direction)
			trb_length = dep->endpoint.maxpacket;

		dwc3_ep0_prepare_one_trb(dep, dwc->bounce_addr, trb_length,
				DWC3_TRBCTL_CONTROL_DATA, false);
		ret = dwc3_ep0_start_trans(dep);
	} else if (!IS_ALIGNED(req->request.length, dep->endpoint.maxpacket)
			&& (dep->number == 0)) {
		u32 maxpacket;
		u32 rem;

		ret = usb_gadget_map_request_by_dev(dwc->sysdev,
				&req->request, dep->number);
		if (ret)
			return;

		maxpacket = dep->endpoint.maxpacket;
		rem = req->request.length % maxpacket;
		dwc->ep0_bounced = true;

		/* prepare normal TRB */
		dwc3_ep0_prepare_one_trb(dep, req->request.dma,
					 req->request.length,
					 DWC3_TRBCTL_CONTROL_DATA,
					 true);

		req->trb = &dwc->ep0_trb[dep->trb_enqueue - 1];

		/* Now prepare one extra TRB to align transfer size */
		dwc3_ep0_prepare_one_trb(dep, dwc->bounce_addr,
					 maxpacket - rem,
					 DWC3_TRBCTL_CONTROL_DATA,
					 false);
		ret = dwc3_ep0_start_trans(dep);
	} else if (IS_ALIGNED(req->request.length, dep->endpoint.maxpacket) &&
		   req->request.length && req->request.zero) {

		ret = usb_gadget_map_request_by_dev(dwc->sysdev,
				&req->request, dep->number);
		if (ret)
			return;

		/* prepare normal TRB */
		dwc3_ep0_prepare_one_trb(dep, req->request.dma,
					 req->request.length,
					 DWC3_TRBCTL_CONTROL_DATA,
					 true);

		req->trb = &dwc->ep0_trb[dep->trb_enqueue - 1];

		if (!req->direction)
			trb_length = dep->endpoint.maxpacket;

		/* Now prepare one extra TRB to align transfer size */
		dwc3_ep0_prepare_one_trb(dep, dwc->bounce_addr,
					 trb_length, DWC3_TRBCTL_CONTROL_DATA,
					 false);
		ret = dwc3_ep0_start_trans(dep);
	} else {
		ret = usb_gadget_map_request_by_dev(dwc->sysdev,
				&req->request, dep->number);
		if (ret)
			return;

		dwc3_ep0_prepare_one_trb(dep, req->request.dma,
				req->request.length, DWC3_TRBCTL_CONTROL_DATA,
				false);

		req->trb = &dwc->ep0_trb[dep->trb_enqueue];

		ret = dwc3_ep0_start_trans(dep);
	}

	WARN_ON(ret < 0);
}
/*
 * Queue the STATUS-phase TRB: STATUS3 after a three-stage transfer,
 * STATUS2 for a two-stage one, then kick the transfer.
 */
static int dwc3_ep0_start_control_status(struct dwc3_ep *dep)
{
	struct dwc3 *dwc = dep->dwc;
	u32 trb_type;

	if (dwc->three_stage_setup)
		trb_type = DWC3_TRBCTL_CONTROL_STATUS3;
	else
		trb_type = DWC3_TRBCTL_CONTROL_STATUS2;

	dwc3_ep0_prepare_one_trb(dep, dwc->ep0_trb_addr, 0, trb_type, false);

	return dwc3_ep0_start_trans(dep);
}
/* Start the STATUS phase; a failure here is a controller-level bug. */
static void __dwc3_ep0_do_control_status(struct dwc3 *dwc, struct dwc3_ep *dep)
{
	WARN_ON(dwc3_ep0_start_control_status(dep));
}
/* Resolve the event's endpoint and start the STATUS phase on it. */
static void dwc3_ep0_do_control_status(struct dwc3 *dwc,
		const struct dwc3_event_depevt *event)
{
	struct dwc3_ep *dep = dwc->eps[event->endpoint_number];

	__dwc3_ep0_do_control_status(dwc, dep);
}
/*
 * dwc3_ep0_send_delayed_status - complete a postponed STATUS phase
 *
 * Called once the gadget driver is ready after having returned
 * USB_GADGET_DELAYED_STATUS. The status stage runs in the opposite
 * direction of the data stage. No-op unless EP0 is in STATUS phase.
 */
void dwc3_ep0_send_delayed_status(struct dwc3 *dwc)
{
	unsigned int direction = !dwc->ep0_expect_in;

	dwc->delayed_status = false;
	dwc->clear_stall_protocol = 0;

	if (dwc->ep0state != EP0_STATUS_PHASE)
		return;

	__dwc3_ep0_do_control_status(dwc, dwc->eps[direction]);
}
/*
 * dwc3_ep0_end_control_data - issue End Transfer on a control endpoint
 * @dwc: controller context
 * @dep: control endpoint to end the transfer on
 *
 * Sends DEPCMD ENDTRANSFER for the endpoint's active resource index
 * and clears the cached index afterwards.
 *
 * Fix: the `&` of `&params` had been mangled into the `¶` character
 * (HTML-entity corruption of "&para"), which does not compile; both
 * occurrences are restored to `&params`.
 */
void dwc3_ep0_end_control_data(struct dwc3 *dwc, struct dwc3_ep *dep)
{
	struct dwc3_gadget_ep_cmd_params params;
	u32 cmd;
	int ret;

	/*
	 * For status/DATA OUT stage, TRB will be queued on ep0 out
	 * endpoint for which resource index is zero. Hence allow
	 * queuing ENDXFER command for ep0 out endpoint.
	 */
	if (!dep->resource_index && dep->number)
		return;

	cmd = DWC3_DEPCMD_ENDTRANSFER;
	cmd |= DWC3_DEPCMD_CMDIOC;
	cmd |= DWC3_DEPCMD_PARAM(dep->resource_index);
	memset(&params, 0, sizeof(params));
	ret = dwc3_send_gadget_ep_cmd(dep, cmd, &params);
	WARN_ON_ONCE(ret);
	dep->resource_index = 0;
}
/*
 * dwc3_ep0_xfernotready - handle XferNotReady on the control endpoint
 *
 * DATA stage: verify the host is moving in the expected direction; on
 * a mismatch end the already-started transfer and stall. STATUS stage:
 * stall if a new SETUP is already pending, otherwise start the STATUS
 * phase — possibly deferred until the gadget driver queues its
 * delayed-status request.
 */
static void dwc3_ep0_xfernotready(struct dwc3 *dwc,
		const struct dwc3_event_depevt *event)
{
	switch (event->status) {
	case DEPEVT_STATUS_CONTROL_DATA:
		if (!dwc->softconnect || !dwc->connected)
			return;

		/*
		 * We already have a DATA transfer in the controller's cache,
		 * if we receive a XferNotReady(DATA) we will ignore it, unless
		 * it's for the wrong direction.
		 *
		 * In that case, we must issue END_TRANSFER command to the Data
		 * Phase we already have started and issue SetStall on the
		 * control endpoint.
		 */
		if (dwc->ep0_expect_in != event->endpoint_number) {
			struct dwc3_ep *dep = dwc->eps[dwc->ep0_expect_in];

			dev_err(dwc->dev, "unexpected direction for Data Phase\n");
			dwc3_ep0_end_control_data(dwc, dep);
			dwc3_ep0_stall_and_restart(dwc);
			return;
		}

		break;

	case DEPEVT_STATUS_CONTROL_STATUS:
		/* ignore a premature STATUS not-ready */
		if (dwc->ep0_next_event != DWC3_EP0_NRDY_STATUS)
			return;

		if (dwc->setup_packet_pending) {
			dwc3_ep0_stall_and_restart(dwc);
			return;
		}

		dwc->ep0state = EP0_STATUS_PHASE;

		if (dwc->delayed_status) {
			struct dwc3_ep *dep = dwc->eps[0];

			WARN_ON_ONCE(event->endpoint_number != 1);
			/*
			 * We should handle the delay STATUS phase here if the
			 * request for handling delay STATUS has been queued
			 * into the list.
			 */
			if (!list_empty(&dep->pending_list)) {
				dwc->delayed_status = false;
				usb_gadget_set_state(dwc->gadget,
						USB_STATE_CONFIGURED);
				dwc3_ep0_do_control_status(dwc, event);
			}

			return;
		}

		dwc3_ep0_do_control_status(dwc, event);
	}
}
/*
 * dwc3_ep0_interrupt - dispatch an endpoint event for physical ep0/ep1
 *
 * XferComplete and XferNotReady drive the EP0 state machine; an
 * EPCMDCMPLT for End Transfer clears the endpoint's transfer flags.
 * Remaining event types are ignored on the control endpoints.
 */
void dwc3_ep0_interrupt(struct dwc3 *dwc,
		const struct dwc3_event_depevt *event)
{
	struct dwc3_ep *dep = dwc->eps[event->endpoint_number];
	u8 cmd;

	switch (event->endpoint_event) {
	case DWC3_DEPEVT_XFERCOMPLETE:
		dwc3_ep0_xfer_complete(dwc, event);
		break;

	case DWC3_DEPEVT_XFERNOTREADY:
		dwc3_ep0_xfernotready(dwc, event);
		break;

	case DWC3_DEPEVT_XFERINPROGRESS:
	case DWC3_DEPEVT_RXTXFIFOEVT:
	case DWC3_DEPEVT_STREAMEVT:
		break;

	case DWC3_DEPEVT_EPCMDCMPLT:
		cmd = DEPEVT_PARAMETER_CMD(event->parameters);

		if (cmd == DWC3_DEPCMD_ENDTRANSFER) {
			dep->flags &= ~DWC3_EP_END_TRANSFER_PENDING;
			dep->flags &= ~DWC3_EP_TRANSFER_STARTED;
		}
		break;

	default:
		dev_err(dwc->dev, "unknown endpoint event %d\n", event->endpoint_event);
		break;
	}
}
| linux-master | drivers/usb/dwc3/ep0.c |
// SPDX-License-Identifier: GPL-2.0
/*
* dwc3-imx8mp.c - NXP imx8mp Specific Glue layer
*
* Copyright (c) 2020 NXP.
*/
#include <linux/clk.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include "core.h"
/* USB wakeup registers */
#define USB_WAKEUP_CTRL 0x00
/* Global wakeup interrupt enable, also used to clear interrupt */
#define USB_WAKEUP_EN BIT(31)
/* Wakeup from connect or disconnect, only for superspeed */
#define USB_WAKEUP_SS_CONN BIT(5)
/* 0 select vbus_valid, 1 select sessvld */
#define USB_WAKEUP_VBUS_SRC_SESS_VAL BIT(4)
/* Enable signal for wake up from u3 state */
#define USB_WAKEUP_U3_EN BIT(3)
/* Enable signal for wake up from id change */
#define USB_WAKEUP_ID_EN BIT(2)
/* Enable signal for wake up from vbus change */
#define USB_WAKEUP_VBUS_EN BIT(1)
/* Enable signal for wake up from dp/dm change */
#define USB_WAKEUP_DPDM_EN BIT(0)
#define USB_WAKEUP_EN_MASK GENMASK(5, 0)
/* USB glue registers */
#define USB_CTRL0 0x00
#define USB_CTRL1 0x04
#define USB_CTRL0_PORTPWR_EN BIT(12) /* 1 - PPC enabled (default) */
#define USB_CTRL0_USB3_FIXED BIT(22) /* 1 - USB3 permanent attached */
#define USB_CTRL0_USB2_FIXED BIT(23) /* 1 - USB2 permanent attached */
#define USB_CTRL1_OC_POLARITY BIT(16) /* 0 - HIGH / 1 - LOW */
#define USB_CTRL1_PWR_POLARITY BIT(17) /* 0 - HIGH / 1 - LOW */
/*
 * struct dwc3_imx8mp - per-instance glue state
 * @dev:		glue device
 * @dwc3:		child dwc3 core platform device
 * @hsio_blk_base:	HSIO block registers (wakeup control)
 * @glue_base:		optional glue registers; NULL when not mapped
 * @hsio_clk:		HSIO bus clock
 * @suspend_clk:	suspend clock
 * @irq:		wakeup interrupt line
 * @pm_suspended:	true while the glue is suspended
 * @wakeup_pending:	set by the wakeup IRQ, consumed on resume
 */
struct dwc3_imx8mp {
	struct device			*dev;
	struct platform_device		*dwc3;
	void __iomem			*hsio_blk_base;
	void __iomem			*glue_base;
	struct clk			*hsio_clk;
	struct clk			*suspend_clk;
	int				irq;
	bool				pm_suspended;
	bool				wakeup_pending;
};
/*
 * imx8mp_configure_glue - program USB_CTRL0/1 from DT properties
 *
 * Applies "fsl,permanently-attached", "fsl,disable-port-power-control",
 * "fsl,over-current-active-low" and "fsl,power-active-low". No-op when
 * the optional glue register region was not mapped.
 */
static void imx8mp_configure_glue(struct dwc3_imx8mp *dwc3_imx)
{
	struct device *dev = dwc3_imx->dev;
	u32 value;

	if (!dwc3_imx->glue_base)
		return;

	value = readl(dwc3_imx->glue_base + USB_CTRL0);

	if (device_property_read_bool(dev, "fsl,permanently-attached"))
		value |= (USB_CTRL0_USB2_FIXED | USB_CTRL0_USB3_FIXED);
	else
		value &= ~(USB_CTRL0_USB2_FIXED | USB_CTRL0_USB3_FIXED);

	if (device_property_read_bool(dev, "fsl,disable-port-power-control"))
		value &= ~(USB_CTRL0_PORTPWR_EN);
	else
		value |= USB_CTRL0_PORTPWR_EN;

	writel(value, dwc3_imx->glue_base + USB_CTRL0);

	value = readl(dwc3_imx->glue_base + USB_CTRL1);
	if (device_property_read_bool(dev, "fsl,over-current-active-low"))
		value |= USB_CTRL1_OC_POLARITY;
	else
		value &= ~USB_CTRL1_OC_POLARITY;

	if (device_property_read_bool(dev, "fsl,power-active-low"))
		value |= USB_CTRL1_PWR_POLARITY;
	else
		value &= ~USB_CTRL1_PWR_POLARITY;

	writel(value, dwc3_imx->glue_base + USB_CTRL1);
}
/*
 * dwc3_imx8mp_wakeup_enable - arm wakeup sources for the current role
 *
 * Host role (with xhci present): wake on SuperSpeed connect, U3 exit
 * and DP/DM line change. Device role: wake on VBUS, using the session
 * valid source. Left untouched when the dwc3 core has not probed yet.
 */
static void dwc3_imx8mp_wakeup_enable(struct dwc3_imx8mp *dwc3_imx)
{
	struct dwc3 *dwc3 = platform_get_drvdata(dwc3_imx->dwc3);
	u32 val;

	if (!dwc3)
		return;

	val = readl(dwc3_imx->hsio_blk_base + USB_WAKEUP_CTRL);

	if ((dwc3->current_dr_role == DWC3_GCTL_PRTCAP_HOST) && dwc3->xhci)
		val |= USB_WAKEUP_EN | USB_WAKEUP_SS_CONN |
		       USB_WAKEUP_U3_EN | USB_WAKEUP_DPDM_EN;
	else if (dwc3->current_dr_role == DWC3_GCTL_PRTCAP_DEVICE)
		val |= USB_WAKEUP_EN | USB_WAKEUP_VBUS_EN |
		       USB_WAKEUP_VBUS_SRC_SESS_VAL;

	writel(val, dwc3_imx->hsio_blk_base + USB_WAKEUP_CTRL);
}
static void dwc3_imx8mp_wakeup_disable(struct dwc3_imx8mp *dwc3_imx)
{
u32 val;
val = readl(dwc3_imx->hsio_blk_base + USB_WAKEUP_CTRL);
val &= ~(USB_WAKEUP_EN | USB_WAKEUP_EN_MASK);
writel(val, dwc3_imx->hsio_blk_base + USB_WAKEUP_CTRL);
}
/*
 * dwc3_imx8mp_interrupt - wakeup IRQ handler
 *
 * Only meaningful while suspended: records the pending wakeup, masks
 * the IRQ (re-enabled in dwc3_imx8mp_resume()) and resumes either the
 * xhci (host role) or the dwc3 core (device role).
 */
static irqreturn_t dwc3_imx8mp_interrupt(int irq, void *_dwc3_imx)
{
	struct dwc3_imx8mp *dwc3_imx = _dwc3_imx;
	struct dwc3 *dwc = platform_get_drvdata(dwc3_imx->dwc3);

	if (!dwc3_imx->pm_suspended)
		return IRQ_HANDLED;

	disable_irq_nosync(dwc3_imx->irq);
	dwc3_imx->wakeup_pending = true;

	if ((dwc->current_dr_role == DWC3_GCTL_PRTCAP_HOST) && dwc->xhci)
		pm_runtime_resume(&dwc->xhci->dev);
	else if (dwc->current_dr_role == DWC3_GCTL_PRTCAP_DEVICE)
		pm_runtime_get(dwc->dev);

	return IRQ_HANDLED;
}
/*
 * dwc3_imx8mp_probe - bind the i.MX8MP USB glue
 *
 * Maps the HSIO wakeup block (mandatory) and the glue register block
 * (optional), enables the hsio and suspend clocks, configures the
 * glue from DT, populates the child dwc3 core node and requests the
 * wakeup interrupt. Error paths unwind in reverse order.
 */
static int dwc3_imx8mp_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct device_node *dwc3_np, *node = dev->of_node;
	struct dwc3_imx8mp *dwc3_imx;
	struct resource *res;
	int err, irq;

	if (!node) {
		dev_err(dev, "device node not found\n");
		return -EINVAL;
	}

	dwc3_imx = devm_kzalloc(dev, sizeof(*dwc3_imx), GFP_KERNEL);
	if (!dwc3_imx)
		return -ENOMEM;

	platform_set_drvdata(pdev, dwc3_imx);

	dwc3_imx->dev = dev;

	/* HSIO block (wakeup control) is mandatory */
	dwc3_imx->hsio_blk_base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(dwc3_imx->hsio_blk_base))
		return PTR_ERR(dwc3_imx->hsio_blk_base);

	/* the glue register block is optional */
	res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
	if (!res) {
		dev_warn(dev, "Base address for glue layer missing. Continuing without, some features are missing though.");
	} else {
		dwc3_imx->glue_base = devm_ioremap_resource(dev, res);
		if (IS_ERR(dwc3_imx->glue_base))
			return PTR_ERR(dwc3_imx->glue_base);
	}

	dwc3_imx->hsio_clk = devm_clk_get(dev, "hsio");
	if (IS_ERR(dwc3_imx->hsio_clk)) {
		err = PTR_ERR(dwc3_imx->hsio_clk);
		dev_err(dev, "Failed to get hsio clk, err=%d\n", err);
		return err;
	}

	err = clk_prepare_enable(dwc3_imx->hsio_clk);
	if (err) {
		dev_err(dev, "Failed to enable hsio clk, err=%d\n", err);
		return err;
	}

	dwc3_imx->suspend_clk = devm_clk_get(dev, "suspend");
	if (IS_ERR(dwc3_imx->suspend_clk)) {
		err = PTR_ERR(dwc3_imx->suspend_clk);
		dev_err(dev, "Failed to get suspend clk, err=%d\n", err);
		goto disable_hsio_clk;
	}

	err = clk_prepare_enable(dwc3_imx->suspend_clk);
	if (err) {
		dev_err(dev, "Failed to enable suspend clk, err=%d\n", err);
		goto disable_hsio_clk;
	}

	irq = platform_get_irq(pdev, 0);
	if (irq < 0) {
		err = irq;
		goto disable_clks;
	}
	dwc3_imx->irq = irq;

	imx8mp_configure_glue(dwc3_imx);

	pm_runtime_set_active(dev);
	pm_runtime_enable(dev);
	err = pm_runtime_get_sync(dev);
	if (err < 0)
		goto disable_rpm;

	/* instantiate the dwc3 core described by the child node */
	dwc3_np = of_get_compatible_child(node, "snps,dwc3");
	if (!dwc3_np) {
		err = -ENODEV;
		dev_err(dev, "failed to find dwc3 core child\n");
		goto disable_rpm;
	}

	err = of_platform_populate(node, NULL, NULL, dev);
	if (err) {
		dev_err(&pdev->dev, "failed to create dwc3 core\n");
		goto err_node_put;
	}

	dwc3_imx->dwc3 = of_find_device_by_node(dwc3_np);
	if (!dwc3_imx->dwc3) {
		dev_err(dev, "failed to get dwc3 platform device\n");
		err = -ENODEV;
		goto depopulate;
	}
	of_node_put(dwc3_np);

	/* threaded: the handler may resume devices */
	err = devm_request_threaded_irq(dev, irq, NULL, dwc3_imx8mp_interrupt,
					IRQF_ONESHOT, dev_name(dev), dwc3_imx);
	if (err) {
		dev_err(dev, "failed to request IRQ #%d --> %d\n", irq, err);
		goto depopulate;
	}

	device_set_wakeup_capable(dev, true);
	pm_runtime_put(dev);

	return 0;

depopulate:
	of_platform_depopulate(dev);
err_node_put:
	of_node_put(dwc3_np);
disable_rpm:
	pm_runtime_disable(dev);
	pm_runtime_put_noidle(dev);
disable_clks:
	clk_disable_unprepare(dwc3_imx->suspend_clk);
disable_hsio_clk:
	clk_disable_unprepare(dwc3_imx->hsio_clk);

	return err;
}
/* Unbind: depopulate the dwc3 child, gate clocks, drop runtime PM. */
static void dwc3_imx8mp_remove(struct platform_device *pdev)
{
	struct dwc3_imx8mp *dwc3_imx = platform_get_drvdata(pdev);
	struct device *dev = &pdev->dev;

	pm_runtime_get_sync(dev);
	of_platform_depopulate(dev);

	clk_disable_unprepare(dwc3_imx->suspend_clk);
	clk_disable_unprepare(dwc3_imx->hsio_clk);

	pm_runtime_disable(dev);
	pm_runtime_put_noidle(dev);
}
/*
 * Common suspend path for system and runtime PM. Arms the wakeup
 * logic for runtime suspend or when system wakeup is allowed.
 */
static int __maybe_unused dwc3_imx8mp_suspend(struct dwc3_imx8mp *dwc3_imx,
					      pm_message_t msg)
{
	bool arm_wakeup = PMSG_IS_AUTO(msg) || device_may_wakeup(dwc3_imx->dev);

	if (dwc3_imx->pm_suspended)
		return 0;

	if (arm_wakeup)
		dwc3_imx8mp_wakeup_enable(dwc3_imx);

	dwc3_imx->pm_suspended = true;

	return 0;
}
/*
 * dwc3_imx8mp_resume - common resume path for system and runtime PM
 *
 * Disarms the wakeup sources, restores the glue configuration (lost
 * on power-down), and if a wakeup interrupt fired while suspended,
 * balances the runtime-PM reference taken in the IRQ handler (device
 * role) or waits for the xhci clock switch (host role) before
 * re-enabling the wakeup IRQ.
 */
static int __maybe_unused dwc3_imx8mp_resume(struct dwc3_imx8mp *dwc3_imx,
					     pm_message_t msg)
{
	struct dwc3 *dwc = platform_get_drvdata(dwc3_imx->dwc3);
	int ret = 0;

	if (!dwc3_imx->pm_suspended)
		return 0;

	/* Wakeup disable */
	dwc3_imx8mp_wakeup_disable(dwc3_imx);
	dwc3_imx->pm_suspended = false;

	/* Upon power loss any previous configuration is lost, restore it */
	imx8mp_configure_glue(dwc3_imx);

	if (dwc3_imx->wakeup_pending) {
		dwc3_imx->wakeup_pending = false;

		if (dwc->current_dr_role == DWC3_GCTL_PRTCAP_DEVICE) {
			/* drop the reference taken in the IRQ handler */
			pm_runtime_mark_last_busy(dwc->dev);
			pm_runtime_put_autosuspend(dwc->dev);
		} else {
			/*
			 * Add wait for xhci switch from suspend
			 * clock to normal clock to detect connection.
			 */
			usleep_range(9000, 10000);
		}
		enable_irq(dwc3_imx->irq);
	}

	return ret;
}
/*
 * dwc3_imx8mp_pm_suspend - system sleep entry
 *
 * Keeps the suspend clock running (and arms IRQ wake) only when the
 * device may wake the system; the hsio clock is always gated.
 */
static int __maybe_unused dwc3_imx8mp_pm_suspend(struct device *dev)
{
	struct dwc3_imx8mp *dwc3_imx = dev_get_drvdata(dev);
	int ret;

	ret = dwc3_imx8mp_suspend(dwc3_imx, PMSG_SUSPEND);

	if (device_may_wakeup(dwc3_imx->dev))
		enable_irq_wake(dwc3_imx->irq);
	else
		clk_disable_unprepare(dwc3_imx->suspend_clk);

	clk_disable_unprepare(dwc3_imx->hsio_clk);
	dev_dbg(dev, "dwc3 imx8mp pm suspend.\n");

	return ret;
}
/*
 * dwc3_imx8mp_pm_resume - system sleep exit
 *
 * Mirrors dwc3_imx8mp_pm_suspend(): re-enables the clocks as needed,
 * runs the common resume path and re-syncs the runtime PM status.
 */
static int __maybe_unused dwc3_imx8mp_pm_resume(struct device *dev)
{
	struct dwc3_imx8mp *dwc3_imx = dev_get_drvdata(dev);
	int ret;

	if (device_may_wakeup(dwc3_imx->dev)) {
		disable_irq_wake(dwc3_imx->irq);
	} else {
		ret = clk_prepare_enable(dwc3_imx->suspend_clk);
		if (ret)
			return ret;
	}

	ret = clk_prepare_enable(dwc3_imx->hsio_clk);
	if (ret)
		return ret;

	ret = dwc3_imx8mp_resume(dwc3_imx, PMSG_RESUME);

	/* re-sync runtime PM state after system resume */
	pm_runtime_disable(dev);
	pm_runtime_set_active(dev);
	pm_runtime_enable(dev);

	dev_dbg(dev, "dwc3 imx8mp pm resume.\n");

	return ret;
}
/* Runtime PM suspend: delegate to the common suspend path. */
static int __maybe_unused dwc3_imx8mp_runtime_suspend(struct device *dev)
{
	dev_dbg(dev, "dwc3 imx8mp runtime suspend.\n");

	return dwc3_imx8mp_suspend(dev_get_drvdata(dev), PMSG_AUTO_SUSPEND);
}
/* Runtime PM resume: delegate to the common resume path. */
static int __maybe_unused dwc3_imx8mp_runtime_resume(struct device *dev)
{
	dev_dbg(dev, "dwc3 imx8mp runtime resume.\n");

	return dwc3_imx8mp_resume(dev_get_drvdata(dev), PMSG_AUTO_RESUME);
}
/* System sleep and runtime PM callbacks (no-ops without CONFIG_PM). */
static const struct dev_pm_ops dwc3_imx8mp_dev_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(dwc3_imx8mp_pm_suspend, dwc3_imx8mp_pm_resume)
	SET_RUNTIME_PM_OPS(dwc3_imx8mp_runtime_suspend,
			   dwc3_imx8mp_runtime_resume, NULL)
};
/* Devicetree match table: single compatible for the i.MX8MP glue. */
static const struct of_device_id dwc3_imx8mp_of_match[] = {
	{ .compatible = "fsl,imx8mp-dwc3", },
	{},
};
MODULE_DEVICE_TABLE(of, dwc3_imx8mp_of_match);
/* Platform driver registration and module metadata. */
static struct platform_driver dwc3_imx8mp_driver = {
	.probe		= dwc3_imx8mp_probe,
	.remove_new	= dwc3_imx8mp_remove,
	.driver		= {
		.name	= "imx8mp-dwc3",
		.pm	= &dwc3_imx8mp_dev_pm_ops,
		.of_match_table	= dwc3_imx8mp_of_match,
	},
};

module_platform_driver(dwc3_imx8mp_driver);

MODULE_ALIAS("platform:imx8mp-dwc3");
MODULE_AUTHOR("[email protected]");
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("DesignWare USB3 imx8mp Glue Layer");
| linux-master | drivers/usb/dwc3/dwc3-imx8mp.c |
// SPDX-License-Identifier: GPL-2.0
/*
* dwc3-am62.c - TI specific Glue layer for AM62 DWC3 USB Controller
*
* Copyright (C) 2022 Texas Instruments Incorporated - https://www.ti.com
*/
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/mfd/syscon.h>
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/pm_runtime.h>
#include <linux/clk.h>
#include <linux/regmap.h>
#include <linux/pinctrl/consumer.h>
#include "core.h"
/* USB WRAPPER register offsets */
#define USBSS_PID 0x0
#define USBSS_OVERCURRENT_CTRL 0x4
#define USBSS_PHY_CONFIG 0x8
#define USBSS_PHY_TEST 0xc
#define USBSS_CORE_STAT 0x14
#define USBSS_HOST_VBUS_CTRL 0x18
#define USBSS_MODE_CONTROL 0x1c
#define USBSS_WAKEUP_CONFIG 0x30
#define USBSS_WAKEUP_STAT 0x34
#define USBSS_OVERRIDE_CONFIG 0x38
#define USBSS_IRQ_MISC_STATUS_RAW 0x430
#define USBSS_IRQ_MISC_STATUS 0x434
#define USBSS_IRQ_MISC_ENABLE_SET 0x438
#define USBSS_IRQ_MISC_ENABLE_CLR 0x43c
#define USBSS_IRQ_MISC_EOI 0x440
#define USBSS_INTR_TEST 0x490
#define USBSS_VBUS_FILTER 0x614
#define USBSS_VBUS_STAT 0x618
#define USBSS_DEBUG_CFG 0x708
#define USBSS_DEBUG_DATA 0x70c
#define USBSS_HOST_HUB_CTRL 0x714
/* PHY CONFIG register bits */
#define USBSS_PHY_VBUS_SEL_MASK GENMASK(2, 1)
#define USBSS_PHY_VBUS_SEL_SHIFT 1
#define USBSS_PHY_LANE_REVERSE BIT(0)
/* CORE STAT register bits */
#define USBSS_CORE_OPERATIONAL_MODE_MASK GENMASK(13, 12)
#define USBSS_CORE_OPERATIONAL_MODE_SHIFT 12
/* MODE CONTROL register bits */
#define USBSS_MODE_VALID BIT(0)
/* WAKEUP CONFIG register bits */
#define USBSS_WAKEUP_CFG_OVERCURRENT_EN BIT(3)
#define USBSS_WAKEUP_CFG_LINESTATE_EN BIT(2)
#define USBSS_WAKEUP_CFG_SESSVALID_EN BIT(1)
#define USBSS_WAKEUP_CFG_VBUSVALID_EN BIT(0)
#define USBSS_WAKEUP_CFG_ALL (USBSS_WAKEUP_CFG_VBUSVALID_EN | \
USBSS_WAKEUP_CFG_SESSVALID_EN | \
USBSS_WAKEUP_CFG_LINESTATE_EN | \
USBSS_WAKEUP_CFG_OVERCURRENT_EN)
#define USBSS_WAKEUP_CFG_NONE 0
/* WAKEUP STAT register bits */
#define USBSS_WAKEUP_STAT_OVERCURRENT BIT(4)
#define USBSS_WAKEUP_STAT_LINESTATE BIT(3)
#define USBSS_WAKEUP_STAT_SESSVALID BIT(2)
#define USBSS_WAKEUP_STAT_VBUSVALID BIT(1)
#define USBSS_WAKEUP_STAT_CLR BIT(0)
/* IRQ_MISC_STATUS_RAW register bits */
#define USBSS_IRQ_MISC_RAW_VBUSVALID BIT(22)
#define USBSS_IRQ_MISC_RAW_SESSVALID BIT(20)
/* IRQ_MISC_STATUS register bits */
#define USBSS_IRQ_MISC_VBUSVALID BIT(22)
#define USBSS_IRQ_MISC_SESSVALID BIT(20)
/* IRQ_MISC_ENABLE_SET register bits */
#define USBSS_IRQ_MISC_ENABLE_SET_VBUSVALID BIT(22)
#define USBSS_IRQ_MISC_ENABLE_SET_SESSVALID BIT(20)
/* IRQ_MISC_ENABLE_CLR register bits */
#define USBSS_IRQ_MISC_ENABLE_CLR_VBUSVALID BIT(22)
#define USBSS_IRQ_MISC_ENABLE_CLR_SESSVALID BIT(20)
/* IRQ_MISC_EOI register bits */
#define USBSS_IRQ_MISC_EOI_VECTOR BIT(0)
/* VBUS_STAT register bits */
#define USBSS_VBUS_STAT_SESSVALID BIT(2)
#define USBSS_VBUS_STAT_VBUSVALID BIT(0)
/* Mask for PHY PLL REFCLK */
#define PHY_PLL_REFCLK_MASK GENMASK(3, 0)
#define DWC3_AM62_AUTOSUSPEND_DELAY 100
/**
 * struct dwc3_am62 - TI AM62 USB wrapper (USBSS) glue state
 * @dev: glue layer device
 * @usbss: mapped USBSS wrapper register space
 * @usb2_refclk: "ref" clock whose rate selects the PHY PLL rate code
 * @rate_code: index into dwc3_ti_rate_table matching the refclk rate (kHz)
 * @syscon: regmap looked up via the "ti,syscon-phy-pll-refclk" DT property
 * @offset: register offset inside @syscon (first phandle argument)
 * @vbus_divider: non-zero when the "ti,vbus-divider" DT property is present
 * @wakeup_stat: USBSS_WAKEUP_STAT snapshot latched on resume
 */
struct dwc3_am62 {
	struct device *dev;
	void __iomem *usbss;
	struct clk *usb2_refclk;
	int rate_code;
	struct regmap *syscon;
	unsigned int offset;
	unsigned int vbus_divider;
	u32 wakeup_stat;
};
/*
 * Supported usb2_refclk rates in kHz. The array index of the matching
 * entry is the code programmed into the PHY PLL REFCLK field via
 * phy_syscon_pll_refclk().
 *
 * NOTE(review): the entry order (58000 before 50000) mirrors the
 * hardware rate-code assignment and is intentional-looking, but should
 * be confirmed against the AM62 TRM before any "sorting" cleanup.
 */
static const int dwc3_ti_rate_table[] = {	/* in KHZ */
	9600,
	10000,
	12000,
	19200,
	20000,
	24000,
	25000,
	26000,
	38400,
	40000,
	58000,
	50000,
	52000,
};
/* Read a 32-bit USBSS wrapper register at @offset. */
static inline u32 dwc3_ti_readl(struct dwc3_am62 *am62, u32 offset)
{
	void __iomem *addr = am62->usbss + offset;

	return readl(addr);
}
/* Write @value to the 32-bit USBSS wrapper register at @offset. */
static inline void dwc3_ti_writel(struct dwc3_am62 *am62, u32 offset, u32 value)
{
	void __iomem *addr = am62->usbss + offset;

	writel(value, addr);
}
/*
 * phy_syscon_pll_refclk - program the PHY PLL reference-clock rate code
 * @am62: glue state; am62->rate_code must already be computed
 *
 * Looks up the syscon regmap referenced by the "ti,syscon-phy-pll-refclk"
 * DT property, records the register offset passed as the single fixed
 * phandle argument, and writes am62->rate_code into the PLL REFCLK field
 * at that offset.
 *
 * Returns 0 on success or a negative errno.
 */
static int phy_syscon_pll_refclk(struct dwc3_am62 *am62)
{
	struct device *dev = am62->dev;
	struct device_node *node = dev->of_node;
	struct of_phandle_args args;
	struct regmap *syscon;
	int ret;
	syscon = syscon_regmap_lookup_by_phandle(node, "ti,syscon-phy-pll-refclk");
	if (IS_ERR(syscon)) {
		dev_err(dev, "unable to get ti,syscon-phy-pll-refclk regmap\n");
		return PTR_ERR(syscon);
	}
	am62->syscon = syscon;
	/* The single fixed phandle argument is the register offset. */
	ret = of_parse_phandle_with_fixed_args(node, "ti,syscon-phy-pll-refclk", 1,
					       0, &args);
	if (ret)
		return ret;
	am62->offset = args.args[0];
	/* Only the REFCLK field is touched; other bits are preserved. */
	ret = regmap_update_bits(am62->syscon, am62->offset, PHY_PLL_REFCLK_MASK, am62->rate_code);
	if (ret) {
		dev_err(dev, "failed to set phy pll reference clock rate\n");
		return ret;
	}
	return 0;
}
/*
 * dwc3_ti_probe - initialize the AM62 USB wrapper and populate the dwc3 core
 * @pdev: glue platform device
 *
 * Maps the USBSS registers, derives the PHY PLL rate code from the "ref"
 * clock, programs it through the syscon, applies the optional VBUS divider,
 * enables runtime PM, creates the dwc3 core child from DT, sets the mode
 * valid bit and arms system wakeup plus autosuspend.
 *
 * Returns 0 on success or a negative errno.
 */
static int dwc3_ti_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct device_node *node = pdev->dev.of_node;
	struct dwc3_am62 *am62;
	int i, ret;
	unsigned long rate;
	u32 reg;
	am62 = devm_kzalloc(dev, sizeof(*am62), GFP_KERNEL);
	if (!am62)
		return -ENOMEM;
	am62->dev = dev;
	platform_set_drvdata(pdev, am62);
	am62->usbss = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(am62->usbss)) {
		dev_err(dev, "can't map IOMEM resource\n");
		return PTR_ERR(am62->usbss);
	}
	am62->usb2_refclk = devm_clk_get(dev, "ref");
	if (IS_ERR(am62->usb2_refclk)) {
		dev_err(dev, "can't get usb2_refclk\n");
		return PTR_ERR(am62->usb2_refclk);
	}
	/* Calculate the rate code: index of the matching kHz table entry. */
	rate = clk_get_rate(am62->usb2_refclk);
	rate /= 1000;	// To KHz
	for (i = 0; i < ARRAY_SIZE(dwc3_ti_rate_table); i++) {
		if (dwc3_ti_rate_table[i] == rate)
			break;
	}
	if (i == ARRAY_SIZE(dwc3_ti_rate_table)) {
		dev_err(dev, "unsupported usb2_refclk rate: %lu KHz\n", rate);
		return -EINVAL;
	}
	am62->rate_code = i;
	/* Read the syscon property and set the rate code */
	ret = phy_syscon_pll_refclk(am62);
	if (ret)
		return ret;
	/* VBUS divider select */
	am62->vbus_divider = device_property_read_bool(dev, "ti,vbus-divider");
	reg = dwc3_ti_readl(am62, USBSS_PHY_CONFIG);
	if (am62->vbus_divider)
		reg |= 1 << USBSS_PHY_VBUS_SEL_SHIFT;
	dwc3_ti_writel(am62, USBSS_PHY_CONFIG, reg);
	pm_runtime_set_active(dev);
	pm_runtime_enable(dev);
	/*
	 * Don't ignore its dependencies with its children
	 */
	pm_suspend_ignore_children(dev, false);
	clk_prepare_enable(am62->usb2_refclk);
	/* Hold a runtime-PM reference while the child core is created. */
	pm_runtime_get_noresume(dev);
	ret = of_platform_populate(node, NULL, NULL, dev);
	if (ret) {
		dev_err(dev, "failed to create dwc3 core: %d\n", ret);
		goto err_pm_disable;
	}
	/* Set mode valid bit to indicate role is valid */
	reg = dwc3_ti_readl(am62, USBSS_MODE_CONTROL);
	reg |= USBSS_MODE_VALID;
	dwc3_ti_writel(am62, USBSS_MODE_CONTROL, reg);
	/* Device has capability to wakeup system from sleep */
	device_set_wakeup_capable(dev, true);
	ret = device_wakeup_enable(dev);
	if (ret)
		dev_err(dev, "couldn't enable device as a wakeup source: %d\n", ret);
	/* Setting up autosuspend; drops the reference taken above. */
	pm_runtime_set_autosuspend_delay(dev, DWC3_AM62_AUTOSUSPEND_DELAY);
	pm_runtime_use_autosuspend(dev);
	pm_runtime_mark_last_busy(dev);
	pm_runtime_put_autosuspend(dev);
	return 0;
err_pm_disable:
	clk_disable_unprepare(am62->usb2_refclk);
	pm_runtime_disable(dev);
	pm_runtime_set_suspended(dev);
	return ret;
}
/* device_for_each_child() callback: unregister one populated child. */
static int dwc3_ti_remove_core(struct device *dev, void *c)
{
	platform_device_unregister(to_platform_device(dev));

	return 0;
}
/*
 * dwc3_ti_remove - tear down the glue layer
 * @pdev: glue platform device
 *
 * Unregisters the dwc3 core child first, then invalidates the mode
 * control bit, drops the runtime-PM reference and gates the refclk.
 */
static void dwc3_ti_remove(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct dwc3_am62 *am62 = platform_get_drvdata(pdev);
	u32 reg;
	device_for_each_child(dev, NULL, dwc3_ti_remove_core);
	/* Clear mode valid bit */
	reg = dwc3_ti_readl(am62, USBSS_MODE_CONTROL);
	reg &= ~USBSS_MODE_VALID;
	dwc3_ti_writel(am62, USBSS_MODE_CONTROL, reg);
	/* Drop our runtime-PM usage count before disabling runtime PM. */
	pm_runtime_put_sync(dev);
	clk_disable_unprepare(am62->usb2_refclk);
	pm_runtime_disable(dev);
	pm_runtime_set_suspended(dev);
}
#ifdef CONFIG_PM
/*
 * dwc3_ti_suspend_common - common system/runtime suspend handler
 * @dev: glue layer device
 *
 * If the device may wake the system, arm the wakeup sources matching the
 * current role (linestate/overcurrent when acting as host, VBUS/session
 * valid when acting as device), clear stale wakeup status so the next
 * resume can identify the cause, then gate the refclk.
 *
 * Fix: the previous read-back of USBSS_WAKEUP_CONFIG was dead code - the
 * value was unconditionally overwritten in both branches before being
 * written back - so it has been removed.
 */
static int dwc3_ti_suspend_common(struct device *dev)
{
	struct dwc3_am62 *am62 = dev_get_drvdata(dev);
	u32 reg, current_prtcap_dir;
	if (device_may_wakeup(dev)) {
		reg = dwc3_ti_readl(am62, USBSS_CORE_STAT);
		current_prtcap_dir = (reg & USBSS_CORE_OPERATIONAL_MODE_MASK)
				     >> USBSS_CORE_OPERATIONAL_MODE_SHIFT;
		/* Set wakeup config enable bits (register written whole). */
		if (current_prtcap_dir == DWC3_GCTL_PRTCAP_HOST) {
			reg = USBSS_WAKEUP_CFG_LINESTATE_EN | USBSS_WAKEUP_CFG_OVERCURRENT_EN;
		} else {
			reg = USBSS_WAKEUP_CFG_VBUSVALID_EN | USBSS_WAKEUP_CFG_SESSVALID_EN;
			/*
			 * Enable LINESTATE wake up only if connected to bus
			 * and in U2/L3 state else it causes spurious wake-up.
			 */
		}
		dwc3_ti_writel(am62, USBSS_WAKEUP_CONFIG, reg);
		/* clear wakeup status so we know what caused the wake up */
		dwc3_ti_writel(am62, USBSS_WAKEUP_STAT, USBSS_WAKEUP_STAT_CLR);
	}
	clk_disable_unprepare(am62->usb2_refclk);
	return 0;
}
/*
 * Common resume path: ungate the refclk, disarm every wakeup source and
 * latch the wakeup status register for later inspection.
 */
static int dwc3_ti_resume_common(struct device *dev)
{
	struct dwc3_am62 *am62 = dev_get_drvdata(dev);

	clk_prepare_enable(am62->usb2_refclk);

	/* Clear wakeup config enable bits now that we are running again. */
	if (device_may_wakeup(dev))
		dwc3_ti_writel(am62, USBSS_WAKEUP_CONFIG, USBSS_WAKEUP_CFG_NONE);

	/* Remember what (if anything) triggered the wakeup. */
	am62->wakeup_stat = dwc3_ti_readl(am62, USBSS_WAKEUP_STAT);

	return 0;
}
static UNIVERSAL_DEV_PM_OPS(dwc3_ti_pm_ops, dwc3_ti_suspend_common,
dwc3_ti_resume_common, NULL);
#define DEV_PM_OPS (&dwc3_ti_pm_ops)
#else
#define DEV_PM_OPS NULL
#endif /* CONFIG_PM */
static const struct of_device_id dwc3_ti_of_match[] = {
{ .compatible = "ti,am62-usb"},
{},
};
MODULE_DEVICE_TABLE(of, dwc3_ti_of_match);
static struct platform_driver dwc3_ti_driver = {
.probe = dwc3_ti_probe,
.remove_new = dwc3_ti_remove,
.driver = {
.name = "dwc3-am62",
.pm = DEV_PM_OPS,
.of_match_table = dwc3_ti_of_match,
},
};
module_platform_driver(dwc3_ti_driver);
MODULE_ALIAS("platform:dwc3-am62");
MODULE_AUTHOR("Aswath Govindraju <[email protected]>");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("DesignWare USB3 TI Glue Layer");
| linux-master | drivers/usb/dwc3/dwc3-am62.c |
// SPDX-License-Identifier: GPL-2.0
/*
* DWC3 glue for Cavium Octeon III SOCs.
*
* Copyright (C) 2010-2017 Cavium Networks
* Copyright (C) 2023 RACOM s.r.o.
*/
#include <linux/bitfield.h>
#include <linux/bits.h>
#include <linux/device.h>
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
/*
* USB Control Register
*/
#define USBDRD_UCTL_CTL 0x00
/* BIST fast-clear mode select. A BIST run with this bit set
* clears all entries in USBH RAMs to 0x0.
*/
# define USBDRD_UCTL_CTL_CLEAR_BIST BIT_ULL(63)
/* 1 = Start BIST and cleared by hardware */
# define USBDRD_UCTL_CTL_START_BIST BIT_ULL(62)
/* Reference clock select for SuperSpeed and HighSpeed PLLs:
* 0x0 = Both PLLs use DLMC_REF_CLK0 for reference clock
* 0x1 = Both PLLs use DLMC_REF_CLK1 for reference clock
* 0x2 = SuperSpeed PLL uses DLMC_REF_CLK0 for reference clock &
* HighSpeed PLL uses PLL_REF_CLK for reference clck
* 0x3 = SuperSpeed PLL uses DLMC_REF_CLK1 for reference clock &
* HighSpeed PLL uses PLL_REF_CLK for reference clck
*/
# define USBDRD_UCTL_CTL_REF_CLK_SEL GENMASK_ULL(61, 60)
/* 1 = Spread-spectrum clock enable, 0 = SS clock disable */
# define USBDRD_UCTL_CTL_SSC_EN BIT_ULL(59)
/* Spread-spectrum clock modulation range:
* 0x0 = -4980 ppm downspread
* 0x1 = -4492 ppm downspread
* 0x2 = -4003 ppm downspread
* 0x3 - 0x7 = Reserved
*/
# define USBDRD_UCTL_CTL_SSC_RANGE GENMASK_ULL(58, 56)
/* Enable non-standard oscillator frequencies:
* [55:53] = modules -1
* [52:47] = 2's complement push amount, 0 = Feature disabled
*/
# define USBDRD_UCTL_CTL_SSC_REF_CLK_SEL GENMASK_ULL(55, 47)
/* Reference clock multiplier for non-standard frequencies:
* 0x19 = 100MHz on DLMC_REF_CLK* if REF_CLK_SEL = 0x0 or 0x1
* 0x28 = 125MHz on DLMC_REF_CLK* if REF_CLK_SEL = 0x0 or 0x1
* 0x32 = 50MHz on DLMC_REF_CLK* if REF_CLK_SEL = 0x0 or 0x1
* Other Values = Reserved
*/
# define USBDRD_UCTL_CTL_MPLL_MULTIPLIER GENMASK_ULL(46, 40)
/* Enable reference clock to prescaler for SuperSpeed functionality.
* Should always be set to "1"
*/
# define USBDRD_UCTL_CTL_REF_SSP_EN BIT_ULL(39)
/* Divide the reference clock by 2 before entering the
* REF_CLK_FSEL divider:
* If REF_CLK_SEL = 0x0 or 0x1, then only 0x0 is legal
* If REF_CLK_SEL = 0x2 or 0x3, then:
* 0x1 = DLMC_REF_CLK* is 125MHz
* 0x0 = DLMC_REF_CLK* is another supported frequency
*/
# define USBDRD_UCTL_CTL_REF_CLK_DIV2 BIT_ULL(38)
/* Select reference clock freqnuency for both PLL blocks:
* 0x27 = REF_CLK_SEL is 0x0 or 0x1
* 0x07 = REF_CLK_SEL is 0x2 or 0x3
*/
# define USBDRD_UCTL_CTL_REF_CLK_FSEL GENMASK_ULL(37, 32)
/* Controller clock enable. */
# define USBDRD_UCTL_CTL_H_CLK_EN BIT_ULL(30)
/* Select bypass input to controller clock divider:
* 0x0 = Use divided coprocessor clock from H_CLKDIV
* 0x1 = Use clock from GPIO pins
*/
# define USBDRD_UCTL_CTL_H_CLK_BYP_SEL BIT_ULL(29)
/* Reset controller clock divider. */
# define USBDRD_UCTL_CTL_H_CLKDIV_RST BIT_ULL(28)
/* Clock divider select:
* 0x0 = divide by 1
* 0x1 = divide by 2
* 0x2 = divide by 4
* 0x3 = divide by 6
* 0x4 = divide by 8
* 0x5 = divide by 16
* 0x6 = divide by 24
* 0x7 = divide by 32
*/
# define USBDRD_UCTL_CTL_H_CLKDIV_SEL GENMASK_ULL(26, 24)
/* USB3 port permanently attached: 0x0 = No, 0x1 = Yes */
# define USBDRD_UCTL_CTL_USB3_PORT_PERM_ATTACH BIT_ULL(21)
/* USB2 port permanently attached: 0x0 = No, 0x1 = Yes */
# define USBDRD_UCTL_CTL_USB2_PORT_PERM_ATTACH BIT_ULL(20)
/* Disable SuperSpeed PHY: 0x0 = No, 0x1 = Yes */
# define USBDRD_UCTL_CTL_USB3_PORT_DISABLE BIT_ULL(18)
/* Disable HighSpeed PHY: 0x0 = No, 0x1 = Yes */
# define USBDRD_UCTL_CTL_USB2_PORT_DISABLE BIT_ULL(16)
/* Enable PHY SuperSpeed block power: 0x0 = No, 0x1 = Yes */
# define USBDRD_UCTL_CTL_SS_POWER_EN BIT_ULL(14)
/* Enable PHY HighSpeed block power: 0x0 = No, 0x1 = Yes */
# define USBDRD_UCTL_CTL_HS_POWER_EN BIT_ULL(12)
/* Enable USB UCTL interface clock: 0xx = No, 0x1 = Yes */
# define USBDRD_UCTL_CTL_CSCLK_EN BIT_ULL(4)
/* Controller mode: 0x0 = Host, 0x1 = Device */
# define USBDRD_UCTL_CTL_DRD_MODE BIT_ULL(3)
/* PHY reset */
# define USBDRD_UCTL_CTL_UPHY_RST BIT_ULL(2)
/* Software reset UAHC */
# define USBDRD_UCTL_CTL_UAHC_RST BIT_ULL(1)
/* Software resets UCTL */
# define USBDRD_UCTL_CTL_UCTL_RST BIT_ULL(0)
#define USBDRD_UCTL_BIST_STATUS 0x08
#define USBDRD_UCTL_SPARE0 0x10
#define USBDRD_UCTL_INTSTAT 0x30
#define USBDRD_UCTL_PORT_CFG_HS(port) (0x40 + (0x20 * port))
#define USBDRD_UCTL_PORT_CFG_SS(port) (0x48 + (0x20 * port))
#define USBDRD_UCTL_PORT_CR_DBG_CFG(port) (0x50 + (0x20 * port))
#define USBDRD_UCTL_PORT_CR_DBG_STATUS(port) (0x58 + (0x20 * port))
/*
* UCTL Configuration Register
*/
#define USBDRD_UCTL_HOST_CFG 0xe0
/* Indicates minimum value of all received BELT values */
# define USBDRD_UCTL_HOST_CFG_HOST_CURRENT_BELT GENMASK_ULL(59, 48)
/* HS jitter adjustment */
# define USBDRD_UCTL_HOST_CFG_FLA GENMASK_ULL(37, 32)
/* Bus-master enable: 0x0 = Disabled (stall DMAs), 0x1 = enabled */
# define USBDRD_UCTL_HOST_CFG_BME BIT_ULL(28)
/* Overcurrent protection enable: 0x0 = unavailable, 0x1 = available */
# define USBDRD_UCTL_HOST_OCI_EN BIT_ULL(27)
/* Overcurrent sene selection:
* 0x0 = Overcurrent indication from off-chip is active-low
* 0x1 = Overcurrent indication from off-chip is active-high
*/
# define USBDRD_UCTL_HOST_OCI_ACTIVE_HIGH_EN BIT_ULL(26)
/* Port power control enable: 0x0 = unavailable, 0x1 = available */
# define USBDRD_UCTL_HOST_PPC_EN BIT_ULL(25)
/* Port power control sense selection:
* 0x0 = Port power to off-chip is active-low
* 0x1 = Port power to off-chip is active-high
*/
# define USBDRD_UCTL_HOST_PPC_ACTIVE_HIGH_EN BIT_ULL(24)
/*
* UCTL Shim Features Register
*/
#define USBDRD_UCTL_SHIM_CFG 0xe8
/* Out-of-bound UAHC register access: 0 = read, 1 = write */
# define USBDRD_UCTL_SHIM_CFG_XS_NCB_OOB_WRN BIT_ULL(63)
/* SRCID error log for out-of-bound UAHC register access:
* [59:58] = chipID
* [57] = Request source: 0 = core, 1 = NCB-device
* [56:51] = Core/NCB-device number, [56] always 0 for NCB devices
* [50:48] = SubID
*/
# define USBDRD_UCTL_SHIM_CFG_XS_NCB_OOB_OSRC GENMASK_ULL(59, 48)
/* Error log for bad UAHC DMA access: 0 = Read log, 1 = Write log */
# define USBDRD_UCTL_SHIM_CFG_XM_BAD_DMA_WRN BIT_ULL(47)
/* Encoded error type for bad UAHC DMA */
# define USBDRD_UCTL_SHIM_CFG_XM_BAD_DMA_TYPE GENMASK_ULL(43, 40)
/* Select the IOI read command used by DMA accesses */
# define USBDRD_UCTL_SHIM_CFG_DMA_READ_CMD BIT_ULL(12)
/* Select endian format for DMA accesses to the L2C:
* 0x0 = Little endian
* 0x1 = Big endian
* 0x2 = Reserved
* 0x3 = Reserved
*/
# define USBDRD_UCTL_SHIM_CFG_DMA_ENDIAN_MODE GENMASK_ULL(9, 8)
/* Select endian format for IOI CSR access to UAHC:
* 0x0 = Little endian
* 0x1 = Big endian
* 0x2 = Reserved
* 0x3 = Reserved
*/
# define USBDRD_UCTL_SHIM_CFG_CSR_ENDIAN_MODE GENMASK_ULL(1, 0)
#define USBDRD_UCTL_ECC 0xf0
#define USBDRD_UCTL_SPARE1 0xf8
/**
 * struct dwc3_octeon - OCTEON III USB UCTL glue state
 * @dev: glue layer device
 * @base: mapped USBDRD UCTL register space
 */
struct dwc3_octeon {
	struct device *dev;
	void __iomem *base;
};
#define DWC3_GPIO_POWER_NONE (-1)
#ifdef CONFIG_CAVIUM_OCTEON_SOC
#include <asm/octeon/octeon.h>
/* UCTL CSR accessors: registers are 64-bit, accessed via the cvmx helpers. */
static inline uint64_t dwc3_octeon_readq(void __iomem *addr)
{
	return cvmx_readq_csr(addr);
}
static inline void dwc3_octeon_writeq(void __iomem *base, uint64_t val)
{
	cvmx_writeq_csr(base, val);
}
/*
 * dwc3_octeon_config_gpio - route USB port-power control to a GPIO pin
 * @index: USB controller index, selects the output_sel function code
 * @gpio: GPIO number to drive port power
 *
 * Enables the pin's output driver and selects the USB power function.
 * The register bank and function codes differ by SoC model and GPIO
 * number: CN73xx/CNF75xx use codes 0x14/0x15 for GPIOs 0-31; other
 * models use 0x14/0x19 with GPIOs above 15 living in the XBIT bank.
 */
static void dwc3_octeon_config_gpio(int index, int gpio)
{
	union cvmx_gpio_bit_cfgx gpio_bit;
	if ((OCTEON_IS_MODEL(OCTEON_CN73XX) ||
	    OCTEON_IS_MODEL(OCTEON_CNF75XX))
	    && gpio <= 31) {
		gpio_bit.u64 = cvmx_read_csr(CVMX_GPIO_BIT_CFGX(gpio));
		gpio_bit.s.tx_oe = 1;
		gpio_bit.s.output_sel = (index == 0 ? 0x14 : 0x15);
		cvmx_write_csr(CVMX_GPIO_BIT_CFGX(gpio), gpio_bit.u64);
	} else if (gpio <= 15) {
		gpio_bit.u64 = cvmx_read_csr(CVMX_GPIO_BIT_CFGX(gpio));
		gpio_bit.s.tx_oe = 1;
		gpio_bit.s.output_sel = (index == 0 ? 0x14 : 0x19);
		cvmx_write_csr(CVMX_GPIO_BIT_CFGX(gpio), gpio_bit.u64);
	} else {
		/* GPIOs above 15 are configured through the XBIT registers. */
		gpio_bit.u64 = cvmx_read_csr(CVMX_GPIO_XBIT_CFGX(gpio));
		gpio_bit.s.tx_oe = 1;
		gpio_bit.s.output_sel = (index == 0 ? 0x14 : 0x19);
		cvmx_write_csr(CVMX_GPIO_XBIT_CFGX(gpio), gpio_bit.u64);
	}
}
#else
/* Stubs for non-OCTEON builds (compile testing): no CSR access possible. */
static inline uint64_t dwc3_octeon_readq(void __iomem *addr)
{
	return 0;
}
static inline void dwc3_octeon_writeq(void __iomem *base, uint64_t val) { }
static inline void dwc3_octeon_config_gpio(int index, int gpio) { }
/* Fixed fallback io clock rate for builds without the OCTEON helper. */
static uint64_t octeon_get_io_clock_rate(void)
{
	return 150000000;
}
#endif
/*
 * Pick the first divider that brings the io clock into the
 * 150-300 MHz window; returns the divider table index, or -EINVAL
 * when no divider produces an acceptable rate.
 */
static int dwc3_octeon_get_divider(void)
{
	static const uint8_t clk_div[] = { 1, 2, 4, 6, 8, 16, 24, 32 };
	uint64_t io_clk = octeon_get_io_clock_rate();
	int i;

	for (i = 0; i < ARRAY_SIZE(clk_div); i++) {
		uint64_t rate = io_clk / clk_div[i];

		if (rate <= 300000000 && rate >= 150000000)
			return i;
	}

	return -EINVAL;
}
/*
 * dwc3_octeon_setup - bring the UCTL/UAHC out of reset per the HRM sequence
 * @octeon: glue state with mapped UCTL registers
 * @ref_clk_sel: REF_CLK_SEL code selecting the PLL reference clock sources
 * @ref_clk_fsel: REF_CLK_FSEL reference-clock frequency-select code
 * @mpll_mul: MPLL_MULTIPLIER code for the HighSpeed PLL
 * @power_gpio: GPIO driving port power, or DWC3_GPIO_POWER_NONE
 * @power_active_low: non-zero when the power GPIO is active low
 *
 * Follows the numbered initialization steps: assert resets, configure the
 * controller clock divider, set up reference clocks and PHY power, then
 * release the resets and select host-only mode.
 *
 * Fix: the unconditional "val |= USBDRD_UCTL_HOST_PPC_EN" that preceded
 * the power_gpio branch was a dead store (both branches assign the bit
 * definitively) and has been removed.
 *
 * Returns 0 on success or a negative errno.
 */
static int dwc3_octeon_setup(struct dwc3_octeon *octeon,
			     int ref_clk_sel, int ref_clk_fsel, int mpll_mul,
			     int power_gpio, int power_active_low)
{
	u64 val;
	int div;
	struct device *dev = octeon->dev;
	void __iomem *uctl_ctl_reg = octeon->base + USBDRD_UCTL_CTL;
	void __iomem *uctl_host_cfg_reg = octeon->base + USBDRD_UCTL_HOST_CFG;
	/*
	 * Step 1: Wait for all voltages to be stable...that surely
	 * happened before starting the kernel. SKIP
	 */
	/* Step 2: Select GPIO for overcurrent indication, if desired. SKIP */
	/* Step 3: Assert all resets. */
	val = dwc3_octeon_readq(uctl_ctl_reg);
	val |= USBDRD_UCTL_CTL_UPHY_RST |
	       USBDRD_UCTL_CTL_UAHC_RST |
	       USBDRD_UCTL_CTL_UCTL_RST;
	dwc3_octeon_writeq(uctl_ctl_reg, val);
	/* Step 4a: Reset the clock dividers. */
	val = dwc3_octeon_readq(uctl_ctl_reg);
	val |= USBDRD_UCTL_CTL_H_CLKDIV_RST;
	dwc3_octeon_writeq(uctl_ctl_reg, val);
	/* Step 4b: Select controller clock frequency. */
	div = dwc3_octeon_get_divider();
	if (div < 0) {
		dev_err(dev, "clock divider invalid\n");
		return div;
	}
	val = dwc3_octeon_readq(uctl_ctl_reg);
	val &= ~USBDRD_UCTL_CTL_H_CLKDIV_SEL;
	val |= FIELD_PREP(USBDRD_UCTL_CTL_H_CLKDIV_SEL, div);
	val |= USBDRD_UCTL_CTL_H_CLK_EN;
	dwc3_octeon_writeq(uctl_ctl_reg, val);
	/* Read back to verify the divider and clock enable stuck. */
	val = dwc3_octeon_readq(uctl_ctl_reg);
	if ((div != FIELD_GET(USBDRD_UCTL_CTL_H_CLKDIV_SEL, val)) ||
	    (!(FIELD_GET(USBDRD_UCTL_CTL_H_CLK_EN, val)))) {
		dev_err(dev, "clock init failure (UCTL_CTL=%016llx)\n", val);
		return -EINVAL;
	}
	/* Step 4c: Deassert the controller clock divider reset. */
	val &= ~USBDRD_UCTL_CTL_H_CLKDIV_RST;
	dwc3_octeon_writeq(uctl_ctl_reg, val);
	/* Step 5a: Reference clock configuration. */
	val = dwc3_octeon_readq(uctl_ctl_reg);
	val &= ~USBDRD_UCTL_CTL_REF_CLK_DIV2;
	val &= ~USBDRD_UCTL_CTL_REF_CLK_SEL;
	val |= FIELD_PREP(USBDRD_UCTL_CTL_REF_CLK_SEL, ref_clk_sel);
	val &= ~USBDRD_UCTL_CTL_REF_CLK_FSEL;
	val |= FIELD_PREP(USBDRD_UCTL_CTL_REF_CLK_FSEL, ref_clk_fsel);
	val &= ~USBDRD_UCTL_CTL_MPLL_MULTIPLIER;
	val |= FIELD_PREP(USBDRD_UCTL_CTL_MPLL_MULTIPLIER, mpll_mul);
	/* Step 5b: Configure and enable spread-spectrum for SuperSpeed. */
	val |= USBDRD_UCTL_CTL_SSC_EN;
	/* Step 5c: Enable SuperSpeed. */
	val |= USBDRD_UCTL_CTL_REF_SSP_EN;
	/* Step 5d: Configure PHYs. SKIP */
	/* Step 6a & 6b: Power up PHYs. */
	val |= USBDRD_UCTL_CTL_HS_POWER_EN;
	val |= USBDRD_UCTL_CTL_SS_POWER_EN;
	dwc3_octeon_writeq(uctl_ctl_reg, val);
	/* Step 7: Wait 10 controller-clock cycles to take effect. */
	udelay(10);
	/* Step 8a: Deassert UCTL reset signal. */
	val = dwc3_octeon_readq(uctl_ctl_reg);
	val &= ~USBDRD_UCTL_CTL_UCTL_RST;
	dwc3_octeon_writeq(uctl_ctl_reg, val);
	/* Step 8b: Wait 10 controller-clock cycles. */
	udelay(10);
	/* Step 8c: Setup power control. */
	val = dwc3_octeon_readq(uctl_host_cfg_reg);
	if (power_gpio == DWC3_GPIO_POWER_NONE) {
		val &= ~USBDRD_UCTL_HOST_PPC_EN;
	} else {
		val |= USBDRD_UCTL_HOST_PPC_EN;
		dwc3_octeon_config_gpio(((__force uintptr_t)octeon->base >> 24) & 1,
					power_gpio);
		dev_dbg(dev, "power control is using gpio%d\n", power_gpio);
	}
	if (power_active_low)
		val &= ~USBDRD_UCTL_HOST_PPC_ACTIVE_HIGH_EN;
	else
		val |= USBDRD_UCTL_HOST_PPC_ACTIVE_HIGH_EN;
	dwc3_octeon_writeq(uctl_host_cfg_reg, val);
	/* Step 8d: Deassert UAHC reset signal. */
	val = dwc3_octeon_readq(uctl_ctl_reg);
	val &= ~USBDRD_UCTL_CTL_UAHC_RST;
	dwc3_octeon_writeq(uctl_ctl_reg, val);
	/* Step 8e: Wait 10 controller-clock cycles. */
	udelay(10);
	/* Step 9: Enable conditional coprocessor clock of UCTL. */
	val = dwc3_octeon_readq(uctl_ctl_reg);
	val |= USBDRD_UCTL_CTL_CSCLK_EN;
	dwc3_octeon_writeq(uctl_ctl_reg, val);
	/* Step 10: Set for host mode only. */
	val = dwc3_octeon_readq(uctl_ctl_reg);
	val &= ~USBDRD_UCTL_CTL_DRD_MODE;
	dwc3_octeon_writeq(uctl_ctl_reg, val);
	return 0;
}
/*
 * dwc3_octeon_set_endian_mode - match UCTL shim endianness to the kernel
 * @octeon: glue state
 *
 * Both the DMA and CSR endian-mode fields are cleared (little endian);
 * on big-endian kernels they are set to mode 1 (big endian) instead.
 */
static void dwc3_octeon_set_endian_mode(struct dwc3_octeon *octeon)
{
	u64 val;
	void __iomem *uctl_shim_cfg_reg = octeon->base + USBDRD_UCTL_SHIM_CFG;
	val = dwc3_octeon_readq(uctl_shim_cfg_reg);
	val &= ~USBDRD_UCTL_SHIM_CFG_DMA_ENDIAN_MODE;
	val &= ~USBDRD_UCTL_SHIM_CFG_CSR_ENDIAN_MODE;
#ifdef __BIG_ENDIAN
	val |= FIELD_PREP(USBDRD_UCTL_SHIM_CFG_DMA_ENDIAN_MODE, 1);
	val |= FIELD_PREP(USBDRD_UCTL_SHIM_CFG_CSR_ENDIAN_MODE, 1);
#endif
	dwc3_octeon_writeq(uctl_shim_cfg_reg, val);
}
/* Release the UPHY reset that dwc3_octeon_setup() left asserted. */
static void dwc3_octeon_phy_reset(struct dwc3_octeon *octeon)
{
	void __iomem *uctl_ctl_reg = octeon->base + USBDRD_UCTL_CTL;
	u64 ctl;

	ctl = dwc3_octeon_readq(uctl_ctl_reg);
	ctl &= ~USBDRD_UCTL_CTL_UPHY_RST;
	dwc3_octeon_writeq(uctl_ctl_reg, ctl);
}
/*
 * dwc3_octeon_probe - parse DT clock/power configuration and init the UCTL
 * @pdev: glue platform device
 *
 * Reads the mandatory "refclk-frequency", "refclk-type-ss" and
 * "refclk-type-hs" properties, derives the REF_CLK_SEL / REF_CLK_FSEL /
 * MPLL codes from them, parses the optional "power" GPIO specification,
 * runs the hardware init sequence and finally populates the dwc3 core
 * child from DT.
 *
 * Returns 0 on success or a negative errno.
 */
static int dwc3_octeon_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct device_node *node = dev->of_node;
	struct dwc3_octeon *octeon;
	const char *hs_clock_type, *ss_clock_type;
	int ref_clk_sel, ref_clk_fsel, mpll_mul;
	int power_active_low, power_gpio;
	int err, len;
	u32 clock_rate;
	if (of_property_read_u32(node, "refclk-frequency", &clock_rate)) {
		dev_err(dev, "No UCTL \"refclk-frequency\"\n");
		return -EINVAL;
	}
	if (of_property_read_string(node, "refclk-type-ss", &ss_clock_type)) {
		dev_err(dev, "No UCTL \"refclk-type-ss\"\n");
		return -EINVAL;
	}
	if (of_property_read_string(node, "refclk-type-hs", &hs_clock_type)) {
		dev_err(dev, "No UCTL \"refclk-type-hs\"\n");
		return -EINVAL;
	}
	/*
	 * Map the SS/HS clock-type pair to a REF_CLK_SEL code; default (2)
	 * is SS on DLMC_REF_CLK0 with HS on PLL_REF_CLK.
	 */
	ref_clk_sel = 2;
	if (strcmp("dlmc_ref_clk0", ss_clock_type) == 0) {
		if (strcmp(hs_clock_type, "dlmc_ref_clk0") == 0)
			ref_clk_sel = 0;
		else if (strcmp(hs_clock_type, "pll_ref_clk"))
			dev_warn(dev, "Invalid HS clock type %s, using pll_ref_clk instead\n",
				 hs_clock_type);
	} else if (strcmp(ss_clock_type, "dlmc_ref_clk1") == 0) {
		if (strcmp(hs_clock_type, "dlmc_ref_clk1") == 0) {
			ref_clk_sel = 1;
		} else {
			ref_clk_sel = 3;
			if (strcmp(hs_clock_type, "pll_ref_clk"))
				dev_warn(dev, "Invalid HS clock type %s, using pll_ref_clk instead\n",
					 hs_clock_type);
		}
	} else {
		dev_warn(dev, "Invalid SS clock type %s, using dlmc_ref_clk0 instead\n",
			 ss_clock_type);
	}
	/* Translate the clock rate into FSEL and MPLL multiplier codes. */
	ref_clk_fsel = 0x07;
	switch (clock_rate) {
	default:
		dev_warn(dev, "Invalid ref_clk %u, using 100000000 instead\n",
			 clock_rate);
		fallthrough;
	case 100000000:
		mpll_mul = 0x19;
		if (ref_clk_sel < 2)
			ref_clk_fsel = 0x27;
		break;
	case 50000000:
		mpll_mul = 0x32;
		break;
	case 125000000:
		mpll_mul = 0x28;
		break;
	}
	/* Optional "power" property: <phandle gpio [flags]>, 2 or 3 cells. */
	power_gpio = DWC3_GPIO_POWER_NONE;
	power_active_low = 0;
	if (of_find_property(node, "power", &len)) {
		u32 gpio_pwr[3];
		switch (len) {
		case 8:
			of_property_read_u32_array(node, "power", gpio_pwr, 2);
			break;
		case 12:
			of_property_read_u32_array(node, "power", gpio_pwr, 3);
			power_active_low = gpio_pwr[2] & 0x01;
			break;
		default:
			dev_err(dev, "invalid power configuration\n");
			return -EINVAL;
		}
		power_gpio = gpio_pwr[1];
	}
	octeon = devm_kzalloc(dev, sizeof(*octeon), GFP_KERNEL);
	if (!octeon)
		return -ENOMEM;
	octeon->dev = dev;
	octeon->base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(octeon->base))
		return PTR_ERR(octeon->base);
	err = dwc3_octeon_setup(octeon, ref_clk_sel, ref_clk_fsel, mpll_mul,
				power_gpio, power_active_low);
	if (err)
		return err;
	dwc3_octeon_set_endian_mode(octeon);
	dwc3_octeon_phy_reset(octeon);
	platform_set_drvdata(pdev, octeon);
	return of_platform_populate(node, NULL, NULL, dev);
}
/*
 * dwc3_octeon_remove - unregister the dwc3 core child created at probe
 * @pdev: glue platform device
 */
static void dwc3_octeon_remove(struct platform_device *pdev)
{
	struct dwc3_octeon *octeon = platform_get_drvdata(pdev);
	of_platform_depopulate(octeon->dev);
}
static const struct of_device_id dwc3_octeon_of_match[] = {
{ .compatible = "cavium,octeon-7130-usb-uctl" },
{ },
};
MODULE_DEVICE_TABLE(of, dwc3_octeon_of_match);
static struct platform_driver dwc3_octeon_driver = {
.probe = dwc3_octeon_probe,
.remove_new = dwc3_octeon_remove,
.driver = {
.name = "dwc3-octeon",
.of_match_table = dwc3_octeon_of_match,
},
};
module_platform_driver(dwc3_octeon_driver);
MODULE_ALIAS("platform:dwc3-octeon");
MODULE_AUTHOR("Ladislav Michl <[email protected]>");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("DesignWare USB3 OCTEON III Glue Layer");
| linux-master | drivers/usb/dwc3/dwc3-octeon.c |
// SPDX-License-Identifier: GPL-2.0
/*
* dwc3-pci.c - PCI Specific glue layer
*
* Copyright (C) 2010-2011 Texas Instruments Incorporated - https://www.ti.com
*
* Authors: Felipe Balbi <[email protected]>,
* Sebastian Andrzej Siewior <[email protected]>
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/pci.h>
#include <linux/workqueue.h>
#include <linux/pm_runtime.h>
#include <linux/platform_device.h>
#include <linux/gpio/consumer.h>
#include <linux/gpio/machine.h>
#include <linux/acpi.h>
#include <linux/delay.h>
#define PCI_DEVICE_ID_INTEL_BYT 0x0f37
#define PCI_DEVICE_ID_INTEL_MRFLD 0x119e
#define PCI_DEVICE_ID_INTEL_BSW 0x22b7
#define PCI_DEVICE_ID_INTEL_SPTLP 0x9d30
#define PCI_DEVICE_ID_INTEL_SPTH 0xa130
#define PCI_DEVICE_ID_INTEL_BXT 0x0aaa
#define PCI_DEVICE_ID_INTEL_BXT_M 0x1aaa
#define PCI_DEVICE_ID_INTEL_APL 0x5aaa
#define PCI_DEVICE_ID_INTEL_KBP 0xa2b0
#define PCI_DEVICE_ID_INTEL_CMLLP 0x02ee
#define PCI_DEVICE_ID_INTEL_CMLH 0x06ee
#define PCI_DEVICE_ID_INTEL_GLK 0x31aa
#define PCI_DEVICE_ID_INTEL_CNPLP 0x9dee
#define PCI_DEVICE_ID_INTEL_CNPH 0xa36e
#define PCI_DEVICE_ID_INTEL_CNPV 0xa3b0
#define PCI_DEVICE_ID_INTEL_ICLLP 0x34ee
#define PCI_DEVICE_ID_INTEL_EHL 0x4b7e
#define PCI_DEVICE_ID_INTEL_TGPLP 0xa0ee
#define PCI_DEVICE_ID_INTEL_TGPH 0x43ee
#define PCI_DEVICE_ID_INTEL_JSP 0x4dee
#define PCI_DEVICE_ID_INTEL_ADL 0x460e
#define PCI_DEVICE_ID_INTEL_ADL_PCH 0x51ee
#define PCI_DEVICE_ID_INTEL_ADLN 0x465e
#define PCI_DEVICE_ID_INTEL_ADLN_PCH 0x54ee
#define PCI_DEVICE_ID_INTEL_ADLS 0x7ae1
#define PCI_DEVICE_ID_INTEL_RPL 0xa70e
#define PCI_DEVICE_ID_INTEL_RPLS 0x7a61
#define PCI_DEVICE_ID_INTEL_MTLM 0x7eb1
#define PCI_DEVICE_ID_INTEL_MTLP 0x7ec1
#define PCI_DEVICE_ID_INTEL_MTLS 0x7f6f
#define PCI_DEVICE_ID_INTEL_MTL 0x7e7e
#define PCI_DEVICE_ID_INTEL_TGL 0x9a15
#define PCI_DEVICE_ID_AMD_MR 0x163a
#define PCI_INTEL_BXT_DSM_GUID "732b85d5-b7a7-4a1b-9ba0-4bbd00ffd511"
#define PCI_INTEL_BXT_FUNC_PMU_PWR 4
#define PCI_INTEL_BXT_STATE_D0 0
#define PCI_INTEL_BXT_STATE_D3 3
#define GP_RWBAR 1
#define GP_RWREG1 0xa0
#define GP_RWREG1_ULPI_REFCLK_DISABLE (1 << 17)
/**
 * struct dwc3_pci - Driver private structure
 * @dwc3: child dwc3 platform_device
 * @pci: our link to PCI bus
 * @guid: _DSM GUID
 * @has_dsm_for_pm: true for devices which need to run _DSM on runtime PM
 * @wakeup_work: work for asynchronous resume
 */
struct dwc3_pci {
	struct platform_device *dwc3;
	struct pci_dev *pci;
	guid_t guid;
	unsigned int has_dsm_for_pm:1;	/* single-bit flag, see kernel-doc */
	struct work_struct wakeup_work;
};
static const struct acpi_gpio_params reset_gpios = { 0, 0, false };
static const struct acpi_gpio_params cs_gpios = { 1, 0, false };
static const struct acpi_gpio_mapping acpi_dwc3_byt_gpios[] = {
{ "reset-gpios", &reset_gpios, 1 },
{ "cs-gpios", &cs_gpios, 1 },
{ },
};
static struct gpiod_lookup_table platform_bytcr_gpios = {
.dev_id = "0000:00:16.0",
.table = {
GPIO_LOOKUP("INT33FC:00", 54, "cs", GPIO_ACTIVE_HIGH),
GPIO_LOOKUP("INT33FC:02", 14, "reset", GPIO_ACTIVE_HIGH),
{}
},
};
/*
 * On Baytrail the firmware does not always enable the ULPI reference
 * clock; clear the disable bit in the GP register block if needed.
 * Returns 0 on success, -ENOMEM when the BAR cannot be mapped.
 */
static int dwc3_byt_enable_ulpi_refclock(struct pci_dev *pci)
{
	void __iomem *reg;
	u32 value;

	reg = pcim_iomap(pci, GP_RWBAR, 0);
	if (!reg)
		return -ENOMEM;

	value = readl(reg + GP_RWREG1);
	if (value & GP_RWREG1_ULPI_REFCLK_DISABLE) {
		/* Refclock currently disabled: enable it. */
		value &= ~GP_RWREG1_ULPI_REFCLK_DISABLE;
		writel(value, reg + GP_RWREG1);
		/* This comes from the Intel Android x86 tree w/o any explanation */
		msleep(100);
	}

	pcim_iounmap(pci, reg);
	return 0;
}
static const struct property_entry dwc3_pci_intel_properties[] = {
PROPERTY_ENTRY_STRING("dr_mode", "peripheral"),
PROPERTY_ENTRY_BOOL("linux,sysdev_is_parent"),
{}
};
static const struct property_entry dwc3_pci_intel_phy_charger_detect_properties[] = {
PROPERTY_ENTRY_STRING("dr_mode", "peripheral"),
PROPERTY_ENTRY_BOOL("snps,dis_u2_susphy_quirk"),
PROPERTY_ENTRY_BOOL("linux,phy_charger_detect"),
PROPERTY_ENTRY_BOOL("linux,sysdev_is_parent"),
{}
};
static const struct property_entry dwc3_pci_intel_byt_properties[] = {
PROPERTY_ENTRY_STRING("dr_mode", "peripheral"),
PROPERTY_ENTRY_BOOL("snps,dis_u2_susphy_quirk"),
PROPERTY_ENTRY_BOOL("linux,sysdev_is_parent"),
{}
};
static const struct property_entry dwc3_pci_mrfld_properties[] = {
PROPERTY_ENTRY_STRING("dr_mode", "otg"),
PROPERTY_ENTRY_STRING("linux,extcon-name", "mrfld_bcove_pwrsrc"),
PROPERTY_ENTRY_BOOL("snps,dis_u3_susphy_quirk"),
PROPERTY_ENTRY_BOOL("snps,dis_u2_susphy_quirk"),
PROPERTY_ENTRY_BOOL("snps,usb2-gadget-lpm-disable"),
PROPERTY_ENTRY_BOOL("linux,sysdev_is_parent"),
{}
};
static const struct property_entry dwc3_pci_amd_properties[] = {
PROPERTY_ENTRY_BOOL("snps,has-lpm-erratum"),
PROPERTY_ENTRY_U8("snps,lpm-nyet-threshold", 0xf),
PROPERTY_ENTRY_BOOL("snps,u2exit_lfps_quirk"),
PROPERTY_ENTRY_BOOL("snps,u2ss_inp3_quirk"),
PROPERTY_ENTRY_BOOL("snps,req_p1p2p3_quirk"),
PROPERTY_ENTRY_BOOL("snps,del_p1p2p3_quirk"),
PROPERTY_ENTRY_BOOL("snps,del_phy_power_chg_quirk"),
PROPERTY_ENTRY_BOOL("snps,lfps_filter_quirk"),
PROPERTY_ENTRY_BOOL("snps,rx_detect_poll_quirk"),
PROPERTY_ENTRY_BOOL("snps,tx_de_emphasis_quirk"),
PROPERTY_ENTRY_U8("snps,tx_de_emphasis", 1),
/* FIXME these quirks should be removed when AMD NL tapes out */
PROPERTY_ENTRY_BOOL("snps,disable_scramble_quirk"),
PROPERTY_ENTRY_BOOL("snps,dis_u3_susphy_quirk"),
PROPERTY_ENTRY_BOOL("snps,dis_u2_susphy_quirk"),
PROPERTY_ENTRY_BOOL("linux,sysdev_is_parent"),
{}
};
static const struct property_entry dwc3_pci_mr_properties[] = {
PROPERTY_ENTRY_STRING("dr_mode", "otg"),
PROPERTY_ENTRY_BOOL("usb-role-switch"),
PROPERTY_ENTRY_STRING("role-switch-default-mode", "host"),
PROPERTY_ENTRY_BOOL("linux,sysdev_is_parent"),
{}
};
static const struct software_node dwc3_pci_intel_swnode = {
.properties = dwc3_pci_intel_properties,
};
static const struct software_node dwc3_pci_intel_phy_charger_detect_swnode = {
.properties = dwc3_pci_intel_phy_charger_detect_properties,
};
static const struct software_node dwc3_pci_intel_byt_swnode = {
.properties = dwc3_pci_intel_byt_properties,
};
static const struct software_node dwc3_pci_intel_mrfld_swnode = {
.properties = dwc3_pci_mrfld_properties,
};
static const struct software_node dwc3_pci_amd_swnode = {
.properties = dwc3_pci_amd_properties,
};
static const struct software_node dwc3_pci_amd_mr_swnode = {
.properties = dwc3_pci_mr_properties,
};
/*
 * dwc3_pci_quirks - apply vendor/device specific fixups before registration
 * @dwc: driver state (dwc->dwc3 allocated but not yet added)
 * @swnode: default software node carrying properties for this device
 *
 * For Intel BXT/BXT-M/EHL, records the _DSM GUID used during runtime PM.
 * For Baytrail, enables the ULPI refclock, installs ACPI/board GPIO
 * mappings, powers up the USB2 PHY via the cs/reset GPIOs, and selects an
 * alternative software node on boards that use the TUSB1211 phy for
 * charger detection. Finally attaches @swnode to the child device.
 *
 * Returns 0 on success or a negative errno.
 */
static int dwc3_pci_quirks(struct dwc3_pci *dwc,
			   const struct software_node *swnode)
{
	struct pci_dev *pdev = dwc->pci;
	if (pdev->vendor == PCI_VENDOR_ID_INTEL) {
		if (pdev->device == PCI_DEVICE_ID_INTEL_BXT ||
		    pdev->device == PCI_DEVICE_ID_INTEL_BXT_M ||
		    pdev->device == PCI_DEVICE_ID_INTEL_EHL) {
			guid_parse(PCI_INTEL_BXT_DSM_GUID, &dwc->guid);
			dwc->has_dsm_for_pm = true;
		}
		if (pdev->device == PCI_DEVICE_ID_INTEL_BYT) {
			struct gpio_desc *gpio;
			int ret;
			/* On BYT the FW does not always enable the refclock */
			ret = dwc3_byt_enable_ulpi_refclock(pdev);
			if (ret)
				return ret;
			ret = devm_acpi_dev_add_driver_gpios(&pdev->dev,
							     acpi_dwc3_byt_gpios);
			if (ret)
				dev_dbg(&pdev->dev, "failed to add mapping table\n");
			/*
			 * A lot of BYT devices lack ACPI resource entries for
			 * the GPIOs. If the ACPI entry for the GPIO controller
			 * is present add a fallback mapping to the reference
			 * design GPIOs which all boards seem to use.
			 */
			if (acpi_dev_present("INT33FC", NULL, -1))
				gpiod_add_lookup_table(&platform_bytcr_gpios);
			/*
			 * These GPIOs will turn on the USB2 PHY. Note that we have to
			 * put the gpio descriptors again here because the phy driver
			 * might want to grab them, too.
			 */
			gpio = gpiod_get_optional(&pdev->dev, "cs", GPIOD_OUT_LOW);
			if (IS_ERR(gpio))
				return PTR_ERR(gpio);
			gpiod_set_value_cansleep(gpio, 1);
			gpiod_put(gpio);
			gpio = gpiod_get_optional(&pdev->dev, "reset", GPIOD_OUT_LOW);
			if (IS_ERR(gpio))
				return PTR_ERR(gpio);
			if (gpio) {
				gpiod_set_value_cansleep(gpio, 1);
				gpiod_put(gpio);
				usleep_range(10000, 11000);
			}
			/*
			 * Make the pdev name predictable (only 1 DWC3 on BYT)
			 * and patch the phy dev-name into the lookup table so
			 * that the phy-driver can get the GPIOs.
			 */
			dwc->dwc3->id = PLATFORM_DEVID_NONE;
			platform_bytcr_gpios.dev_id = "dwc3.ulpi";
			/*
			 * Some Android tablets with a Crystal Cove PMIC
			 * (INT33FD), rely on the TUSB1211 phy for charger
			 * detection. These can be identified by them _not_
			 * using the standard ACPI battery and ac drivers.
			 */
			if (acpi_dev_present("INT33FD", "1", 2) &&
			    acpi_quirk_skip_acpi_ac_and_battery()) {
				dev_info(&pdev->dev, "Using TUSB1211 phy for charger detection\n");
				swnode = &dwc3_pci_intel_phy_charger_detect_swnode;
			}
		}
	}
	return device_add_software_node(&dwc->dwc3->dev, swnode);
}
#ifdef CONFIG_PM
/*
 * Deferred runtime-resume handler: wakes the child dwc3 platform device
 * from a workqueue so the PCI runtime-resume callback itself does not
 * block on the child's PM transition.
 */
static void dwc3_pci_resume_work(struct work_struct *work)
{
	struct dwc3_pci *dwc = container_of(work, struct dwc3_pci, wakeup_work);
	struct platform_device *dwc3 = dwc->dwc3;
	int ret;

	/* Resume the child; on failure drop the usage count and bail. */
	ret = pm_runtime_get_sync(&dwc3->dev);
	if (ret < 0) {
		pm_runtime_put_sync_autosuspend(&dwc3->dev);
		return;
	}

	/* Let the child autosuspend again after its idle timeout. */
	pm_runtime_mark_last_busy(&dwc3->dev);
	pm_runtime_put_sync_autosuspend(&dwc3->dev);
}
#endif
/*
 * Bind a "dwc3" child platform device to this PCI function: forward
 * BAR 0 and the PCI IRQ as platform resources, apply vendor quirks
 * (which also attach the software node), then register the child so
 * the dwc3 core driver can take over.
 *
 * Returns 0 on success or a negative errno.
 */
static int dwc3_pci_probe(struct pci_dev *pci, const struct pci_device_id *id)
{
	struct dwc3_pci *dwc;
	struct resource res[2];
	int ret;
	struct device *dev = &pci->dev;

	ret = pcim_enable_device(pci);
	if (ret) {
		dev_err(dev, "failed to enable pci device\n");
		return -ENODEV;
	}

	pci_set_master(pci);

	dwc = devm_kzalloc(dev, sizeof(*dwc), GFP_KERNEL);
	if (!dwc)
		return -ENOMEM;

	dwc->dwc3 = platform_device_alloc("dwc3", PLATFORM_DEVID_AUTO);
	if (!dwc->dwc3)
		return -ENOMEM;

	/* Hand the register space (BAR 0) and legacy IRQ to the child. */
	memset(res, 0x00, sizeof(struct resource) * ARRAY_SIZE(res));

	res[0].start	= pci_resource_start(pci, 0);
	res[0].end	= pci_resource_end(pci, 0);
	res[0].name	= "dwc_usb3";
	res[0].flags	= IORESOURCE_MEM;

	res[1].start	= pci->irq;
	res[1].name	= "dwc_usb3";
	res[1].flags	= IORESOURCE_IRQ;

	ret = platform_device_add_resources(dwc->dwc3, res, ARRAY_SIZE(res));
	if (ret) {
		dev_err(dev, "couldn't add resources to dwc3 device\n");
		goto err;
	}

	dwc->pci = pci;
	dwc->dwc3->dev.parent = dev;
	/* Share the ACPI companion so the child sees the same firmware node. */
	ACPI_COMPANION_SET(&dwc->dwc3->dev, ACPI_COMPANION(dev));

	/* Applies platform quirks and adds the software node to the child. */
	ret = dwc3_pci_quirks(dwc, (void *)id->driver_data);
	if (ret)
		goto err;

	ret = platform_device_add(dwc->dwc3);
	if (ret) {
		dev_err(dev, "failed to register dwc3 device\n");
		goto err;
	}

	device_init_wakeup(dev, true);
	pci_set_drvdata(pci, dwc);
	/* Allow runtime suspend; balanced by pm_runtime_get() in remove. */
	pm_runtime_put(dev);
#ifdef CONFIG_PM
	INIT_WORK(&dwc->wakeup_work, dwc3_pci_resume_work);
#endif

	return 0;
err:
	device_remove_software_node(&dwc->dwc3->dev);
	platform_device_put(dwc->dwc3);
	return ret;
}
/*
 * Teardown mirror of probe: remove the BYT GPIO lookup table, stop the
 * resume worker, disable wakeup, rebalance runtime PM, and unregister
 * the child dwc3 device.
 */
static void dwc3_pci_remove(struct pci_dev *pci)
{
	struct dwc3_pci *dwc = pci_get_drvdata(pci);
	struct pci_dev *pdev = dwc->pci;

	if (pdev->device == PCI_DEVICE_ID_INTEL_BYT)
		gpiod_remove_lookup_table(&platform_bytcr_gpios);
#ifdef CONFIG_PM
	/* Ensure dwc3_pci_resume_work() cannot run past this point. */
	cancel_work_sync(&dwc->wakeup_work);
#endif
	device_init_wakeup(&pci->dev, false);
	/* Balance the pm_runtime_put() done at the end of probe. */
	pm_runtime_get(&pci->dev);
	device_remove_software_node(&dwc->dwc3->dev);
	platform_device_unregister(dwc->dwc3);
}
/*
 * PCI IDs handled by this glue driver. driver_data points at the
 * software node (device-property set) handed to the dwc3 core for
 * that platform family.
 */
static const struct pci_device_id dwc3_pci_id_table[] = {
	{ PCI_DEVICE_DATA(INTEL, BSW, &dwc3_pci_intel_swnode) },
	{ PCI_DEVICE_DATA(INTEL, BYT, &dwc3_pci_intel_byt_swnode) },
	{ PCI_DEVICE_DATA(INTEL, MRFLD, &dwc3_pci_intel_mrfld_swnode) },
	{ PCI_DEVICE_DATA(INTEL, CMLLP, &dwc3_pci_intel_swnode) },
	{ PCI_DEVICE_DATA(INTEL, CMLH, &dwc3_pci_intel_swnode) },
	{ PCI_DEVICE_DATA(INTEL, SPTLP, &dwc3_pci_intel_swnode) },
	{ PCI_DEVICE_DATA(INTEL, SPTH, &dwc3_pci_intel_swnode) },
	{ PCI_DEVICE_DATA(INTEL, BXT, &dwc3_pci_intel_swnode) },
	{ PCI_DEVICE_DATA(INTEL, BXT_M, &dwc3_pci_intel_swnode) },
	{ PCI_DEVICE_DATA(INTEL, APL, &dwc3_pci_intel_swnode) },
	{ PCI_DEVICE_DATA(INTEL, KBP, &dwc3_pci_intel_swnode) },
	{ PCI_DEVICE_DATA(INTEL, GLK, &dwc3_pci_intel_swnode) },
	{ PCI_DEVICE_DATA(INTEL, CNPLP, &dwc3_pci_intel_swnode) },
	{ PCI_DEVICE_DATA(INTEL, CNPH, &dwc3_pci_intel_swnode) },
	{ PCI_DEVICE_DATA(INTEL, CNPV, &dwc3_pci_intel_swnode) },
	{ PCI_DEVICE_DATA(INTEL, ICLLP, &dwc3_pci_intel_swnode) },
	{ PCI_DEVICE_DATA(INTEL, EHL, &dwc3_pci_intel_swnode) },
	{ PCI_DEVICE_DATA(INTEL, TGPLP, &dwc3_pci_intel_swnode) },
	{ PCI_DEVICE_DATA(INTEL, TGPH, &dwc3_pci_intel_swnode) },
	{ PCI_DEVICE_DATA(INTEL, JSP, &dwc3_pci_intel_swnode) },
	{ PCI_DEVICE_DATA(INTEL, ADL, &dwc3_pci_intel_swnode) },
	{ PCI_DEVICE_DATA(INTEL, ADL_PCH, &dwc3_pci_intel_swnode) },
	{ PCI_DEVICE_DATA(INTEL, ADLN, &dwc3_pci_intel_swnode) },
	{ PCI_DEVICE_DATA(INTEL, ADLN_PCH, &dwc3_pci_intel_swnode) },
	{ PCI_DEVICE_DATA(INTEL, ADLS, &dwc3_pci_intel_swnode) },
	{ PCI_DEVICE_DATA(INTEL, RPL, &dwc3_pci_intel_swnode) },
	{ PCI_DEVICE_DATA(INTEL, RPLS, &dwc3_pci_intel_swnode) },
	{ PCI_DEVICE_DATA(INTEL, MTLM, &dwc3_pci_intel_swnode) },
	{ PCI_DEVICE_DATA(INTEL, MTLP, &dwc3_pci_intel_swnode) },
	{ PCI_DEVICE_DATA(INTEL, MTL, &dwc3_pci_intel_swnode) },
	{ PCI_DEVICE_DATA(INTEL, MTLS, &dwc3_pci_intel_swnode) },
	{ PCI_DEVICE_DATA(INTEL, TGL, &dwc3_pci_intel_swnode) },
	{ PCI_DEVICE_DATA(AMD, NL_USB, &dwc3_pci_amd_swnode) },
	{ PCI_DEVICE_DATA(AMD, MR, &dwc3_pci_amd_mr_swnode) },
	{  }	/* Terminating Entry */
};
MODULE_DEVICE_TABLE(pci, dwc3_pci_id_table);
#if defined(CONFIG_PM) || defined(CONFIG_PM_SLEEP)
/*
 * Evaluate the Intel BXT power-management _DSM to move the internal
 * PMU into the D-state given by @param. No-op when the platform did
 * not advertise the DSM (has_dsm_for_pm unset).
 *
 * Returns 0 on success (or no-op), -EIO if the _DSM evaluation fails.
 */
static int dwc3_pci_dsm(struct dwc3_pci *dwc, int param)
{
	union acpi_object *obj;
	union acpi_object tmp;
	union acpi_object argv4 = ACPI_INIT_DSM_ARGV4(1, &tmp);

	if (!dwc->has_dsm_for_pm)
		return 0;

	tmp.type = ACPI_TYPE_INTEGER;
	tmp.integer.value = param;

	obj = acpi_evaluate_dsm(ACPI_HANDLE(&dwc->pci->dev), &dwc->guid,
			1, PCI_INTEL_BXT_FUNC_PMU_PWR, &argv4);
	if (!obj) {
		dev_err(&dwc->pci->dev, "failed to evaluate _DSM\n");
		return -EIO;
	}

	/* Result payload is unused; only success/failure matters. */
	ACPI_FREE(obj);

	return 0;
}
#endif /* CONFIG_PM || CONFIG_PM_SLEEP */
#ifdef CONFIG_PM
/*
 * Runtime-suspend callback: allowed only when the device can wake the
 * system back up; puts the Intel PMU into D3 via ACPI _DSM.
 */
static int dwc3_pci_runtime_suspend(struct device *dev)
{
	struct dwc3_pci *dwc = dev_get_drvdata(dev);

	/* Refuse runtime suspend unless the device can wake us back up. */
	if (!device_can_wakeup(dev))
		return -EBUSY;

	return dwc3_pci_dsm(dwc, PCI_INTEL_BXT_STATE_D3);
}
/*
 * Runtime-resume callback: brings the Intel PMU to D0 via ACPI _DSM,
 * then kicks the worker that resumes the child dwc3 device.
 */
static int dwc3_pci_runtime_resume(struct device *dev)
{
	struct dwc3_pci *dwc = dev_get_drvdata(dev);
	int ret = dwc3_pci_dsm(dwc, PCI_INTEL_BXT_STATE_D0);

	if (ret)
		return ret;

	/* Resume the child asynchronously from the PM workqueue. */
	queue_work(pm_wq, &dwc->wakeup_work);

	return 0;
}
#endif /* CONFIG_PM */
#ifdef CONFIG_PM_SLEEP
/* System suspend: request D3 for the Intel PMU via ACPI _DSM. */
static int dwc3_pci_suspend(struct device *dev)
{
	struct dwc3_pci *dwc = dev_get_drvdata(dev);

	return dwc3_pci_dsm(dwc, PCI_INTEL_BXT_STATE_D3);
}
/* System resume: request D0 for the Intel PMU via ACPI _DSM. */
static int dwc3_pci_resume(struct device *dev)
{
	struct dwc3_pci *dwc = dev_get_drvdata(dev);

	return dwc3_pci_dsm(dwc, PCI_INTEL_BXT_STATE_D0);
}
#endif /* CONFIG_PM_SLEEP */
/*
 * System sleep uses the _DSM D-state helpers; runtime PM additionally
 * defers the child's resume to dwc3_pci_resume_work().
 */
static const struct dev_pm_ops dwc3_pci_dev_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(dwc3_pci_suspend, dwc3_pci_resume)
	SET_RUNTIME_PM_OPS(dwc3_pci_runtime_suspend, dwc3_pci_runtime_resume,
		NULL)
};
/* PCI driver glue: matches the ID table above and wires in PM ops. */
static struct pci_driver dwc3_pci_driver = {
	.name		= "dwc3-pci",
	.id_table	= dwc3_pci_id_table,
	.probe		= dwc3_pci_probe,
	.remove		= dwc3_pci_remove,
	.driver		= {
		.pm	= &dwc3_pci_dev_pm_ops,
	}
};

MODULE_AUTHOR("Felipe Balbi <[email protected]>");
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("DesignWare USB3 PCI Glue Layer");

module_pci_driver(dwc3_pci_driver);
| linux-master | drivers/usb/dwc3/dwc3-pci.c |
// SPDX-License-Identifier: GPL-2.0
/*
* core.c - DesignWare USB3 DRD Controller Core file
*
* Copyright (C) 2010-2011 Texas Instruments Incorporated - https://www.ti.com
*
* Authors: Felipe Balbi <[email protected]>,
* Sebastian Andrzej Siewior <[email protected]>
*/
#include <linux/clk.h>
#include <linux/version.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/io.h>
#include <linux/list.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/of.h>
#include <linux/of_graph.h>
#include <linux/acpi.h>
#include <linux/pinctrl/consumer.h>
#include <linux/reset.h>
#include <linux/bitfield.h>
#include <linux/usb/ch9.h>
#include <linux/usb/gadget.h>
#include <linux/usb/of.h>
#include <linux/usb/otg.h>
#include "core.h"
#include "gadget.h"
#include "io.h"
#include "debug.h"
#define DWC3_DEFAULT_AUTOSUSPEND_DELAY 5000 /* ms */
/**
 * dwc3_get_dr_mode - Validates and sets dr_mode
 * @dwc: pointer to our context structure
 *
 * Reconciles the requested dual-role mode with what the hardware
 * (GHWPARAMS0) and the kernel configuration actually support, forcing
 * dwc->dr_mode to a usable value and warning on a mismatch.
 *
 * Returns 0 on success, -EINVAL when the requested kernel config is
 * incompatible with the controller's hardware mode.
 */
static int dwc3_get_dr_mode(struct dwc3 *dwc)
{
	enum usb_dr_mode mode;
	struct device *dev = dwc->dev;
	unsigned int hw_mode;

	/* Unspecified dr_mode defaults to OTG before validation. */
	if (dwc->dr_mode == USB_DR_MODE_UNKNOWN)
		dwc->dr_mode = USB_DR_MODE_OTG;

	mode = dwc->dr_mode;
	hw_mode = DWC3_GHWPARAMS0_MODE(dwc->hwparams.hwparams0);

	switch (hw_mode) {
	case DWC3_GHWPARAMS0_MODE_GADGET:
		/* Gadget-only silicon cannot satisfy a host-only kernel. */
		if (IS_ENABLED(CONFIG_USB_DWC3_HOST)) {
			dev_err(dev,
				"Controller does not support host mode.\n");
			return -EINVAL;
		}
		mode = USB_DR_MODE_PERIPHERAL;
		break;
	case DWC3_GHWPARAMS0_MODE_HOST:
		/* Host-only silicon cannot satisfy a gadget-only kernel. */
		if (IS_ENABLED(CONFIG_USB_DWC3_GADGET)) {
			dev_err(dev,
				"Controller does not support device mode.\n");
			return -EINVAL;
		}
		mode = USB_DR_MODE_HOST;
		break;
	default:
		/* DRD hardware: narrow to whatever the kernel was built for. */
		if (IS_ENABLED(CONFIG_USB_DWC3_HOST))
			mode = USB_DR_MODE_HOST;
		else if (IS_ENABLED(CONFIG_USB_DWC3_GADGET))
			mode = USB_DR_MODE_PERIPHERAL;

		/*
		 * DWC_usb31 and DWC_usb3 v3.30a and higher do not support OTG
		 * mode. If the controller supports DRD but the dr_mode is not
		 * specified or set to OTG, then set the mode to peripheral.
		 */
		if (mode == USB_DR_MODE_OTG && !dwc->edev &&
		    (!IS_ENABLED(CONFIG_USB_ROLE_SWITCH) ||
		     !device_property_read_bool(dwc->dev, "usb-role-switch")) &&
		    !DWC3_VER_IS_PRIOR(DWC3, 330A))
			mode = USB_DR_MODE_PERIPHERAL;
	}

	if (mode != dwc->dr_mode) {
		dev_warn(dev,
			 "Configuration mismatch. dr_mode forced to %s\n",
			 mode == USB_DR_MODE_HOST ? "host" : "gadget");

		dwc->dr_mode = mode;
	}

	return 0;
}
/*
 * Program GCTL.PRTCAPDIR to @mode (host/device/otg) and record the
 * newly active role in dwc->current_dr_role.
 */
void dwc3_set_prtcap(struct dwc3 *dwc, u32 mode)
{
	u32 gctl;

	/* Clear the full PRTCAPDIR field, then install the new direction. */
	gctl = dwc3_readl(dwc->regs, DWC3_GCTL);
	gctl &= ~DWC3_GCTL_PRTCAPDIR(DWC3_GCTL_PRTCAP_OTG);
	gctl |= DWC3_GCTL_PRTCAPDIR(mode);
	dwc3_writel(dwc->regs, DWC3_GCTL, gctl);

	dwc->current_dr_role = mode;
}
/*
 * __dwc3_set_mode - workqueue handler performing the actual DRD role
 * switch requested via dwc3_set_mode().
 *
 * Tears down the currently active role, optionally issues a
 * GCTL.CoreSoftReset, reprograms PRTCAPDIR, and brings up the desired
 * role. Serialized by dwc->mutex; desired_dr_role is sampled under
 * dwc->lock.
 */
static void __dwc3_set_mode(struct work_struct *work)
{
	struct dwc3 *dwc = work_to_dwc(work);
	unsigned long flags;
	int ret;
	u32 reg;
	u32 desired_dr_role;

	mutex_lock(&dwc->mutex);

	/* Snapshot the requested role under the spinlock. */
	spin_lock_irqsave(&dwc->lock, flags);
	desired_dr_role = dwc->desired_dr_role;
	spin_unlock_irqrestore(&dwc->lock, flags);

	pm_runtime_get_sync(dwc->dev);

	if (dwc->current_dr_role == DWC3_GCTL_PRTCAP_OTG)
		dwc3_otg_update(dwc, 0);

	/* Nothing to do when no role was requested or it's already active. */
	if (!desired_dr_role)
		goto out;

	if (desired_dr_role == dwc->current_dr_role)
		goto out;

	/* With an extcon device, OTG role transitions are driven externally. */
	if (desired_dr_role == DWC3_GCTL_PRTCAP_OTG && dwc->edev)
		goto out;

	/* Tear down whatever role is currently active. */
	switch (dwc->current_dr_role) {
	case DWC3_GCTL_PRTCAP_HOST:
		dwc3_host_exit(dwc);
		break;
	case DWC3_GCTL_PRTCAP_DEVICE:
		dwc3_gadget_exit(dwc);
		dwc3_event_buffers_cleanup(dwc);
		break;
	case DWC3_GCTL_PRTCAP_OTG:
		dwc3_otg_exit(dwc);
		spin_lock_irqsave(&dwc->lock, flags);
		dwc->desired_otg_role = DWC3_OTG_ROLE_IDLE;
		spin_unlock_irqrestore(&dwc->lock, flags);
		dwc3_otg_update(dwc, 1);
		break;
	default:
		break;
	}

	/*
	 * When current_dr_role is not set, there's no role switching.
	 * Only perform GCTL.CoreSoftReset when there's DRD role switching.
	 */
	if (dwc->current_dr_role && ((DWC3_IP_IS(DWC3) ||
			DWC3_VER_IS_PRIOR(DWC31, 190A)) &&
			desired_dr_role != DWC3_GCTL_PRTCAP_OTG)) {
		reg = dwc3_readl(dwc->regs, DWC3_GCTL);
		reg |= DWC3_GCTL_CORESOFTRESET;
		dwc3_writel(dwc->regs, DWC3_GCTL, reg);

		/*
		 * Wait for internal clocks to synchronized. DWC_usb31 and
		 * DWC_usb32 may need at least 50ms (less for DWC_usb3). To
		 * keep it consistent across different IPs, let's wait up to
		 * 100ms before clearing GCTL.CORESOFTRESET.
		 */
		msleep(100);

		reg = dwc3_readl(dwc->regs, DWC3_GCTL);
		reg &= ~DWC3_GCTL_CORESOFTRESET;
		dwc3_writel(dwc->regs, DWC3_GCTL, reg);
	}

	spin_lock_irqsave(&dwc->lock, flags);

	dwc3_set_prtcap(dwc, desired_dr_role);

	spin_unlock_irqrestore(&dwc->lock, flags);

	/* Bring up the newly selected role. */
	switch (desired_dr_role) {
	case DWC3_GCTL_PRTCAP_HOST:
		ret = dwc3_host_init(dwc);
		if (ret) {
			dev_err(dwc->dev, "failed to initialize host\n");
		} else {
			if (dwc->usb2_phy)
				otg_set_vbus(dwc->usb2_phy->otg, true);
			phy_set_mode(dwc->usb2_generic_phy, PHY_MODE_USB_HOST);
			phy_set_mode(dwc->usb3_generic_phy, PHY_MODE_USB_HOST);
			if (dwc->dis_split_quirk) {
				reg = dwc3_readl(dwc->regs, DWC3_GUCTL3);
				reg |= DWC3_GUCTL3_SPLITDISABLE;
				dwc3_writel(dwc->regs, DWC3_GUCTL3, reg);
			}
		}
		break;
	case DWC3_GCTL_PRTCAP_DEVICE:
		dwc3_core_soft_reset(dwc);

		dwc3_event_buffers_setup(dwc);

		if (dwc->usb2_phy)
			otg_set_vbus(dwc->usb2_phy->otg, false);
		phy_set_mode(dwc->usb2_generic_phy, PHY_MODE_USB_DEVICE);
		phy_set_mode(dwc->usb3_generic_phy, PHY_MODE_USB_DEVICE);

		ret = dwc3_gadget_init(dwc);
		if (ret)
			dev_err(dwc->dev, "failed to initialize peripheral\n");
		break;
	case DWC3_GCTL_PRTCAP_OTG:
		dwc3_otg_init(dwc);
		dwc3_otg_update(dwc, 0);
		break;
	default:
		break;
	}

out:
	pm_runtime_mark_last_busy(dwc->dev);
	pm_runtime_put_autosuspend(dwc->dev);
	mutex_unlock(&dwc->mutex);
}
/*
 * Request a DRD role switch. Records the desired role under the lock
 * and defers the actual switch to __dwc3_set_mode() on a freezable
 * workqueue. Ignored unless the controller is configured for OTG.
 */
void dwc3_set_mode(struct dwc3 *dwc, u32 mode)
{
	unsigned long flags;

	if (dwc->dr_mode != USB_DR_MODE_OTG)
		return;

	spin_lock_irqsave(&dwc->lock, flags);
	dwc->desired_dr_role = mode;
	spin_unlock_irqrestore(&dwc->lock, flags);

	queue_work(system_freezable_wq, &dwc->drd_work);
}
/*
 * Query the free space of one hardware FIFO: select the FIFO of the
 * given endpoint/type in GDBGFIFOSPACE, then read back the available
 * space field.
 */
u32 dwc3_core_fifo_space(struct dwc3_ep *dep, u8 type)
{
	struct dwc3 *dwc = dep->dwc;
	u32 select = DWC3_GDBGFIFOSPACE_NUM(dep->number) |
		     DWC3_GDBGFIFOSPACE_TYPE(type);
	u32 reg;

	dwc3_writel(dwc->regs, DWC3_GDBGFIFOSPACE, select);
	reg = dwc3_readl(dwc->regs, DWC3_GDBGFIFOSPACE);

	return DWC3_GDBGFIFOSPACE_SPACE_AVAILABLE(reg);
}
/**
 * dwc3_core_soft_reset - Issues core soft reset and PHY reset
 * @dwc: pointer to our context structure
 *
 * Sets DCTL.CSFTRST and polls for the bit to self-clear, with
 * IP-version-specific polling intervals and post-reset delays.
 *
 * Returns 0 on success (or when reset is skipped in host mode),
 * -ETIMEDOUT if the controller never clears CSFTRST.
 */
int dwc3_core_soft_reset(struct dwc3 *dwc)
{
	u32 reg;
	int retries = 1000;

	/*
	 * We're resetting only the device side because, if we're in host mode,
	 * XHCI driver will reset the host block. If dwc3 was configured for
	 * host-only mode or current role is host, then we can return early.
	 */
	if (dwc->dr_mode == USB_DR_MODE_HOST || dwc->current_dr_role == DWC3_GCTL_PRTCAP_HOST)
		return 0;

	/* Request the soft reset with Run/Stop deasserted. */
	reg = dwc3_readl(dwc->regs, DWC3_DCTL);
	reg |= DWC3_DCTL_CSFTRST;
	reg &= ~DWC3_DCTL_RUN_STOP;
	dwc3_gadget_dctl_write_safe(dwc, reg);

	/*
	 * For DWC_usb31 controller 1.90a and later, the DCTL.CSFRST bit
	 * is cleared only after all the clocks are synchronized. This can
	 * take a little more than 50ms. Set the polling rate at 20ms
	 * for 10 times instead.
	 */
	if (DWC3_VER_IS_WITHIN(DWC31, 190A, ANY) || DWC3_IP_IS(DWC32))
		retries = 10;

	do {
		reg = dwc3_readl(dwc->regs, DWC3_DCTL);
		if (!(reg & DWC3_DCTL_CSFTRST))
			goto done;

		if (DWC3_VER_IS_WITHIN(DWC31, 190A, ANY) || DWC3_IP_IS(DWC32))
			msleep(20);
		else
			udelay(1);
	} while (--retries);

	dev_warn(dwc->dev, "DWC3 controller soft reset failed.\n");
	return -ETIMEDOUT;

done:
	/*
	 * For DWC_usb31 controller 1.80a and prior, once DCTL.CSFRST bit
	 * is cleared, we must wait at least 50ms before accessing the PHY
	 * domain (synchronization delay).
	 */
	if (DWC3_VER_IS_WITHIN(DWC31, ANY, 180A))
		msleep(50);

	return 0;
}
/*
 * dwc3_frame_length_adjustment - Adjusts frame length if required
 * @dwc: Pointer to our controller context structure
 *
 * Writes the requested 30MHz frame-length adjustment into GFLADJ when
 * it differs from the current hardware value. GFLADJ is only available
 * on revision 2.50a and later; fladj == 0 means "keep the default".
 */
static void dwc3_frame_length_adjustment(struct dwc3 *dwc)
{
	u32 reg;

	if (DWC3_VER_IS_PRIOR(DWC3, 250A) || dwc->fladj == 0)
		return;

	reg = dwc3_readl(dwc->regs, DWC3_GFLADJ);
	if ((reg & DWC3_GFLADJ_30MHZ_MASK) == dwc->fladj)
		return;

	reg &= ~DWC3_GFLADJ_30MHZ_MASK;
	reg |= DWC3_GFLADJ_30MHZ_SDBND_SEL | dwc->fladj;
	dwc3_writel(dwc->regs, DWC3_GFLADJ, reg);
}
/**
 * dwc3_ref_clk_period - Reference clock period configuration
 *		Default reference clock period depends on hardware
 *		configuration. For systems with reference clock that differs
 *		from the default, this will set clock period in DWC3_GUCTL
 *		register.
 * @dwc: Pointer to our controller context structure
 */
static void dwc3_ref_clk_period(struct dwc3 *dwc)
{
	unsigned long period;
	unsigned long fladj;
	unsigned long decr;
	unsigned long rate;
	u32 reg;

	/*
	 * Derive period (ns) and rate (Hz) from whichever of the ref clock
	 * or the "snps,ref-clock-period-ns" property is available.
	 */
	if (dwc->ref_clk) {
		rate = clk_get_rate(dwc->ref_clk);
		if (!rate)
			return;
		period = NSEC_PER_SEC / rate;
	} else if (dwc->ref_clk_per) {
		period = dwc->ref_clk_per;
		rate = NSEC_PER_SEC / period;
	} else {
		return;
	}

	reg = dwc3_readl(dwc->regs, DWC3_GUCTL);
	reg &= ~DWC3_GUCTL_REFCLKPER_MASK;
	reg |= FIELD_PREP(DWC3_GUCTL_REFCLKPER_MASK, period);
	dwc3_writel(dwc->regs, DWC3_GUCTL, reg);

	/* GFLADJ fine adjustment only exists on 2.50a and later. */
	if (DWC3_VER_IS_PRIOR(DWC3, 250A))
		return;

	/*
	 * The calculation below is
	 *
	 * 125000 * (NSEC_PER_SEC / (rate * period) - 1)
	 *
	 * but rearranged for fixed-point arithmetic. The division must be
	 * 64-bit because 125000 * NSEC_PER_SEC doesn't fit in 32 bits (and
	 * neither does rate * period).
	 *
	 * Note that rate * period ~= NSEC_PER_SECOND, minus the number of
	 * nanoseconds of error caused by the truncation which happened during
	 * the division when calculating rate or period (whichever one was
	 * derived from the other). We first calculate the relative error, then
	 * scale it to units of 8 ppm.
	 */
	fladj = div64_u64(125000ULL * NSEC_PER_SEC, (u64)rate * period);
	fladj -= 125000;

	/*
	 * The documented 240MHz constant is scaled by 2 to get PLS1 as well.
	 */
	decr = 480000000 / rate;

	reg = dwc3_readl(dwc->regs, DWC3_GFLADJ);
	reg &= ~DWC3_GFLADJ_REFCLK_FLADJ_MASK
	    &  ~DWC3_GFLADJ_240MHZDECR
	    &  ~DWC3_GFLADJ_240MHZDECR_PLS1;
	reg |= FIELD_PREP(DWC3_GFLADJ_REFCLK_FLADJ_MASK, fladj)
	    |  FIELD_PREP(DWC3_GFLADJ_240MHZDECR, decr >> 1)
	    |  FIELD_PREP(DWC3_GFLADJ_240MHZDECR_PLS1, decr & 1);

	if (dwc->gfladj_refclk_lpm_sel)
		reg |=  DWC3_GFLADJ_REFCLK_LPM_SEL;

	dwc3_writel(dwc->regs, DWC3_GFLADJ, reg);
}
/**
 * dwc3_free_one_event_buffer - Frees one event buffer
 * @dwc: Pointer to our controller context structure
 * @evt: Pointer to event buffer to be freed
 *
 * Releases only the DMA-coherent buffer; the descriptor and its cache
 * are devm-managed and freed with the device.
 */
static void dwc3_free_one_event_buffer(struct dwc3 *dwc,
		struct dwc3_event_buffer *evt)
{
	dma_free_coherent(dwc->sysdev, evt->length, evt->buf, evt->dma);
}
/**
 * dwc3_alloc_one_event_buffer - Allocates one event buffer structure
 * @dwc: Pointer to our controller context structure
 * @length: size of the event buffer
 *
 * Allocates the descriptor and a CPU-side cache via devm, plus a
 * DMA-coherent buffer the controller writes events into.
 *
 * Returns a pointer to the allocated event buffer structure on success
 * otherwise ERR_PTR(-ENOMEM).
 */
static struct dwc3_event_buffer *dwc3_alloc_one_event_buffer(struct dwc3 *dwc,
		unsigned int length)
{
	struct dwc3_event_buffer *evt;

	evt = devm_kzalloc(dwc->dev, sizeof(*evt), GFP_KERNEL);
	if (!evt)
		return ERR_PTR(-ENOMEM);

	evt->dwc	= dwc;
	evt->length	= length;

	/* CPU-side shadow of the event buffer contents. */
	evt->cache = devm_kzalloc(dwc->dev, length, GFP_KERNEL);
	if (!evt->cache)
		return ERR_PTR(-ENOMEM);

	/* Buffer the hardware DMAs events into. */
	evt->buf = dma_alloc_coherent(dwc->sysdev, length, &evt->dma,
			GFP_KERNEL);
	if (!evt->buf)
		return ERR_PTR(-ENOMEM);

	return evt;
}
/**
 * dwc3_free_event_buffers - frees all allocated event buffers
 * @dwc: Pointer to our controller context structure
 */
static void dwc3_free_event_buffers(struct dwc3 *dwc)
{
	struct dwc3_event_buffer *evt = dwc->ev_buf;

	if (!evt)
		return;

	dwc3_free_one_event_buffer(dwc, evt);
}
/**
 * dwc3_alloc_event_buffers - Allocates @num event buffers of size @length
 * @dwc: pointer to our controller context structure
 * @length: size of event buffer
 *
 * Returns 0 on success otherwise negative errno. In the error case, dwc
 * may contain some buffers allocated but not all which were requested.
 */
static int dwc3_alloc_event_buffers(struct dwc3 *dwc, unsigned int length)
{
	struct dwc3_event_buffer *evt = dwc3_alloc_one_event_buffer(dwc,
			length);

	if (IS_ERR(evt)) {
		dev_err(dwc->dev, "can't allocate event buffer\n");
		return PTR_ERR(evt);
	}

	dwc->ev_buf = evt;
	return 0;
}
/**
 * dwc3_event_buffers_setup - setup our allocated event buffers
 * @dwc: pointer to our controller context structure
 *
 * Programs the DMA address, size and count of event buffer 0 into the
 * GEVNT* registers and resets the software read position.
 *
 * Returns 0 on success otherwise negative errno.
 */
int dwc3_event_buffers_setup(struct dwc3 *dwc)
{
	struct dwc3_event_buffer *evt;

	evt = dwc->ev_buf;
	evt->lpos = 0;
	dwc3_writel(dwc->regs, DWC3_GEVNTADRLO(0),
			lower_32_bits(evt->dma));
	dwc3_writel(dwc->regs, DWC3_GEVNTADRHI(0),
			upper_32_bits(evt->dma));
	dwc3_writel(dwc->regs, DWC3_GEVNTSIZ(0),
			DWC3_GEVNTSIZ_SIZE(evt->length));
	/* Clear any pending event count. */
	dwc3_writel(dwc->regs, DWC3_GEVNTCOUNT(0), 0);

	return 0;
}
/*
 * Detach event buffer 0 from the hardware: zero the address registers,
 * mask the interrupt via GEVNTSIZ, and clear the event count.
 */
void dwc3_event_buffers_cleanup(struct dwc3 *dwc)
{
	struct dwc3_event_buffer *evt;

	evt = dwc->ev_buf;

	evt->lpos = 0;

	dwc3_writel(dwc->regs, DWC3_GEVNTADRLO(0), 0);
	dwc3_writel(dwc->regs, DWC3_GEVNTADRHI(0), 0);
	dwc3_writel(dwc->regs, DWC3_GEVNTSIZ(0), DWC3_GEVNTSIZ_INTMASK
			| DWC3_GEVNTSIZ_SIZE(0));
	dwc3_writel(dwc->regs, DWC3_GEVNTCOUNT(0), 0);
}
/* Cache the endpoint count advertised by the hardware parameters. */
static void dwc3_core_num_eps(struct dwc3 *dwc)
{
	dwc->num_eps = DWC3_NUM_EPS(&dwc->hwparams);
}
/*
 * Snapshot all GHWPARAMS registers so later code can consult the
 * hardware configuration without further register reads.
 * GHWPARAMS9 only exists on DWC_usb32.
 */
static void dwc3_cache_hwparams(struct dwc3 *dwc)
{
	struct dwc3_hwparams *parms = &dwc->hwparams;

	parms->hwparams0 = dwc3_readl(dwc->regs, DWC3_GHWPARAMS0);
	parms->hwparams1 = dwc3_readl(dwc->regs, DWC3_GHWPARAMS1);
	parms->hwparams2 = dwc3_readl(dwc->regs, DWC3_GHWPARAMS2);
	parms->hwparams3 = dwc3_readl(dwc->regs, DWC3_GHWPARAMS3);
	parms->hwparams4 = dwc3_readl(dwc->regs, DWC3_GHWPARAMS4);
	parms->hwparams5 = dwc3_readl(dwc->regs, DWC3_GHWPARAMS5);
	parms->hwparams6 = dwc3_readl(dwc->regs, DWC3_GHWPARAMS6);
	parms->hwparams7 = dwc3_readl(dwc->regs, DWC3_GHWPARAMS7);
	parms->hwparams8 = dwc3_readl(dwc->regs, DWC3_GHWPARAMS8);

	if (DWC3_IP_IS(DWC32))
		parms->hwparams9 = dwc3_readl(dwc->regs, DWC3_GHWPARAMS9);
}
static int dwc3_core_ulpi_init(struct dwc3 *dwc)
{
int intf;
int ret = 0;
intf = DWC3_GHWPARAMS3_HSPHY_IFC(dwc->hwparams.hwparams3);
if (intf == DWC3_GHWPARAMS3_HSPHY_IFC_ULPI ||
(intf == DWC3_GHWPARAMS3_HSPHY_IFC_UTMI_ULPI &&
dwc->hsphy_interface &&
!strncmp(dwc->hsphy_interface, "ulpi", 4)))
ret = dwc3_ulpi_init(dwc);
return ret;
}
/**
 * dwc3_phy_setup - Configure USB PHY Interface of DWC3 Core
 * @dwc: Pointer to our controller context structure
 *
 * Returns 0 on success. The USB PHY interfaces are configured but not
 * initialized. The PHY interfaces and the PHYs get initialized together with
 * the core in dwc3_core_init.
 */
static int dwc3_phy_setup(struct dwc3 *dwc)
{
	unsigned int hw_mode;
	u32 reg;

	hw_mode = DWC3_GHWPARAMS0_MODE(dwc->hwparams.hwparams0);

	/* --- USB3 (PIPE) PHY configuration --- */
	reg = dwc3_readl(dwc->regs, DWC3_GUSB3PIPECTL(0));

	/*
	 * Make sure UX_EXIT_PX is cleared as that causes issues with some
	 * PHYs. Also, this bit is not supposed to be used in normal operation.
	 */
	reg &= ~DWC3_GUSB3PIPECTL_UX_EXIT_PX;

	/*
	 * Above 1.94a, it is recommended to set DWC3_GUSB3PIPECTL_SUSPHY
	 * to '0' during coreConsultant configuration. So default value
	 * will be '0' when the core is reset. Application needs to set it
	 * to '1' after the core initialization is completed.
	 */
	if (!DWC3_VER_IS_WITHIN(DWC3, ANY, 194A))
		reg |= DWC3_GUSB3PIPECTL_SUSPHY;

	/*
	 * For DRD controllers, GUSB3PIPECTL.SUSPENDENABLE must be cleared after
	 * power-on reset, and it can be set after core initialization, which is
	 * after device soft-reset during initialization.
	 */
	if (hw_mode == DWC3_GHWPARAMS0_MODE_DRD)
		reg &= ~DWC3_GUSB3PIPECTL_SUSPHY;

	/* Apply the platform-requested USB3 PIPE quirks. */
	if (dwc->u2ss_inp3_quirk)
		reg |= DWC3_GUSB3PIPECTL_U2SSINP3OK;

	if (dwc->dis_rxdet_inp3_quirk)
		reg |= DWC3_GUSB3PIPECTL_DISRXDETINP3;

	if (dwc->req_p1p2p3_quirk)
		reg |= DWC3_GUSB3PIPECTL_REQP1P2P3;

	if (dwc->del_p1p2p3_quirk)
		reg |= DWC3_GUSB3PIPECTL_DEP1P2P3_EN;

	if (dwc->del_phy_power_chg_quirk)
		reg |= DWC3_GUSB3PIPECTL_DEPOCHANGE;

	if (dwc->lfps_filter_quirk)
		reg |= DWC3_GUSB3PIPECTL_LFPSFILT;

	if (dwc->rx_detect_poll_quirk)
		reg |= DWC3_GUSB3PIPECTL_RX_DETOPOLL;

	if (dwc->tx_de_emphasis_quirk)
		reg |= DWC3_GUSB3PIPECTL_TX_DEEPH(dwc->tx_de_emphasis);

	if (dwc->dis_u3_susphy_quirk)
		reg &= ~DWC3_GUSB3PIPECTL_SUSPHY;

	if (dwc->dis_del_phy_power_chg_quirk)
		reg &= ~DWC3_GUSB3PIPECTL_DEPOCHANGE;

	dwc3_writel(dwc->regs, DWC3_GUSB3PIPECTL(0), reg);

	/* --- USB2 (UTMI/ULPI) PHY configuration --- */
	reg = dwc3_readl(dwc->regs, DWC3_GUSB2PHYCFG(0));

	/* Select the HS PHY interface */
	switch (DWC3_GHWPARAMS3_HSPHY_IFC(dwc->hwparams.hwparams3)) {
	case DWC3_GHWPARAMS3_HSPHY_IFC_UTMI_ULPI:
		if (dwc->hsphy_interface &&
				!strncmp(dwc->hsphy_interface, "utmi", 4)) {
			reg &= ~DWC3_GUSB2PHYCFG_ULPI_UTMI;
			break;
		} else if (dwc->hsphy_interface &&
				!strncmp(dwc->hsphy_interface, "ulpi", 4)) {
			reg |= DWC3_GUSB2PHYCFG_ULPI_UTMI;
			dwc3_writel(dwc->regs, DWC3_GUSB2PHYCFG(0), reg);
		} else {
			/* Relying on default value. */
			if (!(reg & DWC3_GUSB2PHYCFG_ULPI_UTMI))
				break;
		}
		fallthrough;
	case DWC3_GHWPARAMS3_HSPHY_IFC_ULPI:
	default:
		break;
	}

	/* Program UTMI data-bus width and turnaround time. */
	switch (dwc->hsphy_mode) {
	case USBPHY_INTERFACE_MODE_UTMI:
		reg &= ~(DWC3_GUSB2PHYCFG_PHYIF_MASK |
		       DWC3_GUSB2PHYCFG_USBTRDTIM_MASK);
		reg |= DWC3_GUSB2PHYCFG_PHYIF(UTMI_PHYIF_8_BIT) |
		       DWC3_GUSB2PHYCFG_USBTRDTIM(USBTRDTIM_UTMI_8_BIT);
		break;
	case USBPHY_INTERFACE_MODE_UTMIW:
		reg &= ~(DWC3_GUSB2PHYCFG_PHYIF_MASK |
		       DWC3_GUSB2PHYCFG_USBTRDTIM_MASK);
		reg |= DWC3_GUSB2PHYCFG_PHYIF(UTMI_PHYIF_16_BIT) |
		       DWC3_GUSB2PHYCFG_USBTRDTIM(USBTRDTIM_UTMI_16_BIT);
		break;
	default:
		break;
	}

	/*
	 * Above 1.94a, it is recommended to set DWC3_GUSB2PHYCFG_SUSPHY to
	 * '0' during coreConsultant configuration. So default value will
	 * be '0' when the core is reset. Application needs to set it to
	 * '1' after the core initialization is completed.
	 */
	if (!DWC3_VER_IS_WITHIN(DWC3, ANY, 194A))
		reg |= DWC3_GUSB2PHYCFG_SUSPHY;

	/*
	 * For DRD controllers, GUSB2PHYCFG.SUSPHY must be cleared after
	 * power-on reset, and it can be set after core initialization, which is
	 * after device soft-reset during initialization.
	 */
	if (hw_mode == DWC3_GHWPARAMS0_MODE_DRD)
		reg &= ~DWC3_GUSB2PHYCFG_SUSPHY;

	if (dwc->dis_u2_susphy_quirk)
		reg &= ~DWC3_GUSB2PHYCFG_SUSPHY;

	if (dwc->dis_enblslpm_quirk)
		reg &= ~DWC3_GUSB2PHYCFG_ENBLSLPM;
	else
		reg |= DWC3_GUSB2PHYCFG_ENBLSLPM;

	if (dwc->dis_u2_freeclk_exists_quirk || dwc->gfladj_refclk_lpm_sel)
		reg &= ~DWC3_GUSB2PHYCFG_U2_FREECLK_EXISTS;

	/*
	 * Some ULPI USB PHY does not support internal VBUS supply, to drive
	 * the CPEN pin requires the configuration of the ULPI DRVVBUSEXTERNAL
	 * bit of OTG_CTRL register. Controller configures the USB2 PHY
	 * ULPIEXTVBUSDRV bit[17] of the GUSB2PHYCFG register to drive vBus
	 * with an external supply.
	 */
	if (dwc->ulpi_ext_vbus_drv)
		reg |= DWC3_GUSB2PHYCFG_ULPIEXTVBUSDRV;

	dwc3_writel(dwc->regs, DWC3_GUSB2PHYCFG(0), reg);

	return 0;
}
/*
 * Initialize the legacy USB PHYs, then the generic PHYs (USB2 before
 * USB3). On generic-PHY failure, unwind in reverse order, including
 * shutting down the legacy PHYs again.
 */
static int dwc3_phy_init(struct dwc3 *dwc)
{
	int ret;

	usb_phy_init(dwc->usb2_phy);
	usb_phy_init(dwc->usb3_phy);

	ret = phy_init(dwc->usb2_generic_phy);
	if (ret < 0)
		goto err_shutdown_usb3_phy;

	ret = phy_init(dwc->usb3_generic_phy);
	if (ret < 0)
		goto err_exit_usb2_phy;

	return 0;

err_exit_usb2_phy:
	phy_exit(dwc->usb2_generic_phy);
err_shutdown_usb3_phy:
	usb_phy_shutdown(dwc->usb3_phy);
	usb_phy_shutdown(dwc->usb2_phy);

	return ret;
}
/* Reverse of dwc3_phy_init(): generic PHYs first, then legacy PHYs. */
static void dwc3_phy_exit(struct dwc3 *dwc)
{
	phy_exit(dwc->usb3_generic_phy);
	phy_exit(dwc->usb2_generic_phy);

	usb_phy_shutdown(dwc->usb3_phy);
	usb_phy_shutdown(dwc->usb2_phy);
}
/*
 * Power on the PHYs: un-suspend the legacy PHYs, then power on the
 * generic PHYs (USB2 before USB3). On failure, unwind in reverse
 * order, re-suspending the legacy PHYs.
 */
static int dwc3_phy_power_on(struct dwc3 *dwc)
{
	int ret;

	usb_phy_set_suspend(dwc->usb2_phy, 0);
	usb_phy_set_suspend(dwc->usb3_phy, 0);

	ret = phy_power_on(dwc->usb2_generic_phy);
	if (ret < 0)
		goto err_suspend_usb3_phy;

	ret = phy_power_on(dwc->usb3_generic_phy);
	if (ret < 0)
		goto err_power_off_usb2_phy;

	return 0;

err_power_off_usb2_phy:
	phy_power_off(dwc->usb2_generic_phy);
err_suspend_usb3_phy:
	usb_phy_set_suspend(dwc->usb3_phy, 1);
	usb_phy_set_suspend(dwc->usb2_phy, 1);

	return ret;
}
/* Reverse of dwc3_phy_power_on(): power off generic PHYs, suspend legacy ones. */
static void dwc3_phy_power_off(struct dwc3 *dwc)
{
	phy_power_off(dwc->usb3_generic_phy);
	phy_power_off(dwc->usb2_generic_phy);

	usb_phy_set_suspend(dwc->usb3_phy, 1);
	usb_phy_set_suspend(dwc->usb2_phy, 1);
}
/*
 * Enable the bus, reference and suspend clocks in order, unwinding the
 * already-enabled ones on failure. All three clock handles are
 * optional (NULL clk is a no-op for the clk API).
 */
static int dwc3_clk_enable(struct dwc3 *dwc)
{
	int ret;

	ret = clk_prepare_enable(dwc->bus_clk);
	if (ret)
		return ret;

	ret = clk_prepare_enable(dwc->ref_clk);
	if (ret)
		goto disable_bus_clk;

	ret = clk_prepare_enable(dwc->susp_clk);
	if (ret)
		goto disable_ref_clk;

	return 0;

disable_ref_clk:
	clk_disable_unprepare(dwc->ref_clk);
disable_bus_clk:
	clk_disable_unprepare(dwc->bus_clk);
	return ret;
}
/* Disable the clocks in the reverse order of dwc3_clk_enable(). */
static void dwc3_clk_disable(struct dwc3 *dwc)
{
	clk_disable_unprepare(dwc->susp_clk);
	clk_disable_unprepare(dwc->ref_clk);
	clk_disable_unprepare(dwc->bus_clk);
}
/*
 * Full core teardown: detach event buffers, power off and exit the
 * PHYs, stop the clocks, and finally assert the controller reset.
 */
static void dwc3_core_exit(struct dwc3 *dwc)
{
	dwc3_event_buffers_cleanup(dwc);
	dwc3_phy_power_off(dwc);
	dwc3_phy_exit(dwc);
	dwc3_clk_disable(dwc);
	reset_control_assert(dwc->reset);
}
/*
 * Identify the Synopsys IP from GSNPSID and cache its revision info.
 * Returns false when the register does not identify a known DWC_usb3,
 * DWC_usb31 or DWC_usb32 core (e.g. bad register mapping).
 */
static bool dwc3_core_is_valid(struct dwc3 *dwc)
{
	u32 reg = dwc3_readl(dwc->regs, DWC3_GSNPSID);

	dwc->ip = DWC3_GSNPS_ID(reg);

	/* This should read as U3 followed by revision number */
	if (DWC3_IP_IS(DWC3)) {
		/* DWC_usb3 keeps the revision inside GSNPSID itself. */
		dwc->revision = reg;
		return true;
	}

	if (DWC3_IP_IS(DWC31) || DWC3_IP_IS(DWC32)) {
		/* DWC_usb31/32 expose revision info in dedicated registers. */
		dwc->revision = dwc3_readl(dwc->regs, DWC3_VER_NUMBER);
		dwc->version_type = dwc3_readl(dwc->regs, DWC3_VER_TYPE);
		return true;
	}

	return false;
}
/*
 * Program the GCTL power-optimization, scrambling and LFPS quirk bits
 * according to the hardware parameters and platform quirks.
 */
static void dwc3_core_setup_global_control(struct dwc3 *dwc)
{
	u32 reg;

	reg = dwc3_readl(dwc->regs, DWC3_GCTL);
	reg &= ~DWC3_GCTL_SCALEDOWN_MASK;

	switch (DWC3_GHWPARAMS1_EN_PWROPT(dwc->hwparams.hwparams1)) {
	case DWC3_GHWPARAMS1_EN_PWROPT_CLK:
		/**
		 * WORKAROUND: DWC3 revisions between 2.10a and 2.50a have an
		 * issue which would cause xHCI compliance tests to fail.
		 *
		 * Because of that we cannot enable clock gating on such
		 * configurations.
		 *
		 * Refers to:
		 *
		 * STAR#9000588375: Clock Gating, SOF Issues when ref_clk-Based
		 * SOF/ITP Mode Used
		 */
		if ((dwc->dr_mode == USB_DR_MODE_HOST ||
				dwc->dr_mode == USB_DR_MODE_OTG) &&
				DWC3_VER_IS_WITHIN(DWC3, 210A, 250A))
			reg |= DWC3_GCTL_DSBLCLKGTNG | DWC3_GCTL_SOFITPSYNC;
		else
			reg &= ~DWC3_GCTL_DSBLCLKGTNG;
		break;
	case DWC3_GHWPARAMS1_EN_PWROPT_HIB:
		/*
		 * REVISIT Enabling this bit so that host-mode hibernation
		 * will work. Device-mode hibernation is not yet implemented.
		 */
		reg |= DWC3_GCTL_GBLHIBERNATIONEN;
		break;
	default:
		/* nothing */
		break;
	}

	/* check if current dwc3 is on simulation board */
	if (dwc->hwparams.hwparams6 & DWC3_GHWPARAMS6_EN_FPGA) {
		dev_info(dwc->dev, "Running with FPGA optimizations\n");
		dwc->is_fpga = true;
	}

	/* Scramble disable is only meaningful (and safe) on FPGA builds. */
	WARN_ONCE(dwc->disable_scramble_quirk && !dwc->is_fpga,
			"disable_scramble cannot be used on non-FPGA builds\n");

	if (dwc->disable_scramble_quirk && dwc->is_fpga)
		reg |= DWC3_GCTL_DISSCRAMBLE;
	else
		reg &= ~DWC3_GCTL_DISSCRAMBLE;

	if (dwc->u2exit_lfps_quirk)
		reg |= DWC3_GCTL_U2EXIT_LFPS;

	/*
	 * WORKAROUND: DWC3 revisions <1.90a have a bug
	 * where the device can fail to connect at SuperSpeed
	 * and falls back to high-speed mode which causes
	 * the device to enter a Connect/Disconnect loop
	 */
	if (DWC3_VER_IS_PRIOR(DWC3, 190A))
		reg |= DWC3_GCTL_U2RSTECN;

	dwc3_writel(dwc->regs, DWC3_GCTL, reg);
}
static int dwc3_core_get_phy(struct dwc3 *dwc);
static int dwc3_core_ulpi_init(struct dwc3 *dwc);
/* set global incr burst type configuration registers */
static void dwc3_set_incr_burst_type(struct dwc3 *dwc)
{
	struct device *dev = dwc->dev;
	/* incrx_mode : for INCR burst type. */
	bool incrx_mode;
	/* incrx_size : for size of INCRX burst. */
	u32 incrx_size;
	u32 *vals;
	u32 cfg;
	int ntype;
	int ret;
	int i;

	cfg = dwc3_readl(dwc->regs, DWC3_GSBUSCFG0);

	/*
	 * Handle property "snps,incr-burst-type-adjustment".
	 * Get the number of value from this property:
	 * result <= 0, means this property is not supported.
	 * result = 1, means INCRx burst mode supported.
	 * result > 1, means undefined length burst mode supported.
	 */
	ntype = device_property_count_u32(dev, "snps,incr-burst-type-adjustment");
	if (ntype <= 0)
		return;

	vals = kcalloc(ntype, sizeof(u32), GFP_KERNEL);
	if (!vals)
		return;

	/* Get INCR burst type, and parse it */
	ret = device_property_read_u32_array(dev,
			"snps,incr-burst-type-adjustment", vals, ntype);
	if (ret) {
		kfree(vals);
		dev_err(dev, "Error to get property\n");
		return;
	}

	incrx_size = *vals;

	if (ntype > 1) {
		/* INCRX (undefined length) burst mode */
		incrx_mode = INCRX_UNDEF_LENGTH_BURST_MODE;
		/* Use the largest burst size listed in the property. */
		for (i = 1; i < ntype; i++) {
			if (vals[i] > incrx_size)
				incrx_size = vals[i];
		}
	} else {
		/* INCRX burst mode */
		incrx_mode = INCRX_BURST_MODE;
	}

	kfree(vals);

	/* Enable Undefined Length INCR Burst and Enable INCRx Burst */
	cfg &= ~DWC3_GSBUSCFG0_INCRBRST_MASK;
	if (incrx_mode)
		cfg |= DWC3_GSBUSCFG0_INCRBRSTENA;
	switch (incrx_size) {
	case 256:
		cfg |= DWC3_GSBUSCFG0_INCR256BRSTENA;
		break;
	case 128:
		cfg |= DWC3_GSBUSCFG0_INCR128BRSTENA;
		break;
	case 64:
		cfg |= DWC3_GSBUSCFG0_INCR64BRSTENA;
		break;
	case 32:
		cfg |= DWC3_GSBUSCFG0_INCR32BRSTENA;
		break;
	case 16:
		cfg |= DWC3_GSBUSCFG0_INCR16BRSTENA;
		break;
	case 8:
		cfg |= DWC3_GSBUSCFG0_INCR8BRSTENA;
		break;
	case 4:
		cfg |= DWC3_GSBUSCFG0_INCR4BRSTENA;
		break;
	case 1:
		break;
	default:
		dev_err(dev, "Invalid property\n");
		break;
	}

	dwc3_writel(dwc->regs, DWC3_GSBUSCFG0, cfg);
}
/*
 * Derive GCTL.PWRDNSCALE from the suspend clock rate. No-op when no
 * suspend clock handle is available.
 */
static void dwc3_set_power_down_clk_scale(struct dwc3 *dwc)
{
	u32 scale;
	u32 reg;

	if (!dwc->susp_clk)
		return;

	/*
	 * The power down scale field specifies how many suspend_clk
	 * periods fit into a 16KHz clock period. When performing
	 * the division, round up the remainder.
	 *
	 * The power down scale value is calculated using the fastest
	 * frequency of the suspend_clk. If it isn't fixed (but within
	 * the accuracy requirement), the driver may not know the max
	 * rate of the suspend_clk, so only update the power down scale
	 * if the default is less than the calculated value from
	 * clk_get_rate() or if the default is questionably high
	 * (3x or more) to be within the requirement.
	 */
	scale = DIV_ROUND_UP(clk_get_rate(dwc->susp_clk), 16000);
	reg = dwc3_readl(dwc->regs, DWC3_GCTL);
	if ((reg & DWC3_GCTL_PWRDNSCALE_MASK) < DWC3_GCTL_PWRDNSCALE(scale) ||
	    (reg & DWC3_GCTL_PWRDNSCALE_MASK) > DWC3_GCTL_PWRDNSCALE(scale*3)) {
		reg &= ~(DWC3_GCTL_PWRDNSCALE_MASK);
		reg |= DWC3_GCTL_PWRDNSCALE(scale);
		dwc3_writel(dwc->regs, DWC3_GCTL, reg);
	}
}
/**
 * dwc3_core_init - Low-level initialization of DWC3 Core
 * @dwc: Pointer to our controller context structure
 *
 * Performs PHY setup, controller soft reset, global register
 * configuration and event buffer setup. The order of the register
 * writes below matters; do not reorder.
 *
 * Returns 0 on success otherwise negative errno.
 */
static int dwc3_core_init(struct dwc3 *dwc)
{
	unsigned int hw_mode;
	u32 reg;
	int ret;
	hw_mode = DWC3_GHWPARAMS0_MODE(dwc->hwparams.hwparams0);
	/*
	 * Write Linux Version Code to our GUID register so it's easy to figure
	 * out which kernel version a bug was found.
	 */
	dwc3_writel(dwc->regs, DWC3_GUID, LINUX_VERSION_CODE);
	ret = dwc3_phy_setup(dwc);
	if (ret)
		return ret;
	/* One-time ULPI bus bring-up; a timeout is treated as transient */
	if (!dwc->ulpi_ready) {
		ret = dwc3_core_ulpi_init(dwc);
		if (ret) {
			if (ret == -ETIMEDOUT) {
				/* reset the core and retry the probe later */
				dwc3_core_soft_reset(dwc);
				ret = -EPROBE_DEFER;
			}
			return ret;
		}
		dwc->ulpi_ready = true;
	}
	/* PHY lookup is also done only once across init/resume cycles */
	if (!dwc->phys_ready) {
		ret = dwc3_core_get_phy(dwc);
		if (ret)
			goto err_exit_ulpi;
		dwc->phys_ready = true;
	}
	ret = dwc3_phy_init(dwc);
	if (ret)
		goto err_exit_ulpi;
	ret = dwc3_core_soft_reset(dwc);
	if (ret)
		goto err_exit_phy;
	/*
	 * For DRD controllers newer than 1.94a, re-enable PHY suspend bits
	 * (cleared around the soft reset) unless quirks forbid it.
	 */
	if (hw_mode == DWC3_GHWPARAMS0_MODE_DRD &&
	    !DWC3_VER_IS_WITHIN(DWC3, ANY, 194A)) {
		if (!dwc->dis_u3_susphy_quirk) {
			reg = dwc3_readl(dwc->regs, DWC3_GUSB3PIPECTL(0));
			reg |= DWC3_GUSB3PIPECTL_SUSPHY;
			dwc3_writel(dwc->regs, DWC3_GUSB3PIPECTL(0), reg);
		}
		if (!dwc->dis_u2_susphy_quirk) {
			reg = dwc3_readl(dwc->regs, DWC3_GUSB2PHYCFG(0));
			reg |= DWC3_GUSB2PHYCFG_SUSPHY;
			dwc3_writel(dwc->regs, DWC3_GUSB2PHYCFG(0), reg);
		}
	}
	dwc3_core_setup_global_control(dwc);
	dwc3_core_num_eps(dwc);
	/* Set power down scale of suspend_clk */
	dwc3_set_power_down_clk_scale(dwc);
	/* Adjust Frame Length */
	dwc3_frame_length_adjustment(dwc);
	/* Adjust Reference Clock Period */
	dwc3_ref_clk_period(dwc);
	dwc3_set_incr_burst_type(dwc);
	ret = dwc3_phy_power_on(dwc);
	if (ret)
		goto err_exit_phy;
	ret = dwc3_event_buffers_setup(dwc);
	if (ret) {
		dev_err(dwc->dev, "failed to setup event buffers\n");
		goto err_power_off_phy;
	}
	/*
	 * ENDXFER polling is available on version 3.10a and later of
	 * the DWC_usb3 controller. It is NOT available in the
	 * DWC_usb31 controller.
	 */
	if (DWC3_VER_IS_WITHIN(DWC3, 310A, ANY)) {
		reg = dwc3_readl(dwc->regs, DWC3_GUCTL2);
		reg |= DWC3_GUCTL2_RST_ACTBITLATER;
		dwc3_writel(dwc->regs, DWC3_GUCTL2, reg);
	}
	/*
	 * When configured in HOST mode, after issuing U3/L2 exit controller
	 * fails to send proper CRC checksum in CRC5 feild. Because of this
	 * behaviour Transaction Error is generated, resulting in reset and
	 * re-enumeration of usb device attached. All the termsel, xcvrsel,
	 * opmode becomes 0 during end of resume. Enabling bit 10 of GUCTL1
	 * will correct this problem. This option is to support certain
	 * legacy ULPI PHYs.
	 */
	if (dwc->resume_hs_terminations) {
		reg = dwc3_readl(dwc->regs, DWC3_GUCTL1);
		reg |= DWC3_GUCTL1_RESUME_OPMODE_HS_HOST;
		dwc3_writel(dwc->regs, DWC3_GUCTL1, reg);
	}
	/* GUCTL1 tuning bits only exist from 2.50a onwards */
	if (!DWC3_VER_IS_PRIOR(DWC3, 250A)) {
		reg = dwc3_readl(dwc->regs, DWC3_GUCTL1);
		/*
		 * Enable hardware control of sending remote wakeup
		 * in HS when the device is in the L1 state.
		 */
		if (!DWC3_VER_IS_PRIOR(DWC3, 290A))
			reg |= DWC3_GUCTL1_DEV_L1_EXIT_BY_HW;
		/*
		 * Decouple USB 2.0 L1 & L2 events which will allow for
		 * gadget driver to only receive U3/L2 suspend & wakeup
		 * events and prevent the more frequent L1 LPM transitions
		 * from interrupting the driver.
		 */
		if (!DWC3_VER_IS_PRIOR(DWC3, 300A))
			reg |= DWC3_GUCTL1_DEV_DECOUPLE_L1L2_EVT;
		if (dwc->dis_tx_ipgap_linecheck_quirk)
			reg |= DWC3_GUCTL1_TX_IPGAP_LINECHECK_DIS;
		if (dwc->parkmode_disable_ss_quirk)
			reg |= DWC3_GUCTL1_PARKMODE_DISABLE_SS;
		if (dwc->parkmode_disable_hs_quirk)
			reg |= DWC3_GUCTL1_PARKMODE_DISABLE_HS;
		/* run the 3.0 clock domain from the 2.0 clock for HS/FS-only */
		if (DWC3_VER_IS_WITHIN(DWC3, 290A, ANY) &&
		    (dwc->maximum_speed == USB_SPEED_HIGH ||
		     dwc->maximum_speed == USB_SPEED_FULL))
			reg |= DWC3_GUCTL1_DEV_FORCE_20_CLK_FOR_30_CLK;
		dwc3_writel(dwc->regs, DWC3_GUCTL1, reg);
	}
	/*
	 * Must config both number of packets and max burst settings to enable
	 * RX and/or TX threshold.
	 */
	if (!DWC3_IP_IS(DWC3) && dwc->dr_mode == USB_DR_MODE_HOST) {
		u8 rx_thr_num = dwc->rx_thr_num_pkt_prd;
		u8 rx_maxburst = dwc->rx_max_burst_prd;
		u8 tx_thr_num = dwc->tx_thr_num_pkt_prd;
		u8 tx_maxburst = dwc->tx_max_burst_prd;
		if (rx_thr_num && rx_maxburst) {
			reg = dwc3_readl(dwc->regs, DWC3_GRXTHRCFG);
			reg |= DWC31_RXTHRNUMPKTSEL_PRD;
			reg &= ~DWC31_RXTHRNUMPKT_PRD(~0);
			reg |= DWC31_RXTHRNUMPKT_PRD(rx_thr_num);
			reg &= ~DWC31_MAXRXBURSTSIZE_PRD(~0);
			reg |= DWC31_MAXRXBURSTSIZE_PRD(rx_maxburst);
			dwc3_writel(dwc->regs, DWC3_GRXTHRCFG, reg);
		}
		if (tx_thr_num && tx_maxburst) {
			reg = dwc3_readl(dwc->regs, DWC3_GTXTHRCFG);
			reg |= DWC31_TXTHRNUMPKTSEL_PRD;
			reg &= ~DWC31_TXTHRNUMPKT_PRD(~0);
			reg |= DWC31_TXTHRNUMPKT_PRD(tx_thr_num);
			reg &= ~DWC31_MAXTXBURSTSIZE_PRD(~0);
			reg |= DWC31_MAXTXBURSTSIZE_PRD(tx_maxburst);
			dwc3_writel(dwc->regs, DWC3_GTXTHRCFG, reg);
		}
	}
	return 0;
/* unwind in strict reverse order of the setup above */
err_power_off_phy:
	dwc3_phy_power_off(dwc);
err_exit_phy:
	dwc3_phy_exit(dwc);
err_exit_ulpi:
	dwc3_ulpi_exit(dwc);
	return ret;
}
/*
 * dwc3_core_get_phy - look up the (optional) USB2/USB3 PHYs
 * @dwc: controller context
 *
 * Both the legacy USB-PHY framework and the generic PHY framework are
 * tried. A missing PHY is not an error (the pointer is left NULL); any
 * other lookup failure aborts the probe.
 *
 * Returns 0 on success or a negative errno.
 */
static int dwc3_core_get_phy(struct dwc3 *dwc)
{
	struct device *dev = dwc->dev;
	struct device_node *node = dev->of_node;
	int err;

	/* Legacy USB-PHY framework: by phandle on DT, by type otherwise */
	if (node) {
		dwc->usb2_phy = devm_usb_get_phy_by_phandle(dev, "usb-phy", 0);
		dwc->usb3_phy = devm_usb_get_phy_by_phandle(dev, "usb-phy", 1);
	} else {
		dwc->usb2_phy = devm_usb_get_phy(dev, USB_PHY_TYPE_USB2);
		dwc->usb3_phy = devm_usb_get_phy(dev, USB_PHY_TYPE_USB3);
	}

	if (IS_ERR(dwc->usb2_phy)) {
		err = PTR_ERR(dwc->usb2_phy);
		if (err != -ENXIO && err != -ENODEV)
			return dev_err_probe(dev, err, "no usb2 phy configured\n");
		dwc->usb2_phy = NULL;	/* PHY simply absent */
	}

	if (IS_ERR(dwc->usb3_phy)) {
		err = PTR_ERR(dwc->usb3_phy);
		if (err != -ENXIO && err != -ENODEV)
			return dev_err_probe(dev, err, "no usb3 phy configured\n");
		dwc->usb3_phy = NULL;	/* PHY simply absent */
	}

	/* Generic PHY framework: absence is reported as -ENOSYS/-ENODEV */
	dwc->usb2_generic_phy = devm_phy_get(dev, "usb2-phy");
	if (IS_ERR(dwc->usb2_generic_phy)) {
		err = PTR_ERR(dwc->usb2_generic_phy);
		if (err != -ENOSYS && err != -ENODEV)
			return dev_err_probe(dev, err, "no usb2 phy configured\n");
		dwc->usb2_generic_phy = NULL;
	}

	dwc->usb3_generic_phy = devm_phy_get(dev, "usb3-phy");
	if (IS_ERR(dwc->usb3_generic_phy)) {
		err = PTR_ERR(dwc->usb3_generic_phy);
		if (err != -ENOSYS && err != -ENODEV)
			return dev_err_probe(dev, err, "no usb3 phy configured\n");
		dwc->usb3_generic_phy = NULL;
	}

	return 0;
}
/*
 * dwc3_core_init_mode - bring up the controller in the configured role
 * @dwc: controller context
 *
 * Programs the port capability, switches the PHYs to the matching mode
 * and initializes the gadget, host or dual-role layer accordingly.
 *
 * Returns 0 on success or a negative errno.
 */
static int dwc3_core_init_mode(struct dwc3 *dwc)
{
	struct device *dev = dwc->dev;
	int err;

	if (dwc->dr_mode == USB_DR_MODE_PERIPHERAL) {
		dwc3_set_prtcap(dwc, DWC3_GCTL_PRTCAP_DEVICE);
		if (dwc->usb2_phy)
			otg_set_vbus(dwc->usb2_phy->otg, false);
		phy_set_mode(dwc->usb2_generic_phy, PHY_MODE_USB_DEVICE);
		phy_set_mode(dwc->usb3_generic_phy, PHY_MODE_USB_DEVICE);
		err = dwc3_gadget_init(dwc);
		if (err)
			return dev_err_probe(dev, err, "failed to initialize gadget\n");
	} else if (dwc->dr_mode == USB_DR_MODE_HOST) {
		dwc3_set_prtcap(dwc, DWC3_GCTL_PRTCAP_HOST);
		if (dwc->usb2_phy)
			otg_set_vbus(dwc->usb2_phy->otg, true);
		phy_set_mode(dwc->usb2_generic_phy, PHY_MODE_USB_HOST);
		phy_set_mode(dwc->usb3_generic_phy, PHY_MODE_USB_HOST);
		err = dwc3_host_init(dwc);
		if (err)
			return dev_err_probe(dev, err, "failed to initialize host\n");
	} else if (dwc->dr_mode == USB_DR_MODE_OTG) {
		/* role switching happens from a workqueue context */
		INIT_WORK(&dwc->drd_work, __dwc3_set_mode);
		err = dwc3_drd_init(dwc);
		if (err)
			return dev_err_probe(dev, err, "failed to initialize dual-role\n");
	} else {
		dev_err(dev, "Unsupported mode of operation %d\n", dwc->dr_mode);
		return -EINVAL;
	}

	return 0;
}
/*
 * dwc3_core_exit_mode - tear down the role-specific layer
 * @dwc: controller context
 *
 * Counterpart of dwc3_core_init_mode(). Unknown modes are ignored.
 */
static void dwc3_core_exit_mode(struct dwc3 *dwc)
{
	if (dwc->dr_mode == USB_DR_MODE_PERIPHERAL)
		dwc3_gadget_exit(dwc);
	else if (dwc->dr_mode == USB_DR_MODE_HOST)
		dwc3_host_exit(dwc);
	else if (dwc->dr_mode == USB_DR_MODE_OTG)
		dwc3_drd_exit(dwc);

	/* de-assert DRVVBUS for HOST and OTG mode */
	dwc3_set_prtcap(dwc, DWC3_GCTL_PRTCAP_DEVICE);
}
/*
 * dwc3_get_properties - read all device/firmware properties into @dwc
 * @dwc: controller context
 *
 * Fills @dwc from device properties (DT or ACPI), applying the driver
 * defaults for any tunable that is not specified. Cannot fail; a missing
 * optional power supply is only logged.
 */
static void dwc3_get_properties(struct dwc3 *dwc)
{
	struct device *dev = dwc->dev;
	u8 lpm_nyet_threshold;
	u8 tx_de_emphasis;
	u8 hird_threshold;
	u8 rx_thr_num_pkt_prd = 0;
	u8 rx_max_burst_prd = 0;
	u8 tx_thr_num_pkt_prd = 0;
	u8 tx_max_burst_prd = 0;
	u8 tx_fifo_resize_max_num;
	const char *usb_psy_name;
	int ret;
	/* default to highest possible threshold */
	lpm_nyet_threshold = 0xf;
	/* default to -3.5dB de-emphasis */
	tx_de_emphasis = 1;
	/*
	 * default to assert utmi_sleep_n and use maximum allowed HIRD
	 * threshold value of 0b1100
	 */
	hird_threshold = 12;
	/*
	 * default to a TXFIFO size large enough to fit 6 max packets. This
	 * allows for systems with larger bus latencies to have some headroom
	 * for endpoints that have a large bMaxBurst value.
	 */
	tx_fifo_resize_max_num = 6;
	dwc->maximum_speed = usb_get_maximum_speed(dev);
	dwc->max_ssp_rate = usb_get_maximum_ssp_rate(dev);
	dwc->dr_mode = usb_get_dr_mode(dev);
	dwc->hsphy_mode = of_usb_get_phy_mode(dev->of_node);
	/* DMA may be mastered by the parent (glue) device instead of us */
	dwc->sysdev_is_parent = device_property_read_bool(dev,
				"linux,sysdev_is_parent");
	if (dwc->sysdev_is_parent)
		dwc->sysdev = dwc->dev->parent;
	else
		dwc->sysdev = dwc->dev;
	ret = device_property_read_string(dev, "usb-psy-name", &usb_psy_name);
	if (ret >= 0) {
		dwc->usb_psy = power_supply_get_by_name(usb_psy_name);
		if (!dwc->usb_psy)
			dev_err(dev, "couldn't get usb power supply\n");
	}
	dwc->has_lpm_erratum = device_property_read_bool(dev,
				"snps,has-lpm-erratum");
	device_property_read_u8(dev, "snps,lpm-nyet-threshold",
				&lpm_nyet_threshold);
	dwc->is_utmi_l1_suspend = device_property_read_bool(dev,
				"snps,is-utmi-l1-suspend");
	device_property_read_u8(dev, "snps,hird-threshold",
				&hird_threshold);
	dwc->dis_start_transfer_quirk = device_property_read_bool(dev,
				"snps,dis-start-transfer-quirk");
	dwc->usb3_lpm_capable = device_property_read_bool(dev,
				"snps,usb3_lpm_capable");
	dwc->usb2_lpm_disable = device_property_read_bool(dev,
				"snps,usb2-lpm-disable");
	dwc->usb2_gadget_lpm_disable = device_property_read_bool(dev,
				"snps,usb2-gadget-lpm-disable");
	/* periodic-transfer RX/TX threshold tuning (DWC_usb31/32 host) */
	device_property_read_u8(dev, "snps,rx-thr-num-pkt-prd",
				&rx_thr_num_pkt_prd);
	device_property_read_u8(dev, "snps,rx-max-burst-prd",
				&rx_max_burst_prd);
	device_property_read_u8(dev, "snps,tx-thr-num-pkt-prd",
				&tx_thr_num_pkt_prd);
	device_property_read_u8(dev, "snps,tx-max-burst-prd",
				&tx_max_burst_prd);
	dwc->do_fifo_resize = device_property_read_bool(dev,
							"tx-fifo-resize");
	if (dwc->do_fifo_resize)
		device_property_read_u8(dev, "tx-fifo-max-num",
					&tx_fifo_resize_max_num);
	/*
	 * Hardware quirks. Note the mixed underscore/dash spellings below
	 * are intentional: they match the established DT bindings.
	 */
	dwc->disable_scramble_quirk = device_property_read_bool(dev,
				"snps,disable_scramble_quirk");
	dwc->u2exit_lfps_quirk = device_property_read_bool(dev,
				"snps,u2exit_lfps_quirk");
	dwc->u2ss_inp3_quirk = device_property_read_bool(dev,
				"snps,u2ss_inp3_quirk");
	dwc->req_p1p2p3_quirk = device_property_read_bool(dev,
				"snps,req_p1p2p3_quirk");
	dwc->del_p1p2p3_quirk = device_property_read_bool(dev,
				"snps,del_p1p2p3_quirk");
	dwc->del_phy_power_chg_quirk = device_property_read_bool(dev,
				"snps,del_phy_power_chg_quirk");
	dwc->lfps_filter_quirk = device_property_read_bool(dev,
				"snps,lfps_filter_quirk");
	dwc->rx_detect_poll_quirk = device_property_read_bool(dev,
				"snps,rx_detect_poll_quirk");
	dwc->dis_u3_susphy_quirk = device_property_read_bool(dev,
				"snps,dis_u3_susphy_quirk");
	dwc->dis_u2_susphy_quirk = device_property_read_bool(dev,
				"snps,dis_u2_susphy_quirk");
	dwc->dis_enblslpm_quirk = device_property_read_bool(dev,
				"snps,dis_enblslpm_quirk");
	dwc->dis_u1_entry_quirk = device_property_read_bool(dev,
				"snps,dis-u1-entry-quirk");
	dwc->dis_u2_entry_quirk = device_property_read_bool(dev,
				"snps,dis-u2-entry-quirk");
	dwc->dis_rxdet_inp3_quirk = device_property_read_bool(dev,
				"snps,dis_rxdet_inp3_quirk");
	dwc->dis_u2_freeclk_exists_quirk = device_property_read_bool(dev,
				"snps,dis-u2-freeclk-exists-quirk");
	dwc->dis_del_phy_power_chg_quirk = device_property_read_bool(dev,
				"snps,dis-del-phy-power-chg-quirk");
	dwc->dis_tx_ipgap_linecheck_quirk = device_property_read_bool(dev,
				"snps,dis-tx-ipgap-linecheck-quirk");
	dwc->resume_hs_terminations = device_property_read_bool(dev,
				"snps,resume-hs-terminations");
	dwc->ulpi_ext_vbus_drv = device_property_read_bool(dev,
				"snps,ulpi-ext-vbus-drv");
	dwc->parkmode_disable_ss_quirk = device_property_read_bool(dev,
				"snps,parkmode-disable-ss-quirk");
	dwc->parkmode_disable_hs_quirk = device_property_read_bool(dev,
				"snps,parkmode-disable-hs-quirk");
	dwc->gfladj_refclk_lpm_sel = device_property_read_bool(dev,
				"snps,gfladj-refclk-lpm-sel-quirk");
	dwc->tx_de_emphasis_quirk = device_property_read_bool(dev,
				"snps,tx_de_emphasis_quirk");
	device_property_read_u8(dev, "snps,tx_de_emphasis",
				&tx_de_emphasis);
	device_property_read_string(dev, "snps,hsphy_interface",
				    &dwc->hsphy_interface);
	device_property_read_u32(dev, "snps,quirk-frame-length-adjustment",
				 &dwc->fladj);
	device_property_read_u32(dev, "snps,ref-clock-period-ns",
				 &dwc->ref_clk_per);
	dwc->dis_metastability_quirk = device_property_read_bool(dev,
				"snps,dis_metastability_quirk");
	dwc->dis_split_quirk = device_property_read_bool(dev,
				"snps,dis-split-quirk");
	/* commit locals (possibly overridden by properties) into @dwc */
	dwc->lpm_nyet_threshold = lpm_nyet_threshold;
	dwc->tx_de_emphasis = tx_de_emphasis;
	dwc->hird_threshold = hird_threshold;
	dwc->rx_thr_num_pkt_prd = rx_thr_num_pkt_prd;
	dwc->rx_max_burst_prd = rx_max_burst_prd;
	dwc->tx_thr_num_pkt_prd = tx_thr_num_pkt_prd;
	dwc->tx_max_burst_prd = tx_max_burst_prd;
	/* interrupt moderation is off by default; validated later */
	dwc->imod_interval = 0;
	dwc->tx_fifo_resize_max_num = tx_fifo_resize_max_num;
}
/* check whether the core supports IMOD */
bool dwc3_has_imod(struct dwc3 *dwc)
{
return DWC3_VER_IS_WITHIN(DWC3, 300A, ANY) ||
DWC3_VER_IS_WITHIN(DWC31, 120A, ANY) ||
DWC3_IP_IS(DWC32);
}
/*
 * dwc3_check_params - sanity-check and finalize tunables read earlier
 * @dwc: controller context
 *
 * Validates imod_interval, maximum_speed and max_ssp_rate against the
 * hardware capabilities reported in GHWPARAMS3, picking hardware-derived
 * defaults for anything unset or invalid.
 */
static void dwc3_check_params(struct dwc3 *dwc)
{
	struct device *dev = dwc->dev;
	unsigned int hwparam_gen =
		DWC3_GHWPARAMS3_SSPHY_IFC(dwc->hwparams.hwparams3);
	/* Check for proper value of imod_interval */
	if (dwc->imod_interval && !dwc3_has_imod(dwc)) {
		dev_warn(dwc->dev, "Interrupt moderation not supported\n");
		dwc->imod_interval = 0;
	}
	/*
	 * Workaround for STAR 9000961433 which affects only version
	 * 3.00a of the DWC_usb3 core. This prevents the controller
	 * interrupt from being masked while handling events. IMOD
	 * allows us to work around this issue. Enable it for the
	 * affected version.
	 */
	if (!dwc->imod_interval &&
	    DWC3_VER_IS(DWC3, 300A))
		dwc->imod_interval = 1;
	/* Check the maximum_speed parameter */
	switch (dwc->maximum_speed) {
	case USB_SPEED_FULL:
	case USB_SPEED_HIGH:
		break;
	case USB_SPEED_SUPER:
		if (hwparam_gen == DWC3_GHWPARAMS3_SSPHY_IFC_DIS)
			dev_warn(dev, "UDC doesn't support Gen 1\n");
		break;
	case USB_SPEED_SUPER_PLUS:
		if ((DWC3_IP_IS(DWC32) &&
		     hwparam_gen == DWC3_GHWPARAMS3_SSPHY_IFC_DIS) ||
		    (!DWC3_IP_IS(DWC32) &&
		     hwparam_gen != DWC3_GHWPARAMS3_SSPHY_IFC_GEN2))
			dev_warn(dev, "UDC doesn't support SSP\n");
		break;
	default:
		dev_err(dev, "invalid maximum_speed parameter %d\n",
			dwc->maximum_speed);
		/* invalid values are re-derived from hardware below */
		fallthrough;
	case USB_SPEED_UNKNOWN:
		/* pick the fastest speed the SS PHY interface allows */
		switch (hwparam_gen) {
		case DWC3_GHWPARAMS3_SSPHY_IFC_GEN2:
			dwc->maximum_speed = USB_SPEED_SUPER_PLUS;
			break;
		case DWC3_GHWPARAMS3_SSPHY_IFC_GEN1:
			/* DWC_usb32 can reach SSP over Gen1 via dual lanes */
			if (DWC3_IP_IS(DWC32))
				dwc->maximum_speed = USB_SPEED_SUPER_PLUS;
			else
				dwc->maximum_speed = USB_SPEED_SUPER;
			break;
		case DWC3_GHWPARAMS3_SSPHY_IFC_DIS:
			dwc->maximum_speed = USB_SPEED_HIGH;
			break;
		default:
			dwc->maximum_speed = USB_SPEED_SUPER;
			break;
		}
		break;
	}
	/*
	 * Currently the controller does not have visibility into the HW
	 * parameter to determine the maximum number of lanes the HW supports.
	 * If the number of lanes is not specified in the device property, then
	 * set the default to support dual-lane for DWC_usb32 and single-lane
	 * for DWC_usb31 for super-speed-plus.
	 */
	if (dwc->maximum_speed == USB_SPEED_SUPER_PLUS) {
		switch (dwc->max_ssp_rate) {
		case USB_SSP_GEN_2x1:
			if (hwparam_gen == DWC3_GHWPARAMS3_SSPHY_IFC_GEN1)
				dev_warn(dev, "UDC only supports Gen 1\n");
			break;
		case USB_SSP_GEN_1x2:
		case USB_SSP_GEN_2x2:
			if (DWC3_IP_IS(DWC31))
				dev_warn(dev, "UDC only supports single lane\n");
			break;
		case USB_SSP_GEN_UNKNOWN:
		default:
			switch (hwparam_gen) {
			case DWC3_GHWPARAMS3_SSPHY_IFC_GEN2:
				if (DWC3_IP_IS(DWC32))
					dwc->max_ssp_rate = USB_SSP_GEN_2x2;
				else
					dwc->max_ssp_rate = USB_SSP_GEN_2x1;
				break;
			case DWC3_GHWPARAMS3_SSPHY_IFC_GEN1:
				if (DWC3_IP_IS(DWC32))
					dwc->max_ssp_rate = USB_SSP_GEN_1x2;
				break;
			}
			break;
		}
	}
}
/*
 * dwc3_get_extcon - locate the extcon device used for role detection
 * @dwc: controller context
 *
 * Lookup order: "extcon" phandle (DT), "linux,extcon-name" property
 * (ACPI glue), then the USB PHY's OF-graph remote endpoint. Returns the
 * extcon device, NULL when none is configured (e.g. usb-role-switch is
 * used instead), or an ERR_PTR on failure.
 */
static struct extcon_dev *dwc3_get_extcon(struct dwc3 *dwc)
{
	struct device *dev = dwc->dev;
	struct device_node *np_phy;
	struct extcon_dev *edev = NULL;
	const char *name;
	if (device_property_read_bool(dev, "extcon"))
		return extcon_get_edev_by_phandle(dev, 0);
	/*
	 * Device tree platforms should get extcon via phandle.
	 * On ACPI platforms, we get the name from a device property.
	 * This device property is for kernel internal use only and
	 * is expected to be set by the glue code.
	 */
	if (device_property_read_string(dev, "linux,extcon-name", &name) == 0)
		return extcon_get_extcon_dev(name);
	/*
	 * Check explicitly if "usb-role-switch" is used since
	 * extcon_find_edev_by_node() can not be used to check the absence of
	 * an extcon device. In the absence of an device it will always return
	 * EPROBE_DEFER.
	 */
	if (IS_ENABLED(CONFIG_USB_ROLE_SWITCH) &&
	    device_property_read_bool(dev, "usb-role-switch"))
		return NULL;
	/*
	 * Try to get an extcon device from the USB PHY controller's "port"
	 * node. Check if it has the "port" node first, to avoid printing the
	 * error message from underlying code, as it's a valid case: extcon
	 * device (and "port" node) may be missing in case of "usb-role-switch"
	 * or OTG mode.
	 */
	np_phy = of_parse_phandle(dev->of_node, "phys", 0);
	if (of_graph_is_present(np_phy)) {
		struct device_node *np_conn;
		np_conn = of_graph_get_remote_node(np_phy, -1, -1);
		if (np_conn)
			edev = extcon_find_edev_by_node(np_conn);
		/* of_node_put() tolerates NULL, so no need to guard */
		of_node_put(np_conn);
	}
	of_node_put(np_phy);
	return edev;
}
/*
 * dwc3_get_clk_fallback - look up an optional clock, trying a legacy name
 * @dev: the dwc3 device
 * @clk: output; set to the clock handle or NULL when absent
 * @name: primary clock-name per the current DT binding
 * @legacy_name: fallback clock-name used by old device trees
 * @what: human-readable clock description used in error messages
 *
 * The clock is optional: when neither name matches, *@clk is left NULL,
 * which the clk API treats as a no-op clock. Returns 0 on success or a
 * negative errno (logged via dev_err_probe()).
 */
static int dwc3_get_clk_fallback(struct device *dev, struct clk **clk,
				 const char *name, const char *legacy_name,
				 const char *what)
{
	*clk = devm_clk_get_optional(dev, name);
	if (IS_ERR(*clk))
		return dev_err_probe(dev, PTR_ERR(*clk),
				     "could not get %s clock\n", what);

	if (*clk == NULL) {
		/* retain backwards compatibility with legacy device trees */
		*clk = devm_clk_get_optional(dev, legacy_name);
		if (IS_ERR(*clk))
			return dev_err_probe(dev, PTR_ERR(*clk),
					     "could not get %s clock\n", what);
	}

	return 0;
}

/*
 * dwc3_get_clocks - acquire the optional bus, ref and suspend clocks
 * @dwc: controller context
 *
 * Clocks are optional, but new DT platforms should support all clocks
 * as required by the DT-binding. Non-DT (ACPI) platforms skip clock
 * handling entirely. Returns 0 on success or a negative errno.
 */
static int dwc3_get_clocks(struct dwc3 *dwc)
{
	struct device *dev = dwc->dev;
	int ret;

	if (!dev->of_node)
		return 0;

	ret = dwc3_get_clk_fallback(dev, &dwc->bus_clk, "bus_early",
				    "bus_clk", "bus");
	if (ret)
		return ret;

	ret = dwc3_get_clk_fallback(dev, &dwc->ref_clk, "ref",
				    "ref_clk", "ref");
	if (ret)
		return ret;

	return dwc3_get_clk_fallback(dev, &dwc->susp_clk, "suspend",
				     "suspend_clk", "suspend");
}
/*
 * dwc3_probe - platform driver probe
 * @pdev: the dwc3 platform device
 *
 * Maps registers (excluding the xHCI window), reads properties, acquires
 * resets/clocks, initializes the core and starts the configured role.
 * The error labels unwind in strict reverse order of acquisition.
 *
 * Returns 0 on success or a negative errno.
 */
static int dwc3_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct resource *res, dwc_res;
	void __iomem *regs;
	struct dwc3 *dwc;
	int ret;
	dwc = devm_kzalloc(dev, sizeof(*dwc), GFP_KERNEL);
	if (!dwc)
		return -ENOMEM;
	dwc->dev = dev;
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res) {
		dev_err(dev, "missing memory resource\n");
		return -ENODEV;
	}
	/* record the xHCI register window for the child xhci-plat device */
	dwc->xhci_resources[0].start = res->start;
	dwc->xhci_resources[0].end = dwc->xhci_resources[0].start +
					DWC3_XHCI_REGS_END;
	dwc->xhci_resources[0].flags = res->flags;
	dwc->xhci_resources[0].name = res->name;
	/*
	 * Request memory region but exclude xHCI regs,
	 * since it will be requested by the xhci-plat driver.
	 */
	dwc_res = *res;
	dwc_res.start += DWC3_GLOBALS_REGS_START;
	if (dev->of_node) {
		struct device_node *parent = of_get_parent(dev->of_node);
		/* Realtek RTD glue places the global regs at a custom offset */
		if (of_device_is_compatible(parent, "realtek,rtd-dwc3")) {
			dwc_res.start -= DWC3_GLOBALS_REGS_START;
			dwc_res.start += DWC3_RTK_RTD_GLOBALS_REGS_START;
		}
		of_node_put(parent);
	}
	regs = devm_ioremap_resource(dev, &dwc_res);
	if (IS_ERR(regs))
		return PTR_ERR(regs);
	dwc->regs = regs;
	dwc->regs_size = resource_size(&dwc_res);
	/* may acquire a power supply reference; released on error paths */
	dwc3_get_properties(dwc);
	dwc->reset = devm_reset_control_array_get_optional_shared(dev);
	if (IS_ERR(dwc->reset)) {
		ret = PTR_ERR(dwc->reset);
		goto err_put_psy;
	}
	ret = dwc3_get_clocks(dwc);
	if (ret)
		goto err_put_psy;
	ret = reset_control_deassert(dwc->reset);
	if (ret)
		goto err_put_psy;
	ret = dwc3_clk_enable(dwc);
	if (ret)
		goto err_assert_reset;
	if (!dwc3_core_is_valid(dwc)) {
		dev_err(dwc->dev, "this is not a DesignWare USB3 DRD Core\n");
		ret = -ENODEV;
		goto err_disable_clks;
	}
	platform_set_drvdata(pdev, dwc);
	dwc3_cache_hwparams(dwc);
	/* widen the DMA mask when the core itself masters 64-bit addresses */
	if (!dwc->sysdev_is_parent &&
	    DWC3_GHWPARAMS0_AWIDTH(dwc->hwparams.hwparams0) == 64) {
		ret = dma_set_mask_and_coherent(dwc->sysdev, DMA_BIT_MASK(64));
		if (ret)
			goto err_disable_clks;
	}
	spin_lock_init(&dwc->lock);
	mutex_init(&dwc->mutex);
	/* keep the device active (and runtime PM forbidden) during setup */
	pm_runtime_get_noresume(dev);
	pm_runtime_set_active(dev);
	pm_runtime_use_autosuspend(dev);
	pm_runtime_set_autosuspend_delay(dev, DWC3_DEFAULT_AUTOSUSPEND_DELAY);
	pm_runtime_enable(dev);
	pm_runtime_forbid(dev);
	ret = dwc3_alloc_event_buffers(dwc, DWC3_EVENT_BUFFERS_SIZE);
	if (ret) {
		dev_err(dwc->dev, "failed to allocate event buffers\n");
		ret = -ENOMEM;
		goto err_allow_rpm;
	}
	dwc->edev = dwc3_get_extcon(dwc);
	if (IS_ERR(dwc->edev)) {
		ret = dev_err_probe(dwc->dev, PTR_ERR(dwc->edev), "failed to get extcon\n");
		goto err_free_event_buffers;
	}
	ret = dwc3_get_dr_mode(dwc);
	if (ret)
		goto err_free_event_buffers;
	ret = dwc3_core_init(dwc);
	if (ret) {
		dev_err_probe(dev, ret, "failed to initialize core\n");
		goto err_free_event_buffers;
	}
	dwc3_check_params(dwc);
	dwc3_debugfs_init(dwc);
	ret = dwc3_core_init_mode(dwc);
	if (ret)
		goto err_exit_debugfs;
	/* allow runtime PM now that setup is complete */
	pm_runtime_put(dev);
	return 0;
err_exit_debugfs:
	dwc3_debugfs_exit(dwc);
	dwc3_event_buffers_cleanup(dwc);
	dwc3_phy_power_off(dwc);
	dwc3_phy_exit(dwc);
	dwc3_ulpi_exit(dwc);
err_free_event_buffers:
	dwc3_free_event_buffers(dwc);
err_allow_rpm:
	pm_runtime_allow(dev);
	pm_runtime_disable(dev);
	pm_runtime_dont_use_autosuspend(dev);
	pm_runtime_set_suspended(dev);
	pm_runtime_put_noidle(dev);
err_disable_clks:
	dwc3_clk_disable(dwc);
err_assert_reset:
	reset_control_assert(dwc->reset);
err_put_psy:
	if (dwc->usb_psy)
		power_supply_put(dwc->usb_psy);
	return ret;
}
/*
 * dwc3_remove - platform driver remove; reverse of dwc3_probe()
 * @pdev: the dwc3 platform device
 */
static void dwc3_remove(struct platform_device *pdev)
{
	struct dwc3 *dwc = platform_get_drvdata(pdev);
	/* make sure the hardware is powered while we tear it down */
	pm_runtime_get_sync(&pdev->dev);
	dwc3_core_exit_mode(dwc);
	dwc3_debugfs_exit(dwc);
	dwc3_core_exit(dwc);
	dwc3_ulpi_exit(dwc);
	pm_runtime_allow(&pdev->dev);
	pm_runtime_disable(&pdev->dev);
	pm_runtime_dont_use_autosuspend(&pdev->dev);
	pm_runtime_put_noidle(&pdev->dev);
	/*
	 * HACK: Clear the driver data, which is currently accessed by parent
	 * glue drivers, before allowing the parent to suspend.
	 */
	platform_set_drvdata(pdev, NULL);
	pm_runtime_set_suspended(&pdev->dev);
	dwc3_free_event_buffers(dwc);
	/* drop the reference taken in dwc3_get_properties() */
	if (dwc->usb_psy)
		power_supply_put(dwc->usb_psy);
}
#ifdef CONFIG_PM
/*
 * dwc3_core_init_for_resume - re-power and re-initialize the core on resume
 * @dwc: controller context
 *
 * De-asserts resets, enables clocks and runs dwc3_core_init(); on any
 * failure, everything acquired so far is released again.
 *
 * Returns 0 on success or a negative errno.
 */
static int dwc3_core_init_for_resume(struct dwc3 *dwc)
{
	int err;

	err = reset_control_deassert(dwc->reset);
	if (err)
		return err;

	err = dwc3_clk_enable(dwc);
	if (!err) {
		err = dwc3_core_init(dwc);
		if (!err)
			return 0;
		dwc3_clk_disable(dwc);
	}

	reset_control_assert(dwc->reset);
	return err;
}
/*
 * dwc3_suspend_common - role-aware suspend path shared by system and
 * runtime PM
 * @dwc: controller context
 * @msg: PM transition; PMSG_IS_AUTO(msg) distinguishes runtime suspend
 *
 * Returns 0 (the per-role steps below do not report failure).
 */
static int dwc3_suspend_common(struct dwc3 *dwc, pm_message_t msg)
{
	unsigned long flags;
	u32 reg;
	switch (dwc->current_dr_role) {
	case DWC3_GCTL_PRTCAP_DEVICE:
		/* already runtime-suspended: nothing left to quiesce */
		if (pm_runtime_suspended(dwc->dev))
			break;
		dwc3_gadget_suspend(dwc);
		synchronize_irq(dwc->irq_gadget);
		dwc3_core_exit(dwc);
		break;
	case DWC3_GCTL_PRTCAP_HOST:
		/* full power-off only when wakeup isn't required */
		if (!PMSG_IS_AUTO(msg) && !device_may_wakeup(dwc->dev)) {
			dwc3_core_exit(dwc);
			break;
		}
		/* Let controller to suspend HSPHY before PHY driver suspends */
		if (dwc->dis_u2_susphy_quirk ||
		    dwc->dis_enblslpm_quirk) {
			reg = dwc3_readl(dwc->regs, DWC3_GUSB2PHYCFG(0));
			reg |= DWC3_GUSB2PHYCFG_ENBLSLPM |
				DWC3_GUSB2PHYCFG_SUSPHY;
			dwc3_writel(dwc->regs, DWC3_GUSB2PHYCFG(0), reg);
			/* Give some time for USB2 PHY to suspend */
			usleep_range(5000, 6000);
		}
		phy_pm_runtime_put_sync(dwc->usb2_generic_phy);
		phy_pm_runtime_put_sync(dwc->usb3_generic_phy);
		break;
	case DWC3_GCTL_PRTCAP_OTG:
		/* do nothing during runtime_suspend */
		if (PMSG_IS_AUTO(msg))
			break;
		if (dwc->current_otg_role == DWC3_OTG_ROLE_DEVICE) {
			spin_lock_irqsave(&dwc->lock, flags);
			dwc3_gadget_suspend(dwc);
			spin_unlock_irqrestore(&dwc->lock, flags);
			synchronize_irq(dwc->irq_gadget);
		}
		dwc3_otg_exit(dwc);
		dwc3_core_exit(dwc);
		break;
	default:
		/* do nothing */
		break;
	}
	return 0;
}
/*
 * dwc3_resume_common - role-aware resume path shared by system and
 * runtime PM; mirrors dwc3_suspend_common()
 * @dwc: controller context
 * @msg: PM transition; PMSG_IS_AUTO(msg) distinguishes runtime resume
 *
 * Returns 0 on success or a negative errno from core re-init.
 */
static int dwc3_resume_common(struct dwc3 *dwc, pm_message_t msg)
{
	unsigned long flags;
	int ret;
	u32 reg;
	switch (dwc->current_dr_role) {
	case DWC3_GCTL_PRTCAP_DEVICE:
		ret = dwc3_core_init_for_resume(dwc);
		if (ret)
			return ret;
		dwc3_set_prtcap(dwc, DWC3_GCTL_PRTCAP_DEVICE);
		dwc3_gadget_resume(dwc);
		break;
	case DWC3_GCTL_PRTCAP_HOST:
		/* counterpart of the full power-off taken in suspend */
		if (!PMSG_IS_AUTO(msg) && !device_may_wakeup(dwc->dev)) {
			ret = dwc3_core_init_for_resume(dwc);
			if (ret)
				return ret;
			dwc3_set_prtcap(dwc, DWC3_GCTL_PRTCAP_HOST);
			break;
		}
		/* Restore GUSB2PHYCFG bits that were modified in suspend */
		reg = dwc3_readl(dwc->regs, DWC3_GUSB2PHYCFG(0));
		if (dwc->dis_u2_susphy_quirk)
			reg &= ~DWC3_GUSB2PHYCFG_SUSPHY;
		if (dwc->dis_enblslpm_quirk)
			reg &= ~DWC3_GUSB2PHYCFG_ENBLSLPM;
		dwc3_writel(dwc->regs, DWC3_GUSB2PHYCFG(0), reg);
		phy_pm_runtime_get_sync(dwc->usb2_generic_phy);
		phy_pm_runtime_get_sync(dwc->usb3_generic_phy);
		break;
	case DWC3_GCTL_PRTCAP_OTG:
		/* nothing to do on runtime_resume */
		if (PMSG_IS_AUTO(msg))
			break;
		ret = dwc3_core_init_for_resume(dwc);
		if (ret)
			return ret;
		dwc3_set_prtcap(dwc, dwc->current_dr_role);
		dwc3_otg_init(dwc);
		if (dwc->current_otg_role == DWC3_OTG_ROLE_HOST) {
			dwc3_otg_host_init(dwc);
		} else if (dwc->current_otg_role == DWC3_OTG_ROLE_DEVICE) {
			spin_lock_irqsave(&dwc->lock, flags);
			dwc3_gadget_resume(dwc);
			spin_unlock_irqrestore(&dwc->lock, flags);
		}
		break;
	default:
		/* do nothing */
		break;
	}
	return 0;
}
/*
 * dwc3_runtime_checks - may the controller runtime-suspend right now?
 * @dwc: controller context
 *
 * Returns -EBUSY while the gadget is connected to a host, 0 otherwise.
 */
static int dwc3_runtime_checks(struct dwc3 *dwc)
{
	if (dwc->current_dr_role == DWC3_GCTL_PRTCAP_DEVICE && dwc->connected)
		return -EBUSY;

	return 0;
}
/* Runtime-PM suspend callback; refuses while the gadget is connected. */
static int dwc3_runtime_suspend(struct device *dev)
{
	struct dwc3 *dwc = dev_get_drvdata(dev);

	if (dwc3_runtime_checks(dwc))
		return -EBUSY;

	return dwc3_suspend_common(dwc, PMSG_AUTO_SUSPEND);
}
/* Runtime-PM resume callback. */
static int dwc3_runtime_resume(struct device *dev)
{
	struct dwc3 *dwc = dev_get_drvdata(dev);
	int err;

	err = dwc3_resume_common(dwc, PMSG_AUTO_RESUME);
	if (err)
		return err;

	/* flush gadget events that accumulated while suspended */
	if (dwc->current_dr_role == DWC3_GCTL_PRTCAP_DEVICE)
		dwc3_gadget_process_pending_events(dwc);

	pm_runtime_mark_last_busy(dev);

	return 0;
}
/* Runtime-PM idle callback: schedule autosuspend unless the gadget is busy. */
static int dwc3_runtime_idle(struct device *dev)
{
	struct dwc3 *dwc = dev_get_drvdata(dev);

	if (dwc->current_dr_role == DWC3_GCTL_PRTCAP_DEVICE &&
	    dwc3_runtime_checks(dwc))
		return -EBUSY;

	pm_runtime_mark_last_busy(dev);
	pm_runtime_autosuspend(dev);

	return 0;
}
#endif /* CONFIG_PM */
#ifdef CONFIG_PM_SLEEP
/* System-sleep suspend callback. */
static int dwc3_suspend(struct device *dev)
{
	struct dwc3 *dwc = dev_get_drvdata(dev);
	int err = dwc3_suspend_common(dwc, PMSG_SUSPEND);

	/* only move pins to their sleep state once suspend succeeded */
	if (!err)
		pinctrl_pm_select_sleep_state(dev);

	return err;
}
/* System-sleep resume callback. */
static int dwc3_resume(struct device *dev)
{
	struct dwc3 *dwc = dev_get_drvdata(dev);
	int err;

	pinctrl_pm_select_default_state(dev);

	err = dwc3_resume_common(dwc, PMSG_RESUME);
	if (err)
		return err;

	/* resync runtime-PM status with the now-active hardware */
	pm_runtime_disable(dev);
	pm_runtime_set_active(dev);
	pm_runtime_enable(dev);

	return 0;
}
/*
 * System-sleep "complete" callback: re-apply the split-transaction
 * disable quirk in host mode, since GUCTL3 is lost across suspend.
 */
static void dwc3_complete(struct device *dev)
{
	struct dwc3 *dwc = dev_get_drvdata(dev);
	u32 guctl3;

	if (dwc->current_dr_role != DWC3_GCTL_PRTCAP_HOST ||
	    !dwc->dis_split_quirk)
		return;

	guctl3 = dwc3_readl(dwc->regs, DWC3_GUCTL3);
	guctl3 |= DWC3_GUCTL3_SPLITDISABLE;
	dwc3_writel(dwc->regs, DWC3_GUCTL3, guctl3);
}
#else
#define dwc3_complete NULL
#endif /* CONFIG_PM_SLEEP */
/* PM callbacks; the SET_* macros compile away when PM support is off. */
static const struct dev_pm_ops dwc3_dev_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(dwc3_suspend, dwc3_resume)
	.complete = dwc3_complete,
	SET_RUNTIME_PM_OPS(dwc3_runtime_suspend, dwc3_runtime_resume,
			dwc3_runtime_idle)
};
#ifdef CONFIG_OF
static const struct of_device_id of_dwc3_match[] = {
{
.compatible = "snps,dwc3"
},
{
.compatible = "synopsys,dwc3"
},
{ },
};
MODULE_DEVICE_TABLE(of, of_dwc3_match);
#endif
#ifdef CONFIG_ACPI
/* ACPI _HID for the Intel Braswell dwc3 instance */
#define ACPI_ID_INTEL_BSW	"808622B7"
static const struct acpi_device_id dwc3_acpi_match[] = {
	{ ACPI_ID_INTEL_BSW, 0 },
	{ },
};
MODULE_DEVICE_TABLE(acpi, dwc3_acpi_match);
#endif
/* Platform driver glue and module metadata */
static struct platform_driver dwc3_driver = {
	.probe		= dwc3_probe,
	.remove_new	= dwc3_remove,
	.driver		= {
		.name	= "dwc3",
		.of_match_table	= of_match_ptr(of_dwc3_match),
		.acpi_match_table = ACPI_PTR(dwc3_acpi_match),
		.pm	= &dwc3_dev_pm_ops,
	},
};
module_platform_driver(dwc3_driver);
MODULE_ALIAS("platform:dwc3");
MODULE_AUTHOR("Felipe Balbi <[email protected]>");
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("DesignWare USB3 DRD Controller Driver");
| linux-master | drivers/usb/dwc3/core.c |
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2018, The Linux Foundation. All rights reserved.
*
* Inspired by dwc3-of-simple.c
*/
#include <linux/acpi.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/clk.h>
#include <linux/irq.h>
#include <linux/of_clk.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/extcon.h>
#include <linux/interconnect.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/phy/phy.h>
#include <linux/usb/of.h>
#include <linux/reset.h>
#include <linux/iopoll.h>
#include <linux/usb/hcd.h>
#include <linux/usb.h>
#include "core.h"
/* USB QSCRATCH Hardware registers */
#define QSCRATCH_HS_PHY_CTRL			0x10
#define UTMI_OTG_VBUS_VALID			BIT(20)
#define SW_SESSVLD_SEL				BIT(28)
#define QSCRATCH_SS_PHY_CTRL			0x30
#define LANE0_PWR_PRESENT			BIT(24)
#define QSCRATCH_GENERAL_CFG			0x08
#define PIPE_UTMI_CLK_SEL			BIT(0)
#define PIPE3_PHYSTATUS_SW			BIT(3)
#define PIPE_UTMI_CLK_DIS			BIT(8)
/* Power-event IRQ status (L2 entry/exit indications) */
#define PWR_EVNT_IRQ_STAT_REG			0x58
#define PWR_EVNT_LPM_IN_L2_MASK			BIT(4)
#define PWR_EVNT_LPM_OUT_L2_MASK		BIT(5)
/* SDM845 register-layout constants */
#define SDM845_QSCRATCH_BASE_OFFSET		0xf8800
#define SDM845_QSCRATCH_SIZE			0x400
#define SDM845_DWC3_CORE_SIZE			0xcd00
/* Interconnect path bandwidths in MBps */
#define USB_MEMORY_AVG_HS_BW MBps_to_icc(240)
#define USB_MEMORY_PEAK_HS_BW MBps_to_icc(700)
#define USB_MEMORY_AVG_SS_BW  MBps_to_icc(1000)
#define USB_MEMORY_PEAK_SS_BW MBps_to_icc(2500)
#define APPS_USB_AVG_BW 0
#define APPS_USB_PEAK_BW MBps_to_icc(40)
/*
 * struct dwc3_acpi_pdata - per-platform layout/IRQ data for ACPI boot
 * @qscratch_base_offset: offset of the QSCRATCH region in the MEM resource
 * @qscratch_base_size: size of the QSCRATCH region
 * @dwc3_core_base_size: size of the dwc3 core register region
 * @hs_phy_irq_index: resource index of the HS PHY interrupt
 * @dp_hs_phy_irq_index: resource index of the DP HS PHY interrupt
 * @dm_hs_phy_irq_index: resource index of the DM HS PHY interrupt
 * @ss_phy_irq_index: resource index of the SS PHY interrupt
 * @is_urs: platform uses a URS ("USB role switch") glue device
 */
struct dwc3_acpi_pdata {
	u32			qscratch_base_offset;
	u32			qscratch_base_size;
	u32			dwc3_core_base_size;
	int			hs_phy_irq_index;
	int			dp_hs_phy_irq_index;
	int			dm_hs_phy_irq_index;
	int			ss_phy_irq_index;
	bool			is_urs;
};
/* Qualcomm dwc3 glue state, one instance per controller */
struct dwc3_qcom {
	struct device		*dev;
	void __iomem		*qscratch_base;	/* mapped QSCRATCH registers */
	struct platform_device	*dwc3;		/* child dwc3 core device */
	struct platform_device	*urs_usb;
	struct clk		**clks;
	int			num_clocks;
	struct reset_control	*resets;
	/* wakeup-capable PHY interrupts */
	int			hs_phy_irq;
	int			dp_hs_phy_irq;
	int			dm_hs_phy_irq;
	int			ss_phy_irq;
	enum usb_device_speed	usb2_speed;
	/* role detection via extcon; host_edev may differ from edev */
	struct extcon_dev	*edev;
	struct extcon_dev	*host_edev;
	struct notifier_block	vbus_nb;
	struct notifier_block	host_nb;
	const struct dwc3_acpi_pdata *acpi_pdata;
	enum usb_dr_mode	mode;		/* current role, per notifiers */
	bool			is_suspended;	/* glue-level suspend state */
	bool			pm_suspended;	/* system-sleep in progress */
	struct icc_path		*icc_path_ddr;
	struct icc_path		*icc_path_apps;
};
/* Set @val bits in the 32-bit register at @base + @offset. */
static inline void dwc3_qcom_setbits(void __iomem *base, u32 offset, u32 val)
{
	u32 tmp = readl(base + offset);

	writel(tmp | val, base + offset);

	/* read back so the write is posted before we return */
	readl(base + offset);
}
/* Clear @val bits in the 32-bit register at @base + @offset. */
static inline void dwc3_qcom_clrbits(void __iomem *base, u32 offset, u32 val)
{
	u32 tmp = readl(base + offset);

	writel(tmp & ~val, base + offset);

	/* read back so the write is posted before we return */
	readl(base + offset);
}
/*
 * dwc3_qcom_vbus_override_enable - force VBUS-valid/session-valid state
 * @qcom: glue context
 * @enable: true to assert the override bits, false to clear them
 *
 * Sets or clears the same QSCRATCH bits depending on @enable.
 */
static void dwc3_qcom_vbus_override_enable(struct dwc3_qcom *qcom, bool enable)
{
	void (*update_bits)(void __iomem *, u32, u32) =
		enable ? dwc3_qcom_setbits : dwc3_qcom_clrbits;

	update_bits(qcom->qscratch_base, QSCRATCH_SS_PHY_CTRL,
		    LANE0_PWR_PRESENT);
	update_bits(qcom->qscratch_base, QSCRATCH_HS_PHY_CTRL,
		    UTMI_OTG_VBUS_VALID | SW_SESSVLD_SEL);
}
/*
 * extcon notifier for EXTCON_USB (VBUS) events.  A non-zero @event means a
 * VBUS cable was attached: enable the VBUS override and record device mode.
 */
static int dwc3_qcom_vbus_notifier(struct notifier_block *nb,
				   unsigned long event, void *ptr)
{
	struct dwc3_qcom *qcom = container_of(nb, struct dwc3_qcom, vbus_nb);

	/* enable vbus override for device mode */
	dwc3_qcom_vbus_override_enable(qcom, event);
	qcom->mode = event ? USB_DR_MODE_PERIPHERAL : USB_DR_MODE_HOST;

	return NOTIFY_DONE;
}
/*
 * extcon notifier for EXTCON_USB_HOST events.  A non-zero @event means a
 * host cable was attached: drop the VBUS override and record host mode.
 */
static int dwc3_qcom_host_notifier(struct notifier_block *nb,
				   unsigned long event, void *ptr)
{
	struct dwc3_qcom *qcom = container_of(nb, struct dwc3_qcom, host_nb);

	/* disable vbus override in host mode */
	dwc3_qcom_vbus_override_enable(qcom, !event);
	qcom->mode = event ? USB_DR_MODE_HOST : USB_DR_MODE_PERIPHERAL;

	return NOTIFY_DONE;
}
/*
 * Register extcon notifiers so the VBUS override tracks cable state.
 *
 * Phandle 0 of the "extcon" property reports VBUS (device-mode) events;
 * an optional phandle 1 reports host-cable events.  When the second one
 * is absent the first extcon device is used for both.  Returns 0 when no
 * "extcon" property exists, a negative errno on registration failure.
 */
static int dwc3_qcom_register_extcon(struct dwc3_qcom *qcom)
{
	struct device *dev = qcom->dev;
	struct extcon_dev *host_edev;
	int ret;

	if (!of_property_read_bool(dev->of_node, "extcon"))
		return 0;

	qcom->edev = extcon_get_edev_by_phandle(dev, 0);
	if (IS_ERR(qcom->edev))
		return dev_err_probe(dev, PTR_ERR(qcom->edev),
				     "Failed to get extcon\n");

	qcom->vbus_nb.notifier_call = dwc3_qcom_vbus_notifier;

	/* The second (host) extcon is optional */
	qcom->host_edev = extcon_get_edev_by_phandle(dev, 1);
	if (IS_ERR(qcom->host_edev))
		qcom->host_edev = NULL;

	ret = devm_extcon_register_notifier(dev, qcom->edev, EXTCON_USB,
					    &qcom->vbus_nb);
	if (ret < 0) {
		dev_err(dev, "VBUS notifier register failed\n");
		return ret;
	}

	if (qcom->host_edev)
		host_edev = qcom->host_edev;
	else
		host_edev = qcom->edev;

	qcom->host_nb.notifier_call = dwc3_qcom_host_notifier;
	ret = devm_extcon_register_notifier(dev, host_edev, EXTCON_USB_HOST,
					    &qcom->host_nb);
	if (ret < 0) {
		dev_err(dev, "Host notifier register failed\n");
		return ret;
	}

	/* Update initial VBUS override based on extcon state */
	/* Override is enabled whenever no host cable is attached */
	if (extcon_get_state(qcom->edev, EXTCON_USB) ||
	    !extcon_get_state(host_edev, EXTCON_USB_HOST))
		dwc3_qcom_vbus_notifier(&qcom->vbus_nb, true, qcom->edev);
	else
		dwc3_qcom_vbus_notifier(&qcom->vbus_nb, false, qcom->edev);

	return 0;
}
/* Enable both interconnect paths; on failure leave both disabled. */
static int dwc3_qcom_interconnect_enable(struct dwc3_qcom *qcom)
{
	int ret;

	ret = icc_enable(qcom->icc_path_ddr);
	if (ret)
		return ret;

	ret = icc_enable(qcom->icc_path_apps);
	if (ret) {
		/* Roll back the ddr path so the two stay in lockstep */
		icc_disable(qcom->icc_path_ddr);
		return ret;
	}

	return 0;
}
/* Disable both interconnect paths; on failure leave both enabled. */
static int dwc3_qcom_interconnect_disable(struct dwc3_qcom *qcom)
{
	int ret;

	ret = icc_disable(qcom->icc_path_ddr);
	if (ret)
		return ret;

	ret = icc_disable(qcom->icc_path_apps);
	if (ret) {
		/* Re-enable the ddr path so the two stay in lockstep */
		icc_enable(qcom->icc_path_ddr);
		return ret;
	}

	return 0;
}
/**
 * dwc3_qcom_interconnect_init() - Get interconnect path handles
 * and set bandwidth.
 * @qcom: Pointer to the concerned usb core.
 *
 * Acquires the "usb-ddr" and "apps-usb" interconnect paths and requests an
 * initial bandwidth based on the controller's maximum speed.  A no-op for
 * ACPI-enumerated devices.
 *
 * Return: 0 on success, negative errno otherwise (both path handles are
 * released on failure).
 */
static int dwc3_qcom_interconnect_init(struct dwc3_qcom *qcom)
{
	enum usb_device_speed max_speed;
	struct device *dev = qcom->dev;
	int ret;

	if (has_acpi_companion(dev))
		return 0;

	qcom->icc_path_ddr = of_icc_get(dev, "usb-ddr");
	if (IS_ERR(qcom->icc_path_ddr)) {
		return dev_err_probe(dev, PTR_ERR(qcom->icc_path_ddr),
				     "failed to get usb-ddr path\n");
	}

	qcom->icc_path_apps = of_icc_get(dev, "apps-usb");
	if (IS_ERR(qcom->icc_path_apps)) {
		ret = dev_err_probe(dev, PTR_ERR(qcom->icc_path_apps),
				    "failed to get apps-usb path\n");
		goto put_path_ddr;
	}

	/* Unknown maximum speed is treated as super-speed capable */
	max_speed = usb_get_maximum_speed(&qcom->dwc3->dev);
	if (max_speed >= USB_SPEED_SUPER || max_speed == USB_SPEED_UNKNOWN) {
		ret = icc_set_bw(qcom->icc_path_ddr,
				 USB_MEMORY_AVG_SS_BW, USB_MEMORY_PEAK_SS_BW);
	} else {
		ret = icc_set_bw(qcom->icc_path_ddr,
				 USB_MEMORY_AVG_HS_BW, USB_MEMORY_PEAK_HS_BW);
	}
	if (ret) {
		dev_err(dev, "failed to set bandwidth for usb-ddr path: %d\n", ret);
		goto put_path_apps;
	}

	ret = icc_set_bw(qcom->icc_path_apps, APPS_USB_AVG_BW, APPS_USB_PEAK_BW);
	if (ret) {
		dev_err(dev, "failed to set bandwidth for apps-usb path: %d\n", ret);
		goto put_path_apps;
	}

	return 0;

put_path_apps:
	icc_put(qcom->icc_path_apps);
put_path_ddr:
	icc_put(qcom->icc_path_ddr);
	return ret;
}
/**
 * dwc3_qcom_interconnect_exit() - Release interconnect path handles
 * @qcom: Pointer to the concerned usb core.
 *
 * Drops the references on the "usb-ddr" and "apps-usb" paths acquired by
 * dwc3_qcom_interconnect_init().
 */
static void dwc3_qcom_interconnect_exit(struct dwc3_qcom *qcom)
{
	icc_put(qcom->icc_path_apps);
	icc_put(qcom->icc_path_ddr);
}
/* Only usable in contexts where the role can not change. */
static bool dwc3_qcom_is_host(struct dwc3_qcom *qcom)
{
	struct dwc3 *dwc;

	/*
	 * FIXME: Fix this layering violation.
	 */
	dwc = platform_get_drvdata(qcom->dwc3);

	/* Core driver may not have probed yet. */
	if (!dwc)
		return false;

	/* A non-NULL xhci child device means the core is in host mode */
	return dwc->xhci;
}
/*
 * Return the speed of the USB2 device attached to the root-hub port, or
 * USB_SPEED_UNKNOWN when nothing is attached (or the HCD is unavailable).
 * Used at suspend time to pick the right DP/DM wakeup trigger.
 */
static enum usb_device_speed dwc3_qcom_read_usb2_speed(struct dwc3_qcom *qcom)
{
	struct dwc3 *dwc = platform_get_drvdata(qcom->dwc3);
	struct usb_device *udev;
	struct usb_hcd __maybe_unused *hcd;

	/*
	 * FIXME: Fix this layering violation.
	 */
	hcd = platform_get_drvdata(dwc->xhci);

	/*
	 * The xHCI platform device may not have probed yet, in which case no
	 * HCD has been registered and dereferencing it would crash.
	 */
	if (!hcd)
		return USB_SPEED_UNKNOWN;

	/*
	 * It is possible to query the speed of all children of
	 * USB2.0 root hub via usb_hub_for_each_child(). DWC3 code
	 * currently supports only 1 port per controller. So
	 * this is sufficient.
	 */
#ifdef CONFIG_USB
	udev = usb_hub_find_child(hcd->self.root_hub, 1);
#else
	udev = NULL;
#endif
	if (!udev)
		return USB_SPEED_UNKNOWN;

	return udev->speed;
}
/*
 * Arm one wakeup interrupt.  A zero @irq means the line is not wired up;
 * a zero @polarity leaves the trigger type unchanged.
 */
static void dwc3_qcom_enable_wakeup_irq(int irq, unsigned int polarity)
{
	if (!irq)
		return;

	if (polarity)
		irq_set_irq_type(irq, polarity);

	enable_irq(irq);
	enable_irq_wake(irq);
}
/* Disarm one wakeup interrupt; a zero @irq means the line is not wired up. */
static void dwc3_qcom_disable_wakeup_irq(int irq)
{
	if (!irq)
		return;

	disable_irq_wake(irq);
	disable_irq_nosync(irq);
}
/*
 * Disarm the wakeup interrupts that dwc3_qcom_enable_interrupts() armed,
 * mirroring its per-speed DP/DM selection.
 */
static void dwc3_qcom_disable_interrupts(struct dwc3_qcom *qcom)
{
	dwc3_qcom_disable_wakeup_irq(qcom->hs_phy_irq);

	switch (qcom->usb2_speed) {
	case USB_SPEED_LOW:
		dwc3_qcom_disable_wakeup_irq(qcom->dm_hs_phy_irq);
		break;
	case USB_SPEED_HIGH:
	case USB_SPEED_FULL:
		dwc3_qcom_disable_wakeup_irq(qcom->dp_hs_phy_irq);
		break;
	default:
		/* No device attached: both lines were armed */
		dwc3_qcom_disable_wakeup_irq(qcom->dp_hs_phy_irq);
		dwc3_qcom_disable_wakeup_irq(qcom->dm_hs_phy_irq);
		break;
	}

	dwc3_qcom_disable_wakeup_irq(qcom->ss_phy_irq);
}
/* Arm the PHY wakeup interrupts before entering suspend. */
static void dwc3_qcom_enable_interrupts(struct dwc3_qcom *qcom)
{
	dwc3_qcom_enable_wakeup_irq(qcom->hs_phy_irq, 0);

	/*
	 * Configure DP/DM line interrupts based on the USB2 device attached to
	 * the root hub port. When HS/FS device is connected, configure the DP line
	 * as falling edge to detect both disconnect and remote wakeup scenarios. When
	 * LS device is connected, configure DM line as falling edge to detect both
	 * disconnect and remote wakeup. When no device is connected, configure both
	 * DP and DM lines as rising edge to detect HS/FS/LS device connect scenario.
	 */
	if (qcom->usb2_speed == USB_SPEED_LOW) {
		dwc3_qcom_enable_wakeup_irq(qcom->dm_hs_phy_irq,
					    IRQ_TYPE_EDGE_FALLING);
	} else if ((qcom->usb2_speed == USB_SPEED_HIGH) ||
		   (qcom->usb2_speed == USB_SPEED_FULL)) {
		dwc3_qcom_enable_wakeup_irq(qcom->dp_hs_phy_irq,
					    IRQ_TYPE_EDGE_FALLING);
	} else {
		dwc3_qcom_enable_wakeup_irq(qcom->dp_hs_phy_irq,
					    IRQ_TYPE_EDGE_RISING);
		dwc3_qcom_enable_wakeup_irq(qcom->dm_hs_phy_irq,
					    IRQ_TYPE_EDGE_RISING);
	}

	dwc3_qcom_enable_wakeup_irq(qcom->ss_phy_irq, 0);
}
/*
 * Put the glue into its low-power state: drop clocks and interconnect
 * bandwidth, and - when in host mode with wakeup requested - arm the PHY
 * wakeup interrupts according to the attached USB2 device's speed.
 * Idempotent: a second call while suspended is a no-op.
 */
static int dwc3_qcom_suspend(struct dwc3_qcom *qcom, bool wakeup)
{
	u32 val;
	int i, ret;

	if (qcom->is_suspended)
		return 0;

	/* The PHY should already have entered L2; warn (but proceed) if not */
	val = readl(qcom->qscratch_base + PWR_EVNT_IRQ_STAT_REG);
	if (!(val & PWR_EVNT_LPM_IN_L2_MASK))
		dev_err(qcom->dev, "HS-PHY not in L2\n");

	for (i = qcom->num_clocks - 1; i >= 0; i--)
		clk_disable_unprepare(qcom->clks[i]);

	ret = dwc3_qcom_interconnect_disable(qcom);
	if (ret)
		dev_warn(qcom->dev, "failed to disable interconnect: %d\n", ret);

	/*
	 * The role is stable during suspend as role switching is done from a
	 * freezable workqueue.
	 */
	if (dwc3_qcom_is_host(qcom) && wakeup) {
		qcom->usb2_speed = dwc3_qcom_read_usb2_speed(qcom);
		dwc3_qcom_enable_interrupts(qcom);
	}

	qcom->is_suspended = true;

	return 0;
}
/*
 * Bring the glue back from dwc3_qcom_suspend(): disarm wakeup interrupts,
 * re-enable clocks and interconnect bandwidth, and clear stale L2 PHY
 * events.  Idempotent: a call while not suspended is a no-op.
 */
static int dwc3_qcom_resume(struct dwc3_qcom *qcom, bool wakeup)
{
	int ret;
	int i;

	if (!qcom->is_suspended)
		return 0;

	if (dwc3_qcom_is_host(qcom) && wakeup)
		dwc3_qcom_disable_interrupts(qcom);

	for (i = 0; i < qcom->num_clocks; i++) {
		ret = clk_prepare_enable(qcom->clks[i]);
		if (ret < 0) {
			/* Unwind the clocks enabled so far */
			while (--i >= 0)
				clk_disable_unprepare(qcom->clks[i]);
			return ret;
		}
	}

	ret = dwc3_qcom_interconnect_enable(qcom);
	if (ret)
		dev_warn(qcom->dev, "failed to enable interconnect: %d\n", ret);

	/* Clear existing events from PHY related to L2 in/out */
	dwc3_qcom_setbits(qcom->qscratch_base, PWR_EVNT_IRQ_STAT_REG,
			  PWR_EVNT_LPM_IN_L2_MASK | PWR_EVNT_LPM_OUT_L2_MASK);

	qcom->is_suspended = false;

	return 0;
}
/*
 * Threaded handler shared by all PHY wakeup interrupts.  Kicks a runtime
 * resume of the xHCI child so the wakeup event is processed.
 */
static irqreturn_t qcom_dwc3_resume_irq(int irq, void *data)
{
	struct dwc3_qcom *qcom = data;
	struct dwc3 *dwc = platform_get_drvdata(qcom->dwc3);

	/* If pm_suspended then let pm_resume take care of resuming h/w */
	if (qcom->pm_suspended)
		return IRQ_HANDLED;

	/*
	 * This is safe as role switching is done from a freezable workqueue
	 * and the wakeup interrupts are disabled as part of resume.
	 */
	if (dwc3_qcom_is_host(qcom))
		pm_runtime_resume(&dwc->xhci->dev);

	return IRQ_HANDLED;
}
/*
 * Switch the controller's PIPE clock source to the UTMI clock.  Used when
 * no SS PHY is wired up ("qcom,select-utmi-as-pipe-clk").  The sequence
 * (disable mux output, switch source, re-enable) with settle delays follows
 * the order required by the QSCRATCH_GENERAL_CFG mux.
 */
static void dwc3_qcom_select_utmi_clk(struct dwc3_qcom *qcom)
{
	/* Configure dwc3 to use UTMI clock as PIPE clock not present */
	dwc3_qcom_setbits(qcom->qscratch_base, QSCRATCH_GENERAL_CFG,
			  PIPE_UTMI_CLK_DIS);

	usleep_range(100, 1000);

	dwc3_qcom_setbits(qcom->qscratch_base, QSCRATCH_GENERAL_CFG,
			  PIPE_UTMI_CLK_SEL | PIPE3_PHYSTATUS_SW);

	usleep_range(100, 1000);

	dwc3_qcom_clrbits(qcom->qscratch_base, QSCRATCH_GENERAL_CFG,
			  PIPE_UTMI_CLK_DIS);
}
/*
 * Look up an optional interrupt either by @name (devicetree) or by resource
 * index @num (ACPI).  For URS ACPI devices the interrupts live on the child
 * USB device rather than on @pdev itself.
 */
static int dwc3_qcom_get_irq(struct platform_device *pdev,
			     const char *name, int num)
{
	struct dwc3_qcom *qcom = platform_get_drvdata(pdev);
	struct platform_device *pdev_irq = qcom->urs_usb ? qcom->urs_usb : pdev;
	struct device_node *np = pdev->dev.of_node;

	if (np)
		return platform_get_irq_byname_optional(pdev_irq, name);

	return platform_get_irq_optional(pdev_irq, num);
}
/*
 * Request one (optional) wakeup interrupt and stash its number in
 * *@stored_irq.  The line is kept disabled (IRQ_NOAUTOEN) until suspend,
 * when dwc3_qcom_enable_interrupts() arms it.  A non-positive @irq means
 * the interrupt is absent, which is not an error.
 */
static int dwc3_qcom_request_wakeup_irq(struct dwc3_qcom *qcom, int irq,
					const char *irqname,
					const char *devname, int *stored_irq)
{
	int ret;

	if (irq <= 0)
		return 0;

	/* Keep wakeup interrupts disabled until suspend */
	irq_set_status_flags(irq, IRQ_NOAUTOEN);
	ret = devm_request_threaded_irq(qcom->dev, irq, NULL,
					qcom_dwc3_resume_irq,
					IRQF_TRIGGER_HIGH | IRQF_ONESHOT,
					devname, qcom);
	if (ret) {
		dev_err(qcom->dev, "%s failed: %d\n", irqname, ret);
		return ret;
	}

	*stored_irq = irq;
	return 0;
}

/*
 * Look up and request the four optional PHY wakeup interrupts.  Missing
 * interrupts are skipped silently; a failing request aborts probe.
 */
static int dwc3_qcom_setup_irq(struct platform_device *pdev)
{
	struct dwc3_qcom *qcom = platform_get_drvdata(pdev);
	const struct dwc3_acpi_pdata *pdata = qcom->acpi_pdata;
	int irq;
	int ret;

	irq = dwc3_qcom_get_irq(pdev, "hs_phy_irq",
				pdata ? pdata->hs_phy_irq_index : -1);
	ret = dwc3_qcom_request_wakeup_irq(qcom, irq, "hs_phy_irq",
					   "qcom_dwc3 HS", &qcom->hs_phy_irq);
	if (ret)
		return ret;

	irq = dwc3_qcom_get_irq(pdev, "dp_hs_phy_irq",
				pdata ? pdata->dp_hs_phy_irq_index : -1);
	ret = dwc3_qcom_request_wakeup_irq(qcom, irq, "dp_hs_phy_irq",
					   "qcom_dwc3 DP_HS",
					   &qcom->dp_hs_phy_irq);
	if (ret)
		return ret;

	irq = dwc3_qcom_get_irq(pdev, "dm_hs_phy_irq",
				pdata ? pdata->dm_hs_phy_irq_index : -1);
	ret = dwc3_qcom_request_wakeup_irq(qcom, irq, "dm_hs_phy_irq",
					   "qcom_dwc3 DM_HS",
					   &qcom->dm_hs_phy_irq);
	if (ret)
		return ret;

	irq = dwc3_qcom_get_irq(pdev, "ss_phy_irq",
				pdata ? pdata->ss_phy_irq_index : -1);
	ret = dwc3_qcom_request_wakeup_irq(qcom, irq, "ss_phy_irq",
					   "qcom_dwc3 SS", &qcom->ss_phy_irq);
	if (ret)
		return ret;

	return 0;
}
/*
 * Acquire and enable @count clocks from the devicetree node.  On any
 * failure all previously acquired clocks are disabled and released.
 * A missing node or zero count is not an error (ACPI platforms have no
 * DT clocks).
 */
static int dwc3_qcom_clk_init(struct dwc3_qcom *qcom, int count)
{
	struct device *dev = qcom->dev;
	struct device_node *np = dev->of_node;
	int i;

	if (!np || !count)
		return 0;

	/* A negative count is an error from of_clk_get_parent_count() callers */
	if (count < 0)
		return count;

	qcom->num_clocks = count;

	qcom->clks = devm_kcalloc(dev, qcom->num_clocks,
				  sizeof(struct clk *), GFP_KERNEL);
	if (!qcom->clks)
		return -ENOMEM;

	for (i = 0; i < qcom->num_clocks; i++) {
		struct clk *clk;
		int ret;

		clk = of_clk_get(np, i);
		if (IS_ERR(clk)) {
			/* Release the clocks obtained so far */
			while (--i >= 0)
				clk_put(qcom->clks[i]);
			return PTR_ERR(clk);
		}

		ret = clk_prepare_enable(clk);
		if (ret < 0) {
			while (--i >= 0) {
				clk_disable_unprepare(qcom->clks[i]);
				clk_put(qcom->clks[i]);
			}
			clk_put(clk);
			return ret;
		}

		qcom->clks[i] = clk;
	}

	return 0;
}
/*
 * Software-node properties attached to the dwc3 core child on ACPI
 * platforms, where no devicetree properties exist: force host mode.
 */
static const struct property_entry dwc3_qcom_acpi_properties[] = {
	PROPERTY_ENTRY_STRING("dr_mode", "host"),
	{}
};

static const struct software_node dwc3_qcom_swnode = {
	.properties = dwc3_qcom_acpi_properties,
};
/*
 * Create and register the dwc3 core child device by hand on ACPI platforms
 * (no devicetree child to populate from).  The child inherits the parent's
 * DMA configuration and gets a MEM resource carved out of the glue's
 * register space plus the controller IRQ.
 *
 * On failure the allocated child device is put before returning, so the
 * caller must not put it again.
 */
static int dwc3_qcom_acpi_register_core(struct platform_device *pdev)
{
	struct dwc3_qcom *qcom = platform_get_drvdata(pdev);
	struct device *dev = &pdev->dev;
	struct resource *res, *child_res = NULL;
	struct platform_device *pdev_irq = qcom->urs_usb ? qcom->urs_usb :
							   pdev;
	int irq;
	int ret;

	qcom->dwc3 = platform_device_alloc("dwc3", PLATFORM_DEVID_AUTO);
	if (!qcom->dwc3)
		return -ENOMEM;

	/* Propagate the parent's DMA settings to the child */
	qcom->dwc3->dev.parent = dev;
	qcom->dwc3->dev.type = dev->type;
	qcom->dwc3->dev.dma_mask = dev->dma_mask;
	qcom->dwc3->dev.dma_parms = dev->dma_parms;
	qcom->dwc3->dev.coherent_dma_mask = dev->coherent_dma_mask;

	child_res = kcalloc(2, sizeof(*child_res), GFP_KERNEL);
	if (!child_res) {
		platform_device_put(qcom->dwc3);
		return -ENOMEM;
	}

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res) {
		dev_err(&pdev->dev, "failed to get memory resource\n");
		ret = -ENODEV;
		goto out;
	}

	child_res[0].flags = res->flags;
	child_res[0].start = res->start;
	/*
	 * NOTE(review): the inclusive resource end is set to start + size,
	 * not start + size - 1 - verify this off-by-one is intended.
	 */
	child_res[0].end = child_res[0].start +
		qcom->acpi_pdata->dwc3_core_base_size;

	irq = platform_get_irq(pdev_irq, 0);
	if (irq < 0) {
		ret = irq;
		goto out;
	}
	child_res[1].flags = IORESOURCE_IRQ;
	child_res[1].start = child_res[1].end = irq;

	/* platform_device_add_resources() copies child_res; freed below */
	ret = platform_device_add_resources(qcom->dwc3, child_res, 2);
	if (ret) {
		dev_err(&pdev->dev, "failed to add resources\n");
		goto out;
	}

	ret = device_add_software_node(&qcom->dwc3->dev, &dwc3_qcom_swnode);
	if (ret < 0) {
		dev_err(&pdev->dev, "failed to add properties\n");
		goto out;
	}

	ret = platform_device_add(qcom->dwc3);
	if (ret) {
		dev_err(&pdev->dev, "failed to add device\n");
		device_remove_software_node(&qcom->dwc3->dev);
		goto out;
	}
	kfree(child_res);
	return 0;

out:
	platform_device_put(qcom->dwc3);
	kfree(child_res);
	return ret;
}
/*
 * Populate the devicetree children (which probes the dwc3 core) and look up
 * the resulting core platform device.  On the of_find_device_by_node()
 * failure path the populated children are left registered; the caller's
 * error path is expected to depopulate them.
 */
static int dwc3_qcom_of_register_core(struct platform_device *pdev)
{
	struct dwc3_qcom *qcom = platform_get_drvdata(pdev);
	struct device_node *np = pdev->dev.of_node, *dwc3_np;
	struct device *dev = &pdev->dev;
	int ret;

	dwc3_np = of_get_compatible_child(np, "snps,dwc3");
	if (!dwc3_np) {
		dev_err(dev, "failed to find dwc3 core child\n");
		return -ENODEV;
	}

	ret = of_platform_populate(np, NULL, NULL, dev);
	if (ret) {
		dev_err(dev, "failed to register dwc3 core - %d\n", ret);
		goto node_put;
	}

	/* Takes a reference on the core device; dropped at remove time */
	qcom->dwc3 = of_find_device_by_node(dwc3_np);
	if (!qcom->dwc3) {
		ret = -ENODEV;
		dev_err(dev, "failed to get dwc3 platform device\n");
	}

node_put:
	of_node_put(dwc3_np);

	return ret;
}
/*
 * For ACPI "URSn" nodes, find the matching "USBn" child node and create a
 * platform device for it.  Returns NULL when the node name cannot be
 * parsed or no matching child/ACPI device exists.
 */
static struct platform_device *
dwc3_qcom_create_urs_usb_platdev(struct device *dev)
{
	struct fwnode_handle *fwh;
	struct acpi_device *adev;
	const char *node_name;
	char name[8];
	int ret;
	int id;

	/* Figure out device id */
	node_name = fwnode_get_name(dev->fwnode);
	if (!node_name)
		return NULL;

	/* sscanf() returns the number of conversions; require exactly one */
	ret = sscanf(node_name, "URS%d", &id);
	if (ret != 1)
		return NULL;

	/* Find the child using name */
	snprintf(name, sizeof(name), "USB%d", id);
	fwh = fwnode_get_named_child_node(dev->fwnode, name);
	if (!fwh)
		return NULL;

	adev = to_acpi_device_node(fwh);
	if (!adev)
		return NULL;

	return acpi_create_platform_device(adev, NULL);
}
/*
 * Probe the glue: deassert resets, enable clocks, map QSCRATCH, set up
 * wakeup interrupts, register the dwc3 core child (DT or ACPI), and bring
 * up interconnect/extcon support.
 */
static int dwc3_qcom_probe(struct platform_device *pdev)
{
	struct device_node *np = pdev->dev.of_node;
	struct device *dev = &pdev->dev;
	struct dwc3_qcom *qcom;
	struct resource *res, *parent_res = NULL;
	struct resource local_res;
	int ret, i;
	bool ignore_pipe_clk;
	bool wakeup_source;

	qcom = devm_kzalloc(&pdev->dev, sizeof(*qcom), GFP_KERNEL);
	if (!qcom)
		return -ENOMEM;

	platform_set_drvdata(pdev, qcom);
	qcom->dev = &pdev->dev;

	if (has_acpi_companion(dev)) {
		qcom->acpi_pdata = acpi_device_get_match_data(dev);
		if (!qcom->acpi_pdata) {
			dev_err(&pdev->dev, "no supporting ACPI device data\n");
			return -EINVAL;
		}
	}

	qcom->resets = devm_reset_control_array_get_optional_exclusive(dev);
	if (IS_ERR(qcom->resets)) {
		return dev_err_probe(&pdev->dev, PTR_ERR(qcom->resets),
				     "failed to get resets\n");
	}

	ret = reset_control_assert(qcom->resets);
	if (ret) {
		dev_err(&pdev->dev, "failed to assert resets, err=%d\n", ret);
		return ret;
	}

	usleep_range(10, 1000);

	ret = reset_control_deassert(qcom->resets);
	if (ret) {
		dev_err(&pdev->dev, "failed to deassert resets, err=%d\n", ret);
		goto reset_assert;
	}

	ret = dwc3_qcom_clk_init(qcom, of_clk_get_parent_count(np));
	if (ret) {
		dev_err_probe(dev, ret, "failed to get clocks\n");
		goto reset_assert;
	}

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);

	if (np) {
		parent_res = res;
	} else {
		/* ACPI: carve the QSCRATCH window out of the MEM resource */
		memcpy(&local_res, res, sizeof(struct resource));
		parent_res = &local_res;

		parent_res->start = res->start +
			qcom->acpi_pdata->qscratch_base_offset;
		parent_res->end = parent_res->start +
			qcom->acpi_pdata->qscratch_base_size;

		if (qcom->acpi_pdata->is_urs) {
			qcom->urs_usb = dwc3_qcom_create_urs_usb_platdev(dev);
			if (IS_ERR_OR_NULL(qcom->urs_usb)) {
				dev_err(dev, "failed to create URS USB platdev\n");
				if (!qcom->urs_usb)
					ret = -ENODEV;
				else
					ret = PTR_ERR(qcom->urs_usb);
				goto clk_disable;
			}
		}
	}

	qcom->qscratch_base = devm_ioremap_resource(dev, parent_res);
	if (IS_ERR(qcom->qscratch_base)) {
		ret = PTR_ERR(qcom->qscratch_base);
		goto clk_disable;
	}

	ret = dwc3_qcom_setup_irq(pdev);
	if (ret) {
		dev_err(dev, "failed to setup IRQs, err=%d\n", ret);
		goto clk_disable;
	}

	/*
	 * Disable pipe_clk requirement if specified. Used when dwc3
	 * operates without SSPHY and only HS/FS/LS modes are supported.
	 */
	ignore_pipe_clk = device_property_read_bool(dev,
				"qcom,select-utmi-as-pipe-clk");
	if (ignore_pipe_clk)
		dwc3_qcom_select_utmi_clk(qcom);

	if (np)
		ret = dwc3_qcom_of_register_core(pdev);
	else
		ret = dwc3_qcom_acpi_register_core(pdev);

	if (ret) {
		dev_err(dev, "failed to register DWC3 Core, err=%d\n", ret);
		/*
		 * dwc3_qcom_acpi_register_core() drops its device reference
		 * on failure, so only the OF path needs cleanup here.
		 */
		if (np)
			of_platform_depopulate(&pdev->dev);
		goto clk_disable;
	}

	ret = dwc3_qcom_interconnect_init(qcom);
	if (ret)
		goto depopulate;

	qcom->mode = usb_get_dr_mode(&qcom->dwc3->dev);

	/* enable vbus override for device mode */
	if (qcom->mode != USB_DR_MODE_HOST)
		dwc3_qcom_vbus_override_enable(qcom, true);

	/* register extcon to override sw_vbus on Vbus change later */
	ret = dwc3_qcom_register_extcon(qcom);
	if (ret)
		goto interconnect_exit;

	wakeup_source = of_property_read_bool(dev->of_node, "wakeup-source");
	device_init_wakeup(&pdev->dev, wakeup_source);
	device_init_wakeup(&qcom->dwc3->dev, wakeup_source);

	qcom->is_suspended = false;
	pm_runtime_set_active(dev);
	pm_runtime_enable(dev);
	pm_runtime_forbid(dev);

	return 0;

interconnect_exit:
	dwc3_qcom_interconnect_exit(qcom);
depopulate:
	/*
	 * Tear down the registered core child: depopulate the DT children or
	 * unregister the manually created ACPI child (the original code
	 * wrongly put a reference on the glue's own pdev here, leaking the
	 * child and underflowing the glue's refcount).
	 */
	if (np) {
		of_platform_depopulate(&pdev->dev);
	} else {
		device_remove_software_node(&qcom->dwc3->dev);
		platform_device_unregister(qcom->dwc3);
	}
clk_disable:
	for (i = qcom->num_clocks - 1; i >= 0; i--) {
		clk_disable_unprepare(qcom->clks[i]);
		clk_put(qcom->clks[i]);
	}
reset_assert:
	reset_control_assert(qcom->resets);

	return ret;
}
/* Undo probe: remove the core child, drop clocks, interconnect and resets. */
static void dwc3_qcom_remove(struct platform_device *pdev)
{
	struct dwc3_qcom *qcom = platform_get_drvdata(pdev);
	struct device_node *np = pdev->dev.of_node;
	struct device *dev = &pdev->dev;
	int i;

	device_remove_software_node(&qcom->dwc3->dev);
	/*
	 * The ACPI child was created with platform_device_alloc()/add(), so
	 * tear it down with platform_device_unregister() (the original code
	 * wrongly put a reference on the glue's own pdev, leaking the child).
	 */
	if (np)
		of_platform_depopulate(&pdev->dev);
	else
		platform_device_unregister(qcom->dwc3);

	for (i = qcom->num_clocks - 1; i >= 0; i--) {
		clk_disable_unprepare(qcom->clks[i]);
		clk_put(qcom->clks[i]);
	}
	qcom->num_clocks = 0;

	dwc3_qcom_interconnect_exit(qcom);
	reset_control_assert(qcom->resets);

	pm_runtime_allow(dev);
	pm_runtime_disable(dev);
}
/* System-sleep suspend: enter low power and mark the sleep in progress. */
static int __maybe_unused dwc3_qcom_pm_suspend(struct device *dev)
{
	struct dwc3_qcom *qcom = dev_get_drvdata(dev);
	int ret = dwc3_qcom_suspend(qcom, device_may_wakeup(dev));

	if (ret)
		return ret;

	qcom->pm_suspended = true;

	return 0;
}
/* System-sleep resume: leave low power and clear the sleep flag. */
static int __maybe_unused dwc3_qcom_pm_resume(struct device *dev)
{
	struct dwc3_qcom *qcom = dev_get_drvdata(dev);
	int ret = dwc3_qcom_resume(qcom, device_may_wakeup(dev));

	if (ret)
		return ret;

	qcom->pm_suspended = false;

	return 0;
}
/* Runtime suspend always enables wakeup so remote wakeup can resume us. */
static int __maybe_unused dwc3_qcom_runtime_suspend(struct device *dev)
{
	struct dwc3_qcom *qcom = dev_get_drvdata(dev);

	return dwc3_qcom_suspend(qcom, true);
}
/* Runtime resume counterpart of dwc3_qcom_runtime_suspend(). */
static int __maybe_unused dwc3_qcom_runtime_resume(struct device *dev)
{
	struct dwc3_qcom *qcom = dev_get_drvdata(dev);

	return dwc3_qcom_resume(qcom, true);
}
/* System-sleep and runtime PM hooks share the suspend/resume helpers */
static const struct dev_pm_ops dwc3_qcom_dev_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(dwc3_qcom_pm_suspend, dwc3_qcom_pm_resume)
	SET_RUNTIME_PM_OPS(dwc3_qcom_runtime_suspend, dwc3_qcom_runtime_resume,
			   NULL)
};

static const struct of_device_id dwc3_qcom_of_match[] = {
	{ .compatible = "qcom,dwc3" },
	{ }
};
MODULE_DEVICE_TABLE(of, dwc3_qcom_of_match);
#ifdef CONFIG_ACPI
/* SDM845 ACPI match data: direct controller node */
static const struct dwc3_acpi_pdata sdm845_acpi_pdata = {
	.qscratch_base_offset = SDM845_QSCRATCH_BASE_OFFSET,
	.qscratch_base_size = SDM845_QSCRATCH_SIZE,
	.dwc3_core_base_size = SDM845_DWC3_CORE_SIZE,
	.hs_phy_irq_index = 1,
	.dp_hs_phy_irq_index = 4,
	.dm_hs_phy_irq_index = 3,
	.ss_phy_irq_index = 2
};

/* SDM845 ACPI match data: controller behind a URS (role-switch) node */
static const struct dwc3_acpi_pdata sdm845_acpi_urs_pdata = {
	.qscratch_base_offset = SDM845_QSCRATCH_BASE_OFFSET,
	.qscratch_base_size = SDM845_QSCRATCH_SIZE,
	.dwc3_core_base_size = SDM845_DWC3_CORE_SIZE,
	.hs_phy_irq_index = 1,
	.dp_hs_phy_irq_index = 4,
	.dm_hs_phy_irq_index = 3,
	.ss_phy_irq_index = 2,
	.is_urs = true,
};

static const struct acpi_device_id dwc3_qcom_acpi_match[] = {
	{ "QCOM2430", (unsigned long)&sdm845_acpi_pdata },
	{ "QCOM0304", (unsigned long)&sdm845_acpi_urs_pdata },
	{ "QCOM0497", (unsigned long)&sdm845_acpi_urs_pdata },
	{ "QCOM04A6", (unsigned long)&sdm845_acpi_pdata },
	{ },
};
MODULE_DEVICE_TABLE(acpi, dwc3_qcom_acpi_match);
#endif
static struct platform_driver dwc3_qcom_driver = {
	.probe		= dwc3_qcom_probe,
	.remove_new	= dwc3_qcom_remove,
	.driver		= {
		.name	= "dwc3-qcom",
		.pm	= &dwc3_qcom_dev_pm_ops,
		.of_match_table	= dwc3_qcom_of_match,
		.acpi_match_table = ACPI_PTR(dwc3_qcom_acpi_match),
	},
};

module_platform_driver(dwc3_qcom_driver);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("DesignWare DWC3 QCOM Glue Driver");
| linux-master | drivers/usb/dwc3/dwc3-qcom.c |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.